---
dataset_info:
features:
- name: login
dtype: string
- name: dates
dtype: string
splits:
- name: transformers
num_bytes: 4306654
num_examples: 115428
- name: peft
num_bytes: 409503
num_examples: 11016
- name: evaluate
num_bytes: 60067
num_examples: 1604
- name: huggingface_hub
num_bytes: 50035
num_examples: 1330
- name: accelerate
num_bytes: 225215
num_examples: 6039
- name: datasets
num_bytes: 660148
num_examples: 17582
- name: optimum
num_bytes: 65326
num_examples: 1743
- name: pytorch_image_models
num_bytes: 1032799
num_examples: 27698
- name: gradio
num_bytes: 879324
num_examples: 23641
- name: tokenizers
num_bytes: 289780
num_examples: 7815
- name: diffusers
num_bytes: 710122
num_examples: 19098
- name: safetensors
num_bytes: 65000
num_examples: 1754
- name: candle
num_bytes: 374881
num_examples: 10113
- name: text_generation_inference
num_bytes: 222899
num_examples: 5985
- name: chat_ui
num_bytes: 154888
num_examples: 4168
- name: hub_docs
num_bytes: 7344
num_examples: 196
download_size: 5247081
dataset_size: 9513985
configs:
- config_name: default
data_files:
- split: peft
path: data/peft-*
- split: hub_docs
path: data/hub_docs-*
- split: evaluate
path: data/evaluate-*
- split: huggingface_hub
path: data/huggingface_hub-*
- split: accelerate
path: data/accelerate-*
- split: datasets
path: data/datasets-*
- split: optimum
path: data/optimum-*
- split: pytorch_image_models
path: data/pytorch_image_models-*
- split: gradio
path: data/gradio-*
- split: tokenizers
path: data/tokenizers-*
- split: diffusers
path: data/diffusers-*
- split: transformers
path: data/transformers-*
  - split: safetensors
    path: data/safetensors-*
  - split: candle
    path: data/candle-*
  - split: text_generation_inference
    path: data/text_generation_inference-*
  - split: chat_ui
    path: data/chat_ui-*
---
# Dataset Card for "stars"
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)