---
dataset_info:
  features:
    - name: login
      dtype: string
    - name: dates
      dtype: string
  splits:
    - name: transformers
      num_bytes: 4484080
      num_examples: 120154
    - name: peft
      num_bytes: 465816
      num_examples: 12524
    - name: evaluate
      num_bytes: 64197
      num_examples: 1714
    - name: huggingface_hub
      num_bytes: 56558
      num_examples: 1503
    - name: accelerate
      num_bytes: 242677
      num_examples: 6504
    - name: datasets
      num_bytes: 676788
      num_examples: 18024
    - name: optimum
      num_bytes: 73568
      num_examples: 1964
    - name: pytorch_image_models
      num_bytes: 1069862
      num_examples: 28691
    - name: gradio
      num_bytes: 978080
      num_examples: 26282
    - name: tokenizers
      num_bytes: 301458
      num_examples: 8126
    - name: diffusers
      num_bytes: 779023
      num_examples: 20936
    - name: safetensors
      num_bytes: 75658
      num_examples: 2041
    - name: candle
      num_bytes: 447690
      num_examples: 12076
    - name: text_generation_inference
      num_bytes: 258847
      num_examples: 6951
    - name: chat_ui
      num_bytes: 197261
      num_examples: 5301
    - name: hub_docs
      num_bytes: 8224
      num_examples: 219
  download_size: 6188098
  dataset_size: 10179787
configs:
  - config_name: default
    data_files:
      - split: peft
        path: data/peft-*
      - split: hub_docs
        path: data/hub_docs-*
      - split: evaluate
        path: data/evaluate-*
      - split: huggingface_hub
        path: data/huggingface_hub-*
      - split: accelerate
        path: data/accelerate-*
      - split: datasets
        path: data/datasets-*
      - split: optimum
        path: data/optimum-*
      - split: pytorch_image_models
        path: data/pytorch_image_models-*
      - split: gradio
        path: data/gradio-*
      - split: tokenizers
        path: data/tokenizers-*
      - split: diffusers
        path: data/diffusers-*
      - split: transformers
        path: data/transformers-*
      - split: safetensors
        path: data/safetensors-*
      - split: candle
        path: data/candle-*
      - split: text_generation_inference
        path: data/text_generation_inference-*
      - split: chat_ui
        path: data/chat_ui-*
---

# Dataset Card for "stars"

More Information needed
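
Each split is named after a Hugging Face repository, and every row carries two string fields, `login` and `dates`. Below is a minimal sketch of loading one split with the `datasets` library; the repo id `lhoestq/stars` is an assumption inferred from this card's owner and name, so substitute the actual repo id if it differs.

```python
from datasets import load_dataset

# Minimal sketch: load a single split of this dataset.
# The repo id "lhoestq/stars" is an assumption based on this card's owner and name.
stars = load_dataset("lhoestq/stars", split="transformers")

print(stars)     # Dataset({features: ['login', 'dates'], num_rows: 120154})
print(stars[0])  # a dict with the 'login' and 'dates' string fields
```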