---
dataset_info:
  features:
  - name: dates
    dtype: string
  - name: type
    struct:
    - name: authorAssociation
      dtype: string
    - name: comment
      dtype: bool
    - name: issue
      dtype: bool
  splits:
  - name: peft
    num_bytes: 121361
    num_examples: 3460
  - name: hub_docs
    num_bytes: 111194
    num_examples: 3067
  - name: evaluate
    num_bytes: 57644
    num_examples: 1659
  - name: huggingface_hub
    num_bytes: 230213
    num_examples: 6352
  - name: accelerate
    num_bytes: 269699
    num_examples: 7621
  - name: datasets
    num_bytes: 754252
    num_examples: 21266
  - name: optimum
    num_bytes: 165750
    num_examples: 4536
  - name: pytorch_image_models
    num_bytes: 137695
    num_examples: 3756
  - name: gradio
    num_bytes: 756912
    num_examples: 20863
  - name: tokenizers
    num_bytes: 178334
    num_examples: 5003
  - name: diffusers
    num_bytes: 907737
    num_examples: 25947
  - name: transformers
    num_bytes: 4068316
    num_examples: 115219
  - name: safetensors
    num_bytes: 39874
    num_examples: 1063
  download_size: 2416189
  dataset_size: 7798981
configs:
- config_name: default
  data_files:
  - split: peft
    path: data/peft-*
  - split: hub_docs
    path: data/hub_docs-*
  - split: evaluate
    path: data/evaluate-*
  - split: huggingface_hub
    path: data/huggingface_hub-*
  - split: accelerate
    path: data/accelerate-*
  - split: datasets
    path: data/datasets-*
  - split: optimum
    path: data/optimum-*
  - split: pytorch_image_models
    path: data/pytorch_image_models-*
  - split: gradio
    path: data/gradio-*
  - split: tokenizers
    path: data/tokenizers-*
  - split: diffusers
    path: data/diffusers-*
  - split: transformers
    path: data/transformers-*
  - split: safetensors
    path: data/safetensors-*
---

# Dataset Card for "issues"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
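
A minimal loading sketch with 🤗 Datasets is shown below. The repository id is a placeholder, since the card does not state the Hub repo name; each split corresponds to one repository (e.g. `transformers`, `diffusers`), and each record carries a `dates` string plus a `type` struct with `authorAssociation` (string), `comment` (bool), and `issue` (bool) fields.

```python
from datasets import load_dataset

# Placeholder repo id: replace "<namespace>/issues" with the actual Hub id of this dataset.
ds = load_dataset("<namespace>/issues", split="transformers")

# Inspect one record: expected keys are "dates" and "type"
# (the latter a dict with "authorAssociation", "comment", "issue").
print(ds[0])
```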