---
dataset_info:
  features:
  - name: dates
    dtype: string
  - name: type
    struct:
    - name: authorAssociation
      dtype: string
    - name: comment
      dtype: bool
    - name: issue
      dtype: bool
  splits:
  - name: peft
    num_bytes: 132311
    num_examples: 3776
  - name: hub_docs
    num_bytes: 113192
    num_examples: 3122
  - name: evaluate
    num_bytes: 58174
    num_examples: 1674
  - name: huggingface_hub
    num_bytes: 235148
    num_examples: 6476
  - name: accelerate
    num_bytes: 285562
    num_examples: 7866
  - name: datasets
    num_bytes: 761951
    num_examples: 21461
  - name: optimum
    num_bytes: 171865
    num_examples: 4687
  - name: pytorch_image_models
    num_bytes: 140079
    num_examples: 3821
  - name: gradio
    num_bytes: 780098
    num_examples: 21500
  - name: tokenizers
    num_bytes: 179223
    num_examples: 5026
  - name: diffusers
    num_bytes: 953692
    num_examples: 27245
  - name: transformers
    num_bytes: 4185909
    num_examples: 116880
  - name: safetensors
    num_bytes: 42048
    num_examples: 1120
  download_size: 2466949
  dataset_size: 8039252
configs:
- config_name: default
  data_files:
  - split: peft
    path: data/peft-*
  - split: hub_docs
    path: data/hub_docs-*
  - split: evaluate
    path: data/evaluate-*
  - split: huggingface_hub
    path: data/huggingface_hub-*
  - split: accelerate
    path: data/accelerate-*
  - split: datasets
    path: data/datasets-*
  - split: optimum
    path: data/optimum-*
  - split: pytorch_image_models
    path: data/pytorch_image_models-*
  - split: gradio
    path: data/gradio-*
  - split: tokenizers
    path: data/tokenizers-*
  - split: diffusers
    path: data/diffusers-*
  - split: transformers
    path: data/transformers-*
  - split: safetensors
    path: data/safetensors-*
---

# Dataset Card for "issues"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
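
According to the metadata above, each record holds a `dates` string and a `type` struct (`authorAssociation`, plus `comment`/`issue` booleans), with one split per Hugging Face repository (`peft`, `transformers`, `gradio`, etc.). Below is a minimal sketch of loading one split with 🤗 `datasets`; the repo ID `"<user>/issues"` is a placeholder and should be replaced with this dataset's actual path on the Hub.

```python
from datasets import load_dataset

# Placeholder repo ID: substitute the real "<user>/issues" path on the Hub.
ds = load_dataset("<user>/issues", split="transformers")

print(ds)      # shows the features: dates (string) and the type struct
print(ds[0])   # a single record, e.g. its date and authorAssociation/comment/issue flags
```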