---
dataset_info:
  features:
    - name: dates
      dtype: string
    - name: type
      struct:
        - name: authorAssociation
          dtype: string
        - name: comment
          dtype: bool
        - name: issue
          dtype: bool
  splits:
    - name: peft
      num_bytes: 136951
      num_examples: 4018
    - name: hub_docs
      num_bytes: 112385
      num_examples: 3190
    - name: evaluate
      num_bytes: 59167
      num_examples: 1689
    - name: huggingface_hub
      num_bytes: 233351
      num_examples: 6701
    - name: accelerate
      num_bytes: 286487
      num_examples: 8152
    - name: datasets
      num_bytes: 759386
      num_examples: 21614
    - name: optimum
      num_bytes: 167553
      num_examples: 4824
    - name: pytorch_image_models
      num_bytes: 132620
      num_examples: 3848
    - name: gradio
      num_bytes: 812110
      num_examples: 22390
    - name: tokenizers
      num_bytes: 174301
      num_examples: 5073
    - name: diffusers
      num_bytes: 988818
      num_examples: 28327
    - name: transformers
      num_bytes: 4189676
      num_examples: 118517
    - name: safetensors
      num_bytes: 40320
      num_examples: 1162
  download_size: 2525218
  dataset_size: 8093125
configs:
  - config_name: default
    data_files:
      - split: peft
        path: data/peft-*
      - split: hub_docs
        path: data/hub_docs-*
      - split: evaluate
        path: data/evaluate-*
      - split: huggingface_hub
        path: data/huggingface_hub-*
      - split: accelerate
        path: data/accelerate-*
      - split: datasets
        path: data/datasets-*
      - split: optimum
        path: data/optimum-*
      - split: pytorch_image_models
        path: data/pytorch_image_models-*
      - split: gradio
        path: data/gradio-*
      - split: tokenizers
        path: data/tokenizers-*
      - split: diffusers
        path: data/diffusers-*
      - split: transformers
        path: data/transformers-*
      - split: safetensors
        path: data/safetensors-*
---

# Dataset Card for "issues"

[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)