File size: 28,317 Bytes
2156389 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 |
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "install-deps",
"metadata": {},
"outputs": [],
"source": [
"# Install dependencies into the current kernel's environment.\n",
"# %pip (unlike !pip) guarantees the install targets this kernel's Python.\n",
"# NOTE(review): path is 'requirement.txt' -- confirm it isn't meant to be 'requirements.txt'.\n",
"%pip install -r requirement.txt"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2b1ac199-738d-483e-93cb-6624fd9b9de4",
"metadata": {},
"outputs": [],
"source": [
"from ultralytics import YOLO\n",
"\n",
"# Load a pretrained model (recommended for training).\n",
"# The original also built an unused model from scratch via YOLO(\"yolov8n.yaml\"),\n",
"# which was immediately overwritten -- removed as dead code.\n",
"model = YOLO(\"yolov8n.pt\")\n",
"\n",
"# Train, evaluate, predict, and export.\n",
"model.train(data=\"coco128.yaml\", epochs=3)  # train on the COCO128 sample dataset\n",
"metrics = model.val()  # evaluate model performance on the validation set\n",
"results = model(\"https://ultralytics.com/images/bus.jpg\")  # predict on an image\n",
"path = model.export(format=\"onnx\")  # export the model to ONNX format"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bb73b4b6-d444-4059-b64a-03479b4ab05c",
"metadata": {},
"outputs": [],
"source": [
"\n",
" from n params module arguments \n",
" 0 -1 1 464 ultralytics.nn.modules.conv.Conv [3, 16, 3, 2] \n",
" 1 -1 1 4672 ultralytics.nn.modules.conv.Conv [16, 32, 3, 2] \n",
" 2 -1 1 7360 ultralytics.nn.modules.block.C2f [32, 32, 1, True] \n",
" 3 -1 1 18560 ultralytics.nn.modules.conv.Conv [32, 64, 3, 2] \n",
" 4 -1 2 49664 ultralytics.nn.modules.block.C2f [64, 64, 2, True] \n",
" 5 -1 1 73984 ultralytics.nn.modules.conv.Conv [64, 128, 3, 2] \n",
" 6 -1 2 197632 ultralytics.nn.modules.block.C2f [128, 128, 2, True] \n",
" 7 -1 1 295424 ultralytics.nn.modules.conv.Conv [128, 256, 3, 2] \n",
" 8 -1 1 460288 ultralytics.nn.modules.block.C2f [256, 256, 1, True] \n",
" 9 -1 1 164608 ultralytics.nn.modules.block.SPPF [256, 256, 5] \n",
" 10 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 11 [-1, 6] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
" 12 -1 1 148224 ultralytics.nn.modules.block.C2f [384, 128, 1] \n",
" 13 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 14 [-1, 4] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
" 15 -1 1 37248 ultralytics.nn.modules.block.C2f [192, 64, 1] \n",
" 16 -1 1 36992 ultralytics.nn.modules.conv.Conv [64, 64, 3, 2] \n",
" 17 [-1, 12] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
" 18 -1 1 123648 ultralytics.nn.modules.block.C2f [192, 128, 1] \n",
" 19 -1 1 147712 ultralytics.nn.modules.conv.Conv [128, 128, 3, 2] \n",
" 20 [-1, 9] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
" 21 -1 1 493056 ultralytics.nn.modules.block.C2f [384, 256, 1] \n",
" 22 [15, 18, 21] 1 897664 ultralytics.nn.modules.head.Detect [80, [64, 128, 256]] \n",
"YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients\n",
"\n",
"Ultralytics YOLOv8.0.145 🚀 Python-3.7.10 torch-1.13.1+cu117 CPU (Hygon C86 7185 32-core Processor)\n",
"WARNING ⚠️ Upgrade to torch>=2.0.0 for deterministic training.\n",
"engine/trainer: task=detect, mode=train, model=yolov8n.pt, data=coco128.yaml, epochs=3, patience=50, batch=16, imgsz=640, save=True, save_period=-1, cache=False, device=None, workers=8, project=None, name=None, exist_ok=False, pretrained=True, optimizer=auto, verbose=True, seed=0, deterministic=True, single_cls=False, rect=False, cos_lr=False, close_mosaic=10, resume=False, amp=True, fraction=1.0, profile=False, overlap_mask=True, mask_ratio=4, dropout=0.0, val=True, split=val, save_json=False, save_hybrid=False, conf=None, iou=0.7, max_det=300, half=False, dnn=False, plots=True, source=None, show=False, save_txt=False, save_conf=False, save_crop=False, show_labels=True, show_conf=True, vid_stride=1, line_width=None, visualize=False, augment=False, agnostic_nms=False, classes=None, retina_masks=False, boxes=True, format=torchscript, keras=False, optimize=False, int8=False, dynamic=False, simplify=False, opset=None, workspace=4, nms=False, lr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=7.5, cls=0.5, dfl=1.5, pose=12.0, kobj=1.0, label_smoothing=0.0, nbs=64, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0, cfg=None, tracker=botsort.yaml, save_dir=runs/detect/train7\n",
"\n",
" from n params module arguments \n",
" 0 -1 1 464 ultralytics.nn.modules.conv.Conv [3, 16, 3, 2] \n",
" 1 -1 1 4672 ultralytics.nn.modules.conv.Conv [16, 32, 3, 2] \n",
" 2 -1 1 7360 ultralytics.nn.modules.block.C2f [32, 32, 1, True] \n",
" 3 -1 1 18560 ultralytics.nn.modules.conv.Conv [32, 64, 3, 2] \n",
" 4 -1 2 49664 ultralytics.nn.modules.block.C2f [64, 64, 2, True] \n",
" 5 -1 1 73984 ultralytics.nn.modules.conv.Conv [64, 128, 3, 2] \n",
" 6 -1 2 197632 ultralytics.nn.modules.block.C2f [128, 128, 2, True] \n",
" 7 -1 1 295424 ultralytics.nn.modules.conv.Conv [128, 256, 3, 2] \n",
" 8 -1 1 460288 ultralytics.nn.modules.block.C2f [256, 256, 1, True] \n",
" 9 -1 1 164608 ultralytics.nn.modules.block.SPPF [256, 256, 5] \n",
" 10 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 11 [-1, 6] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
" 12 -1 1 148224 ultralytics.nn.modules.block.C2f [384, 128, 1] \n",
" 13 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 14 [-1, 4] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
" 15 -1 1 37248 ultralytics.nn.modules.block.C2f [192, 64, 1] \n",
" 16 -1 1 36992 ultralytics.nn.modules.conv.Conv [64, 64, 3, 2] \n",
" 17 [-1, 12] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
" 18 -1 1 123648 ultralytics.nn.modules.block.C2f [192, 128, 1] \n",
" 19 -1 1 147712 ultralytics.nn.modules.conv.Conv [128, 128, 3, 2] \n",
" 20 [-1, 9] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
" 21 -1 1 493056 ultralytics.nn.modules.block.C2f [384, 256, 1] \n",
" 22 [15, 18, 21] 1 897664 ultralytics.nn.modules.head.Detect [80, [64, 128, 256]] \n",
"Model summary: 225 layers, 3157200 parameters, 3157184 gradients\n",
"\n",
"Transferred 355/355 items from pretrained weights\n",
"TensorBoard: Start with 'tensorboard --logdir runs/detect/train7', view at http://localhost:6006/\n",
"train: Scanning /public/home/jsyadmin/datasets/coco128/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100%|██████████| 128/128 [00:00<?, ?it/s]\n",
"val: Scanning /public/home/jsyadmin/datasets/coco128/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100%|██████████| 128/128 [00:00<?, ?it/s]\n",
"Plotting labels to runs/detect/train7/labels.jpg... \n",
"optimizer: AdamW(lr=0.000119, momentum=0.9) with parameter groups 57 weight(decay=0.0), 64 weight(decay=0.0005), 63 bias(decay=0.0)\n",
"Image sizes 640 train, 640 val\n",
"Using 0 dataloader workers\n",
"Logging results to runs/detect/train7\n",
"Starting training for 3 epochs...\n",
"\n",
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
" 1/3 0G 1.096 1.365 1.202 201 640: 100%|██████████| 8/8 [00:50<00:00, 6.30s/it]\n",
" Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 4/4 [00:15<00:00, 3.85s/it]\n",
" all 128 929 0.658 0.532 0.615 0.457\n",
"\n",
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
" 2/3 0G 1.216 1.443 1.268 136 640: 100%|██████████| 8/8 [00:46<00:00, 5.85s/it]\n",
" Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 4/4 [00:15<00:00, 3.80s/it]\n",
" all 128 929 0.672 0.542 0.628 0.466\n",
"\n",
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
" 3/3 0G 1.193 1.342 1.243 206 640: 100%|██████████| 8/8 [00:47<00:00, 5.92s/it]\n",
" Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 4/4 [00:15<00:00, 3.94s/it]\n",
" all 128 929 0.69 0.532 0.631 0.468\n",
"\n",
"3 epochs completed in 0.057 hours.\n",
"Optimizer stripped from runs/detect/train7/weights/last.pt, 6.5MB\n",
"Optimizer stripped from runs/detect/train7/weights/best.pt, 6.5MB\n",
"\n",
"Validating runs/detect/train7/weights/best.pt...\n",
"Ultralytics YOLOv8.0.145 🚀 Python-3.7.10 torch-1.13.1+cu117 CPU (Hygon C86 7185 32-core Processor)\n",
"Model summary (fused): 168 layers, 3151904 parameters, 0 gradients\n",
" Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 4/4 [00:14<00:00, 3.53s/it]\n",
" all 128 929 0.691 0.531 0.631 0.469\n",
" person 128 254 0.818 0.655 0.764 0.538\n",
" bicycle 128 6 0.663 0.329 0.331 0.26\n",
" car 128 46 0.867 0.217 0.279 0.174\n",
" motorcycle 128 5 0.678 0.847 0.938 0.743\n",
" airplane 128 6 0.817 0.752 0.927 0.687\n",
" bus 128 7 0.753 0.714 0.728 0.671\n",
" train 128 3 0.565 0.667 0.83 0.681\n",
" truck 128 12 1 0.327 0.522 0.328\n",
" boat 128 6 0.299 0.167 0.36 0.233\n",
" traffic light 128 14 0.738 0.204 0.201 0.138\n",
" stop sign 128 2 1 0.971 0.995 0.707\n",
" bench 128 9 0.815 0.494 0.633 0.382\n",
" bird 128 16 0.911 0.625 0.894 0.556\n",
" cat 128 4 0.876 1 0.995 0.791\n",
" dog 128 9 0.658 0.889 0.871 0.648\n",
" horse 128 2 0.574 1 0.995 0.518\n",
" elephant 128 17 0.848 0.824 0.918 0.674\n",
" bear 128 1 0.624 1 0.995 0.895\n",
" zebra 128 4 0.865 1 0.995 0.965\n",
" giraffe 128 9 0.898 0.98 0.973 0.713\n",
" backpack 128 6 0.648 0.333 0.379 0.22\n",
" umbrella 128 18 0.814 0.5 0.668 0.465\n",
" handbag 128 19 1 0 0.207 0.126\n",
" tie 128 7 0.83 0.7 0.677 0.495\n",
" suitcase 128 4 0.61 0.787 0.828 0.592\n",
" frisbee 128 5 0.63 0.8 0.76 0.664\n",
" skis 128 1 0.746 1 0.995 0.497\n",
" snowboard 128 7 0.75 0.714 0.755 0.492\n",
" sports ball 128 6 0.689 0.378 0.502 0.274\n",
" kite 128 10 0.805 0.415 0.588 0.203\n",
" baseball bat 128 4 0.45 0.25 0.378 0.187\n",
" baseball glove 128 7 0.679 0.429 0.429 0.294\n",
" skateboard 128 5 0.811 0.6 0.599 0.446\n",
" tennis racket 128 7 0.695 0.334 0.502 0.365\n",
" bottle 128 18 0.508 0.344 0.366 0.219\n",
" wine glass 128 16 0.795 0.487 0.61 0.363\n",
" cup 128 36 0.651 0.26 0.402 0.289\n",
" fork 128 6 0.609 0.167 0.294 0.205\n",
" knife 128 16 0.745 0.549 0.628 0.362\n",
" spoon 128 22 0.511 0.191 0.349 0.182\n",
" bowl 128 28 0.693 0.679 0.641 0.528\n",
" banana 128 1 0 0 0.111 0.0369\n",
" sandwich 128 2 0.323 0.5 0.695 0.695\n",
" orange 128 4 1 0.334 0.995 0.666\n",
" broccoli 128 11 0.396 0.182 0.254 0.213\n",
" carrot 128 24 0.818 0.458 0.673 0.434\n",
" hot dog 128 2 0.634 1 0.828 0.795\n",
" pizza 128 5 0.791 1 0.995 0.843\n",
" donut 128 14 0.65 1 0.926 0.84\n",
" cake 128 4 0.725 1 0.995 0.88\n",
" chair 128 35 0.565 0.514 0.462 0.26\n",
" couch 128 6 0.423 0.333 0.614 0.469\n",
" potted plant 128 14 0.818 0.643 0.722 0.498\n",
" bed 128 3 0.793 0.667 0.775 0.676\n",
" dining table 128 13 0.512 0.615 0.514 0.41\n",
" toilet 128 2 0.644 0.5 0.745 0.721\n",
" tv 128 2 0.543 0.63 0.828 0.762\n",
" laptop 128 3 1 0 0.665 0.522\n",
" mouse 128 2 1 0 0.0443 0.00443\n",
" remote 128 8 0.874 0.5 0.578 0.502\n",
" cell phone 128 8 0 0 0.0582 0.0384\n",
" microwave 128 3 0.6 0.667 0.83 0.699\n",
" oven 128 5 0.456 0.4 0.343 0.272\n",
" sink 128 6 0.372 0.167 0.203 0.125\n",
" refrigerator 128 5 0.667 0.4 0.65 0.522\n",
" book 128 29 0.619 0.113 0.355 0.18\n",
" clock 128 9 0.778 0.781 0.891 0.721\n",
" vase 128 2 0.411 1 0.828 0.795\n",
" scissors 128 1 1 0 0.249 0.0746\n",
" teddy bear 128 21 0.939 0.333 0.636 0.431\n",
" toothbrush 128 5 0.747 0.4 0.672 0.424\n",
"Speed: 1.0ms preprocess, 91.7ms inference, 0.0ms loss, 2.2ms postprocess per image\n",
"Results saved to runs/detect/train7\n",
"Ultralytics YOLOv8.0.145 🚀 Python-3.7.10 torch-1.13.1+cu117 CPU (Hygon C86 7185 32-core Processor)\n",
"Model summary (fused): 168 layers, 3151904 parameters, 0 gradients\n",
"val: Scanning /public/home/jsyadmin/datasets/coco128/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100%|██████████| 128/128 [00:00<?, ?it/s]\n",
" Class Images Instances Box(P R mAP50 mAP50-95): 100%|██████████| 8/8 [00:12<00:00, 1.62s/it]\n",
" all 128 929 0.677 0.547 0.626 0.463\n",
" person 128 254 0.805 0.665 0.766 0.539\n",
" bicycle 128 6 0.499 0.333 0.328 0.258\n",
" car 128 46 0.816 0.217 0.279 0.174\n",
" motorcycle 128 5 0.684 0.875 0.938 0.743\n",
" airplane 128 6 0.82 0.767 0.927 0.687\n",
" bus 128 7 0.74 0.714 0.728 0.671\n",
" train 128 3 0.555 0.667 0.83 0.681\n",
" truck 128 12 1 0.371 0.496 0.286\n",
" boat 128 6 0.238 0.167 0.327 0.205\n",
" traffic light 128 14 0.738 0.204 0.201 0.138\n",
" stop sign 128 2 1 0.989 0.995 0.708\n",
" bench 128 9 0.825 0.526 0.632 0.382\n",
" bird 128 16 0.85 0.708 0.884 0.549\n",
" cat 128 4 0.867 1 0.995 0.791\n",
" dog 128 9 0.649 0.889 0.871 0.648\n",
" horse 128 2 0.513 1 0.995 0.518\n",
" elephant 128 17 0.825 0.829 0.918 0.674\n",
" bear 128 1 0.609 1 0.995 0.895\n",
" zebra 128 4 0.86 1 0.995 0.965\n",
" giraffe 128 9 0.807 0.935 0.951 0.753\n",
" backpack 128 6 0.629 0.333 0.379 0.231\n",
" umbrella 128 18 0.703 0.5 0.663 0.462\n",
" handbag 128 19 1 0.0683 0.206 0.126\n",
" tie 128 7 0.827 0.714 0.677 0.495\n",
" suitcase 128 4 0.662 1 0.828 0.592\n",
" frisbee 128 5 0.604 0.8 0.759 0.664\n",
" skis 128 1 0.627 1 0.995 0.497\n",
" snowboard 128 7 0.727 0.714 0.755 0.491\n",
" sports ball 128 6 0.698 0.396 0.502 0.274\n",
" kite 128 10 0.824 0.472 0.589 0.204\n",
" baseball bat 128 4 0.526 0.25 0.353 0.199\n",
" baseball glove 128 7 0.641 0.429 0.429 0.316\n",
" skateboard 128 5 0.863 0.6 0.599 0.44\n",
" tennis racket 128 7 0.708 0.357 0.502 0.337\n",
" bottle 128 18 0.524 0.369 0.368 0.217\n",
" wine glass 128 16 0.776 0.433 0.569 0.354\n",
" cup 128 36 0.671 0.278 0.419 0.302\n",
" fork 128 6 0.584 0.167 0.236 0.183\n",
" knife 128 16 0.65 0.562 0.607 0.353\n",
" spoon 128 22 0.595 0.202 0.351 0.197\n",
" bowl 128 28 0.681 0.687 0.667 0.517\n",
" banana 128 1 0 0 0.0829 0.0344\n",
" sandwich 128 2 0.323 0.5 0.308 0.308\n",
" orange 128 4 1 0.371 0.995 0.666\n",
" broccoli 128 11 0.358 0.182 0.263 0.214\n",
" carrot 128 24 0.736 0.58 0.669 0.428\n",
" hot dog 128 2 0.658 0.974 0.828 0.828\n",
" pizza 128 5 0.857 1 0.995 0.843\n",
" donut 128 14 0.647 1 0.926 0.841\n",
" cake 128 4 0.612 1 0.995 0.88\n",
" chair 128 35 0.54 0.514 0.451 0.249\n",
" couch 128 6 0.524 0.5 0.715 0.553\n",
" potted plant 128 14 0.784 0.643 0.722 0.498\n",
" bed 128 3 0.764 0.667 0.863 0.655\n",
" dining table 128 13 0.465 0.538 0.486 0.387\n",
" toilet 128 2 0.629 0.5 0.745 0.721\n",
" tv 128 2 0.563 0.69 0.828 0.762\n",
" laptop 128 3 1 0 0.6 0.48\n",
" mouse 128 2 1 0 0.0683 0.00683\n",
" remote 128 8 0.858 0.5 0.613 0.523\n",
" cell phone 128 8 0 0 0.0576 0.0383\n",
" microwave 128 3 0.566 0.667 0.83 0.683\n",
" oven 128 5 0.459 0.4 0.343 0.272\n",
" sink 128 6 0.383 0.167 0.166 0.104\n",
" refrigerator 128 5 0.638 0.4 0.654 0.506\n",
" book 128 29 0.638 0.123 0.376 0.201\n",
" clock 128 9 0.782 0.798 0.892 0.722\n",
" vase 128 2 0.367 1 0.828 0.795\n",
" scissors 128 1 1 0 0.249 0.0746\n",
" teddy bear 128 21 0.92 0.333 0.632 0.428\n",
" toothbrush 128 5 0.745 0.6 0.743 0.478\n",
"Speed: 0.8ms preprocess, 86.0ms inference, 0.0ms loss, 2.4ms postprocess per image\n",
"Results saved to runs/detect/val3\n",
"\n",
"Found https://ultralytics.com/images/bus.jpg locally at bus.jpg\n",
"image 1/1 /public/home/jsyadmin/yolotest/bus.jpg: 640x480 4 persons, 1 bus, 399.2ms\n",
"Speed: 67.8ms preprocess, 399.2ms inference, 44.2ms postprocess per image at shape (1, 3, 640, 480)\n",
"Ultralytics YOLOv8.0.145 🚀 Python-3.7.10 torch-1.13.1+cu117 CPU (Hygon C86 7185 32-core Processor)\n",
"\n",
"PyTorch: starting from 'runs/detect/train7/weights/best.pt' with input shape (1, 3, 640, 640) BCHW and output shape(s) (1, 84, 8400) (6.2 MB)\n",
"\n",
"ONNX: starting export with onnx 1.14.1 opset 16...\n",
"ONNX: export success ✅ 2.6s, saved as 'runs/detect/train7/weights/best.onnx' (12.2 MB)\n",
"\n",
"Export complete (3.0s)\n",
"Results saved to /public/home/jsyadmin/yolotest/runs/detect/train7/weights\n",
"Predict: yolo predict task=detect model=runs/detect/train7/weights/best.onnx imgsz=640 \n",
"Validate: yolo val task=detect model=runs/detect/train7/weights/best.onnx imgsz=640 data=None \n",
"Visualize: https://netron.app"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
|