Geohunterr committed on
Commit
ead690c
·
verified ·
1 Parent(s): af43642

Training in progress, epoch 0

Browse files
.ipynb_checkpoints/Untitled-checkpoint.ipynb CHANGED
The diff for this file is too large to render. See raw diff
 
Logs/events.out.tfevents.1718384259.e29ec45d9208.134.10 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d5fbfb0e3d22e8d7b7e86191d233a05f497f4efe9d3d40170281b56d5ad1133
3
+ size 6094
Untitled.ipynb CHANGED
@@ -429,7 +429,7 @@
429
  {
430
  "data": {
431
  "application/vnd.jupyter.widget-view+json": {
432
- "model_id": "a582824b64b041249949ed78eaade5d4",
433
  "version_major": 2,
434
  "version_minor": 0
435
  },
@@ -443,7 +443,7 @@
443
  {
444
  "data": {
445
  "application/vnd.jupyter.widget-view+json": {
446
- "model_id": "dc5f4463bc564fcda93b0e03f4539c9f",
447
  "version_major": 2,
448
  "version_minor": 0
449
  },
@@ -486,8 +486,8 @@
486
  "\n",
487
  " <div>\n",
488
  " \n",
489
- " <progress value='387' max='645' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
490
- " [387/645 1:30:38 < 1:00:44, 0.07 it/s, Epoch 2.98/5]\n",
491
  " </div>\n",
492
  " <table border=\"1\" class=\"dataframe\">\n",
493
  " <thead>\n",
@@ -498,16 +498,6 @@
498
  " </tr>\n",
499
  " </thead>\n",
500
  " <tbody>\n",
501
- " <tr>\n",
502
- " <td>0</td>\n",
503
- " <td>3.360700</td>\n",
504
- " <td>0.005133</td>\n",
505
- " </tr>\n",
506
- " <tr>\n",
507
- " <td>2</td>\n",
508
- " <td>0.003900</td>\n",
509
- " <td>0.001245</td>\n",
510
- " </tr>\n",
511
  " </tbody>\n",
512
  "</table><p>"
513
  ],
@@ -517,20 +507,6 @@
517
  },
518
  "metadata": {},
519
  "output_type": "display_data"
520
- },
521
- {
522
- "name": "stderr",
523
- "output_type": "stream",
524
- "text": [
525
- "/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n",
526
- " warnings.warn(\n",
527
- "/usr/local/lib/python3.10/dist-packages/torch/utils/checkpoint.py:31: UserWarning: None of the inputs have requires_grad=True. Gradients will be None\n",
528
- " warnings.warn(\"None of the inputs have requires_grad=True. Gradients will be None\")\n",
529
- "/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py:1132: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.\n",
530
- " warnings.warn(\n",
531
- "/usr/local/lib/python3.10/dist-packages/torch/utils/checkpoint.py:31: UserWarning: None of the inputs have requires_grad=True. Gradients will be None\n",
532
- " warnings.warn(\"None of the inputs have requires_grad=True. Gradients will be None\")\n"
533
- ]
534
  }
535
  ],
536
  "source": [
@@ -888,35 +864,27 @@
888
  "source": [
889
  "def ModelInference():\n",
890
  "\n",
891
- " InputTxt = \"What is/are all the conditions that could be detected in this 12 Lead ECG ?\"\n",
892
- " InputImg = TestBase64ECG119.replace(\"data:image/png;base64,\",\"\")\n",
 
 
893
  "\n",
894
  "\n",
 
 
895
  " InputImgTensor = Image.open(BytesIO(base64.b64decode(InputImg))).convert(\"RGB\")\n",
896
  "\n",
897
- " print(\"Img to Process\",InputImgTensor)\n",
898
  "\n",
899
  " device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
900
- " Processor = PaliGemmaProcessor.from_pretrained(\"google/paligemma-3b-pt-224\")\n",
901
- "\n",
902
- " inputs = Processor(text=InputTxt, images=InputImgTensor, return_tensors=\"pt\")\n",
903
- "\n",
904
- " model = PaliGemmaForConditionalGeneration.from_pretrained(\"/workspace\")\n",
905
- "\n",
906
- " # Autoregressively generate\n",
907
- " # We use greedy decoding here, for more fancy methods see https://huggingface.co/blog/how-to-generate\n",
908
- " generated_ids = model.generate(**inputs, max_new_tokens=496)\n",
909
- "\n",
910
- " # Next we turn each predicted token ID back into a string using the decode method\n",
911
- " # We chop of the prompt, which consists of image tokens and our text prompt\n",
912
- " # image_token_index = model.config.image_token_index\n",
913
- " # num_image_tokens = len(generated_ids[generated_ids == image_token_index])\n",
914
- " # num_text_tokens = len(Processor.tokenizer.encode(InputTxt))\n",
915
- " # num_prompt_tokens = num_image_tokens + num_text_tokens + 2\n",
916
- " \n",
917
- " generated_text = Processor.batch_decode(generated_ids, skip_special_tokens=True,\n",
918
- " clean_up_tokenization_spaces=False)[0]\n",
919
- " print(generated_text)"
920
  ]
921
  },
922
  {
 
429
  {
430
  "data": {
431
  "application/vnd.jupyter.widget-view+json": {
432
+ "model_id": "cfa272f52f4a42629057b2566b4dd820",
433
  "version_major": 2,
434
  "version_minor": 0
435
  },
 
443
  {
444
  "data": {
445
  "application/vnd.jupyter.widget-view+json": {
446
+ "model_id": "f08e1f4a5ed443a6ae6378bf2be0d759",
447
  "version_major": 2,
448
  "version_minor": 0
449
  },
 
486
  "\n",
487
  " <div>\n",
488
  " \n",
489
+ " <progress value='129' max='645' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
490
+ " [129/645 28:55 < 1:57:29, 0.07 it/s, Epoch 0.99/5]\n",
491
  " </div>\n",
492
  " <table border=\"1\" class=\"dataframe\">\n",
493
  " <thead>\n",
 
498
  " </tr>\n",
499
  " </thead>\n",
500
  " <tbody>\n",
 
 
 
 
 
 
 
 
 
 
501
  " </tbody>\n",
502
  "</table><p>"
503
  ],
 
507
  },
508
  "metadata": {},
509
  "output_type": "display_data"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
510
  }
511
  ],
512
  "source": [
 
864
  "source": [
865
  "def ModelInference():\n",
866
  "\n",
867
+ " BaseModelID = \"google/paligemma-3b-pt-224\"\n",
868
+ " config = PeftConfig.from_pretrained(\"Geohunterr/ECG_FT_PG\")\n",
869
+ " BaseModel = AutoModelForPreTraining.from_pretrained(BaseModelID)\n",
870
+ " PeftFTModel = PeftModel.from_pretrained(BaseModel,\"Geohunterr/ECG_FT_PG\")\n",
871
  "\n",
872
  "\n",
873
+ " InputTxt = \"What is/are all the conditions that could be detected in this 12 Lead ECG ?\"\n",
874
+ " InputImg = TestBase64ECG119.replace(\"data:image/png;base64,\",\"\")\n",
875
  " InputImgTensor = Image.open(BytesIO(base64.b64decode(InputImg))).convert(\"RGB\")\n",
876
  "\n",
877
+ " Processor = PaliGemmaProcessor.from_pretrained(BaseModelID)\n",
878
  "\n",
879
  " device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
880
+ " Inputs = Processor(text=InputTxt,images=InputImgTensor,padding=\"longest\",do_convert_rgb=True,return_tensors=\"pt\").to(device)\n",
881
+ " PeftFTModel.to(device)\n",
882
+ " Inputs = Inputs.to(dtype=PeftFTModel.dtype)\n",
883
+ "\n",
884
+ " with torch.no_grad():\n",
885
+ " output = PeftFTModel.generate(**Inputs,max_length=496)\n",
886
+ "\n",
887
+ " print(Processor.decode(output[0],skip_special_tokens=True))"
 
 
 
 
 
 
 
 
 
 
 
 
888
  ]
889
  },
890
  {
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d98bdb13cb5bf38cf2f90726b76f65cb6cc70b179cc979259c726f0b627aef12
3
  size 45258384
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1dba10c88c9bd05f014aeb557d65dfd6a794c7c4b00e8b40e66701815b964d51
3
  size 45258384