Duc Haba committed
Commit: db3c665
Parent(s): 2fd0492

update to pluto deploy

Files changed:
- app.py: +111 -1818
- requirements.txt: +2 -14

app.py CHANGED
@@ -1,34 +1,27 @@
 # [BEGIN OF pluto_happy]
-
-
-import
-import cryptography.fernet
-from flopth import flopth
-import huggingface_hub
-import huggingface_hub.hf_api
 ## standard libs, no need to install
 import json
-import requests
 import time
 import os
 import random
 import re
 import sys
 import psutil
-import threading
 import socket
-import PIL
-import pandas
-import matplotlib
-import numpy
 import importlib.metadata
 import types
 import cpuinfo
-import pynvml
 import pathlib
-import re
 import subprocess
-import datetime
 # define class Pluto_Happy
 class Pluto_Happy(object):
     """
@@ -40,23 +33,10 @@ class Pluto_Happy(object):
     such as Pluto_HFace with a lot more function on HuggingFace, LLM and Transformers.

     Args:
-

     Returns:
-
-
-    Notes:
-    - All function begins with one of the following:
-        1. fetch_
-        2. push_
-        3. print_
-        4. say_
-        5. shake_hand_
-        6. make_
-        7. write_
-        8. draw_
-        9. fix_
-    _
     """

     # initialize the object
@@ -71,29 +51,6 @@ class Pluto_Happy(object):
         self._ph()
         #
         # define class var for stable division
-        self._huggingface_crkey=""
-        self._gpt_crkey=""
-        self._fkey="your_key_goes_here"
-        self._github_crkey=""
-        self._kaggle_crkey=""
-        self._meta_project_name = "?"
-        self._meta_error_rate = "?"
-        self._meta_base_model_name = "?"
-        self._meta_data_source = "?"
-        self._meta_data_info = "?"
-        self._meta_training_unix_time = 3422123
-        self._meta_ai_dev_stack = 'Fast.ai (framework), PyTorch, Pandas, Matplotlib, Numpy, Python-3.10'
-        self._meta_author = "Duc Haba"
-        self._meta_ai_assistant = "Foxy, the nine tails."
-        self._meta_genai = "Codey, GPT-4 Copilot, Gemini"
-        self._meta_human_coder = "Duc Haba and [he has no human :-) friend]"
-        self._meta_license = "GNU 3.0"
-        self._meta_notes = "Rocking and rolling"
-        #
-        self.fname_id = 0
-        self.dname_img = "img_colab/"
-        self.flops_per_sec_gcolab_cpu = 4887694725 # 925,554,209 | 9,276,182,810 | 1,722,089,747 | 5,287,694,725
-        self.flops_per_sec_gcolab_gpu = 6365360673 # 1,021,721,764 | 9,748,048,188 | 2,245,406,502 | 6,965,360,673
         self.fname_requirements = './pluto_happy/requirements.txt'
         #
         self.color_primary = '#2780e3' #blue
@@ -150,149 +107,9 @@ class Pluto_Happy(object):
         y = x
         return y
     #
-
-    def fetch_hface_files(self,
-        hf_names,
-        hf_space="duchaba/monty",
-        local_dir="/content/"):
-        """
-        Given a list of huggingface file names, download them from the provided huggingface space.
-
-        Args:
-            hf_names: (list) list of huggingface file names to download
-            hf_space: (str) huggingface space to download from.
-            local_dir: (str) local directory to store the files.
-
-        Returns:
-            status: (bool) True if download was successful, False otherwise.
-        """
-        status = True
-        # f = str(hf_names) + " is not iteratable, type: " + str(type(hf_names))
-        try:
-            for f in hf_names:
-                lo = local_dir + f
-                huggingface_hub.hf_hub_download(repo_id=hf_space,
-                    filename=f,
-                    use_auth_token=True,
-                    repo_type=huggingface_hub.REPO_TYPE_SPACE,
-                    force_filename=lo)
-        except:
-            self._pp("*Error", f)
-            status = False
-        return status
-    #
-    # push files to huggingface
-    def push_hface_files(self,
-        hf_names,
-        hf_space="duchaba/skin_cancer_diagnose",
-        local_dir="/content/"):
-        # push files to huggingface space
-
-        """
-        Pushes files to huggingface space.
-
-        The function takes a list of file names as a
-        paramater and pushes to the provided huggingface space.
-
-        Args:
-            hf_names: list(of strings), list of file names to be pushed.
-            hf_space: (str), the huggingface space to push to.
-            local_dir: (str), the local directory where the files
-                are stored.
-
-        Returns:
-            status: (bool) True if successfully pushed else False.
-        """
-        status = True
-        try:
-            for f in hf_names:
-                lo = local_dir + f
-                huggingface_hub.upload_file(
-                    path_or_fileobj=lo,
-                    path_in_repo=f,
-                    repo_id=hf_space,
-                    repo_type=huggingface_hub.REPO_TYPE_SPACE)
-        except Exception as e:
-            self._pp("*Error", e)
-            status = False
-        return status
-    #
-    # push the folder to huggingface space
-    def push_hface_folder(self,
-        hf_folder,
-        hf_space_id,
-        hf_dest_folder=None):
-
-        """
-
-        This function pushes the folder to huggingface space.
-
-        Args:
-            hf_folder: (str). The path to the folder to push.
-            hf_space_id: (str). The space id to push the folder to.
-            hf_dest_folder: (str). The destination folder in the space. If not specified,
-                the folder name will be used as the destination folder.
-
-        Returns:
-            status: (bool) True if the folder is pushed successfully, otherwise False.
-        """
-
-        status = True
-        try:
-            api = huggingface_hub.HfApi()
-            api.upload_folder(folder_path=hf_folder,
-                repo_id=hf_space_id,
-                path_in_repo=hf_dest_folder,
-                repo_type="space")
-        except Exception as e:
-            self._pp("*Error: ",e)
-            status = False
-        return status
-    #
-    # automatically restart huggingface space
-    def fix_restart_hface_periodically(self):
-
-        """
-        This function restarts the huggingface space automatically in random
-        periodically.
-
-        Args:
-            None
-
-        Returns:
-            None
-        """
-
-        while True:
-            random_time = random.randint(15800, 21600)
-            time.sleep(random_time)
-            os.execl(sys.executable, sys.executable, *sys.argv)
-        return
-    #
-    # log into huggingface
-    def shake_hand_login_hface(self, key=None):
-
-        """
-        Log into HuggingFace.
-
-        Args:
-            key: (str, optional) If key is set, this key will be used to log in,
-                otherwise the key will be decrypted from the key file.
-
-        Returns:
-            None
-        """
-
-        if (key is None):
-            x = self._make_decrypt(self._huggingface_crkey)
-        else:
-            x = key
-        huggingface_hub.login(x, add_to_git_credential=True) # non-blocking login
-        self._ph()
-        return
-    #
     # Define a function to display available CPU and RAM
-    def fetch_info_system(self):

         """
         Fetches system information, such as CPU usage and memory usage.
@@ -313,6 +130,8 @@ class Pluto_Happy(object):
             mem_total_gb = mem.total / (1024 ** 3)
             mem_available_gb = mem.available / (1024 ** 3)
             mem_used_gb = mem.used / (1024 ** 3)
             # save the results
             s += f"Total memory: {mem_total_gb:.2f} GB\n"
             s += f"Available memory: {mem_available_gb:.2f} GB\n"
@@ -324,12 +143,26 @@ class Pluto_Happy(object):
             s += f'Number of CPU cores: {cpu_info["count"]}\n'
             s += f"CPU usage: {cpu_usage}%\n"
             s += f'Python version: {cpu_info["python_version"]}'
         except Exception as e:
             s += f'CPU type: Not accessible, Error: {e}'
         return s
     #
     # fetch GPU RAM info
-    def fetch_info_gpu(self):

         """
         Function to fetch GPU RAM info
@@ -362,12 +195,26 @@ class Pluto_Happy(object):
             s += f'Free Memory: {mfree:.2f} GB\n'
             s += f'GPU allocated RAM: {round(torch.cuda.memory_allocated(0)/1024**3,2)} GB\n'
             s += f'GPU reserved RAM {round(torch.cuda.memory_reserved(0)/1024**3,2)} GB\n'
         except Exception as e:
             s += f'**Warning, No GPU: {e}'
         return s
     #
     # fetch info about host ip
-    def fetch_info_host_ip(self):
         """
         Function to fetch current host name and ip address

@@ -383,107 +230,29 @@ class Pluto_Happy(object):
             ip_address = socket.gethostbyname(hostname)
             s += f"Hostname: {hostname}\n"
             s += f"IP Address: {ip_address}\n"
         except Exception as e:
             s += f"**Warning, No hostname: {e}"
         return s
     #
-    # fetch files name
-    def fetch_file_names(self,directory, file_extension=None):
-        """
-        This function gets all the filenames with a given extension.
-        Args:
-            directory (str):
-                directory path to scan for files in.
-            file_extension (list):
-                file extension to look for or "None" (default) to get all files.
-        Returns:
-            filenames (list):
-                list of strings containing the filenames with the given extension.
-        """
-        filenames = []
-        for (root, subFolders, files) in os.walk(directory):
-            for fname in files:
-                if (file_extension is None):
-                    filenames.append(os.path.join(root, fname))
-                else:
-                    for ext in file_extension:
-                        if fname.endswith(ext):
-                            filenames.append(os.path.join(root, fname))
-        return filenames
-    #
-    # fetch the crypto key
-    def _make_crypt(self,has_new_key=False):
-
-        """
-        This function fetches the crypto key from the file or from the
-        variable created previously in the class.
-        Args:
-            has_new_key (bool):
-                is_generate flag to indicate whether the key should be
-                use as-is or fetch from the file.
-        Returns:
-            s (str):
-                string value containing the crypto key.
-        """
-        if self._fkey == 'your_key_goes_here':
-            raise Exception('Cryto Key is not correct!')
-        #
-        s=self._fkey[::-1]
-        if (has_new_key):
-            s=open(self._xkeyfile, "rb").read()
-            self._fkey = s[::-1]
-        return s
-    #
-    # generate new cryto key
-    def make_crypt_key(self):
-        """
-        This function generates a new cryto key and saves it to a file
-
-        Args:
-            None
-
-        Returns:
-            (str) crypto key
-        """
-
-        key = cryptography.fernet.Fernet.generate_key()
-        with open(self._xkeyfile, "wb") as key_file:
-            key_file.write(key[::-1]) # write in reversed
-        return key
-    #
-    # decrypt message
-    def make_decrypt(self, x):
-        """
-        Decrypts the encrypted string using the stored crypto key.
-
-        Args:
-            x: (str) to be decrypted.
-
-        Returns:
-            x: (str) decrypted version of x.
-        """
-        y = self._make_crypt()
-        f = cryptography.fernet.Fernet(y)
-        m = f.decrypt(x)
-        return m.decode()
-    #
-    # encrypt message
-    def make_crypt(self, x):
-        """
-        encrypt message
-
-        Args:
-            x (str): message to encrypt
-
-        Returns:
-            str: encrypted message
-        """
-
-        key = self._make_crypt()
-        p = x.encode()
-        f = cryptography.fernet.Fernet(key)
-        y = f.encrypt(p)
-        return y
     #
     # fetch import libraries
     def _fetch_lib_import(self):
@@ -576,92 +345,7 @@ class Pluto_Happy(object):
         f.close()
         return
     #
-
-    def fetch_info_flops(self,model, input_shape=(1, 3, 224, 224), device="cpu", max_epoch=1):
-
-        """
-        Calculates the number of floating point operations (FLOPs).
-
-        Args:
-            model (torch.nn.Module): neural network model.
-            input_shape (tuple): input tensor size.
-            device (str): device to perform computation on.
-            max_epoch (int): number of times
-
-        Returns:
-            (float): number of FLOPs, average from epoch, default is 1 epoch.
-            (float): elapsed seconds
-            (list): of string for a friendly human readable output
-        """
-
-        ttm_input = torch.rand(input_shape, dtype=torch.float32, device=device)
-        # ttm_input = torch.rand((1, 3, 224, 224), dtype=torch.float32, device=device)
-        tstart = time.time()
-        for i in range(max_epoch):
-            flops, params = flopth(model, inputs=(ttm_input,), bare_number=True)
-        tend = time.time()
-        etime = (tend - tstart)/max_epoch
-
-        # kilo = 10^3, maga = 10^6, giga = 10^9, tera=10^12, peta=10^15, exa=10^18, zetta=10^21
-        valstr = []
-        valstr.append(f'Tensors device: {device}')
-        valstr.append(f'flops: {flops:,}')
-        valstr.append(f'params: {params:,}')
-        valstr.append(f'epoch: {max_epoch}')
-        valstr.append(f'sec: {etime}')
-        # valstr += f'Tensors device: {device}, flops: {flops}, params: {params}, epoch: {max_epoch}, sec: {etime}\n'
-        x = flops/etime
-        y = (x/10**15)*86400
-        valstr.append(f'Flops/s: {x:,}')
-        valstr.append(f'PetaFlops/s: {x/10**15}')
-        valstr.append(f'PetaFlops/day: {y}')
-        valstr.append(f'1 PetaFlopsDay (on this system will take): {round(1/y, 2):,.2f} days')
-        return flops, etime, valstr
-    #
-    def print_petaflops(self):
-
-        """
-        Prints the flops and peta-flops-day calculation.
-        **WARING**: This method will break/interfer with Stable Diffusion use of LoRA.
-        I can't debug why yet.
-
-        Args:
-            None
-
-        Returns:
-            None
-        """
-        self._pp('Model', 'TTM, Tiny Torch Model on: CPU')
-        mtoy = TTM()
-        # my_model = MyModel()
-        dev = torch.device("cuda:0")
-        a,b,c = self.fetch_info_flops(mtoy)
-        y = round((a/b)/self.flops_per_sec_gcolab_cpu * 100, 2)
-        self._pp('Flops', f'{a:,} flops')
-        self._pp('Total elapse time', f'{b:,} seconds')
-        self._pp('Flops compared', f'{y:,}% of Google Colab Pro')
-        for i, val in enumerate(c):
-            self._pp(f'Info {i}', val)
-        self._ph()
-
-        try:
-            self._pp('Model', 'TTM, Tiny Torch Model on: GPU')
-            dev = torch.device("cuda:0")
-            a2,b2,c2 = self.fetch_info_flops(mtoy, device=dev)
-            y2 = round((a2/b2)/self.flops_per_sec_gcolab_gpu * 100, 2)
-            self._pp('Flops', f'{a2:,} flops')
-            self._pp('Total elapse time', f'{b2:,} seconds')
-            self._pp('Flops compared', f'{y2:,}% of Google Colab Pro')
-            d2 = round(((a2/b2)/(a/b))*100, 2)
-            self._pp('Flops GPU compared', f'{d2:,}% of CPU (or {round(d2-100,2):,}% faster)')
-            for i, val in enumerate(c2):
-                self._pp(f'Info {i}', val)
-        except Exception as e:
-            self._pp('Error', e)
-        self._ph()
-        return
-    #
-    #
     def fetch_installed_libraries(self):
         """
         Retrieves and prints the names and versions of Python libraries installed by the user,
@@ -682,6 +366,8 @@ class Pluto_Happy(object):
         for name, version in libraries.items():
             print(f"{name}: {version}")
         """
         result = subprocess.run(['pip', 'freeze'], stdout=subprocess.PIPE)

         # Decode result and split by lines
@@ -733,7 +419,7 @@ class Pluto_Happy(object):

         # Check if each line (stripped of whitespace and newline characters) exists in the reference dictionary.
         # If it exists, fetch its value. Otherwise, set the value to None.
-        results = {line.strip(): reference_dict.get(line.strip().replace('_','-'), None) for line in lines}

         return results
     # print fech_info about myself
@@ -748,23 +434,19 @@ class Pluto_Happy(object):
         Returns:
             None
         """
-
         self._ph()
         self._pp("Hello, I am", self.name)
         self._pp("I will display", "Python, Jupyter, and system info.")
-        self._pp("For
-        self._pp(
-        self._pp("...", "¯\_(ツ)_/¯")
-        self._ph()
         # system
-        self.
-
-
-        self._ph()
         # gpu
-        self._pp('GPU', 'Info')
-        x = self.fetch_info_gpu()
-        print(x)
         self._ph()
         # lib used
         self._pp('Installed lib from', self.fname_requirements)
@@ -772,6 +454,7 @@ class Pluto_Happy(object):
         x = self.fetch_match_file_dict(self.fname_requirements, self.fetch_installed_libraries())
         for item, value in x.items():
             self._pp(f'{item} version', value)
         self._ph()
         self._pp('Standard lib from', 'System')
         self._ph()
@@ -780,267 +463,55 @@ class Pluto_Happy(object):
|
|
780 |
self._pp('pandas version',pandas.__version__)
|
781 |
self._pp('PIL version', PIL.__version__)
|
782 |
self._pp('torch version', torch.__version__)
|
783 |
-
|
|
|
784 |
# host ip
|
785 |
-
self._pp('Host', 'Info')
|
786 |
x = self.fetch_info_host_ip()
|
787 |
-
print(x)
|
788 |
self._ph()
|
789 |
-
# self.make_key_environment()
|
790 |
-
#
|
791 |
-
return
|
792 |
-
#
|
793 |
-
def draw_foxy_methods(self, items):
|
794 |
-
"""
|
795 |
-
Draw all methods from Foxy, but not the "private" starting with "_" (underscore)
|
796 |
-
|
797 |
-
Args: None
|
798 |
-
|
799 |
-
Return: None
|
800 |
-
"""
|
801 |
-
actions = ["draw_", "fetch_", "fix_", "make_", "print_", "push_", "say_", "shake_hand_", "write_"]
|
802 |
-
for action in actions:
|
803 |
-
i = 0
|
804 |
-
nodes = [f"{i}"]
|
805 |
-
edges = []
|
806 |
-
labels = [action]
|
807 |
-
for item in items:
|
808 |
-
if item.startswith(action):
|
809 |
-
i += 1
|
810 |
-
labels.append(f"{item}")
|
811 |
-
nodes.append(f"{i}" )
|
812 |
-
edges.append(("0", f"{i}"))
|
813 |
-
# #
|
814 |
-
# print(nodes, type(nodes))
|
815 |
-
# print(labels, type(labels))
|
816 |
-
# print(edges, type(edges))
|
817 |
-
d = self.draw_diagram(nodes, edges, labels, horizontal=True)
|
818 |
-
display(d)
|
819 |
-
return
|
820 |
-
#
|
821 |
-
def draw_fastai_data_block_v2(self):
|
822 |
-
"""
|
823 |
-
Draw a Fast.ai DataBlock structure.
|
824 |
-
|
825 |
-
Args: None
|
826 |
-
|
827 |
-
Return: the matplotlib plot
|
828 |
-
"""
|
829 |
-
nodes = ["A1", "A2", "A3", "A4", "A5", "A6", "A7",
|
830 |
-
"B1", "B2",
|
831 |
-
"C1", "C2", "C3",
|
832 |
-
"D1", "D2",
|
833 |
-
"E1", "E2",
|
834 |
-
"F1", "F2",
|
835 |
-
"G1", "G2"]
|
836 |
-
labels = ["@1_SOURCE", "Pandas", "@2_Blocks", "@3_Splitter", "@4_Transform", "Batch_Size", "@A5_Data_Loader",
|
837 |
-
"X:Block", "Y:Block",
|
838 |
-
"get_x()", "get_items()", "get_y()",
|
839 |
-
"Random", "Pandas_col",
|
840 |
-
"Item_tfms", "Batch_tfms",
|
841 |
-
"Resize", "Augmentation",
|
842 |
-
"ImageDataLoaders\n.from_df()", "Other_Shortcut"]
|
843 |
-
edges = [("A1", "A2"), ("A2", "A3"), ("A3", "A4"), ("A4", "A5"), ("A5", "A6"), ("A6", "A7"),
|
844 |
-
("A3", "B1"), ("A3","B2"),
|
845 |
-
("B1", "C1"), ("B1", "C2"), ("B2", "C3"),
|
846 |
-
("A4", "D1"), ("A4", "D2"),
|
847 |
-
("A5", "E1"), ("A5", "E2"),
|
848 |
-
("E1", "F1"), ("E2", "F2"),
|
849 |
-
("A2", "G1"), ("A2", "G2")]
|
850 |
#
|
851 |
-
# draw it
|
852 |
-
diagram = self.draw_diagram(nodes, edges, labels, node_color=None,
|
853 |
-
horizontal=True, title='Pluto view of FastAI Datablocks 5-Steps :-)',
|
854 |
-
fontsize='8')
|
855 |
-
|
856 |
-
# display it
|
857 |
-
display(diagram)
|
858 |
-
return diagram
|
859 |
-
#
|
860 |
-
def print_dataloader_spec(self,dl):
|
861 |
-
"""
|
862 |
-
Print the Data Loarder specification.
|
863 |
-
|
864 |
-
Args: the fast.ai DataLoader
|
865 |
-
|
866 |
-
Return: None.
|
867 |
-
"""
|
868 |
-
tsize = len(dl.train_ds)
|
869 |
-
vsize = len(dl.valid_ds)
|
870 |
-
ttsize = tsize+vsize
|
871 |
-
vcsize = len(dl.vocab)
|
872 |
-
self._ph()
|
873 |
-
self._pp("Total Image", ttsize)
|
874 |
-
t = str(tsize)+" x "+str(vsize) + ", " + str(numpy.round((tsize/ttsize)*100, 0)) + "% x " + str(numpy.round((vsize/ttsize)*100, 0)) + "%"
|
875 |
-
self._pp("Train .vs. Valid Image", t)
|
876 |
-
self._pp("Batch size", dl.bs)
|
877 |
-
self._pp("Number of Vocab/Label",vcsize)
|
878 |
-
self._pp("First and Last vocab", str(dl.vocab[0]) + ", " + str(dl.vocab[-1]))
|
879 |
-
self._pp("Image type", dl.train_ds[0])
|
880 |
-
self._ph()
|
881 |
return
|
882 |
#
|
883 |
-
def
|
884 |
"""
|
885 |
-
|
886 |
-
|
887 |
-
Args: None
|
888 |
|
889 |
-
|
|
|
890 |
"""
|
891 |
self._ph()
|
892 |
-
self._pp("
|
893 |
-
self._ph()
|
894 |
-
self._pp("Error_rate", learner._meta_error_rate)
|
895 |
-
self._pp("Base Model", learner._meta_base_model_name)
|
896 |
-
self._pp("Data Source", learner._meta_data_source)
|
897 |
-
self._pp("Data Info", learner._meta_data_info)
|
898 |
try:
|
899 |
-
|
900 |
-
|
901 |
-
|
902 |
-
|
903 |
-
# self._pp("Time Stamp", learner._meta_training_unix_time)
|
904 |
-
self._pp("Learning Rate", learner.lr)
|
905 |
-
self._pp("Base Learning Rate", learner._meta_base_lr)
|
906 |
-
self._pp("Batch Size", learner.dls.bs)
|
907 |
-
self._pp("Momentum", learner.moms)
|
908 |
-
self._pp("AI Dev Stack", learner._meta_ai_dev_stack)
|
909 |
-
self._pp("Learner Vocab", learner.dls.vocab)
|
910 |
-
self._pp("Learner Vocab Size", len(learner.dls.vocab))
|
911 |
-
#
|
912 |
-
self._ph()
|
913 |
-
self._pp("Author", learner._meta_author)
|
914 |
-
self._pp("AI Assistant", learner._meta_ai_assistant)
|
915 |
-
self._pp("GenAI Coder", learner._meta_genai)
|
916 |
-
self._pp("[Friends] Human Coder", learner._meta_human_coder)
|
917 |
-
self._pp("License", learner._meta_license)
|
918 |
-
#
|
919 |
-
self._ph()
|
920 |
-
self._pp("Conclusion", learner._meta_notes)
|
921 |
-
self._ph()
|
922 |
-
return
|
923 |
-
#
|
924 |
-
def make_learner_meta_tags(self, learner):
|
925 |
-
"""
|
926 |
-
Copy all meta data from Foxy/self to learner object.
|
927 |
-
|
928 |
-
Args: (fastai.learner) the learner object
|
929 |
-
|
930 |
-
Returns: None
|
931 |
-
"""
|
932 |
-
self._meta_training_unix_time = int(time.time())
|
933 |
-
meta = ['_meta_project_name', '_meta_error_rate', '_meta_base_model_name',
|
934 |
-
'_meta_data_source', '_meta_data_info', '_meta_training_unix_time',
|
935 |
-
'_meta_ai_dev_stack', '_meta_author', '_meta_ai_assistant',
|
936 |
-
'_meta_genai', '_meta_human_coder', '_meta_license',
|
937 |
-
'_meta_notes', '_meta_base_lr']
|
938 |
-
learner.__po__ = "4475632048616261202843292032303234"
|
939 |
-
for i in meta:
|
940 |
-
a = getattr(self, i)
|
941 |
-
setattr(learner, i, a)
|
942 |
-
return
|
943 |
-
#
|
944 |
-
def make_prediction(self, img_down, learner, max=1):
|
945 |
-
"""
|
946 |
-
Predict a butterfly image from a list of downloaded images.
|
947 |
-
|
948 |
-
Args:
|
949 |
-
img_down: (list) A list of downloaded image full-path file names. The test dataset.
|
950 |
-
learner: (fastai.learner) The learner object.
|
951 |
-
max: (int) the maximum number of images to predict.
|
952 |
-
If max is negative then do the entire list.
|
953 |
-
If max is one then choose one random image from the list.
|
954 |
-
|
955 |
-
Returns:
|
956 |
-
(list) An array of the prediction (dictionary):
|
957 |
-
1. classification: (str) the classification prediction
|
958 |
-
2. accuracy score: (float) the accuracy value of the prediction
|
959 |
-
3. index: (int) the index of the prediction array
|
960 |
-
4. pre_arr: (list) the the prediction array
|
961 |
-
5. file_name: (str) the full-path file name of the image.
|
962 |
-
"""
|
963 |
-
if max <= 0:
|
964 |
-
max = len(img_down)
|
965 |
#
|
966 |
-
|
|
|
|
|
|
|
|
|
967 |
#
|
968 |
-
|
969 |
-
|
970 |
-
|
971 |
-
|
972 |
-
|
973 |
-
a1,b1,c1 = learner.predict(fastai.vision.core.PILImage.create(fname))
|
974 |
-
# print(f"This is prediction: {a1},\n index-value: {b1},\n Prediction-array: {c1}\nFilename: {fname}")
|
975 |
-
item = {
|
976 |
-
"classification": a1,
|
977 |
-
"accuracy_score": c1[b1],
|
978 |
-
"index": b1,
|
979 |
-
"pre_arr": c1,
|
980 |
-
"file_name": fname
|
981 |
-
}
|
982 |
-
val.append(item)
|
983 |
-
return val
|
984 |
-
#
|
985 |
-
def make_top_3_plus(self, pre_arr, learner):
|
986 |
-
"""
|
987 |
-
Choose the top 3 highest accuracy score plus the "other" total.
|
988 |
-
|
989 |
-
Args:
|
990 |
-
prediction array (list) a list of accuracy score in torch-value type.
|
991 |
-
learner (fastai.learner) the learner object
|
992 |
-
|
993 |
-
Return:
|
994 |
-
(list) An array of four record:
|
995 |
-
item name (str) the predict item name/vocab
|
996 |
-
accuracy score (float)
|
997 |
-
"""
|
998 |
-
predict_list = pre_arr.tolist()
|
999 |
-
top_3 = sorted(range(len(predict_list)), key=lambda k: predict_list[k], reverse=True)[:3]
|
1000 |
-
val = []
|
1001 |
-
total = 0
|
1002 |
-
for idx in top_3:
|
1003 |
-
item = {"name": learner.dls.vocab[idx], "accuracy_score": predict_list[idx]}
|
1004 |
-
val.append(item)
|
1005 |
-
total += predict_list[idx]
|
1006 |
#
|
1007 |
-
|
1008 |
-
|
1009 |
-
|
1010 |
-
|
1011 |
-
|
1012 |
-
#
|
1013 |
-
# define TTM for use in calculating flops
|
1014 |
-
class TTM(torch.nn.Module):
|
1015 |
-
|
1016 |
-
"""
|
1017 |
-
Tiny Torch Model (TTM)
|
1018 |
-
|
1019 |
-
This is a toy model consisting of four convolutional layers.
|
1020 |
-
|
1021 |
-
Args:
|
1022 |
-
input_shape (tuple): input tensor size.
|
1023 |
-
|
1024 |
-
Returns:
|
1025 |
-
(tensor): output of the model.
|
1026 |
-
"""
|
1027 |
|
1028 |
-
|
1029 |
-
|
1030 |
-
|
1031 |
-
|
1032 |
-
|
1033 |
-
|
1034 |
-
|
1035 |
-
def forward(self, x1):
|
1036 |
-
x1 = self.conv1(x1)
|
1037 |
-
x1 = self.conv2(x1)
|
1038 |
-
x1 = self.conv3(x1)
|
1039 |
-
x1 = self.conv4(x1)
|
1040 |
-
return x1
|
1041 |
-
#
|
1042 |
#
|
1043 |
-
# ----------[End of TTM model]----------
|
1044 |
# add module/method
|
1045 |
#
|
1046 |
import functools
|
@@ -1055,1191 +526,13 @@ def add_method(cls):
|
|
1055 |
#
|
1056 |
# [END OF pluto_happy]
|
1057 |
#
|
1058 |
-
# ----------[End of add_module moderator]----------
|
1059 |
-
#
|
1060 |
-
# ----------[Begin Extra Pluto functions]----------
|
1061 |
-
#
|
1062 |
-
#
|
1063 |
-
#import gradio
|
1064 |
-
import transformers
|
1065 |
-
import torch
|
1066 |
-
import diffusers
|
1067 |
-
import fastai
|
1068 |
-
from fastai.data.all import *
|
1069 |
-
from fastai.vision.all import *
|
1070 |
-
import torchvision
|
1071 |
-
|
1072 |
-
@add_method(Pluto_Happy)
|
1073 |
-
def fetch_auto_load(self, model='stabilityai/stable-diffusion-xl-base-1.0'):
|
1074 |
-
"""
|
1075 |
-
This function is used to load HuggingFace pretrained model and run inference.
|
1076 |
-
|
1077 |
-
Args:
|
1078 |
-
model: A string param. The name of a pretrained model.
|
1079 |
-
Default is "stabilityai/stable-diffusion-xl-base-1.0"
|
1080 |
-
|
1081 |
-
Returns:
|
1082 |
-
None
|
1083 |
-
"""
|
1084 |
-
|
1085 |
-
model= f'models/{model}'
|
1086 |
-
title='Pluto: Latest Image Generation'
|
1087 |
-
desc='This space Pluto Sandbox.'
|
1088 |
-
examples=['Flowers in Spring', 'Bird in Summer', 'beautiful woman close up on face in autumn.', 'Old man close up on face in winter.']
|
1089 |
-
arti = f'Note: The underline model is: {model}'
|
1090 |
-
gradio.load(model,
|
1091 |
-
title=title,
|
1092 |
-
description=desc,
|
1093 |
-
examples=examples,
|
1094 |
-
article=arti).launch(debug=True)
|
1095 |
-
return
|
1096 |
-
|
1097 |
-
# prompt: write a function using StableDiffusionXLPipeline and huggingface stabilityai/stable-diffusion-xl-base-1.0 to display text to image with documentation
|
1098 |
-
# grade: F // Nothing useable after 3 tries
|
1099 |
-
#
|
1100 |
-
# after I wrote the function, I asked it to write the documentation
|
1101 |
-
#
|
1102 |
-
# prompt: write python inline documentation for the following function: fetch_image_model
|
1103 |
-
# grade: A- // it does not said I stored the pipe in self.pipe
|
1104 |
-
|
1105 |
-
@add_method(Pluto_Happy)
|
1106 |
-
def fetch_image_model(self, model):
|
1107 |
-
|
1108 |
-
"""
|
1109 |
-
Description:
|
1110 |
-
|
1111 |
-
This function is used to load a pre-trained Stable Diffusion model.
|
1112 |
-
|
1113 |
-
Args:
|
1114 |
-
|
1115 |
-
model (str):
|
1116 |
-
The name of the model to load.
|
1117 |
-
|
1118 |
-
Returns:
|
1119 |
-
|
1120 |
-
None (the pipe is safed in self.pipe)
|
1121 |
-
|
1122 |
-
"""
|
1123 |
-
|
1124 |
-
self.device = 'cuda'
|
1125 |
-
pipe = diffusers.StableDiffusionXLPipeline.from_pretrained(
|
1126 |
-
model,
|
1127 |
-
torch_dtype=torch.float16,
|
1128 |
-
use_safetensors=True,
|
1129 |
-
variant="fp16")
|
1130 |
-
pipe.to(self.device)
|
1131 |
-
self.pipe = pipe
|
1132 |
-
self.model = model
|
1133 |
-
return
|
1134 |
-
|
1135 |
-
# prompt: write a function using torch.generator and StableDiffusionXLPipeline for image with documentation
|
1136 |
-
# grade: C+ // tecnially it works with one error, but it is not what I am looking for.
|
1137 |
-
# so I rewrite it.
|
1138 |
-
#
|
1139 |
-
# and I asked it to document my functin for me.
|
1140 |
-
#
|
1141 |
-
# prompt: write python inline documentation for the following function: draw_me
|
1142 |
-
# grade: A // it writes good doc.
|
1143 |
-
|
1144 |
-
@add_method(Pluto_Happy)
|
1145 |
-
def draw_me(self,
|
1146 |
-
prompt,
|
1147 |
-
negative_prompt,
|
1148 |
-
height,
|
1149 |
-
width,
|
1150 |
-
steps,
|
1151 |
-
seed,
|
1152 |
-
denoising_end,
|
1153 |
-
guidance_scale,
|
1154 |
-
prompt_2,
|
1155 |
-
negative_prompt_2
|
1156 |
-
):
|
1157 |
-
|
1158 |
-
"""
|
1159 |
-
Generate image using the prompt using Stable Diffusion.
|
1160 |
-
|
1161 |
-
Args:
|
1162 |
-
prompt (str): Prompt to generate image from. e.g.: "image of a cat."
|
1163 |
-
negative_prompt (str): Negative prompt to generate image from. Default: "incomplete".
|
1164 |
-
height (int): The height of the image to generate. Default: 768.
|
1165 |
-
width (int): The width of the image to generate. Default: 768.
|
1166 |
-
steps (int): Number of steps to run the diffusion model for. Default: 40.
|
1167 |
-
seed (int): Seed for the random number generator. Default: -1, any random seed
|
1168 |
-
|
1169 |
-
Returns:
|
1170 |
-
PIL image.
|
1171 |
-
"""
|
1172 |
-
|
1173 |
-
# Initialize the diffusion model.
|
1174 |
-
# self.fetch_image_model(model=model)
|
1175 |
-
|
1176 |
-
# Generate the image.
|
1177 |
-
gen = torch.Generator(device=self.device).manual_seed(seed)
|
1178 |
-
ximage = 1
|
1179 |
-
result = self.pipe(prompt,
|
1180 |
-
negative_prompt=negative_prompt,
|
1181 |
-
num_inference_steps=steps,
|
1182 |
-
height=height,
|
1183 |
-
width=width,
|
1184 |
-
denoising_end=denoising_end,
|
1185 |
-
guidance_scale=guidance_scale,
|
1186 |
-
prompt_2=prompt_2,
|
1187 |
-
negative_prompt_2=negative_prompt_2,
|
1188 |
-
num_images_per_prompt=ximage,
|
1189 |
-
generator=gen,
|
1190 |
-
output_type="pil",
|
1191 |
-
).images
|
1192 |
-
torch.cuda.empty_cache()
|
1193 |
-
return result[0]
|
1194 |
-
|
1195 |
-
# prompt: write a function to define and launch the gradio interface with text for prompt and negative prompt and slider for steps, height, width, num image per prompt and a generator and output is an image
|
1196 |
-
# grade: F // after a few tries with different prompt, nothing work. So I wrote it manually.
|
1197 |
-
#
|
1198 |
-
# prompt for doc
|
1199 |
-
# prompt: write python inline documentation for the following function:
|
1200 |
-
# grade: A // it writes good doc.
|
1201 |
-
|
1202 |
-
@add_method(Pluto_Happy)
|
1203 |
-
def fetch_gradio_interface(self, predict_fn):
|
1204 |
-
|
1205 |
-
"""
|
1206 |
-
Description:
|
1207 |
-
|
1208 |
-
This function is used to create a Gradio interface based on the `predict_fn` function.
|
1209 |
-
|
1210 |
-
Args:
|
1211 |
-
|
1212 |
-
predict_fn (function):
|
1213 |
-
The function that will be used to generate the image.
|
1214 |
-
|
1215 |
-
Returns:
|
1216 |
-
|
1217 |
-
gradio.Interface:
|
1218 |
-
The Gradio interface.
|
1219 |
-
|
1220 |
-
"""
|
1221 |
-
|
1222 |
-
inp=[
|
1223 |
-
gradio.Textbox(label='Ask me what image do you want to draw.',
|
1224 |
-
value='A picture of a beautiful model on Hawaii beach with super realistic detail, in 4K clarity, soft background focus, and vibrant colors.'),
|
1225 |
-
gradio.Textbox(label='What do you do NOT want in the picture?', value='dirty, pornographic'),
|
1226 |
-
gradio.Slider(512, 1024, 768, step=128, label='Height'),
|
1227 |
-
gradio.Slider(512, 1024, 768, step=128, label='Width'),
|
1228 |
-
gradio.Slider(5, maximum=80, value=40, step=5, label='Number of Iterations'),
|
1229 |
-
gradio.Slider(minimum=1, step=1, maximum=1000000, randomize=True, label='Seed (Generate difference picture)'),
|
1230 |
-
gradio.Slider(0, maximum=1.0, value=1, step=0.02, label='Advance: denoising_end'),
|
1231 |
-
gradio.Slider(0.5, maximum=12.0, value=7.5, step=0.5, label='Advance: guidance_scale'),
|
1232 |
-
gradio.Textbox(label='Advance: prompt_2: for the second decoder.', value=''),
|
1233 |
-
gradio.Textbox(label='Advance: negative_prompt_2: for the second decoder.', value='pixel noise, , mishape feature')
|
1234 |
-
]
|
1235 |
-
out=['image']
|
1236 |
-
title="Stable Diffusion XL model"
|
1237 |
-
desc='It is hacking time.'
|
1238 |
-
arti=f'This model is the {self.model}'
|
1239 |
-
inface = gradio.Interface(fn=predict_fn,
|
1240 |
-
inputs=inp,
|
1241 |
-
outputs=out,
|
1242 |
-
title=title,
|
1243 |
-
description=desc)
|
1244 |
-
return inface
|
1245 |
-
|
1246 |
-
# prompt: write the function from the above print dancer with documentation
|
1247 |
-
# Note: 100% correct, but I did ask it write a function for printing a dancer is ascii art, but it could not do it.
|
1248 |
-
# Note 2: I have to write the code with the comment "# print dancer" first.
|
1249 |
-
|
1250 |
-
@add_method(Pluto_Happy)
|
1251 |
-
def print_dancing(self):
|
1252 |
-
|
1253 |
-
"""
|
1254 |
-
This function prints a dancer
|
1255 |
-
|
1256 |
-
Args:
|
1257 |
-
None
|
1258 |
-
|
1259 |
-
Returns:
|
1260 |
-
None, just a print out
|
1261 |
-
"""
|
1262 |
-
|
1263 |
-
print('|-----------------------------------------------------------------------|')
|
1264 |
-
print('| o \ o / _ o __| \ / |__ o _ \ o / o |')
|
1265 |
-
print('| /|\ | /\ ___\o \o | o/ o/__ /\ | /|\ |')
|
1266 |
-
print('| / \ / \ | \ /) | ( \ /o\ / ) | (\ / | / \ / \ |')
|
1267 |
-
print('|----------------------------Yahoo_ooo----------------------------------|')
|
1268 |
-
return
|
1269 |
-
#
|
1270 |
-
|
1271 |
-
# prompt: define a function for print ascii art for the word happy
|
1272 |
-
# Note: Failed. it could not do it. so I use https://patorjk.com with efti wall
|
1273 |
-
|
1274 |
-
@add_method(Pluto_Happy)
|
1275 |
-
def print_monkey(self):
|
1276 |
-
"""
|
1277 |
-
This function prints the ascii art for the word "happy".
|
1278 |
-
|
1279 |
-
Args:
|
1280 |
-
None
|
1281 |
-
|
1282 |
-
Returns:
|
1283 |
-
None
|
1284 |
-
"""
|
1285 |
-
|
1286 |
-
print("""
|
1287 |
-
0----Monkey_See-------------.-----------------..----------------.--Monkey_Do---0
|
1288 |
-
| > < | || | ._____. |
|
1289 |
-
0 *** |.===.| !=ooO=========Ooo=!!=ooO========Ooo=! | -_- | 0
|
1290 |
-
| (o o) {}o o{} \\\\ (o o) // \\\\ (o o) // ([o o]) |
|
1291 |
-
ooO--(_)--Ooo-ooO--(_)--Ooo---------(_)----------------(_)--------ooO--(_)---Ooo
|
1292 |
-
""")
|
1293 |
-
return
|
1294 |
-
#
|
1295 |
-
# ----------[End of Pluto]----------
|
1296 |
-
#
|
1297 |
-
# ----------[Begin of Foxy]----------
|
1298 |
-
#
|
1299 |
-
# prompt: write new class Pluto_FastAI inherent from Pluto_Happy with documentation
|
1300 |
-
# Note: 90% correct, the "init()" missing self and name parameter, and super() is wrong
|
1301 |
-
# and I add in new method say_tagline() just for fun
|
1302 |
-
import duckduckgo_search
|
1303 |
-
#import IPython
|
1304 |
-
import opendatasets
|
1305 |
-
import graphviz
|
1306 |
-
import timm
|
1307 |
-
import json
|
1308 |
-
from fastai.callback.core import Callback
|
1309 |
-
#
|
1310 |
-
class Pluto_FastAI(Pluto_Happy):
|
1311 |
-
"""
|
1312 |
-
A class that inherits from Pluto_Happy, and add FastAI functionality
|
1313 |
-
|
1314 |
-
Args:
|
1315 |
-
Pluto_Happy: A class that contains common functionality to Pluto.
|
1316 |
-
Returns:
|
1317 |
-
A class that contains both the functionality of Pluto_Happy and FastAI.
|
1318 |
-
"""
|
1319 |
-
def __init__(self, name='Pluto',*args, **kwargs):
|
1320 |
-
super(Pluto_FastAI, self).__init__(name,*args, **kwargs)
|
1321 |
-
return
|
1322 |
-
#
|
1323 |
-
def say_tagline(self):
|
1324 |
-
"""
|
1325 |
-
Print the tagline. For fun and no other purpose.
|
1326 |
-
|
1327 |
-
Args:
|
1328 |
-
None.
|
1329 |
-
|
1330 |
-
Returns:
|
1331 |
-
None
|
1332 |
-
"""
|
1333 |
-
self._ph()
|
1334 |
-
self._pp('Call to arm:', 'I am Pluto the Seeker.')
|
1335 |
-
self._ph()
|
1336 |
-
return
|
1337 |
-
# (end of Pluto_FastAI class)
|
1338 |
-
|
1339 |
-
# prompt: write documentation for the function fetch_image_url_online
|
1340 |
-
# Grade: A // it can document good.
|
1341 |
-
|
1342 |
-
|
1343 |
-
# change name and imports to conform to Pluto standard
|
1344 |
-
@add_method(Pluto_FastAI)
|
1345 |
-
def fetch_image_url_online(self,term):
|
1346 |
-
|
1347 |
-
"""
|
1348 |
-
Searches for images of given term.
|
1349 |
-
|
1350 |
-
Args:
|
1351 |
-
term: The term to search for.
|
1352 |
-
|
1353 |
-
Returns:
|
1354 |
-
A list of dictionaries, each of which contains the following keys:
|
1355 |
-
title: The title of the image.
|
1356 |
-
image: The URL of the image.
|
1357 |
-
thumbnail: The URL of thumbnail of the image.
|
1358 |
-
url: The URL of the webpage containing the image.
|
1359 |
-
height: The height of the image in pixels.
|
1360 |
-
width: The width of the image in pixels.
|
1361 |
-
source: The source of the image.
|
1362 |
-
"""
|
1363 |
-
|
1364 |
-
d = duckduckgo_search.DDGS()
|
1365 |
-
val = d.images(term,size='Medium',type_image='photo',color='color')
|
1366 |
-
return val
|
1367 |
-
|
1368 |
-
# prompt: write a function to display an image from a URL with documentation
|
1369 |
-
# Grade: B- // it works, but import is in function and not clean
|
1370 |
-
|
1371 |
-
@add_method(Pluto_FastAI)
|
1372 |
-
def draw_image_url(self, url, width=0):
|
1373 |
-
|
1374 |
-
"""
|
1375 |
-
Displays an image from a given filename or url=https://...
|
1376 |
-
The image can be any format supported by PIL.
|
1377 |
-
The function uses the IPython.display library to display the image.
|
1378 |
-
|
1379 |
-
Args:
|
1380 |
-
url: The URL from which to display the image.
|
1381 |
-
|
1382 |
-
Returns:
|
1383 |
-
None
|
1384 |
-
"""
|
1385 |
-
|
1386 |
-
# Display the image.
|
1387 |
-
if (width==0):
|
1388 |
-
display(IPython.core.display.Image(url))
|
1389 |
-
else:
|
1390 |
-
display(IPython.core.display.Image(url,width=width))
|
1391 |
-
return
|
1392 |
-
|
1393 |
-
# prompt: define a function to download image, save it in a directory and display it from url with error trapping and documentation
|
1394 |
-
# Note: C- // I add imports, check for directory not exist,
|
1395 |
-
# add default filename, and change the exception to print
|
1396 |
-
|
1397 |
-
# change name and conform to Pluto coding style
|
1398 |
-
@add_method(Pluto_FastAI)
|
1399 |
-
def _fetch_one_image(self,url, directory, filename, is_display=False):
|
1400 |
-
|
1401 |
-
"""
|
1402 |
-
Downloads an image from the given URL, saves it in the given directory, and displays it.
|
1403 |
-
|
1404 |
-
Args:
|
1405 |
-
url: (str) The URL of the image to download.
|
1406 |
-
directory: (str) The directory to save the image in.
|
1407 |
-
filename: (str) The filename to save the image as.
|
1408 |
-
is_display: (bool) If True, display the image. Default is False
|
1409 |
-
|
1410 |
-
Returns:
|
1411 |
-
None
|
1412 |
-
"""
|
1413 |
-
try:
|
1414 |
-
# Download the image
|
1415 |
-
image_file = requests.get(url)
|
1416 |
-
|
1417 |
-
# Create a directory if not exist
|
1418 |
-
if os.path.exists(directory) == False:
|
1419 |
-
os.makedirs(directory)
|
1420 |
-
|
1421 |
-
# Save the image in the given directory
|
1422 |
-
with open(os.path.join(directory, filename), "wb") as f:
|
1423 |
-
f.write(image_file.content)
|
1424 |
-
f.close()
|
1425 |
-
|
1426 |
-
# Display the image
|
1427 |
-
if is_display:
|
1428 |
-
print(f'{directory}/{filename}')
|
1429 |
-
img = PIL.Image.open(f'{directory}/{filename}')
|
1430 |
-
display(img)
|
1431 |
-
except Exception as e:
|
1432 |
-
print(f'Error: Can not download or display image: {directory}/{filename}.\nError: {e}')
|
1433 |
-
return
|
1434 |
-
|
1435 |
-
# prompt: write a function call fetch_images that combine _fetch_one_image and download_images with documentation
|
1436 |
-
# Grade: B // It works, but I change filename format and add in parameter upto_max
|
1437 |
-
|
1438 |
-
# Upate to Pluto coding standard and name
|
1439 |
-
# Fetch images
|
1440 |
-
@add_method(Pluto_FastAI)
|
1441 |
-
def fetch_images_from_search(self, term, directory,
|
1442 |
-
is_display=False, upto_max=300, is_normalize_name=True):
|
1443 |
-
|
1444 |
-
"""
|
1445 |
-
Searches for images of given term, downloads them, and saves them in the given directory.
|
1446 |
-
|
1447 |
-
Args:
|
1448 |
-
term: (str) The term to search for.
|
1449 |
-
directory: (str) The directory to save the images in.
|
1450 |
-
is_display: (bool) If True, display the images. Default is False.
|
1451 |
-
upto_max: (int) The upto maximum number of images to download. Default is 300
|
1452 |
-
is_normalize_name: (bool) If True use normalize the filename (term_0x), else use origitnal name. Default is True.
|
1453 |
-
|
1454 |
-
Returns:
|
1455 |
-
A list of dictionaries, each of which contains the following keys:
|
1456 |
-
|
1457 |
-
title: The title of the image.
|
1458 |
-
image: The URL of the image.
|
1459 |
-
thumbnail: The URL of thumbnail of the image.
|
1460 |
-
url: The URL of the webpage containing the image.
|
1461 |
-
height: The height of the image in pixels.
|
1462 |
-
width: The width of the image in pixels.
|
1463 |
-
source: The source of the image.
|
1464 |
-
and
|
1465 |
-
A list of images download file name
|
1466 |
-
"""
|
1467 |
-
|
1468 |
-
# Search for images
|
1469 |
-
images_info = self.fetch_image_url_online(term)
|
1470 |
-
|
1471 |
-
# Download images
|
1472 |
-
id = 0
|
1473 |
-
img_download = []
|
1474 |
-
img_dict = []
|
1475 |
-
for ix in images_info:
|
1476 |
-
img_dict.append(ix)
|
1477 |
-
#
|
1478 |
-
url = ix['image']
|
1479 |
-
if (is_normalize_name):
|
1480 |
-
# I add the clean filename below
|
1481 |
-
filename = f"{term.replace(' ','_')}-{id}.{url.rsplit('.', 1)[-1]}"
|
1482 |
-
res = re.split('[\\?\\!\\&]', filename)
|
1483 |
-
#
|
1484 |
-
filename = res[0]
|
1485 |
-
else:
|
1486 |
-
filename = url.rsplit('/', 1)[-1]
|
1487 |
-
filename = filename.replace('+', '_')
|
1488 |
-
#
|
1489 |
-
self._fetch_one_image(url, directory, filename, is_display)
|
1490 |
-
img_download.append(f'{directory}/{filename}')
|
1491 |
-
if id == upto_max:
|
1492 |
-
break
|
1493 |
-
id += 1
|
1494 |
-
|
1495 |
-
# Display number of images download
|
1496 |
-
# print(f'Number of images download is: {id}')
|
1497 |
-
return img_dict, img_download
|
1498 |
-
|
1499 |
-
# prompt: write a function to display thumb images from a directory of images in a row and column format
|
1500 |
-
# Grade: C+ // The calculate of the indexes "ax" is wrong. I correct it. And it import numpy but not usig it.
|
1501 |
-
# Note 2: it could be not an image so add in try: except:
|
1502 |
-
|
1503 |
-
|
1504 |
-
# display thumb images
|
1505 |
-
@add_method(Pluto_FastAI)
|
1506 |
-
def draw_thumb_images(self,dname, nrows=2, ncols=4):
|
1507 |
-
|
1508 |
-
"""
|
1509 |
-
Displays thumb images from a directory or a Pandas dataframe of images in a row and column format.
|
1510 |
-
|
1511 |
-
Args:
|
1512 |
-
directory: (str or DataFrame) The directory containing the images Or the dataframe.
|
1513 |
-
nrows: (int) The number of rows to display the images in. Default is 2 rows.
|
1514 |
-
ncols: (int) The number of columns to display the images in. Defaut is 4 columns.
|
1515 |
-
|
1516 |
-
Returns:
|
1517 |
-
A list (list) of displayed images
|
1518 |
-
"""
|
1519 |
-
|
1520 |
-
# os.path.exists(directory)
|
1521 |
-
if isinstance(dname, str):
|
1522 |
-
# Get the list of images in the directory
|
1523 |
-
images = self.fetch_file_names(dname)
|
1524 |
-
else:
|
1525 |
-
# it got to be pandas dataframe
|
1526 |
-
images = dname.sample(nrows*ncols)
|
1527 |
-
|
1528 |
-
# Create a figure with the specified number of rows and columns
|
1529 |
-
fig, axes = matplotlib.pyplot.subplots(nrows=nrows, ncols=ncols)
|
1530 |
-
|
1531 |
-
# keep track of img names
|
1532 |
-
img_names = []
|
1533 |
-
# Display the images in the figure
|
1534 |
-
for i, image in enumerate(images):
|
1535 |
-
if (i == (nrows * ncols)):
|
1536 |
-
break
|
1537 |
-
ax = axes[i // ncols, i % ncols]
|
1538 |
-
try:
|
1539 |
-
ax.imshow(matplotlib.pyplot.imread(image))
|
1540 |
-
ax.axis('off')
|
1541 |
-
img_names.append(image)
|
1542 |
-
except Exception as e:
|
1543 |
-
print(f'Error: Can not display image: {image}. Error: {e}')
|
1544 |
-
|
1545 |
-
# Display the figure
|
1546 |
-
matplotlib.pyplot.tight_layout(pad=0.25)
|
1547 |
-
matplotlib.pyplot.show()
|
1548 |
-
return img_names
|
1549 |
-
|
1550 |
-
# prompt: write a new foxy function with documentation and error checking for the following: delete file with file extension not on a list, the file is in a directory
|
1551 |
-
# Grade: A // it works, and I am getting smarter on how to phrase the prompt.
|
1552 |
-
|
1553 |
-
@add_method(Pluto_FastAI)
|
1554 |
-
def fix_file_extensions(self,directory,file_ext_list):
|
1555 |
-
|
1556 |
-
"""
|
1557 |
-
Deletes files in a directory that are not in the file extension list.
|
1558 |
-
|
1559 |
-
Args:
|
1560 |
-
directory: (str) The directory containing the files.
|
1561 |
-
file_ext_list: (list) The list of file extensions to keep. e.g. (".jpg", ".png")
|
1562 |
-
|
1563 |
-
Returns:
|
1564 |
-
None:
|
1565 |
-
|
1566 |
-
"""
|
1567 |
-
|
1568 |
-
# Get the list of files in the directory
|
1569 |
-
files = self.fetch_file_names(directory)
|
1570 |
-
file_delete = []
|
1571 |
-
|
1572 |
-
# Delete files not in the extension list
|
1573 |
-
for file in files:
|
1574 |
-
file_ext = pathlib.Path(file).suffix
|
1575 |
-
if file_ext not in file_ext_list:
|
1576 |
-
os.remove(file)
|
1577 |
-
print(f'Deleting file not in extension list: {file}')
|
1578 |
-
file_delete.append(file)
|
1579 |
-
|
1580 |
-
# Display a message indicating the completion of the operation
|
1581 |
-
# print(f'Deleting files not in extension list in {directory} is done!')
|
1582 |
-
return file_delete
|
1583 |
-
|
1584 |
-
# prompt: write a function for reading images from a directory if not an image then delete it
|
1585 |
-
# Grade: A // It works, but it should close image before delete else it would be a race condition.
|
1586 |
-
|
1587 |
-
@add_method(Pluto_FastAI)
|
1588 |
-
# delete non images file
|
1589 |
-
def fix_non_image_files(self,directory):
|
1590 |
-
|
1591 |
-
"""
|
1592 |
-
Deletes non-image files from a directory.
|
1593 |
-
|
1594 |
-
Args:
|
1595 |
-
directory: The directory to delete non-image files from.
|
1596 |
-
|
1597 |
-
Returns:
|
1598 |
-
A list (list) of deleted image file name.
|
1599 |
-
A list (list) of deleted file not with image exention.
|
1600 |
-
"""
|
1601 |
-
|
1602 |
-
# Get the list of files in the directory
|
1603 |
-
img_types = ['.png', '.jpg', '.jpeg', '.gif']
|
1604 |
-
file_delete = self.fix_file_extensions(directory, img_types)
|
1605 |
-
files = self.fetch_file_names(directory)
|
1606 |
-
|
1607 |
-
#check on how many files deleted
|
1608 |
-
total_deleted = 0
|
1609 |
-
img_delete = []
|
1610 |
-
|
1611 |
-
# Delete non-image files
|
1612 |
-
for file in files:
|
1613 |
-
try:
|
1614 |
-
img = PIL.Image.open(file)
|
1615 |
-
img.draft(img.mode, (32,32))
|
1616 |
-
img.load()
|
1617 |
-
if not (img.mode == 'RGB'):
|
1618 |
-
img.close()
|
1619 |
-
os.remove(file)
|
1620 |
-
print(f'Delete image not color: {file}')
|
1621 |
-
total_deleted += 1
|
1622 |
-
except Exception as e:
|
1623 |
-
os.remove(file)
|
1624 |
-
print(f'Delete not image: {file}. Error: {e}')
|
1625 |
-
total_deleted += 1
|
1626 |
-
img_delete.append(file)
|
1627 |
-
|
1628 |
-
# Display the number of files deleted
|
1629 |
-
print(f'Total deleted: {total_deleted}. Total available imges: {len(files)-total_deleted}')
|
1630 |
-
return img_delete, file_delete
|
1631 |
-
|
1632 |
-
# prompt: write a function to create a pandas dataframe with two columns from directory of files, the first column is the full path and the second is the name of the file.
|
1633 |
-
# Grade: B // it works, but with some minor error, and I refactor the method because it is too messy.
|
1634 |
-
|
1635 |
-
# update to Pluto standard naming convention
|
1636 |
-
@add_method(Pluto_FastAI)
|
1637 |
-
def make_df_img_name(self, directory,label_fn=None):
|
1638 |
-
"""
|
1639 |
-
Creates/Bakes a pandas dataframe with two columns from directory of files,
|
1640 |
-
the first column name is: "full_path"
|
1641 |
-
and the second name is: "label". It is the filename without the index number and extension.
|
1642 |
-
|
1643 |
-
Args:
|
1644 |
-
directory: (str) The directory containing the files.
|
1645 |
-
label_fn: (funcion) Optional the function to define the label to be used.
|
1646 |
-
The defaul funtion strip all but the core file name.
|
1647 |
-
|
1648 |
-
Returns:
|
1649 |
-
A pandas dataframe with two columns: "full_path" and "label".
|
1650 |
-
"""
|
1651 |
-
|
1652 |
-
# Get the list of files in the directory
|
1653 |
-
files = self.fetch_file_names(directory)
|
1654 |
-
|
1655 |
-
# Create a pandas dataframe with two columns
|
1656 |
-
df = pandas.DataFrame(files, columns=['full_path'])
|
1657 |
-
|
1658 |
-
# Add a column for the label field
|
1659 |
-
if label_fn is None:
|
1660 |
-
df['label'] = df['full_path'].apply(lambda x: re.split('[-]', str(pathlib.Path(x).name))[0])
|
1661 |
-
else:
|
1662 |
-
df['label'] = df['full_path'].apply(label_fn)
|
1663 |
-
|
1664 |
-
# Return the dataframe
|
1665 |
-
return df
|
1666 |
-
|
1667 |
-
# prompt: write a function with documentation for the following: resize all images to a square, image in a directory, use fastai lib
|
1668 |
-
# Grade: A- // it got it right using PIL but not fastai lib, and not set the size as parameter.
|
1669 |
-
# Note: this time it got the @add_method correctly. Yahhoooo :-)
|
1670 |
-
|
1671 |
-
@add_method(Pluto_FastAI)
|
1672 |
-
def fix_resize_img_square(self, directory, img_size=512):
|
1673 |
-
|
1674 |
-
"""
|
1675 |
-
Resizes all images in a directory to a square.
|
1676 |
-
|
1677 |
-
Args:
|
1678 |
-
directory: (str) The directory containing the images.
|
1679 |
-
img_size: (int) the square image size. Default is 512.
|
1680 |
-
|
1681 |
-
Returns:
|
1682 |
-
A list (list) of image file that can not be resize:
|
1683 |
-
|
1684 |
-
"""
|
1685 |
-
|
1686 |
-
img_error = []
|
1687 |
-
# Get the list of files in the directory
|
1688 |
-
files = self.fetch_file_names(directory)
|
1689 |
-
|
1690 |
-
# Resize all images to a square
|
1691 |
-
for file in files:
|
1692 |
-
try:
|
1693 |
-
img = PIL.Image.open(file) # I fixed this with PIL.
|
1694 |
-
img = img.resize((img_size, img_size)) # I fixed this.
|
1695 |
-
img.save(file)
|
1696 |
-
except Exception as e:
|
1697 |
-
print(f'Error file: {file}')
|
1698 |
-
print(f'Error: {e}')
|
1699 |
-
img_error.append(file)
|
1700 |
-
|
1701 |
-
# Display a message indicating the completion of the resize operation
|
1702 |
-
# print(f'Resizing images in {directory} to square is done!')
|
1703 |
-
return img_error
|
1704 |
-
|
1705 |
-
# prompt: write a foxy function to download dataset from Kaggle website using opendatasets lib with documentation
|
1706 |
-
# Grade: B- // It works, but it failded at first many tried. So, I told it "opendatasets" lib.
|
1707 |
-
|
1708 |
-
|
1709 |
-
|
1710 |
-
# Function to download dataset from Kaggle website using opendatasets lib.
|
1711 |
-
@add_method(Pluto_FastAI)
|
1712 |
-
def fetch_kaggle_dataset(self,dataset_name, path_to_save):
|
1713 |
-
|
1714 |
-
"""
|
1715 |
-
Downloads a dataset from Kaggle website using opendatasets library.
|
1716 |
-
|
1717 |
-
Args:
|
1718 |
-
dataset_name: (str) The name of the dataset to download.
|
1719 |
-
path_to_save: (str) The path where the dataset will be saved.
|
1720 |
-
|
1721 |
-
Returns:
|
1722 |
-
None
|
1723 |
-
"""
|
1724 |
-
|
1725 |
-
try:
|
1726 |
-
# Check if the dataset already exists
|
1727 |
-
if os.path.exists(path_to_save):
|
1728 |
-
print(f'Dataset {dataset_name} already exists.')
|
1729 |
-
return
|
1730 |
-
|
1731 |
-
# Download the dataset
|
1732 |
-
print(f'Downloading dataset {dataset_name}...')
|
1733 |
-
opendatasets.download(dataset_name, path_to_save)
|
1734 |
-
print(f'Dataset {dataset_name} downloaded successfully.')
|
1735 |
-
|
1736 |
-
except Exception as e:
|
1737 |
-
print(f'Error downloading dataset {dataset_name}: {e}')
|
1738 |
-
return None
|
1739 |
-
|
1740 |
-
- # prompt: update function draw_diagram() with the following: change the node font to san serif
- # prompt: 8 more updates prompts. (see #scratch Fun graph divergent section)
- # Grade: B // after two hours of fun divergent, I got this to work
-
- @add_method(Pluto_FastAI)
- def draw_diagram(self, nodes, edges, labels, node_color=None,
-   horizontal=False, title='GraphViz', fontsize='10'):
-
-   """Draws a diagram using Graphviz.
-
-   Args:
-     nodes: (list) A list of nodes.
-     edges: (list) A list of edges.
-     labels: (list) A list of labels for the nodes.
-     node_color: (list) A list of colors for the nodes.
-     horizontal: (bool) A boolean value indicating whether to display the diagram horizontally.
-     fontsize: (str) The font size in point. Default is "10"
-
-   Returns:
-     A graph representation of the diagram.
-
-   Example:
-     nodes = ["A", "B", "C", "D", "E", "F"]
-     edges = [("A", "B"), ("B", "C"), ("C", "D"), ("D", "E"), ("E", "F"),
-              ("F", "A"), ("D", "B"), ("E", "C")]
-     labels = ["Node A", "Node B", "Node C", "Node D", "Node E", "Node F"]
-     mute_colors = ["#e1a06c", "#c3ced1", "#e6dfda", "#c29d9e", "#df829d",
-                    "#e1a06c", "#c3ced1", "#e6dfda", "#c29d9e", "#df829d"]
-     # draw it
-     diagram = draw_diagram(nodes, edges, labels, mute_colors, horizontal=True, title='Pluto Path to Success')
-
-     # display it
-     display(diagram)
-   """
-
-   mute_colors = ["#e1a06c", "#c3ced1", "#e6dfda", "#c29d9e", "#df829d",
-                  "#e1a06c", "#c3ced1", "#e6dfda", "#c29d9e", "#df829d",
-                  "#e1a06c", "#c3ced1", "#e6dfda", "#c29d9e", "#df829d",
-                  "#e1a06c", "#c3ced1", "#e6dfda", "#c29d9e", "#df829d",
-                  "#e1a06c", "#c3ced1", "#e6dfda", "#c29d9e", "#df829d",
-                  "#e1a06c", "#c3ced1", "#e6dfda", "#c29d9e", "#df829d",
-                  "#e1a06c", "#c3ced1", "#e6dfda", "#c29d9e", "#df829d",
-                  "#e1a06c", "#c3ced1", "#e6dfda", "#c29d9e", "#df829d"]
-   if node_color is None:
-     node_color = mute_colors
-
-   # Create a graph object.
-   graph = graphviz.Digraph()
-
-   # Add the nodes.
-   for i, node in enumerate(nodes):
-     graph.node(node, label=labels[i], color=node_color[i],
-       fontname='sans-serif', style='filled', fontsize=fontsize)
-
-   # Add the edges.
-   for edge in edges:
-     graph.edge(edge[0], edge[1])
-
-   # Set the title.
-   graph.attr('graph', label=title)
-
-   if horizontal:
-     graph.attr('graph', rankdir='LR')
-   else:
-     graph.attr('graph', rankdir='TB')
-
-   # Return the string representation of the diagram.
-   return graph
-
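# Example usage (a minimal sketch reusing the nodes/edges/labels from the
# docstring example above; render() is the standard graphviz call for writing
# the diagram to disk, and the output filename here is hypothetical):
# diagram = foxy.draw_diagram(nodes, edges, labels, horizontal=True, title='My Graph')
# diagram.render('my_graph', format='png', cleanup=True)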
- # prompt: None
- # Note: I am unsure how to describe the following function
-
- # draw GraphViz for FastAI data block
- @add_method(Pluto_FastAI)
- def draw_fastai_data_block(self):
-   nodes = ["A1", "A2", "A3", "A4", "A5", "A6", "A7",
-     "B1", "B2",
-     "C1", "C2", "C3",
-     "D1", "D2",
-     "E1", "E2",
-     "F1", "F2",
-     "G1", "G2"]
-   labels = ["@1_SOURCE", "Pandas", "@2_Blocks", "@3_Splitter", "@4_Transform", "Batch_Size", "@A5_Data_Loader",
-     "X:Block", "Y:Block",
-     "get_x()", "get_items()", "get_y()",
-     "Random", "Pandas_col",
-     "Item_tfms", "Batch_tfms",
-     "Resize", "Augmentation",
-     "ImageDataLoaders\n.from_df()", "Other_Shortcut"]
-   edges = [("A1", "A2"), ("A2", "A3"), ("A3", "A4"), ("A4", "A5"), ("A5", "A6"), ("A6", "A7"),
-     ("A3", "B1"), ("A3", "B2"),
-     ("B1", "C1"), ("B1", "C2"), ("B2", "C3"),
-     ("A4", "D1"), ("A4", "D2"),
-     ("A5", "E1"), ("A5", "E2"),
-     ("E1", "F1"), ("E2", "F2"),
-     ("A2", "G1"), ("A2", "G2")]
-   #
-   # draw it
-   diagram = self.draw_diagram(nodes, edges, labels, node_color=None,
-     horizontal=True, title='Pluto view of FastAI Datablocks 5-Steps :-)',
-     fontsize='8')
-
-   # display it
-   display(diagram)
-   return diagram
- # prompt: None
- # Note: rewrite to be a function for foxy
-
- @add_method(Pluto_FastAI)
- def make_dloader_from_file(self, directory, y_fn):
-   dblock = fastai.data.block.DataBlock(
-     get_items=fastai.data.transforms.get_image_files,
-     get_y=y_fn)
-   dset = dblock.datasets(directory)
-   return dset, dblock
-
- # prompt: write documentation for function foxy.bake_dloader_from_file()
- # Grade: B // it does it correctly, except it return a datasets and not dataloader,
- # and missing the add method
-
- # I rewrote it for extentable
- @add_method(Pluto_FastAI)
- def make_image_dblock_from_file(self, directory, y_fn, is_dataset=False, is_verbose=False):
-
-   """
-   Create a fastai datablock object from a directory of images.
-
-   Args:
-     directory: (str) A string path to the directory of images.
-     y_fn: (fn) A function that takes a file path as input and returns the
-       corresponding label.
-     is_dataset: (bool) if True return a dataset or None. Default is False.
-     is_verbose: (bool) print out step by step operation. Default is False.
-
-   Returns:
-     A fastai datablock object and datasets object.
-   """
-
-   dblock = fastai.data.block.DataBlock(
-     get_items=fastai.data.transforms.get_image_files,
-     get_y=y_fn,
-     blocks = (fastai.vision.data.ImageBlock, fastai.vision.data.CategoryBlock))
-   #
-   dset = None
-   if (is_dataset):
-     dset = dblock.datasets(directory)
-     if (is_verbose):
-       try:
-         dblock.summary(directory)
-       except Exception as e:
-         print(f'\n*Almost complete. Stop at: {e}')
-   return dset, dblock
-
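# Example usage (a minimal sketch; the image folder is hypothetical and the
# label rule simply uses the parent directory name):
# def label_from_parent(fname): return fname.parent.name
# dset, dblock = foxy.make_image_dblock_from_file('./images', label_from_parent, is_dataset=True, is_verbose=True)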
- # prompt: No prompt
- # Note: write from reading above code. I tried but failed to ask it to
- # write a function based on the above 3 code cells.
-
- # show the pandas dataframe and display the y_label pie chart
- @add_method(Pluto_FastAI)
- def draw_df_ylabel(self, df, y_label='label'):
-   df[y_label].value_counts().plot(kind='pie')
-   display(df.describe())
-   return
-
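# Example usage (a minimal sketch with a small hypothetical DataFrame):
# df = pandas.DataFrame({'image': ['a.jpg', 'b.jpg', 'c.jpg'], 'label': ['cat', 'dog', 'cat']})
# foxy.draw_df_ylabel(df, y_label='label')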
- # prompt: None
- # Note: I am unsure how to write the prompt for the following, other ask it to write document
- # Document doc:
- # prompt: write python detail inline documentation for the following function: make_step1_data_source
- # Grade: B // most of it correct
-
- @add_method(Pluto_FastAI)
- def make_step1_data_source(self, df, x_col_index=0, y_col_index=1, is_verbose=False):
-
-   """
-   Create a fastai DataBlock and DataSet objects from a Pandas dataframe.
-   The input (X) is the image full path.
-   The label (Y) is the target.
-
-   Args:
-     df: (pandas DataFrame) a dataframe of images with label.
-     x_col_index: (int) index of the column that contains the image uri.
-     y_col_index: (int) index of the column that contains the label.
-     is_verbose: (bool) print out step by step operation. Default is False.
-
-   Returns:
-     A fastai datablock (DataBlock) object and datasets (DataSet) object.
-   """
-
-   # step 1: Continue using Pandas
-   dblock = fastai.data.block.DataBlock(
-     get_x = fastai.data.transforms.ColReader(x_col_index),
-     get_y = fastai.data.transforms.ColReader(y_col_index),
-     blocks = (fastai.vision.data.ImageBlock, fastai.vision.data.CategoryBlock)
-   )
-   #
-   dset = dblock.datasets(df)
-   #
-   if (is_verbose):
-     self._ph()
-     self._pp('Step 1 of 3', 'Source DataSet from Pandas')
-     self._ph()
-     print(f'Train: {dset.train[0]}, \nValid: {dset.valid[0]}')
-     print(f'Vocab: {dset.vocab}, where 0 and 1 used as index')
-     print(f'It does the auto split to train and valid. ')
-     print(f'Size valid: {len(dset.valid)}')
-     print(f'Total size: {len(dset.train)+len(dset.valid)}')
-     print(f'Default spliter: 80/20: {str(dblock.splitter)}')
-     # print out status
-     self._ph()
-     try:
-       dblock.summary(df)
-     except Exception as e:
-       print(f'\n\n**Not yet complete. We stop at:\n{e}')
-     self._ph()
-     x = dset.train[0][0]
-     display(x.show())
-   return dset, dblock
-
- # prompt: None
- # Note: I am unsure how to write the prompt for the following, other ask it to write document
- # use genAI to write doc.
- # prompt: write python inline documentation for the following function: foxy.bake_step2_split
- # grade: A // it know how to write doc.
-
- @add_method(Pluto_FastAI)
- def make_step2_split(self, df, dblock, fn=None, is_verbose=False):
-
-   """
-   Split the DataFrame into training and validation datasets.
-
-   Args:
-     df: (pandas DataFrame) a dataframe of images with label.
-     dblock: (fastai DataBlock) the datablock object.
-     fn: (function) the spliter function. default is the default auto 80/20 split.
-     is_verbose: (bool) print out step by step operation. Default is False.
-
-   Returns:
-     A fastai datablock (DataBlock) object and datasets (DataSet) object.
-   """
-   if (fn is not None):
-     dblock.splitter = fn
-   #
-   dset = dblock.datasets(df)
-   #
-   #
-   if (is_verbose):
-     self._ph()
-     self._pp('Step 2 of 3', 'Split X (train) and Y (valid)')
-     self._ph()
-     print(f'Train: {dset.train[0]}, \nValid: {dset.valid[0]}')
-     print(f'Vocab: {dset.vocab}, where 0 and 1 used as index')
-     print(f'It does the auto split to train and valid. ')
-     print(f'Size valid: {len(dset.valid)}')
-     print(f'Total size: {len(dset.train)+len(dset.valid)}')
-     print(f'Spliter: {str(dblock.splitter)}')
-     # print out status
-     self._ph()
-     try:
-       dblock.summary(df)
-     except Exception as e:
-       print(f'\n\n**Not yet complete. We stop at:\n{e}')
-     self._ph()
-     x = dset.train[0][0]
-     display(x.show())
-   return dset, dblock
-
- # prompt: None
- # Note: I am unsure how to write the prompt for the following, other ask it to write document
-
- @add_method(Pluto_FastAI)
- def make_step3_transform(self, df, dblock, item_fn=None, batch_fn=None, is_verbose=False):
-
-   """
-   Transform the data into a DataSet and DataLoader objects.
-
-   Args:
-     df: (pandas DataFrame) a dataframe of images with label.
-     dblock: (fastai DataBlock) the datablock object.
-     item_fn: (function) the item transformer function. default is resize to 224.
-     batch_fn: (function) the batch transformer function. default is default augmentation.
-     is_verbose: (bool) print out step by step operation. Default is False.
-
-   Returns:
-     A fastai dataloader (DataLoader) object and datasets (DataSet) object.
-   """
-   if (item_fn is None):
-     dblock.default_item_tfms = fastai.vision.augment.Resize(224)
-   else:
-     dblock.default_item_tfms = item_fn
-   #
-   if (batch_fn is None):
-     dblock.default_batch_tfms = fastai.vision.augment.aug_transforms()  # use all the default settings
-   else:
-     dblock.default_batch_tfms = batch_fn
-
-   dloader = dblock.dataloaders(df)
-   #
-   #
-   if (is_verbose):
-     self._ph()
-     self._pp('Step 3 of 3', 'Item transform (resize), Batch transform (augmentation)')
-     self._ph()
-     print(f'Train: {dloader.train_ds[0]}, \nValid: {dloader.valid_ds[0]}')
-     print(f'Vocab: {dloader.vocab}, where 0 and 1 used as index')
-     print(f'Size valid: {len(dloader.valid_ds)}')
-     print(f'Total size: {len(dloader.train_ds)+len(dloader.valid_ds)}')
-     self._ph()
-     print(f'Spliter: {str(dblock.splitter)}')
-     self._ph()
-     print(f'Item Transform: {str(dblock.default_item_tfms)}')
-     self._ph()
-     print(f'Batch Transform: {str(dblock.default_batch_tfms)}')
-     # print out status
-     self._ph()
-     try:
-       dblock.summary(df)
-     except Exception as e:
-       print(f'\n\n**Not yet complete. We stop at:\n{e}')
-     self._ph()
-     display(dloader.show_batch())
-   return dloader, dblock
-
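# Example usage (a minimal end-to-end sketch of the three steps above; df is a
# hypothetical DataFrame with image paths in column 0 and labels in column 1):
# dset, dblock = foxy.make_step1_data_source(df, x_col_index=0, y_col_index=1, is_verbose=True)
# dset, dblock = foxy.make_step2_split(df, dblock)
# dloader, dblock = foxy.make_step3_transform(df, dblock)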
- # prompt: None
- # Note: I am unsure how to describe the following function
-
- # draw GraphViz for FastAI data block
- @add_method(Pluto_FastAI)
- def draw_fastai_train(self):
-   nodes = ["A", "A1", "A2", "A3", "A4",
-     "B", "B1", "B2",
-     "C", "C1", "C2",
-     "D"]
-   labels = ["@1_LEARNER", "DataLoader", "Model Arch", "Error Metric", "Learning Rate",
-     "@2_FINE_TUNE", "Epoch", "Callback",
-     "@3_MONITOR", "OUT: Save Model", "Break",
-     "@4_TEA_BREAK :-)"]
-   edges = [("A", "B"), ("C", "D"),
-     ("A", "A1"), ("A1", "A2"), ("A2", "A3"), ("A3", "A4"),
-     ("B", "B1"), ("B", "B2"), ("B2", "C"),
-     ("C", "C1"), ("C", "C2")]
-   #
-   # draw it
-   diagram = self.draw_diagram(nodes, edges, labels, node_color=None,
-     horizontal=True, title='Pluto view of FastAI Learn Plus Disco Dancing :-)',
-     fontsize='8')
-
-   # display it
-   display(diagram)
-   return diagram
-
- # prompt: write a function with documentation for the following: print all the name begin with partial label, variable avail_pretrained_models
- # grade: A // it works
-
- @add_method(Pluto_FastAI)
- def fetch_timm_models_name(partial_label):
-
-   """Return all the models name from timm library that begin with partial_label
-
-   Args:
-     partial_label (str): partial label for the model name
-
-   Returns:
-     A list of strings with the models name
-   """
-
-   avail_pretrained_models = timm.list_models(pretrained=True)
-   models = [model for model in avail_pretrained_models if partial_label in model]
-   #
-   print(f'Total available models: {len(avail_pretrained_models)}')
-   print(f'Total models with partial label {partial_label}: {len(models)} ')
-   return models
- #
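# Example usage (a minimal sketch; timm must be installed, and the call assumes
# the add_method decorator leaves the plain function name usable, since the
# signature above takes only partial_label):
# resnet_models = fetch_timm_models_name('resnet')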
- # prompt: Add in a parameter to print the result to a file with the same name as the notebook but with .py file extention
-
- @add_method(Pluto_FastAI)
- def fetch_code_cells(self, notebook_name,
-   filter_magic="# %%write",
-   write_to_file=True, fname_override=None):
-
-   """
-   Reads a Jupyter notebook (.ipynb file) and writes out all the code cells
-   that start with the specified magic command to a .py file.
-
-   Parameters:
-   - notebook_name (str): Name of the notebook file (with .ipynb extension).
-   - filter_magic (str): Magic command filter. Only cells starting with this command will be written.
-     The default is: "# %%write"
-   - write_to_file (bool): If True, writes the filtered cells to a .py file.
-     Otherwise, prints them to the standard output. The default is True.
-   - fname_override (str): If provided, overrides the output filename. The default is None.
-
-   Returns:
-   - None: Writes the filtered code cells to a .py file or prints them based on the parameters.
-   """
-   with open(notebook_name, 'r', encoding='utf-8') as f:
-     notebook_content = json.load(f)
-
-   output_content = []
-
-   # Loop through all the cells in the notebook
-   for cell in notebook_content['cells']:
-     # Check if the cell type is 'code' and starts with the specified magic command
-     if cell['cell_type'] == 'code' and cell['source'] and cell['source'][0].startswith(filter_magic):
-       # Append the source code of the cell to output_content
-       output_content.append(''.join(cell['source']))
-
-   if write_to_file:
-     if fname_override is None:
-       # Derive the output filename by replacing .ipynb with .py
-       output_filename = notebook_name.replace(".ipynb", ".py")
-     else:
-       output_filename = fname_override
-     with open(output_filename, 'w', encoding='utf-8') as f:
-       f.write('\n'.join(output_content))
-     print(f'File: {output_filename} written to disk.')
-   else:
-     # Print the code cells to the standard output
-     print('\n'.join(output_content))
-     print('-' * 40)  # print separator
-   return
- # Example usage:
- # print_code_cells_from_notebook('your_notebook_name_here.ipynb')
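# Example usage (a minimal sketch; the notebook name is hypothetical):
# foxy.fetch_code_cells('pluto_foxy.ipynb', fname_override='app.py')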
- # prompt: (from gpt4)
- #
- # -----------------------------------
- #
- class StopAndSaveOnLowError(Callback):
-   def __init__(self, threshold=0.009, fname='best_low_error_model'):
-     self.threshold = threshold
-     self.fname = fname
-     return
-
-   def after_epoch(self):
-     # Assuming error_rate is a monitored metric
-     if 'error_rate' in self.learn.recorder.metric_names:
-       error = self.learn.recorder.log[self.learn.recorder.metric_names.index('error_rate')]
-       if error <= self.threshold:
-         self.fname = f'{self.fname}_{error:.4}'
-         self.fname = self.fname.replace('.', 'd')
-         self.learn.save(self.fname)
-         print(f"Saving model as error rate {error} is less than {self.threshold}: Model name: {self.fname}")
-         print(f"Stopping training as error rate {error} is less than {self.threshold}")
-         raise CancelTrainException
-     return
- #
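# Example usage (a minimal sketch; learn is a hypothetical fastai Learner whose
# metrics include error_rate, otherwise the callback never triggers):
# learn.fine_tune(8, cbs=[StopAndSaveOnLowError(threshold=0.009)])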
- # ----------[END OF pluto_foxy]----------
  #
  # ----------[END OF CODE]----------
  # %%write -a app.py
  # prompt: create the new class foxy from Pluto_FastAI

  # wake up foxy
- foxy =
  # %%write -a app.py
  # check out my environments

1 |   # [BEGIN OF pluto_happy]
2 | + # [BEGIN OF pluto_happy]
3 | + # required pip install
4 | + import pynvml # for GPU info
5 |   ## standard libs, no need to install
6 | + import numpy
7 | + import PIL
8 | + import pandas
9 | + import matplotlib
10 | + import torch
11 | + # standard libs (system)
12 |   import json
13 |   import time
14 |   import os
15 |   import random
16 |   import re
17 |   import sys
18 |   import psutil
19 |   import socket
20 |   import importlib.metadata
21 |   import types
22 |   import cpuinfo
23 |   import pathlib
24 |   import subprocess
25 |   # define class Pluto_Happy
26 |   class Pluto_Happy(object):
27 |     """
33 |     such as Pluto_HFace with a lot more function on HuggingFace, LLM and Transformers.
34 |
35 |     Args:
36 | +     name (str): the display name, e.g. "Hanna the seeker"
37 |
38 |     Returns:
39 | +     (object): the class instance.
40 |     """
41 |
42 |     # initialize the object
51 |     self._ph()
52 |     #
53 |     # define class var for stable division
54 |     self.fname_requirements = './pluto_happy/requirements.txt'
55 |     #
56 |     self.color_primary = '#2780e3' #blue
107 |     y = x
108 |     return y
109 |     #
110 | +
111 |   # Define a function to display available CPU and RAM
112 | + def fetch_info_system(self, is_print=False):
113 |
114 |     """
115 |     Fetches system information, such as CPU usage and memory usage.
130 |     mem_total_gb = mem.total / (1024 ** 3)
131 |     mem_available_gb = mem.available / (1024 ** 3)
132 |     mem_used_gb = mem.used / (1024 ** 3)
133 | +   #
134 | +   # print it nicely
135 |     # save the results
136 |     s += f"Total memory: {mem_total_gb:.2f} GB\n"
137 |     s += f"Available memory: {mem_available_gb:.2f} GB\n"
143 |     s += f'Number of CPU cores: {cpu_info["count"]}\n'
144 |     s += f"CPU usage: {cpu_usage}%\n"
145 |     s += f'Python version: {cpu_info["python_version"]}'
146 | +   if (is_print is True):
147 | +     self._ph()
148 | +     self._pp("System", "Info")
149 | +     self._ph()
150 | +     self._pp("Total Memory", f"{mem_total_gb:.2f} GB")
151 | +     self._pp("Available Memory", f"{mem_available_gb:.2f} GB")
152 | +     self._pp("Memory Usage", f"{mem_used_gb/mem_total_gb:.2f}%")
153 | +     self._pp("CPU Type", f'{cpu_info["brand_raw"]}, arch: {cpu_info["arch"]}')
154 | +     self._pp("CPU Cores Count", f'{cpu_info["count"]}')
155 | +     self._pp("CPU Usage", f"{cpu_usage}%")
156 | +     self._pp("Python Version", f'{cpu_info["python_version"]}')
157 |   except Exception as e:
158 |     s += f'CPU type: Not accessible, Error: {e}'
159 | +   if (is_print is True):
160 | +     self._ph()
161 | +     self._pp("CPU", f"*Warning* No CPU Access: {e}")
162 |   return s
163 |   #
164 |   # fetch GPU RAM info
165 | + def fetch_info_gpu(self, is_print=False):
166 |
167 |     """
168 |     Function to fetch GPU RAM info
195 |     s += f'Free Memory: {mfree:.2f} GB\n'
196 |     s += f'GPU allocated RAM: {round(torch.cuda.memory_allocated(0)/1024**3,2)} GB\n'
197 |     s += f'GPU reserved RAM {round(torch.cuda.memory_reserved(0)/1024**3,2)} GB\n'
198 | +   if (is_print is True):
199 | +     self._ph()
200 | +     self._pp("GPU", "Info")
201 | +     self._ph()
202 | +     self._pp("GPU Type", f'{torch.cuda.get_device_name(0)}')
203 | +     self._pp("GPU Ready Status", f'{torch.cuda.is_available()}')
204 | +     self._pp("GPU Count", f'{devices}')
205 | +     self._pp("GPU Total Memory", f'{mtotal:.2f} GB')
206 | +     self._pp("GPU Free Memory", f'{mfree:.2f} GB')
207 | +     self._pp("GPU allocated RAM", f'{round(torch.cuda.memory_allocated(0)/1024**3,2)} GB')
208 | +     self._pp("GPU reserved RAM", f'{round(torch.cuda.memory_reserved(0)/1024**3,2)} GB')
209 |   except Exception as e:
210 |     s += f'**Warning, No GPU: {e}'
211 | +   if (is_print is True):
212 | +     self._ph()
213 | +     self._pp("GPU", f"*Warning* No GPU: {e}")
214 |   return s
215 |   #
216 |   # fetch info about host ip
217 | + def fetch_info_host_ip(self, is_print=True):
218 |     """
219 |     Function to fetch current host name and ip address
220 |
230 |     ip_address = socket.gethostbyname(hostname)
231 |     s += f"Hostname: {hostname}\n"
232 |     s += f"IP Address: {ip_address}\n"
233 | +   if (is_print is True):
234 | +     self._ph()
235 | +     self._pp('Host and Notebook', 'Info')
236 | +     self._ph()
237 | +     self._pp('Host Name', f"{hostname}")
238 | +     self._pp("IP Address", f"{ip_address}")
239 | +     try:
240 | +       from jupyter_server import serverapp
241 | +       self._pp("Jupyter Server", f'{serverapp.__version__}')
242 | +     except ImportError:
243 | +       self._pp("Jupyter Server", "Not accessible")
244 | +     try:
245 | +       import notebook
246 | +       self._pp("Jupyter Notebook", f'{notebook.__version__}')
247 | +     except ImportError:
248 | +       self._pp("Jupyter Notebook ", "Not accessible")
249 |   except Exception as e:
250 |     s += f"**Warning, No hostname: {e}"
251 | +   if (is_print is True):
252 | +     self._ph()
253 | +     self._pp('Host Name and Notebook', 'Not accessible')
254 |   return s
255 |   #
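# Example usage (a minimal sketch, once an instance exists):
# pluto = Pluto_Happy('Pluto')
# pluto.fetch_info_system(is_print=True)
# pluto.fetch_info_gpu(is_print=True)
# pluto.fetch_info_host_ip()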
256 |   #
257 |   # fetch import libraries
258 |   def _fetch_lib_import(self):
345 |     f.close()
346 |     return
347 |   #
348 | +
349 |   def fetch_installed_libraries(self):
350 |     """
351 |     Retrieves and prints the names and versions of Python libraries installed by the user,
366 |       for name, version in libraries.items():
367 |         print(f"{name}: {version}")
368 |     """
369 | +   # List of standard libraries (this may not be exhaustive and might need updates based on the Python version)
370 | +   # Run pip freeze command to get list of installed packages with their versions
371 |     result = subprocess.run(['pip', 'freeze'], stdout=subprocess.PIPE)
372 |
373 |     # Decode result and split by lines
419 |
420 |     # Check if each line (stripped of whitespace and newline characters) exists in the reference dictionary.
421 |     # If it exists, fetch its value. Otherwise, set the value to None.
422 | +   results = {line.strip(): reference_dict.get(line.strip().replace('_', '-'), None) for line in lines}
423 |
424 |     return results
425 |   # print fetch_info about myself
434 |     Returns:
435 |       None
436 |     """
437 |     self._ph()
438 |     self._pp("Hello, I am", self.name)
439 |     self._pp("I will display", "Python, Jupyter, and system info.")
440 | +   self._pp("Note", "For doc type: help(pluto) ...or help(your_object_name)")
441 | +   self._pp("Let Rock and Roll", "¯\_(ツ)_/¯")
442 |     # system
443 | +   x = self.fetch_info_system(is_print=True)
444 | +   # print(x)
445 | +   # self._ph()
446 |     # gpu
447 | +   # self._pp('GPU', 'Info')
448 | +   x = self.fetch_info_gpu(is_print=True)
449 | +   # print(x)
450 |     self._ph()
451 |     # lib used
452 |     self._pp('Installed lib from', self.fname_requirements)
454 |     x = self.fetch_match_file_dict(self.fname_requirements, self.fetch_installed_libraries())
455 |     for item, value in x.items():
456 |       self._pp(f'{item} version', value)
457 | +   #
458 |     self._ph()
459 |     self._pp('Standard lib from', 'System')
460 |     self._ph()
463 |     self._pp('pandas version', pandas.__version__)
464 |     self._pp('PIL version', PIL.__version__)
465 |     self._pp('torch version', torch.__version__)
466 | +   #
467 | +   self.print_ml_libraries()
468 |     # host ip
469 |     x = self.fetch_info_host_ip()
470 | +   # print(x)
471 |     self._ph()
472 |   #
473 |     return
474 |   #
475 | + def print_ml_libraries(self):
476 |     """
477 | +   Checks for the presence of Gradio, fastai, huggingface_hub, and transformers libraries.
478 |
479 | +   Prints a message indicating whether each library is found or not.
480 | +   If a library is not found, it prints an informative message specifying the missing library.
481 |     """
482 |     self._ph()
483 | +   self._pp("ML Lib", "Info")
484 |     try:
485 | +     import fastai
486 | +     self._pp("fastai", f"{fastai.__version__}")
487 | +   except ImportError:
488 | +     self._pp("fastai", "*Warning* library not found.")
489 |     #
490 | +   try:
491 | +     import transformers
492 | +     self._pp("transformers", f"{transformers.__version__}")
493 | +   except ImportError:
494 | +     self._pp("transformers", "*Warning* library not found.")
495 |     #
496 | +   try:
497 | +     import diffusers
498 | +     self._pp("diffusers", f"{diffusers.__version__}")
499 | +   except ImportError:
500 | +     self._pp("diffusers", "*Warning* library not found.")
501 |     #
502 | +   try:
503 | +     import gradio
504 | +     self._pp("gradio", f"{gradio.__version__}")
505 | +   except ImportError:
506 | +     self._pp("Gradio", "*Warning* library not found.")
507 |
508 | +   try:
509 | +     import huggingface_hub
510 | +     self._pp("HuggingFace Hub", f"{huggingface_hub.__version__}")
511 | +   except ImportError:
512 | +     self._pp("huggingface_hub", "*Warning* library not found.")
513 | +   return
514 |   #
515 |   # add module/method
516 |   #
517 |   import functools
526 |   #
527 |   # [END OF pluto_happy]
528 |   #
529 |   #
530 |   # ----------[END OF CODE]----------
531 |   # %%write -a app.py
532 |   # prompt: create the new class foxy from Pluto_FastAI
533 |
534 |   # wake up foxy
535 | + foxy = Pluto_Happy('Foxy, the seeker of truth.')
536 |   # %%write -a app.py
537 |   # check out my environments
538 |
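# Example usage (a minimal sketch of the freshly created object; these calls
# print the ML library and host reports on demand):
# foxy.print_ml_libraries()
# foxy.fetch_info_host_ip()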
requirements.txt
CHANGED
@@ -1,17 +1,5 @@
- #numpy==1.26.4
- cryptography
  pynvml
-
- transformers
- diffusers
  torch
-
  fastai
- duckduckgo_search
- graphviz
- kaggle
- opendatasets
- timm
- huggingface_hub
- flopth
- gradio

1 |   pynvml
2 | + matplotlib
3 |   torch
4 | + py-cpuinfo
5 |   fastai