DawnC committed on
Commit a0f2ca9
1 Parent(s): 36307af

Update app.py

Files changed (1)
  1. app.py +23 -15
app.py CHANGED
@@ -539,36 +539,44 @@ from ultralytics import YOLO
 import asyncio
 import traceback
 
+os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+os.environ['HF_ZERO_GPU'] = '1'  # Explicitly tell the system we want to use ZeroGPU
+os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:512'
+
 def get_device():
     print("Initializing device configuration...")
 
-    # First try to use CUDA, but handle initialization more carefully
-    if torch.cuda.is_available():
+    # Detection logic specifically for ZeroGPU
+    if 'HF_ZERO_GPU' in os.environ and torch.cuda.is_available():
         try:
-            # Set an environment variable telling PyTorch to fall back to CPU automatically when no GPU is available
-            os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:512'
+            # Force CUDA initialization
+            torch.cuda.init()
+            # Wait a short while for the system to finish initializing
+            import time
+            time.sleep(2)
 
             device = torch.device('cuda')
-            # Use try-except to handle fetching GPU information
-            try:
-                print(f"Using GPU: {torch.cuda.get_device_name(0)}")
-                print(f"GPU Memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.2f} GB")
-            except Exception as e:
-                print("GPU detected but couldn't get detailed information")
-
-            # Run a small test computation to verify GPU functionality
+
+            # Run a small test to confirm GPU functionality
             test_tensor = torch.rand(1).to(device)
             _ = test_tensor * test_tensor
-            print("GPU test calculation successful")
+
+            print("ZeroGPU initialization successful")
+            print(f"Using device: {device}")
+            if torch.cuda.is_available():
+                print(f"GPU: {torch.cuda.get_device_name(0)}")
 
             return device
 
         except Exception as e:
-            print(f"GPU initialization failed: {str(e)}")
+            print(f"ZeroGPU initialization failed: {str(e)}")
             print("Falling back to CPU")
             return torch.device('cpu')
     else:
-        print("CUDA not available, using CPU")
+        if not torch.cuda.is_available():
+            print("CUDA not available, using CPU")
+        elif 'HF_ZERO_GPU' not in os.environ:
+            print("HF_ZERO_GPU not set, using CPU")
         return torch.device('cpu')
 
 device = get_device()
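
For context, the module-level device = get_device() call is what the rest of app.py would hand to its models and tensors. The sketch below shows that typical hand-off; it is not part of this commit, the yolov8n.pt checkpoint name is an illustrative assumption, and only standard torch / Ultralytics calls are used.

# Illustrative sketch (not from this commit): consume the device chosen by get_device().
import torch
from ultralytics import YOLO

device = get_device()        # 'cuda' when ZeroGPU initialization succeeds, otherwise 'cpu'

model = YOLO('yolov8n.pt')   # assumed checkpoint name, for illustration only
model.to(device)             # move the detector's weights onto the selected device

dummy = torch.zeros(1, 3, 640, 640, device=device)   # dummy input allocated on the same device
print(f"Running on: {device}")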