DeepMount00 commited on
Commit
5bb120f
1 Parent(s): 2592faf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -16,11 +16,11 @@ subprocess.run(
16
 
17
  DESCRIPTION = '''
18
  <div>
19
- <h1 style="text-align: center;">Lexora-Medium-7B</h1>
20
- <p>This Space demonstrates the instruction-tuned model <a href="https://huggingface.co/DeepMount00/Lexora-Medium-7B"><b>Lexora-Medium-7B Chat ITA</b></a>.</p>
21
  </div>
22
  <div>
23
- <p>This model, <strong>DeepMount00/Lexora-Medium-7B</strong>, is currently the best open-source large language model for the Italian language. You can view its ranking and compare it with other models on the leaderboard at <a href="https://huggingface.co/spaces/FinancialSupport/open_ita_llm_leaderboard"><b>this site</b></a>.</p>
24
  </div>
25
  '''
26
  MAX_MAX_NEW_TOKENS = 2048
@@ -29,7 +29,7 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
29
 
30
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
31
 
32
- model_id = "DeepMount00/Lexora-Medium-7B"
33
  tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True,)
34
  model = AutoModelForCausalLM.from_pretrained(
35
  model_id,
 
16
 
17
  DESCRIPTION = '''
18
  <div>
19
+ <h1 style="text-align: center;">Lexora-Lite-3B</h1>
20
+ <p>This Space demonstrates the instruction-tuned model <a href="https://huggingface.co/DeepMount00/Lexora-Lite-3B"><b>Lexora-Lite-3B Chat ITA</b></a>.</p>
21
  </div>
22
  <div>
23
+ <p>This model, <strong>DeepMount00/Lexora-Lite-3B</strong>, is currently the best open-source large language model for the Italian language. You can view its ranking and compare it with other models on the leaderboard at <a href="https://huggingface.co/spaces/FinancialSupport/open_ita_llm_leaderboard"><b>this site</b></a>.</p>
24
  </div>
25
  '''
26
  MAX_MAX_NEW_TOKENS = 2048
 
29
 
30
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
31
 
32
+ model_id = "DeepMount00/Lexora-Lite-3B"
33
  tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True,)
34
  model = AutoModelForCausalLM.from_pretrained(
35
  model_id,