ChihChiu29 committed
Commit 1df4f0c
1 Parent(s): 4990d4a

try gpt4free

Files changed (2):
  1. Dockerfile +4 -0
  2. main.py +24 -12
Dockerfile CHANGED
@@ -4,8 +4,12 @@ WORKDIR /code
 
 COPY ./requirements.txt /code/requirements.txt
 
+# For hugging face
 RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
 
+# For gpt4free
+RUN pip install gpt4free
+
 RUN useradd -m -u 1000 user
 
 USER user
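
As a sanity check (not part of this commit), the layer added above can be exercised in the built image with the same call path main.py uses below; everything here comes from the diff itself except the throwaway prompt:

# Smoke test for the freshly installed package, assuming the gpt4free 0.x
# API that main.py relies on below (Provider.Theb via Completion.create).
import gpt4free
from gpt4free import Provider

print(gpt4free.Completion.create(Provider.Theb, prompt='ping'))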
main.py CHANGED
@@ -5,15 +5,21 @@ Based on: https://huggingface.co/docs/hub/spaces-sdks-docker-first-demo
 
 from fastapi import FastAPI, Request
 
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
-from transformers import T5Tokenizer, T5ForConditionalGeneration
+# from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+# from transformers import T5Tokenizer, T5ForConditionalGeneration
+
+import gpt4free
+from gpt4free import Provider, forefront
 
 
 token_size_limit = None
 
 # FROM: https://huggingface.co/facebook/blenderbot-400M-distill?text=Hey+my+name+is+Thomas%21+How+are+you%3F
-tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
-model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill")
+
+# LAST USED
+# tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
+# model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill")
+
 # tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-1B-distill")
 # model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-1B-distill")
 # token_size_limit = 128
@@ -42,14 +48,20 @@ async def Reply(req: Request):
   msg = request.get('msg')
   print(f'MSG: {msg}')
 
-  input_ids = tokenizer(msg, return_tensors='pt').input_ids # .to('cuda')
-  output = model.generate(
-    input_ids[:, -token_size_limit:],
-    do_sample=True,
-    temperature=request.get('temperature', 0.9),
-    max_length=request.get('max_length', 100),
-  )
-  reply = tokenizer.batch_decode(output)[0]
+  # Hugging face
+  # input_ids = tokenizer(msg, return_tensors='pt').input_ids # .to('cuda')
+  # output = model.generate(
+  #   input_ids[:, -token_size_limit:],
+  #   do_sample=True,
+  #   temperature=request.get('temperature', 0.9),
+  #   max_length=request.get('max_length', 100),
+  # )
+  # reply = tokenizer.batch_decode(output)[0]
+
+  # gpt4free
+  # usage theb
+  reply = gpt4free.Completion.create(Provider.Theb, prompt=msg)
+
   print(f'REPLY: {reply}')
   return {'reply': reply}
 
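
For reference, a minimal client sketch for the updated handler (not part of this commit). The route decorator and port are outside this diff, so the path /reply and port 7860 (the Hugging Face Spaces default) are assumptions:

# Hypothetical client: POST a message and print the gpt4free-backed reply.
import requests

resp = requests.post(
  'http://localhost:7860/reply',  # assumed route path and port
  json={'msg': 'Hey, how are you?'},
)
print(resp.json()['reply'])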