Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -22,9 +22,6 @@ model.to(device)
|
|
22 |
# model_name = "roberta-base"
|
23 |
# tokenizer = RobertaTokenizer.from_pretrained(model_name, map_location=torch.device('cpu'))
|
24 |
|
25 |
-
def count_words(text):
    """Return the number of whitespace-separated words in *text*."""
    return len(text.split())
|
28 |
|
29 |
def text_to_sentences(text):
|
30 |
clean_text = text.replace('\n', ' ')
|
@@ -63,37 +60,38 @@ def predict(query):
|
|
63 |
return real
|
64 |
|
65 |
def findRealProb(data):
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
94 |
-
|
95 |
-
|
96 |
-
|
|
|
97 |
|
98 |
demo = gr.Interface(
|
99 |
fn=findRealProb,
|
|
|
22 |
# model_name = "roberta-base"
|
23 |
# tokenizer = RobertaTokenizer.from_pretrained(model_name, map_location=torch.device('cpu'))
|
24 |
|
|
|
|
|
|
|
25 |
|
26 |
def text_to_sentences(text):
|
27 |
clean_text = text.replace('\n', ' ')
|
|
|
60 |
return real
|
61 |
|
62 |
def findRealProb(data):
    """Classify *data* as human-written or AI-generated.

    Splits the input text into ~900-character chunks via ``chunks_of_900``,
    scores each chunk with ``predict`` (presumably the probability the
    chunk is original/human text — TODO confirm against predict's
    definition), then combines the per-chunk scores into a single
    length-weighted average probability.

    Returns a Flask ``jsonify`` response containing:
      - "Real": weighted probability the text is human-written
      - "Fake": 1 - Real
      - "Label": "Human" (> 0.7), "Might be AI" (0.3 < p <= 0.7), else "AI"
      - "Chunks": per-chunk results ({"Text", "Label", "Confidence"})
    or an {'error': ...} response for empty / oversized input.

    NOTE(review): this function builds Flask responses but is wired into a
    Gradio Interface below — confirm the caller actually expects a Flask
    Response object rather than a plain dict.
    """
    with app.app_context():
        # Input validation: reject empty or oversized inputs up front.
        if data is None or len(data) == 0:
            return jsonify({'error': 'No query provided'})
        if len(data) > 9400:
            return jsonify({'error': 'Cannot analyze more than 9400 characters!'})
        if len(data.split()) > 1500:
            return jsonify({'error': 'Cannot analyze more than 1500 words'})

        # Score each chunk independently.
        chunksOfText = chunks_of_900(data)
        results = []
        for chunk in chunksOfText:
            outputv1 = predict(chunk)
            # "OR" = original/real, "CG" = computer-generated.
            label = "OR" if outputv1 >= 0.5 else "CG"
            results.append({"Text": chunk, "Label": label, "Confidence": outputv1})

        # Length-weighted average of the per-chunk confidences, so longer
        # chunks contribute proportionally more to the final score.
        ans = 0
        cnt = 0
        for result in results:
            length = len(result["Text"])
            cnt += length
            ans += result["Confidence"] * length

        # Fix: guard against ZeroDivisionError if chunking produced no
        # non-empty chunks (the original divided unconditionally).
        if cnt == 0:
            return jsonify({'error': 'No query provided'})
        realProb = ans / cnt

        # Fix: the original condition `0.3 < realProb < 0.7` left
        # realProb == 0.7 labeled "AI", below the "Human" threshold yet
        # outside the "Might be AI" band; `realProb > 0.3` closes the gap.
        label = "AI"
        if realProb > 0.7:
            label = "Human"
        elif realProb > 0.3:
            label = "Might be AI"
        return jsonify({"Real": realProb, "Fake": 1 - realProb, "Label": label, "Chunks": results})
|
95 |
|
96 |
demo = gr.Interface(
|
97 |
fn=findRealProb,
|