raoufjat commited on
Commit
1efe282
1 Parent(s): b72fec8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +53 -3
app.py CHANGED
@@ -1,3 +1,53 @@
1
- # Load model directly
2
- from transformers import AutoModel
3
- model = AutoModel.from_pretrained("Omartificial-Intelligence-Space/Arabic-QWQ-32B-Preview")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from flask import Flask, request, jsonify
from transformers import pipeline

# Flask application instance.
app = Flask(__name__)

# Hugging Face model checkpoint served by this app.
MODEL_ID = "Omartificial-Intelligence-Space/Arabic-QWQ-32B-Preview"

# Text-generation pipeline wrapping the Arabic-QwQ model.
# NOTE(review): a 32B-parameter checkpoint is loaded eagerly at import
# time — confirm the host has enough memory / an accelerator before
# deploying.
model_pipeline = pipeline("text-generation", model=MODEL_ID)
13
@app.route('/')
def index():
    """Root endpoint: serve a minimal HTML form that posts a prompt to /predict."""
    form_html = """
    <h1>Arabic-QwQ Model Demo</h1>
    <form action="/predict" method="post">
        <label>Enter your prompt:</label><br>
        <input type="text" name="prompt" required><br><br>
        <input type="submit" value="Submit">
    </form>
    """
    return form_html
25
@app.route('/predict', methods=["POST"])
def predict():
    """
    Run the Arabic-QwQ model on user-supplied text.

    Accepts the prompt as a form field (``prompt``, as submitted by the
    form on "/") or, backward-compatibly, as a JSON body
    (``{"prompt": ...}``).

    Returns:
        200 with ``{"input": ..., "response": ...}`` on success.
        400 with ``{"error": ...}`` when the prompt is missing or blank.
        500 with ``{"error": ...}`` when inference fails.
    """
    # Prefer the form field; fall back to a JSON body for API clients.
    user_input = request.form.get("prompt")
    if user_input is None and request.is_json:
        user_input = (request.get_json(silent=True) or {}).get("prompt")

    # Previously a missing prompt was handed to the pipeline as None and
    # surfaced as an opaque 500; reject it up front with a clear 400.
    if not user_input or not user_input.strip():
        return jsonify({"error": "Missing 'prompt' in request"}), 400

    try:
        # Perform model inference.
        output = model_pipeline(user_input, max_length=50, num_return_sequences=1)
        return jsonify({
            "input": user_input,
            "response": output[0]['generated_text'] if output else "No response generated"
        })
    except Exception as e:
        # Top-level boundary: report inference failures as JSON rather
        # than an HTML stack trace.
        return jsonify({"error": str(e)}), 500
51
# Entry-point guard: start Flask's built-in development server.
if __name__ == "__main__":
    # NOTE(review): debug=True enables the interactive Werkzeug debugger;
    # disable it before any public deployment.
    app.run(debug=True)