kevin 11 months ago
parent
commit
1ba3b03756
1 changed file, 75 additions and 60 deletions

+75 -60
main.py

@@ -2,6 +2,7 @@ import logging
 import os
 import json
 import re
+import time
 import uuid
 import random
 
@@ -70,10 +71,11 @@ def roulette() -> str:
     return model_name
 
 def prune_message(message: dict):
-    for k in list(message.keys()):
+    m = message.copy()
+    for k in list(m.keys()):
         if k != 'role' and k != 'content':
-            message.pop(k)
-    return message
+            m.pop(k)
+    return m
 
 
 def recommend(headlines, category):
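The prune_message change above stops mutating the caller's dict and works on a copy instead, so messages that are still referenced elsewhere (for example in the accumulated chat history) keep their extra keys. A minimal non-mutating equivalent, shown for illustration only and assuming messages are plain dicts:

def prune_message(message: dict) -> dict:
    # Build a fresh dict instead of popping keys from the caller's object,
    # keeping only the fields the Chat Completions API needs.
    return {k: v for k, v in message.items() if k in ("role", "content")}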
@@ -88,6 +90,7 @@ def recommend(headlines, category):
                                     """
         }
         chat_messages.append(json_payload)
+        time.sleep(3)
         json_response = app.openai_client.chat.completions.create(model="gpt-4o-mini",
                                                                   messages=chat_messages,
                                                                   response_format={"type": "json_object"}
@@ -126,6 +129,7 @@ def vision(message, image_url=None, image_b64=None):
             ],
         }
         chat_messages.append(json_payload)
+        time.sleep(3)
         json_response = app.openai_client.chat.completions.create(
             model="gpt-4o",
             messages=chat_messages,
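Each time.sleep(3) added above places a fixed 3-second pause before an OpenAI call, which reads as a simple client-side rate-limit guard. For comparison only, a retry-with-exponential-backoff sketch; the with_backoff helper and its parameters are hypothetical, not part of this repository, and the exception classes assume the v1 openai SDK:

import random
import time

import openai


def with_backoff(call, retries=3, base_delay=1.0):
    # Retry a callable on rate-limit or timeout errors, sleeping
    # base_delay * 2**attempt seconds plus a little jitter between attempts.
    for attempt in range(retries):
        try:
            return call()
        except (openai.RateLimitError, openai.APITimeoutError):
            if attempt == retries - 1:
                raise
            time.sleep(base_delay * (2 ** attempt) + random.random())


# Usage sketch:
# json_response = with_backoff(
#     lambda: app.openai_client.chat.completions.create(
#         model="gpt-4o", messages=chat_messages))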
@@ -165,6 +169,7 @@ def gpt():
     if not json_payload:
         json_payload = []
     has_named_params = False
+    app.logger.info("Request: " + str(json_payload))
     if isinstance(json_payload, dict):
         has_named_params = 'payload' in json_payload
         if 'payload' in json_payload:
@@ -296,6 +301,7 @@ def gpt():
                 except IndexError:
                     result = {"role": "assistant", "content": "Saat ini saya tidak memiliki informasi yang diperlukan untuk menjawab pertanyaan Anda."}
         else:
+            time.sleep(3)
             json_response = app.openai_client.chat.completions.create(model=chat_model,
                                                                       messages=chat_messages,
                                                                       max_tokens=max_resp_token, temperature=0.7, n=n)
@@ -321,6 +327,7 @@ def gpt():
             exprr = expresso(text=result['content'])
             result['expression'] = exprr['expression']
         if predict_q:
+            json_response_q = None
             if assistant_id:
                 query_q = {
                     "role": "user",
@@ -338,58 +345,58 @@ def gpt():
                     assistant_id=assistant_id,
                     thread={
                         "messages": chat_messages
-                    },
-                    max_completion_tokens=600,
-                    max_prompt_tokens=600
-                )
-                messages = list(app.openai_client.beta.threads.messages.list(thread_id=runs.thread_id, run_id=runs.id))
-                message_content = messages[0].content[0].text
-                app.logger.info(message_content.value)
-                pattern = re.compile(r"【\d+:\d+†\(?source\)?】")
-                filtered_message = pattern.sub("", message_content.value)
-                predict_q_arr = [
-                    {
-                        "role": "system",
-                        "content": assistant.instructions
-                    },
-                    {
-                        "role": "assistant",
-                        "content": filtered_message
-                    },
-                    {
-                        "role": "user",
-                        "content": f"Ekstrak {predict_q} pertanyaan tersebut dalam bentuk json array"
                     }
-                ]
-                json_response_q = app.openai_client.chat.completions.create(
-                    model=chat_model,
-                    messages=predict_q_arr,
-                    temperature=0.2,
-                    response_format={"type": "json_object"}
                 )
+                if runs.status == "completed":
+                    messages = list(app.openai_client.beta.threads.messages.list(thread_id=runs.thread_id, run_id=runs.id))
+                    message_content = messages[0].content[0].text
+                    app.logger.info(message_content.value)
+                    pattern = re.compile(r"【\d+:\d+†\(?source\)?】")
+                    filtered_message = pattern.sub("", message_content.value)
+                    predict_q_arr = [
+                        {
+                            "role": "system",
+                            "content": assistant.instructions
+                        },
+                        {
+                            "role": "assistant",
+                            "content": filtered_message
+                        },
+                        {
+                            "role": "user",
+                            "content": f"Ekstrak {predict_q} pertanyaan tersebut dalam bentuk json array"
+                        }
+                    ]
+                    json_response_q = app.openai_client.chat.completions.create(
+                        model=chat_model,
+                        messages=predict_q_arr,
+                        temperature=0.2,
+                        response_format={"type": "json_object"}
+                    )
             else:
                 json_response_q = app.openai_client.chat.completions.create(model=chat_model,
                                                                             messages=chat_messages,
                                                                             max_tokens=max_resp_token,
                                                                             temperature=0.2,
                                                                             response_format={"type": "json_object"})
-            json_response_dict = json.loads(json_response_q.choices[0].message.content)
-            if json_response_dict is not None:
-                if isinstance(json_response_dict, dict):
-                    if len(json_response_dict) > 1:
-                        qs = []
-                        for q in json_response_dict.values():
-                            qs.append(q)
-                        json_response_dict = qs
-                    else:
-                        try:
-                            first_key = next(iter(json_response_dict))
-                            json_response_dict = json_response_dict[first_key]
-                        except StopIteration:
-                            json_response_dict = []
-                elif isinstance(json_response_dict, str):
-                    json_response_dict = [json_response_dict]
-                result["predict_q"] = json_response_dict
+            if json_response_q:
+                json_response_dict = json.loads(json_response_q.choices[0].message.content)
+                if json_response_dict is not None:
+                    if isinstance(json_response_dict, dict):
+                        if len(json_response_dict) > 1:
+                            qs = []
+                            for q in json_response_dict.values():
+                                qs.append(q)
+                            json_response_dict = qs
+                        else:
+                            try:
+                                first_key = next(iter(json_response_dict))
+                                json_response_dict = json_response_dict[first_key]
+                            except StopIteration:
+                                json_response_dict = []
+                    elif isinstance(json_response_dict, str):
+                        json_response_dict = [json_response_dict]
+                    result["predict_q"] = json_response_dict
     except openai.APITimeoutError as e:
         app.logger.exception("error")
         result = {"status": "error", "message": e.message}, 408
@@ -399,7 +406,8 @@ def gpt():
     except Exception:
         app.logger.exception("error")
         result = {"status": "error", "message": "Please try again"}, 405
-    return result
+    app.logger.info("Result: " + str(result))
+    return json.dumps(result)
 
 
 @app.route('/train', methods=['POST'])
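The hunk above replaces return result with return json.dumps(result), and result can also be a (dict, status) tuple from the error branches; json.dumps would turn such a tuple into a JSON array sent with status 200. Purely as a point of comparison, and assuming Flask 2.x return-value semantics, a hypothetical helper that keeps the error status codes:

from flask import jsonify


def respond(result):
    # Hypothetical helper, not part of this commit: return the shapes used
    # above while preserving the HTTP status of the error branches.
    if isinstance(result, tuple):
        body, status = result      # e.g. ({"status": "error", ...}, 408)
        return jsonify(body), status
    return jsonify(result)         # normal assistant reply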
@@ -448,6 +456,7 @@ def train():
 
 def train_with_id(job_id):
     try:
+        time.sleep(3)
         job = app.openai_client.fine_tuning.jobs.retrieve(job_id)
         if job.fine_tuned_model is None:
             app.logger.info({"job_id": job_id, "status": job.status})
@@ -486,6 +495,7 @@ def assistant_create():
         if vector_store_id \
         else {}
     try:
+        time.sleep(3)
         assistant = app.openai_client.beta.assistants.create(
             name=assistant_name,
             instructions=assistant_ins,
@@ -594,6 +604,7 @@ def assistant_update(aid=None, vid=None):
             kwargs['name'] = request_form.pop('name')
         if 'instructions' in request_form:
             kwargs['instructions'] = request_form.pop('instructions')
+        time.sleep(3)
         app.openai_client.beta.assistants.update(**kwargs)
         return {"status": "ok"}
     except Exception as e:
@@ -649,6 +660,7 @@ def llama():
 
 @app.route('/speech', methods=['POST'])
 def speech(text=""):
+    time.sleep(3)
     if not text and 'text' not in request.form:
         audio_file = request.files.get('audio')
         res = app.openai_client.audio.transcriptions.create(
@@ -676,18 +688,21 @@ def expresso(text=""):
             text = request.form['text']
         else:
             return {"status": "error", "message": "No text for expression"}
-    response = app.openai_client.chat.completions.create(
-        model="gpt-4o-mini",
-        messages=[
-            {
-                "role": "user",
-                "content": f"What is the closest expression of this text, choose between happy, sad, indifferent, fear, anger, surprise, or disgust, output json with key 'expression':\n\n{text}\n\n"
-            }
-        ],
-        response_format={"type": "json_object"}
-    )
-    response_message = response.choices[0].message.content
-    return json.loads(response_message)
+    try:
+        response = app.openai_client.chat.completions.create(
+            model="gpt-4o-mini",
+            messages=[
+                {
+                    "role": "user",
+                    "content": f"What is the closest expression of this text, choose between happy, sad, indifferent, fear, anger, surprise, or disgust, output json with key 'expression':\n\n{text}\n\n"
+                }
+            ],
+            response_format={"type": "json_object"}
+        )
+        response_message = response.choices[0].message.content
+        return json.loads(response_message)
+    except:
+        return {"expression": "indifferent"}