@@ -147,6 +147,7 @@ def gpt():
     use_video = False
     suggest = False
     summarize = False
+    expression = False
     predict_q = 0
     max_char_msg = 500
     max_resp_token = 600
@@ -172,6 +173,8 @@ def gpt():
     if 'chat_model' in json_payload and 'assistant_id' not in json_payload:
         chat_model = json_payload['chat_model']
         max_resp_token = 2048
+    if 'expression' in json_payload:
+        expression = json_payload['expression'] == "1"
     if 'translate' in json_payload:
         chat_messages = app.translate_messages.copy()
         json_payload['payload'][-1]['content'] = json_payload['payload'][-1][
@@ -273,12 +276,18 @@ def gpt():
                 "messages": chat_messages
             }
         )
-        messages = list(app.openai_client.beta.threads.messages.list(thread_id=runs.thread_id, run_id=runs.id))
-        message_content = messages[0].content[0].text
-        app.logger.info(message_content.value)
-        pattern = re.compile(r"【\d+:\d+†\(?source\)?】")
-        filtered_message = pattern.sub("", message_content.value)
-        result = {"role": "assistant", "content": filtered_message}
+        if runs.status != "completed":
+            result = {"role": "assistant", "content": "Maaf, saat ini saya sedang sibuk. Coba beberapa saat lagi."}
+        else:
+            messages = list(app.openai_client.beta.threads.messages.list(thread_id=runs.thread_id, run_id=runs.id))
+            try:
+                message_content = messages[0].content[0].text
+                app.logger.info(message_content.value)
+                pattern = re.compile(r"【\d+:\d+†\(?source\)?】")
+                filtered_message = pattern.sub("", message_content.value)
+                result = {"role": "assistant", "content": filtered_message}
+            except IndexError:
+                result = {"role": "assistant", "content": "Saat ini saya tidak memiliki informasi yang diperlukan untuk menjawab pertanyaan Anda."}
     else:
         json_response = app.openai_client.chat.completions.create(model=chat_model,
                                                                   messages=chat_messages,
@@ -301,6 +310,9 @@ def gpt():
             result = {"role": "assistant", "content": json_response.choices[0].message.content}
         else:
             result = {"role": "assistant", "content": json_response.choices[0].message.content}
+    if expression:
+        exprr = expresso(text=result['content'])
+        result['expression'] = exprr['expression']
     if predict_q:
         if assistant_id:
             query_q = {
@@ -319,7 +331,9 @@ def gpt():
                 assistant_id=assistant_id,
                 thread={
                     "messages": chat_messages
-                }
+                },
+                max_completion_tokens=600,
+                max_prompt_tokens=600
             )
             messages = list(app.openai_client.beta.threads.messages.list(thread_id=runs.thread_id, run_id=runs.id))
             message_content = messages[0].content[0].text
@@ -643,6 +657,27 @@ def speech(text=""):
     res.stream_to_file(os.path.join(app.config['UPLOAD_FOLDER'], f"{uu_id}.mp3"))
     return download_file(f"{uu_id}.mp3")
 
+@app.route('/expression', methods=['POST'])
+def expresso(text=""):
+    if not text:
+        if 'text' in request.form:
+            text = request.form['text']
+        else:
+            return {"status": "error", "message": "No text for expression"}
+    response = app.openai_client.chat.completions.create(
+        model="gpt-4o-mini",
+        messages=[
+            {
+                "role": "user",
+                "content": f"What is the closest expression of this text, choose between happy, sad, indifferent, fear, anger, surprise, or disgust, output json with key 'expression':\n\n{text}\n\n"
+            }
+        ],
+        response_format={"type": "json_object"}
+    )
+    response_message = response.choices[0].message.content
+    return json.loads(response_message)
+
+
 
 # Press the green button in the gutter to run the script.
 if __name__ == '__main__':
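
For reference, a minimal client-side sketch of how the new /expression endpoint could be exercised once this patch is applied. The base URL and port are assumptions (a local Flask dev server); the route path, the 'text' form field, and the 'expression' response key come from the added handler above.

    # Hypothetical usage sketch, not part of the patch.
    # Assumes the Flask app from this diff is running locally on port 5000.
    import requests

    resp = requests.post(
        "http://localhost:5000/expression",  # route added in this patch
        data={"text": "I can't believe we actually won the finals!"},
    )
    # The handler asks gpt-4o-mini for JSON with the key 'expression',
    # one of: happy, sad, indifferent, fear, anger, surprise, or disgust.
    print(resp.json().get("expression"))

Server-side, passing "expression": "1" in the gpt() payload reuses the same helper on the generated reply and attaches the predicted label to the result as result['expression'].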