@@ -2,6 +2,8 @@ import logging
 import os
 import json
 import re
+import uuid
+import random

 import openai
 import requests
@@ -54,6 +56,19 @@ def download_file(name):
 def test():
     return jsonify({"status": "0"})

+def roulette() -> str:
+    """Pick the chat model by weighted chance: 80% gpt-3.5-turbo, 20% gpt-4o."""
+    roulette_arr = [(80, "gpt-3.5-turbo"), (20, "gpt-4o")]
+    rand_num = random.randrange(1, 101)  # 1..100, one slot per weight point
+    model_name = roulette_arr[-1][1]  # fallback so an edge roll never returns ""
+    n = 0
+    for weight, name in roulette_arr:
+        n += weight
+        if rand_num <= n:
+            model_name = name
+            break
+    return model_name
+

 def recommend(headlines, category):
     chat_messages = app.recommend_messages.copy()
@@ -124,6 +139,7 @@ def vision(message, image_url=None, image_b64=None):
 @app.route('/gpt', methods=['POST'])
 def gpt():
     assistant_id = ""
+    assistant = None
     chat_messages = []
     chat_model = "gpt-3.5-turbo"
     use_video = False
@@ -145,10 +161,10 @@ def gpt():
     if 'payload' in json_payload:
         if 'predict_q' in json_payload:
             predict_q = 5 if json_payload['predict_q'] > 4 else 0 if json_payload['predict_q'] < 1 else \
-                        json_payload['predict_q']
+                json_payload['predict_q']
         if 'num_choices' in json_payload:
             num_choices = 5 if json_payload['num_choices'] > 4 else 1 if json_payload['num_choices'] < 2 else \
-                          json_payload['num_choices']
+                json_payload['num_choices']
         if 'use_video' in json_payload:
             use_video = json_payload['use_video'] == "1"
         if 'chat_model' in json_payload and 'assistant_id' not in json_payload:
@@ -178,6 +194,8 @@ def gpt():
                 json_payload['payload'][-1]['content']
     elif 'assistant_id' in json_payload:
         assistant_id = json_payload['assistant_id']
+        assistant = app.openai_client.beta.assistants.retrieve(assistant_id=assistant_id)
+        chat_model = assistant.model
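+        # Default any follow-up completion calls to the assistant's own model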
     else:
         chat_messages = app.chat_messages.copy()
         json_payload = json_payload['payload']
@@ -239,6 +257,9 @@ def gpt():
     result = {}
     try:
         n = num_choices
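+        # Weighted roulette: a request for the default gpt-3.5-turbo tier may be upgraded to gpt-4o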
+        if "gpt-3.5-turbo" in chat_model:
+            chat_model = roulette()
+        app.logger.info(f"Model used: {chat_model}")
         if assistant_id and not suggest:
             runs = app.openai_client.beta.threads.create_and_run_poll(
                 assistant_id=assistant_id,
@@ -275,17 +296,56 @@ def gpt():
         else:
             result = {"role": "assistant", "content": json_response.choices[0].message.content}
         if predict_q:
-            query_q = {
-                "role": "user",
-                "content": f"Berikan {predict_q} pertanyaan lain yang akan saya ajukan berdasarkan percakapan kali ini dalam bentuk json array"
-            }
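+            # Assistants get a topic-scoped suggestion prompt; plain chats keep the conversation-based one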
+            if assistant_id:
+                query_q = {
+                    "role": "user",
+                    "content": f"Berikan {predict_q} pertanyaan random yang akan saya ajukan sesuai topik asisten dalam bentuk json array"
+                }
+            else:
+                query_q = {
+                    "role": "user",
+                    "content": f"Berikan {predict_q} pertanyaan lain yang akan saya ajukan berdasarkan percakapan kali ini dalam bentuk json array"
+                }
             chat_messages.append(result)
             chat_messages.append(query_q)
-            json_response_q = app.openai_client.chat.completions.create(model=chat_model,
-                                                                        messages=chat_messages,
-                                                                        max_tokens=max_resp_token,
-                                                                        temperature=0.2,
-                                                                        response_format={"type": "json_object"})
+            if assistant_id:
+                runs = app.openai_client.beta.threads.create_and_run_poll(
+                    assistant_id=assistant_id,
+                    thread={
+                        "messages": chat_messages
+                    }
+                )
+                messages = list(app.openai_client.beta.threads.messages.list(thread_id=runs.thread_id, run_id=runs.id))
+                message_content = messages[0].content[0].text
+                app.logger.info(message_content.value)
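+                # Strip file-search citation markers (e.g. 【4:0†source】) before reusing the text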
+                pattern = re.compile(r"【\d+:\d+†\(?source\)?】")
+                filtered_message = pattern.sub("", message_content.value)
+                predict_q_arr = [
+                    {
+                        "role": "system",
+                        "content": assistant.instructions
+                    },
+                    {
+                        "role": "assistant",
+                        "content": filtered_message
+                    },
+                    {
+                        "role": "user",
+                        "content": f"Ekstrak {predict_q} pertanyaan tersebut dalam bentuk json array"
+                    }
+                ]
+                json_response_q = app.openai_client.chat.completions.create(
+                    model=chat_model,
+                    messages=predict_q_arr,
+                    temperature=0.2,
+                    response_format={"type": "json_object"}
+                )
+            else:
+                json_response_q = app.openai_client.chat.completions.create(model=chat_model,
+                                                                            messages=chat_messages,
+                                                                            max_tokens=max_resp_token,
+                                                                            temperature=0.2,
+                                                                            response_format={"type": "json_object"})
             json_response_dict = json.loads(json_response_q.choices[0].message.content)
             if json_response_dict is not None:
                 if isinstance(json_response_dict, dict):
@@ -384,6 +444,14 @@ def assistant_create():
     model_name = request_form.pop('model_name', model_name)
     vector_store_id = request_form.pop('vector_store_id', "")
     file_batch_id = ""
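+    # Optional per-assistant temperature, clamped to [0.0, 1.0]; invalid input falls back to 1.0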
+    try:
+        temperature = float(request_form.pop('temperature', 1.0))
+        if temperature < 0.0:
+            temperature = 0.0
+        elif temperature > 1.0:
+            temperature = 1.0
+    except ValueError:
+        temperature = 1.0
     tool_resources = {"tool_resources": {"file_search": {"vector_store_ids": [vector_store_id]}}} \
         if vector_store_id \
         else {}
@@ -393,6 +461,7 @@ def assistant_create():
         instructions=assistant_ins,
         model=model_name,
         tools=[{"type": "file_search"}],
+        temperature=temperature,
         **tool_resources,
         **request_form
     )
@@ -465,7 +534,8 @@ def assistant_att():
             )
             return {"status": file_batch.status, "vector_store_id": vector_store.id, "file_batch_id": file_batch.id}
         else:
-            file_batch = app.openai_client.beta.vector_stores.file_batches.retrieve(file_batch_id, vector_store_id=vector_store_id)
+            file_batch = app.openai_client.beta.vector_stores.file_batches.retrieve(file_batch_id,
+                                                                                    vector_store_id=vector_store_id)
             return {"status": file_batch.status}
     except Exception as e:
         app.logger.exception("error")
@@ -495,6 +565,7 @@ def assistant_update(aid=None, vid=None):
         app.logger.exception("error")
         return {"status": "error", "message": "Update assistant failed, please try again"}

+
 @app.route('/llama', methods=['POST'])
 def llama():
     max_char_msg = 500
@@ -541,6 +612,29 @@ def llama():
     return result


+@app.route('/speech', methods=['POST'])
+def speech(text=""):
+    # No text supplied: transcribe the uploaded audio with Whisper
+    if not text and 'text' not in request.form:
+        audio_file = request.files.get('audio')
+        if audio_file is None:
+            return {"status": "error", "message": "No audio file or text provided"}
+        res = app.openai_client.audio.transcriptions.create(
+            model="whisper-1",
+            file=(audio_file.filename, audio_file.stream.read())
+        )
+        return {"status": "ok", "message": res.text}
+    # Otherwise synthesize speech and return the generated mp3
+    text = request.form.get('text', text)
+    uu_id = str(uuid.uuid4())
+    app.logger.info(text)
+    with app.openai_client.audio.speech.with_streaming_response.create(
+        model="tts-1-hd",
+        voice="echo",
+        speed=0.8,
+        input=text
+    ) as res:
+        res.stream_to_file(os.path.join(app.config['UPLOAD_FOLDER'], f"{uu_id}.mp3"))
+    return download_file(f"{uu_id}.mp3")
+
+
 # Press the green button in the gutter to run the script.
 if __name__ == '__main__':
     app.run(host='0.0.0.0', port=8348, debug=True, ssl_context=ssl)