@@ -1,6 +1,10 @@
 import io
+import logging
 import os
 import json
+import re
+import sys
+import traceback

 import requests
 from openai import OpenAI
@@ -15,6 +19,8 @@ ssl = None
 app.openai_key = os.environ.get("OPENAI_KEY", "")
 app.openai_client = OpenAI(api_key=app.openai_key)

+# logging.basicConfig(level=logging.DEBUG, filename='/jkt-disk-01/app/mms/chatgpt-apache/chatgpt.log', format='%(asctime)s %(message)s')
+
 app.chat_messages = [
     {"role": "system",
      "content": "Please respond professionally and in a friendly manner, using the same language as the original request."}
@@ -63,12 +69,10 @@ def recommend(headlines, category):
            """
         }
         chat_messages.append(json_payload)
-        print(chat_messages)
         json_response = app.openai_client.chat.completions.create(model="gpt-3.5-turbo-1106",
                                                                   messages=chat_messages,
                                                                   response_format={"type": "json_object"}
                                                                   )
-        print(json_response.choices[0].message.content)
         return json.loads(json_response.choices[0].message.content)
     except Exception as error_print:
         app.logger.error(error_print)
@@ -96,7 +100,6 @@ def vision(message, image_url=None, image_b64=None):
            ],
         }
         chat_messages.append(json_payload)
-        print(chat_messages)
         json_response = app.openai_client.chat.completions.create(
             model="gpt-4o",
             messages=chat_messages,
@@ -110,7 +113,8 @@ def vision(message, image_url=None, image_b64=None):

 @app.route('/gpt', methods=['POST'])
 def gpt():
-    chat_messages = app.chat_messages.copy()
+    assistant_id = ""
+    chat_messages = []
     chat_model = "gpt-3.5-turbo"
     use_video = False
     suggest = False
@@ -130,12 +134,14 @@ def gpt():
     has_named_params = 'payload' in json_payload
     if 'payload' in json_payload:
         if 'predict_q' in json_payload:
-            predict_q = 5 if json_payload['predict_q'] > 4 else 0 if json_payload['predict_q'] < 1 else json_payload['predict_q']
+            predict_q = 5 if json_payload['predict_q'] > 4 else 0 if json_payload['predict_q'] < 1 else \
+                json_payload['predict_q']
         if 'num_choices' in json_payload:
-            num_choices = 5 if json_payload['num_choices'] > 4 else 1 if json_payload['num_choices'] < 2 else json_payload['num_choices']
+            num_choices = 5 if json_payload['num_choices'] > 4 else 1 if json_payload['num_choices'] < 2 else \
+                json_payload['num_choices']
         if 'use_video' in json_payload:
             use_video = json_payload['use_video'] == "1"
-        if 'chat_model' in json_payload:
+        if 'chat_model' in json_payload and 'assistant_id' not in json_payload:
             chat_model = json_payload['chat_model']
             max_resp_token = 2048
         if 'translate' in json_payload:
@@ -160,6 +166,8 @@ def gpt():
             chat_messages = app.chat_messages.copy()
             json_payload['payload'][-1]['content'] = f"Please summarize this article:\n" + \
                 json_payload['payload'][-1]['content']
+        elif 'assistant_id' in json_payload:
+            assistant_id = json_payload['assistant_id']
         else:
             chat_messages = app.chat_messages.copy()
             json_payload = json_payload['payload']
@@ -221,56 +229,70 @@ def gpt():
     result = {}
     try:
         n = num_choices
-        json_response = app.openai_client.chat.completions.create(model=chat_model,
-                                                                  messages=chat_messages,
-                                                                  max_tokens=max_resp_token, temperature=0.7, n=n)
-        app.logger.info(json_response.choices[0].message)
-        if has_named_params:
-            if suggest:
-                choices = json_response.choices
-                messages = [i.message for i in choices]
-                json_formatted = []
-                for message in messages:
-                    json_formatted.append({"role": "assistant", "content": message.content})
-                result = {"url": "", "message": json_formatted}
-            else:
-                if use_video:
-                    # TODO: to be implemented
-                    result = {"url": url_for('download_file', name="test.mp4", _external=True),
-                              "message": {"role": "assistant", "content": json_response.choices[0].message.content}}
-                else:
-                    result = {"role": "assistant", "content": json_response.choices[0].message.content}
-                if predict_q:
-                    query_q = {
-                        "role": "user",
-                        "content": f"Berikan {predict_q} pertanyaan lain yang akan saya ajukan berdasarkan percakapan kali ini dalam bentuk json array"
-                    }
-                    chat_messages.append(result)
-                    chat_messages.append(query_q)
-                    json_response_q = app.openai_client.chat.completions.create(model=chat_model,
-                                                                                messages=chat_messages,
-                                                                                max_tokens=max_resp_token,
-                                                                                temperature=0.2, response_format={"type": "json_object"})
-                    json_response_dict = json.loads(json_response_q.choices[0].message.content)
-                    print(json_response_dict)
-                    if json_response_dict is not None:
-                        if isinstance(json_response_dict, dict):
-                            if len(json_response_dict) > 1:
-                                qs = []
-                                for q in json_response_dict.values():
-                                    qs.append(q)
-                                json_response_dict = qs
-                            else:
-                                try:
-                                    first_key = next(iter(json_response_dict))
-                                    json_response_dict = json_response_dict[first_key]
-                                except StopIteration:
-                                    json_response_dict = []
-                        elif isinstance(json_response_dict, str):
-                            json_response_dict = [json_response_dict]
-                    result["predict_q"] = json_response_dict
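+        # Assistants API path: run the whole conversation on a fresh thread with
+        # the requested assistant and poll until the run completes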
+        if assistant_id and not suggest:
+            runs = app.openai_client.beta.threads.create_and_run_poll(
+                assistant_id=assistant_id,
+                thread={
+                    "messages": chat_messages
+                }
+            )
+            messages = list(app.openai_client.beta.threads.messages.list(thread_id=runs.thread_id, run_id=runs.id))
+            message_content = messages[0].content[0].text
+            app.logger.info(message_content.value)
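+            # strip file_search citation markers such as 【0:1†source】 from the reply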
+            pattern = re.compile(r"【\d+:\d+†\(?source\)?】")
+            filtered_message = pattern.sub("", message_content.value)
+            result = {"role": "assistant", "content": filtered_message}
         else:
-            result = {"role": "assistant", "content": json_response.choices[0].message.content}
+            json_response = app.openai_client.chat.completions.create(model=chat_model,
+                                                                      messages=chat_messages,
+                                                                      max_tokens=max_resp_token, temperature=0.7, n=n)
+            app.logger.info(json_response.choices[0].message)
+            if has_named_params:
+                if suggest:
+                    choices = json_response.choices
+                    messages = [i.message for i in choices]
+                    json_formatted = []
+                    for message in messages:
+                        json_formatted.append({"role": "assistant", "content": message.content})
+                    result = {"url": "", "message": json_formatted}
+                else:
+                    if use_video:
+                        # TODO: to be implemented
+                        result = {"url": url_for('download_file', name="test.mp4", _external=True),
+                                  "message": {"role": "assistant", "content": json_response.choices[0].message.content}}
+                    else:
+                        result = {"role": "assistant", "content": json_response.choices[0].message.content}
+            else:
+                result = {"role": "assistant", "content": json_response.choices[0].message.content}
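+            # follow-up suggestions: the Indonesian prompt asks the model for
+            # {predict_q} more questions the user might ask next, as a JSON array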
+            if predict_q:
+                query_q = {
+                    "role": "user",
+                    "content": f"Berikan {predict_q} pertanyaan lain yang akan saya ajukan berdasarkan percakapan kali ini dalam bentuk json array"
+                }
+                chat_messages.append(result)
+                chat_messages.append(query_q)
+                json_response_q = app.openai_client.chat.completions.create(model=chat_model,
+                                                                            messages=chat_messages,
+                                                                            max_tokens=max_resp_token,
+                                                                            temperature=0.2,
+                                                                            response_format={"type": "json_object"})
+                json_response_dict = json.loads(json_response_q.choices[0].message.content)
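+                # JSON mode may wrap the array in an object or return a bare
+                # string; normalize the payload to a plain list of questions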
+                if json_response_dict is not None:
+                    if isinstance(json_response_dict, dict):
+                        if len(json_response_dict) > 1:
+                            qs = []
+                            for q in json_response_dict.values():
+                                qs.append(q)
+                            json_response_dict = qs
+                        else:
+                            try:
+                                first_key = next(iter(json_response_dict))
+                                json_response_dict = json_response_dict[first_key]
+                            except StopIteration:
+                                json_response_dict = []
+                    elif isinstance(json_response_dict, str):
+                        json_response_dict = [json_response_dict]
+                result["predict_q"] = json_response_dict
     except Exception as error_print:
         app.logger.error(error_print)
         result = {}, 405
@@ -308,7 +330,8 @@ def train():
                }
            )
            app.logger.info({"mock": "no", "status": job.status, "job_id": job.id})
-           return {"status": job.status, "job_id": job.id}
+           retval = {"status": job.status, "job_id": job.id}
+           return retval
        else:
            app.logger.info({"mock": "yes", "status": "ok"})
            return {"status": "ok"}
@@ -316,6 +339,7 @@ def train():
         app.logger.error({"status": "error", "message": "Training file not found"})
         return {"status": "error", "message": "Training file not found"}

+
 def train_with_id(job_id):
     try:
         job = app.openai_client.fine_tuning.jobs.retrieve(job_id)
@@ -329,6 +353,101 @@ def train_with_id(job_id):
         app.logger.error(error_print)
         return {"status": "Could not find job from id"}

+
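+# Assistants API endpoints: create an assistant, upload attachments into a
+# vector store, and bind that store to the assistant for file_search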
+@app.route('/assistant/create', methods=['POST'])
+def assistant_create():
+    model_name = "gpt-3.5-turbo"
+    assistant_name = "Assistant"
+    assistant_ins = "Please respond professionally and in a friendly manner, using the same language as the original request."
+    assistant = None
+    if request.is_json:
+        request_form = request.json
+    else:
+        request_form = request.form.copy()
+    assistant_name = request_form.pop('name', assistant_name)
+    assistant_ins = request_form.pop('instructions', assistant_ins)
+    model_name = request_form.pop('model_name', model_name)
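+    # any fields left in request_form are passed through to assistants.create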
+    try:
+        assistant = app.openai_client.beta.assistants.create(
+            name=assistant_name,
+            instructions=assistant_ins,
+            model=model_name,
+            tools=[{"type": "file_search"}],
+            **request_form
+        )
+        if 'attachment1' in request.files:
+            resp_att = assistant_att()
+            if resp_att['status'] == 'completed':
+                resp_upd = assistant_update(assistant.id, resp_att['vector_store_id'])
+                assistant_updated = "1" if resp_upd['status'] == 'ok' else "0"
+            else:
+                assistant_updated = "0"
+            return {"status": "ok", "assistant_id": assistant.id, "assistant_updated": assistant_updated}
+        else:
+            return {"status": "ok", "assistant_id": assistant.id, "assistant_updated": "0"}
+    except ValueError:
+        return {"status": "error",
+                "message": "Failed to create assistant, please check whether your parameters are correct"}
+    except Exception:
+        return {"status": "error", "message": "Failed to create assistant, please try again"}
+
+
+@app.route('/assistant/attachment', methods=['POST'])
+def assistant_att():
+    vector_store_id = request.form.get('vector_store_id', '')
+    file_batch_id = request.form.get('file_batch_id', '')
+    attachments: list[str] = []
+    try:
+        if not vector_store_id or not file_batch_id:
+            if 'attachment1' not in request.files:
+                return {"status": "error", "message": "No file for attachments"}
+            else:
+                has_attachments = True
+                n = 1
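+                # files arrive as attachment1, attachment2, ...; collect them
+                # until the numbered sequence breaks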
+                while has_attachments:
+                    if f'attachment{n}' in request.files:
+                        retf = app.openai_client.files.create(
+                            file=(request.files[f'attachment{n}'].filename,
+                                  request.files[f'attachment{n}'].read()),
+                            purpose="assistants"
+                        )
+                        retf.filename = request.files[f'attachment{n}'].filename
+                        attachments.append(retf.id)
+                        n += 1
+                    else:
+                        has_attachments = False
+                vector_store = app.openai_client.beta.vector_stores.create()
+                file_batch = app.openai_client.beta.vector_stores.file_batches.create_and_poll(
+                    vector_store_id=vector_store.id,
+                    file_ids=attachments
+                )
+                return {"status": file_batch.status, "vector_store_id": vector_store.id, "file_batch_id": file_batch.id}
+        else:
+            file_batch = app.openai_client.beta.vector_stores.file_batches.retrieve(file_batch_id, vector_store_id=vector_store_id)
+            return {"status": file_batch.status}
+    except Exception:
+        app.logger.exception("error")
+        return {"status": "error", "message": "Upload attachment failed, please try again"}
+
+
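+# assistant_update doubles as an internal helper: assistant_create calls it
+# directly with aid/vid, so the form fields are only read when those are absent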
+@app.route('/assistant/update', methods=['POST'])
+def assistant_update(aid=None, vid=None):
+    try:
+        if aid is not None and vid is not None:
+            assistant_id = aid
+            vector_store_id = vid
+        else:
+            assistant_id = request.form['assistant_id']
+            vector_store_id = request.form['vector_store_id']
+        app.openai_client.beta.assistants.update(
+            assistant_id=assistant_id,
+            tool_resources={"file_search": {"vector_store_ids": [vector_store_id]}},
+        )
+        return {"status": "ok"}
+    except Exception:
+        app.logger.exception("error")
+        return {"status": "error", "message": "Update assistant failed, please try again"}
+
 @app.route('/llama', methods=['POST'])
 def llama():
     max_char_msg = 500
@@ -375,7 +494,6 @@ def llama():
     return result


-
 # Press the green button in the gutter to run the script.
 if __name__ == '__main__':
     app.run(host='0.0.0.0', port=8348, debug=True, ssl_context=ssl)