@@ -115,6 +115,7 @@ def gpt():
     use_video = False
     suggest = False
     summarize = False
+    predict_q = 0
     max_char_msg = 500
     max_resp_token = 600
     category = []
@@ -128,6 +129,8 @@ def gpt():
     if isinstance(json_payload, dict):
         has_named_params = 'payload' in json_payload
         if 'payload' in json_payload:
+            if 'predict_q' in json_payload:
+                predict_q = int(json_payload['predict_q'])
             if 'num_choices' in json_payload:
                 num_choices = 5 if json_payload['num_choices'] > 5 else json_payload['num_choices']
             if 'use_video' in json_payload:
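
Taken together, the two hunks above let a caller opt in to predicted follow-up questions by adding a `predict_q` field next to the existing `payload`, `num_choices`, and `use_video` fields. A minimal request sketch follows; the route URL and the inner shape of `payload` are not visible in this diff, so both are illustrative assumptions.

# Hypothetical client call -- only the top-level keys ('payload', 'predict_q',
# 'num_choices', 'use_video') appear in the diff; the endpoint URL and the
# inner shape of 'payload' are assumptions made for illustration.
import requests

body = {
    "payload": [{"role": "user", "content": "Halo"}],  # assumed shape of the existing field
    "num_choices": 1,
    "use_video": False,
    "predict_q": 3,  # new: also return 3 predicted follow-up questions
}
response = requests.post("http://localhost:5000/gpt", json=body)  # URL is an assumption
print(response.json())

A `predict_q` of 0 (the default) leaves the endpoint's behaviour unchanged.
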
@@ -218,7 +221,7 @@ def gpt():
     result = {}
     try:
         n = num_choices
-        json_response = app.openai_client.chat.completions.create(model=chat_model, # GPT-3.5 Turbo engine
+        json_response = app.openai_client.chat.completions.create(model=chat_model,
                                                                   messages=chat_messages,
                                                                   max_tokens=max_resp_token, temperature=0.7, n=n)
         app.logger.info(json_response.choices[0].message)
@@ -230,12 +233,33 @@ def gpt():
                 for message in messages:
                     json_formatted.append({"role": "assistant", "content": message.content})
                 result = {"url": "", "message": json_formatted}
-            elif use_video:
-                # TODO: to be implemented
-                result = {"url": url_for('download_file', name="test.mp4", _external=True),
-                          "message": {"role": "assistant", "content": json_response.choices[0].message.content}}
             else:
-                result = {"role": "assistant", "content": json_response.choices[0].message.content}
+                if use_video:
+                    # TODO: to be implemented
+                    result = {"url": url_for('download_file', name="test.mp4", _external=True),
+                              "message": {"role": "assistant", "content": json_response.choices[0].message.content}}
+                else:
+                    result = {"role": "assistant", "content": json_response.choices[0].message.content}
+                if predict_q:
+                    query_q = {
+                        "role": "user",
+                        "content": f"Berikan {predict_q} pertanyaan lain yang akan saya ajukan berdasarkan percakapan kali ini dalam bentuk json array"
+                    }
+                    chat_messages.append(result)
+                    chat_messages.append(query_q)
+                    json_response_q = app.openai_client.chat.completions.create(model=chat_model,
+                                                                                messages=chat_messages,
+                                                                                max_tokens=max_resp_token,
+                                                                                temperature=0.2, response_format={"type": "json_object"})
+                    json_response_dict = json.loads(json_response_q.choices[0].message.content)
+                    print(json_response_dict)
+                    if json_response_dict is not None:
+                        if isinstance(json_response_dict, dict):
+                            first_key = next(iter(json_response_dict))
+                            json_response_dict = json_response_dict[first_key]
+                        elif isinstance(json_response_dict, str):
+                            json_response_dict = [json_response_dict]
+                    result["predict_q"] = json_response_dict
         else:
             result = {"role": "assistant", "content": json_response.choices[0].message.content}
     except Exception as error_print:
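
When `predict_q` is set, the handler now issues a second chat-completion call in JSON mode; the appended Indonesian prompt translates roughly to "Give {predict_q} other questions that I would ask based on this conversation, as a JSON array." Because JSON mode only guarantees well-formed JSON, the model may wrap the array in an object under an arbitrary key or return a bare string, so the parsed value is normalized before being attached to the response as `result["predict_q"]`. The sketch below restates that normalization as a standalone helper; the helper name and the sample outputs are illustrative, not part of the diff.

import json


def extract_predicted_questions(raw: str):
    """Flatten a JSON-mode completion into a plain list of questions.

    Mirrors the handling added above (minus the None guard): a dict is
    unwrapped via its first key, whatever the model chose to call it; a bare
    string becomes a one-element list; a list passes through unchanged.
    """
    parsed = json.loads(raw)
    if isinstance(parsed, dict):
        parsed = parsed[next(iter(parsed))]
    elif isinstance(parsed, str):
        parsed = [parsed]
    return parsed


# The three shapes the diff accounts for:
print(extract_predicted_questions('{"questions": ["Q1?", "Q2?"]}'))  # ['Q1?', 'Q2?']
print(extract_predicted_questions('["Q1?", "Q2?"]'))                 # ['Q1?', 'Q2?']
print(extract_predicted_questions('"Q1?"'))                          # ['Q1?']

Using the lower temperature (0.2) and response_format={"type": "json_object"} for this second call, as the diff does, keeps the follow-up suggestions machine-parseable rather than free-form text.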