@@ -93,7 +93,7 @@ def vision(message, image_url=None, image_b64=None):
         json_response = openai.ChatCompletion.create(
             model="gpt-4-vision-preview",
             messages=chat_messages,
-            max_tokens=300
+            max_tokens=500
         )
         return json_response.choices[0]["message"]
     except Exception as error_print:
@@ -107,6 +107,8 @@ def gpt():
     use_video = False
     suggest = False
     summarize = False
+    max_char_msg = 500
+    max_resp_token = 600
     category = []
     headlines = []
     image_url = ""
@@ -136,6 +138,8 @@ def gpt():
     summarize = json_payload['summarize'] == "1"
     if summarize:
         chat_messages = app.summary_messages.copy()
+        max_char_msg = 10000
+        max_resp_token = 10000
     else:
         chat_messages = app.chat_messages.copy()
         json_payload['payload'][-1]['content'] = f"Please summarize this article:\n" + json_payload['payload'][-1]['content']
@@ -186,7 +190,7 @@ def gpt():
     for message in json_payload:
         content = message['content']
         content_arr = content.split(" ")
-        new_content_arr = content[:300].split(" ")
+        new_content_arr = content[:max_char_msg].split(" ")
         new_content_len = len(new_content_arr)
         arr = []
         for i in range(new_content_len):
@@ -199,7 +203,7 @@ def gpt():
     n = num_choices
     json_response = openai.ChatCompletion.create(model="gpt-3.5-turbo", # GPT-3.5 Turbo engine
                                                  messages=chat_messages,
-                                                 max_tokens=600, temperature=0.7, n = n)
+                                                 max_tokens=max_resp_token, temperature=0.7, n = n)
     app.logger.info(json_response.choices[0].message)
     if has_named_params:
         if suggest: