@@ -7,9 +7,7 @@ app = Flask(__name__)
 ssl = None
 # ssl =('/etc/ssl/sample.crt', '/etc/ssl/sample.pem')

-openai_key = os.environ.get("OPENAI_KEY")
-if not openai_key:
-    sys.exit()
+openai_key = os.environ.get("OPENAI_KEY", "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")

 openai.api_key = openai_key
 app.chat_messages = [
@@ -45,10 +43,19 @@ def gpt():
     elif 'greeting' in json_payload:
         company_name = json_payload['greeting']['company_name']
         timestamp = json_payload['greeting']['timestamp']
-        completion_message = "Selamat "
-        if 'completion' in json_payload['greeting']:
-            completion_message = json_payload['greeting']['completion']
-        greeting_message = f"Buatkan chat greeting dari {company_name} pada jam {timestamp}, tidak perlu mention waktu: '{completion_message}'"
+        islamic_message = f"Apakah Nama '{company_name}' terdapat unsur islami? Jawab dengan 'Ya' atau 'Tidak'"
+        islam_messages = app.chat_messages.copy()
+        islam_messages.append({
+            "role": "user",
+            "content": islamic_message
+        })
+        islamic_response = openai.ChatCompletion.create(model="gpt-3.5-turbo", # GPT-3.5 Turbo engine
+                                                        messages=islam_messages,
+                                                        max_tokens=2, temperature=0.5)
+        if 'Ya' in islamic_response.choices[0].message['content']:
+            greeting_message = f"Buatkan respons chatbot berupa greeting dari chat {company_name} pada jam {timestamp}, tidak perlu mention waktu, dan jawab dengan salam terlebih dahulu apabila ada unsur islami di nama perusahaan."
+        else:
+            greeting_message = f"Buatkan respons chatbot berupa greeting dari chat {company_name} pada jam {timestamp}, tidak perlu mention waktu"
         json_payload = [
             {
                 "role": "user",
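
The hunk above adds a two-step exchange: a short classification request asks the model whether the company name carries an Islamic element (answered "Ya" or "Tidak"), and that verdict selects which greeting prompt is built. Below is a minimal standalone sketch of the same flow, assuming the pre-1.0 openai Python SDK that this file already uses; the chat_messages seed list and the build_greeting_prompt helper are illustrative, not part of the patch.

import os

import openai

# Sketch only: assumes openai<1.0 (openai.ChatCompletion) and an OPENAI_KEY environment variable.
openai.api_key = os.environ["OPENAI_KEY"]

# Illustrative stand-in for app.chat_messages: the seed messages reused for every request.
chat_messages = [{"role": "system", "content": "You are a helpful assistant."}]

def build_greeting_prompt(company_name: str, timestamp: str) -> str:
    # Step 1: ask whether the company name has an Islamic element ("Ya"/"Tidak").
    classify = chat_messages.copy()
    classify.append({
        "role": "user",
        "content": f"Apakah Nama '{company_name}' terdapat unsur islami? Jawab dengan 'Ya' atau 'Tidak'",
    })
    verdict = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=classify,
        max_tokens=2,       # only "Ya" or "Tidak" is expected back
        temperature=0.5,
    ).choices[0].message["content"]

    # Step 2: pick the greeting prompt based on the verdict, mirroring the added branch.
    base = (f"Buatkan respons chatbot berupa greeting dari chat {company_name} "
            f"pada jam {timestamp}, tidak perlu mention waktu")
    if "Ya" in verdict:
        return base + ", dan jawab dengan salam terlebih dahulu apabila ada unsur islami di nama perusahaan."
    return base

Because the classification reply is capped at two tokens, the substring check on "Ya" is the entire parsing step; any other reply falls through to the neutral greeting prompt.
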
@@ -61,7 +68,7 @@ def gpt():
     for message in json_payload:
         content = message['content']
         content_arr = content.split(" ")
-        new_content_arr = content[:250].split(" ")
+        new_content_arr = content[:300].split(" ")
         new_content_len = len(new_content_arr)
         arr = []
         for i in range(new_content_len):
@@ -73,7 +80,7 @@ def gpt():
     try:
         json_response = openai.ChatCompletion.create(model="gpt-3.5-turbo", # GPT-3.5 Turbo engine
                                                      messages=chat_messages,
-                                                     max_tokens=600, temperature=0.6)
+                                                     max_tokens=600, temperature=0.7)
         app.logger.info(json_response.choices[0].message)
         if has_named_params:
             if use_video:
@@ -91,6 +98,6 @@ def gpt():

 # Press the green button in the gutter to run the script.
 if __name__ == '__main__':
-    app.run(host='0.0.0.0', port=8348, debug=False, ssl_context=ssl)
+    app.run(host='0.0.0.0', port=8348, debug=True, ssl_context=ssl)

 # See PyCharm help at https://www.jetbrains.com/help/pycharm/
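
For reference, the greeting branch in the second hunk expects a JSON body with a top-level greeting object carrying company_name and timestamp. Below is a hedged example of exercising it against the running app; the /gpt path is an assumption, since the route decorator sits outside this diff, while port 8348 comes from app.run above, and the payload values are purely illustrative.

import requests

# Sketch only: the "/gpt" path is assumed; adjust it to the real route of the gpt() view.
payload = {
    "greeting": {
        "company_name": "PT Contoh Sejahtera",  # illustrative value
        "timestamp": "09:15",                   # illustrative value
    }
}

resp = requests.post("http://localhost:8348/gpt", json=payload, timeout=60)
print(resp.status_code, resp.text)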