main.py

import logging
import os
import json
import re
import uuid
import random

import openai
import requests
from openai import OpenAI
from flask import Flask, request, jsonify, send_from_directory, url_for

from convert import alpaca_to_chatgpt, csv_to_jsonl

app = Flask(__name__)

ssl = None
# ssl = ('/etc/ssl/sample.crt', '/etc/ssl/sample.pem')
# The API key must come from the environment; do not hardcode a fallback secret here.
app.openai_key = os.environ.get("OPENAI_KEY", "")
app.openai_client = OpenAI(api_key=app.openai_key)
# logging.basicConfig(level=logging.DEBUG, filename='/jkt-disk-01/app/mms/chatgpt-apache/chatgpt.log', format='%(asctime)s %(message)s')

app.chat_messages = [
    {"role": "system",
     "content": "Please respond professionally and in a friendly manner, using the same language as the original request. Use emoji responsibly."}
]
app.translate_messages = [
    {"role": "system",
     "content": "Please translate using the requested language."}
]
app.suggest_messages = [
    {"role": "system",
     "content": "Please suggest reply messages based on the previous conversations and the user's request."}
]
app.recommend_messages = [
    {"role": "system",
     "content": "Give normalized total weight of each category in json based on headlines"}
]
app.summary_messages = [
    {"role": "system",
     "content": "Please summarize an article."}
]

UPLOAD_FOLDER = 'files'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER


@app.route('/files/<name>')
def download_file(name):
    return send_from_directory(app.config["UPLOAD_FOLDER"], name)


@app.route('/', methods=['GET', 'POST'])
def test():
    return jsonify({"status": "0"})
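

# roulette() picks the chat model for a request by weighted random draw
# (80% gpt-4o-mini, 20% gpt-4o), so cheaper traffic dominates while a share
# still goes to the larger model.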
def roulette() -> str:
    roulette_arr = [(80, "gpt-4o-mini"), (20, "gpt-4o")]
    # Draw in [0, 99] and walk the cumulative weights; ">=" (rather than ">")
    # ensures a draw of 0 still selects the first model.
    rand_num = random.randrange(0, 100)
    model_name = ""
    n = 0
    j = 0
    while rand_num >= n and j < len(roulette_arr):
        n += roulette_arr[j][0]
        model_name = roulette_arr[j][1]
        print(model_name)
        j += 1
    return model_name
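

# recommend() asks gpt-4o-mini to weight and normalize the given categories
# against the supplied headlines and returns the model's JSON object
# (or an error tuple).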
def recommend(headlines, category):
    chat_messages = app.recommend_messages.copy()
    try:
        # Prompt (Indonesian): give the weight of each category, sum and
        # normalize them, and return the result as JSON.
        json_payload = {
            "role": "user",
            "content": f"""{headlines}
Berikan nilai berat masing-masing kategori, jumlahkan dan normalisasikan:
{category}
Berikan dalam bentuk json
"""
        }
        chat_messages.append(json_payload)
        json_response = app.openai_client.chat.completions.create(
            model="gpt-4o-mini",
            messages=chat_messages,
            response_format={"type": "json_object"}
        )
        result = {"status": "ok", "message": json.loads(json_response.choices[0].message.content)}
    except openai.APITimeoutError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": e.message}, 408
    except openai.NotFoundError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": json.loads(e.response.content)['error']['message']}, e.status_code
    except Exception:
        app.logger.exception("error")
        result = {"status": "error", "message": "Please try again"}, 405
    return result
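

# vision() sends a text prompt plus one image (either a public URL or a
# base64-encoded JPEG passed as a data URL) to gpt-4o and returns the reply.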
def vision(message, image_url=None, image_b64=None):
    chat_messages = app.chat_messages.copy()
    url = ""
    if image_url:
        url = f"{image_url}"
    elif image_b64:
        url = f"data:image/jpeg;base64,{image_b64}"
    try:
        json_payload = {
            "role": "user",
            "content": [
                {"type": "text", "text": message},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": url,
                    },
                },
            ],
        }
        chat_messages.append(json_payload)
        json_response = app.openai_client.chat.completions.create(
            model="gpt-4o",
            messages=chat_messages,
            max_tokens=500
        )
        result = {"role": "assistant", "content": json_response.choices[0].message.content}
    except openai.APITimeoutError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": e.message}, 408
    except openai.NotFoundError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": json.loads(e.response.content)['error']['message']}, e.status_code
    except Exception:
        app.logger.exception("error")
        result = {"status": "error", "message": "Please try again"}, 405
    return result
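

# /gpt is the main chat endpoint. It accepts either the old format (a bare list
# of chat messages) or the named-parameter format ({"payload": [...], ...}) with
# optional flags: translate, suggest, summarize, assistant_id, greeting,
# recommend, image_url/image_b64, predict_q, num_choices, use_video, chat_model.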
@app.route('/gpt', methods=['POST'])
def gpt():
    assistant_id = ""
    assistant = None
    chat_messages = app.chat_messages.copy()
    chat_model = "gpt-4o-mini"
    use_video = False
    suggest = False
    summarize = False
    predict_q = 0
    max_char_msg = 500
    max_resp_token = 600
    category = []
    headlines = []
    image_url = ""
    num_choices = 1
    json_payload = request.get_json()
    if not json_payload:
        json_payload = []
    has_named_params = False
    if isinstance(json_payload, dict):
        has_named_params = 'payload' in json_payload
        if 'payload' in json_payload:
            if 'predict_q' in json_payload:
                predict_q = 5 if json_payload['predict_q'] > 4 else 0 if json_payload['predict_q'] < 1 else \
                    json_payload['predict_q']
            if 'num_choices' in json_payload:
                num_choices = 5 if json_payload['num_choices'] > 4 else 1 if json_payload['num_choices'] < 2 else \
                    json_payload['num_choices']
            if 'use_video' in json_payload:
                use_video = json_payload['use_video'] == "1"
            if 'chat_model' in json_payload and 'assistant_id' not in json_payload:
                chat_model = json_payload['chat_model']
                max_resp_token = 2048
            if 'translate' in json_payload:
                chat_messages = app.translate_messages.copy()
                json_payload['payload'][-1]['content'] = json_payload['payload'][-1][
                    'content'] + f" (Translate to {json_payload['translate']})"
            elif 'suggest' in json_payload:
                suggest = json_payload['suggest'] == "1"
                if suggest:
                    chat_messages = app.suggest_messages.copy()
                else:
                    chat_messages = app.chat_messages.copy()
                    json_payload['payload'][-1]['content'] = json_payload['payload'][-1][
                        'content'] + " What can I say to him/her?"
            elif 'summarize' in json_payload:
                summarize = json_payload['summarize'] == "1"
                if summarize:
                    chat_messages = app.summary_messages.copy()
                    max_char_msg = 2000
                    max_resp_token = 1000
                else:
                    chat_messages = app.chat_messages.copy()
                    json_payload['payload'][-1]['content'] = "Please summarize this article:\n" + \
                        json_payload['payload'][-1]['content']
            elif 'assistant_id' in json_payload:
                assistant_id = json_payload['assistant_id']
                assistant = app.openai_client.beta.assistants.retrieve(assistant_id=assistant_id)
                chat_model = assistant.model
            else:
                chat_messages = app.chat_messages.copy()
            json_payload = json_payload['payload']
            if isinstance(json_payload, dict):
                json_payload = [json_payload]
        elif 'greeting' in json_payload:
            chat_messages = app.chat_messages.copy()
            company_name = json_payload['greeting']['company_name']
            timestamp = json_payload['greeting']['timestamp']
            # First ask (in Indonesian) whether the company name has an Islamic
            # element ("answer 'Ya' or 'Tidak'"), so the greeting can open with
            # "Assalamu'alaikum..." when appropriate.
            islamic_message = f"Apakah Nama '{company_name}' terdapat unsur islami? Jawab dengan 'Ya' atau 'Tidak'"
            islam_messages = app.chat_messages.copy()
            islam_messages.append({
                "role": "user",
                "content": islamic_message
            })
            islamic_response = app.openai_client.chat.completions.create(model="gpt-4o-mini",
                                                                         messages=islam_messages,
                                                                         max_tokens=2, temperature=0.5)
            # Prompt (Indonesian): write a chatbot greeting for the company chat
            # named {company_name} at {timestamp}, without mentioning the time.
            if 'Ya' in islamic_response.choices[0].message.content:
                greeting_message = f"Buatkan respons chatbot berupa greeting dari chat perusahaan bernama {company_name} pada jam {timestamp}, tidak perlu mention waktu, dan jawab dengan 'Assalamu'alaikum...' terlebih dahulu"
            else:
                greeting_message = f"Buatkan respons chatbot berupa greeting dari chat perusahaan bernama {company_name} pada jam {timestamp}, tidak perlu mention waktu"
            json_payload = [
                {
                    "role": "user",
                    "content": greeting_message
                }
            ]
        elif 'recommend' in json_payload:
            headlines = json_payload['recommend']['headlines']
            category = json_payload['recommend']['category']
            return recommend(headlines, category)
        elif 'image_url' in json_payload:
            image = json_payload['image_url']
            # Default question (Indonesian): "What is this picture?"
            message = json_payload["message"] if 'message' in json_payload else "Ini gambar apa?"
            return vision(message, image_url=image)
        elif 'image_b64' in json_payload:
            image = json_payload['image_b64']
            message = json_payload["message"] if 'message' in json_payload else "Ini gambar apa?"
            return vision(message, image_b64=image)
        else:
            app.logger.info("This request uses the old JSON format")
            chat_messages = app.chat_messages.copy()
    app.logger.info("Chat Messages:")
    app.logger.info(str(chat_messages))
    # Keep only the last five messages and truncate each one to roughly
    # max_char_msg characters, cutting on a word boundary.
    json_payload = json_payload[-5:]
    if assistant_id:
        chat_messages = []
    for message in json_payload:
        if message['role'] == 'user':
            content = message['content'].lower()
        else:
            content = message['content']
        content_arr = content.split(" ")
        new_content_arr = content[:max_char_msg].split(" ")
        new_content_len = len(new_content_arr)
        arr = []
        for i in range(new_content_len):
            arr.append(content_arr[i])
        message['content'] = " ".join(arr)
        chat_messages.append(message)
    app.logger.info(chat_messages)
    result = {}
    try:
        n = num_choices
        # Route gpt-3.5-turbo / gpt-4o-mini requests through the model roulette.
        if "gpt-3.5-turbo" in chat_model or "gpt-4o-mini" in chat_model:
            chat_model = roulette()
        app.logger.info(f"Model used: {chat_model}")
        if assistant_id and not suggest:
            runs = app.openai_client.beta.threads.create_and_run_poll(
                assistant_id=assistant_id,
                thread={
                    "messages": chat_messages
                }
            )
            messages = list(app.openai_client.beta.threads.messages.list(thread_id=runs.thread_id, run_id=runs.id))
            message_content = messages[0].content[0].text
            app.logger.info(message_content.value)
            # Strip file-citation markers such as 【12:3†source】 from the assistant output.
            pattern = re.compile(r"【\d+:\d+†\(?source\)?】")
            filtered_message = pattern.sub("", message_content.value)
            result = {"role": "assistant", "content": filtered_message}
        else:
            json_response = app.openai_client.chat.completions.create(model=chat_model,
                                                                      messages=chat_messages,
                                                                      max_tokens=max_resp_token,
                                                                      temperature=0.7, n=n)
            app.logger.info(json_response.choices[0].message)
            if has_named_params:
                if suggest:
                    choices = json_response.choices
                    messages = [i.message for i in choices]
                    json_formatted = []
                    for message in messages:
                        json_formatted.append({"role": "assistant", "content": message.content})
                    result = {"url": "", "message": json_formatted}
                else:
                    if use_video:
                        # TODO: to be implemented
                        result = {"url": url_for('download_file', name="test.mp4", _external=True),
                                  "message": {"role": "assistant",
                                              "content": json_response.choices[0].message.content}}
                    else:
                        result = {"role": "assistant", "content": json_response.choices[0].message.content}
            else:
                result = {"role": "assistant", "content": json_response.choices[0].message.content}
        if predict_q:
            # Ask the model for predict_q follow-up questions as a JSON array
            # (prompts are in Indonesian).
            if assistant_id:
                query_q = {
                    "role": "user",
                    "content": f"Berikan {predict_q} pertanyaan random yang akan saya ajukan sesuai topik asisten dalam bentuk json array"
                }
            else:
                query_q = {
                    "role": "user",
                    "content": f"Berikan {predict_q} pertanyaan lain yang akan saya ajukan berdasarkan percakapan kali ini dalam bentuk json array"
                }
            chat_messages.append(result)
            chat_messages.append(query_q)
            if assistant_id:
                runs = app.openai_client.beta.threads.create_and_run_poll(
                    assistant_id=assistant_id,
                    thread={
                        "messages": chat_messages
                    }
                )
                messages = list(app.openai_client.beta.threads.messages.list(thread_id=runs.thread_id, run_id=runs.id))
                message_content = messages[0].content[0].text
                app.logger.info(message_content.value)
                pattern = re.compile(r"【\d+:\d+†\(?source\)?】")
                filtered_message = pattern.sub("", message_content.value)
                # Re-ask a plain chat model to extract the questions as a JSON array.
                predict_q_arr = [
                    {
                        "role": "system",
                        "content": assistant.instructions
                    },
                    {
                        "role": "assistant",
                        "content": filtered_message
                    },
                    {
                        "role": "user",
                        "content": f"Ekstrak {predict_q} pertanyaan tersebut dalam bentuk json array"
                    }
                ]
                json_response_q = app.openai_client.chat.completions.create(
                    model=chat_model,
                    messages=predict_q_arr,
                    temperature=0.2,
                    response_format={"type": "json_object"}
                )
            else:
                json_response_q = app.openai_client.chat.completions.create(model=chat_model,
                                                                            messages=chat_messages,
                                                                            max_tokens=max_resp_token,
                                                                            temperature=0.2,
                                                                            response_format={"type": "json_object"})
            # Normalize whatever JSON shape comes back into a flat list of questions.
            json_response_dict = json.loads(json_response_q.choices[0].message.content)
            if json_response_dict is not None:
                if isinstance(json_response_dict, dict):
                    if len(json_response_dict) > 1:
                        qs = []
                        for q in json_response_dict.values():
                            qs.append(q)
                        json_response_dict = qs
                    else:
                        try:
                            first_key = next(iter(json_response_dict))
                            json_response_dict = json_response_dict[first_key]
                        except StopIteration:
                            json_response_dict = []
                elif isinstance(json_response_dict, str):
                    json_response_dict = [json_response_dict]
            result["predict_q"] = json_response_dict
    except openai.APITimeoutError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": e.message}, 408
    except openai.NotFoundError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": json.loads(e.response.content)['error']['message']}, e.status_code
    except Exception:
        app.logger.exception("error")
        result = {"status": "error", "message": "Please try again"}, 405
    return result
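

# /train starts a fine-tuning job. It accepts a training file as .jsonl, .csv,
# or Alpaca-style .json (converted via the convert module), an optional previous
# model name, and optional instructions; pass job_id instead to poll an existing job.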
@app.route('/train', methods=['POST'])
def train():
    prev_model = "gpt-3.5-turbo"
    instructions = None
    if 'job_id' in request.form:
        return train_with_id(job_id=request.form['job_id'])
    elif 'train_file' in request.files:
        train_file = request.files['train_file']
        app.logger.info({"filename": train_file.filename})
        if 'instructions' in request.form:
            instructions = request.form['instructions']
        openai_file = None
        # Use the last extension so filenames containing extra dots are handled correctly.
        extension = train_file.filename.rsplit('.', 1)[-1].lower()
        if extension == 'jsonl':
            openai_file = train_file.stream.read()
        elif extension == 'csv':
            openai_file = csv_to_jsonl(train_file.stream.read(), instructions)
        elif extension == 'json':
            openai_file = alpaca_to_chatgpt(train_file, instructions)
        if 'prev_model' in request.form:
            prev_model = request.form['prev_model']
        app.logger.info(f"Previous model: {prev_model}")
        if 'mock' not in request.form:
            f = app.openai_client.files.create(
                file=openai_file,
                purpose="fine-tune"
            )
            job = app.openai_client.fine_tuning.jobs.create(
                training_file=f.id,
                model=prev_model,
                hyperparameters={
                    "n_epochs": 5
                }
            )
            app.logger.info({"mock": "no", "status": job.status, "job_id": job.id})
            retval = {"status": job.status, "job_id": job.id}
            return retval
        else:
            app.logger.info({"mock": "yes", "status": "ok"})
            return {"status": "ok"}
    else:
        app.logger.error({"status": "error", "message": "Training file not found"})
        return {"status": "error", "message": "Training file not found"}
def train_with_id(job_id):
    try:
        job = app.openai_client.fine_tuning.jobs.retrieve(job_id)
        if job.fine_tuned_model is None:
            app.logger.info({"job_id": job_id, "status": job.status})
            return {"status": job.status}
        else:
            app.logger.info({"job_id": job_id, "status": job.status, "model_name": job.fine_tuned_model})
            return {"status": job.status, "model_name": job.fine_tuned_model}
    except Exception:
        app.logger.exception("error")
        return {"status": "Could not find job from id"}
@app.route('/assistant/create', methods=['POST'])
def assistant_create():
    model_name = "gpt-4o-mini"
    assistant_name = "Assistant"
    assistant_ins = "Please respond professionally and in a friendly manner, using the same language as the original request."
    if request.is_json:
        request_form = request.json
    else:
        request_form = request.form.copy()
    assistant_name = request_form.pop('name', assistant_name)
    assistant_ins = request_form.pop('instructions', assistant_ins)
    model_name = request_form.pop('model_name', model_name)
    vector_store_id = request_form.pop('vector_store_id', "")
    file_batch_id = ""
    try:
        temperature = float(request_form.pop('temperature', 1.0))
        if temperature < 0.0:
            temperature = 0.0
        elif temperature > 1.0:
            temperature = 1.0
    except ValueError:
        temperature = 1.0
    # Attach an existing vector store, if one was supplied.
    tool_resources = {"tool_resources": {"file_search": {"vector_store_ids": [vector_store_id]}}} \
        if vector_store_id \
        else {}
    try:
        assistant = app.openai_client.beta.assistants.create(
            name=assistant_name,
            instructions=assistant_ins,
            model=model_name,
            tools=[{"type": "file_search"}],
            temperature=temperature,
            **tool_resources,
            **request_form
        )
        if 'attachment1' in request.files and not vector_store_id:
            # Upload the attachments into a new vector store, then point the
            # assistant at it.
            resp_att = assistant_att()
            retval = {}
            if resp_att['status'] == 'completed':
                resp_upd = assistant_update(assistant.id, resp_att['vector_store_id'])
                assistant_updated = "1" if resp_upd['status'] == 'ok' else "0"
            else:
                assistant_updated = "0"
            if 'vector_store_id' in resp_att:
                retval['vector_store_id'] = resp_att['vector_store_id']
            if 'file_batch_id' in resp_att:
                retval['file_batch_id'] = resp_att['file_batch_id']
            retval['status'] = "ok"
            retval['assistant_id'] = assistant.id
            retval['assistant_updated'] = assistant_updated
            return retval
        else:
            return {"status": "ok", "assistant_id": assistant.id,
                    "assistant_updated": "1" if vector_store_id else "0"}
    except ValueError:
        app.logger.exception("error")
        return {"status": "error",
                "message": "Failed to create assistant, please check whether your parameters are correct"}
    except openai.NotFoundError as e:
        app.logger.exception("error")
        return {"status": "error", "message": json.loads(e.response.content)['error']['message']}, e.status_code
    except Exception:
        app.logger.exception("error")
        return {"status": "error", "message": "Failed to create assistant, please try again"}, 405
@app.route('/assistant/attachment', methods=['POST'])
def assistant_att():
    vector_store_id = request.form.get('vector_store_id', '')
    file_batch_id = request.form.get('file_batch_id', '')
    attachments: list[str] = []
    try:
        if not file_batch_id:
            if 'attachment1' not in request.files:
                return {"status": "error", "message": "No file for attachments"}
            else:
                # Collect attachment1, attachment2, ... until a gap is found.
                has_attachments = True
                n = 1
                while has_attachments:
                    if f'attachment{n}' in request.files:
                        retf = app.openai_client.files.create(
                            file=(request.files[f'attachment{n}'].filename,
                                  request.files[f'attachment{n}'].read()),
                            purpose="assistants"
                        )
                        retf.filename = request.files[f'attachment{n}'].filename
                        attachments.append(retf.id)
                        n = n + 1
                    else:
                        has_attachments = False
            if vector_store_id:
                vector_store = app.openai_client.beta.vector_stores.retrieve(vector_store_id=vector_store_id)
            else:
                vector_store = app.openai_client.beta.vector_stores.create(
                    expires_after={
                        "anchor": "last_active_at",
                        "days": 365
                    }
                )
            file_batch = app.openai_client.beta.vector_stores.file_batches.create_and_poll(
                vector_store_id=vector_store.id,
                file_ids=attachments
            )
            return {"status": file_batch.status, "vector_store_id": vector_store.id, "file_batch_id": file_batch.id}
        else:
            file_batch = app.openai_client.beta.vector_stores.file_batches.retrieve(file_batch_id,
                                                                                    vector_store_id=vector_store_id)
            return {"status": file_batch.status}
    except Exception:
        app.logger.exception("error")
        return {"status": "error", "message": "Upload attachment failed, please try again"}
@app.route('/assistant/update', methods=['POST'])
def assistant_update(aid=None, vid=None):
    try:
        request_form = request.form.copy()
        if aid is not None and vid is not None:
            assistant_id = aid
            vector_store_id = vid
        else:
            assistant_id = request_form.pop('assistant_id')
            vector_store_id = request_form.pop('vector_store_id', None)
        kwargs = {"assistant_id": assistant_id}
        if vector_store_id is not None:
            kwargs['tool_resources'] = {"file_search": {"vector_store_ids": [vector_store_id]}}
        if 'name' in request_form:
            kwargs['name'] = request_form.pop('name')
        if 'instructions' in request_form:
            kwargs['instructions'] = request_form.pop('instructions')
        app.openai_client.beta.assistants.update(**kwargs)
        return {"status": "ok"}
    except Exception:
        app.logger.exception("error")
        return {"status": "error", "message": "Update assistant failed, please try again"}
@app.route('/llama', methods=['POST'])
def llama():
    max_char_msg = 500
    max_resp_token = 600
    json_payload = request.get_json()
    if not json_payload:
        json_payload = []
    has_named_params = False
    if isinstance(json_payload, dict):
        has_named_params = 'payload' in json_payload
        if 'payload' in json_payload:
            json_payload = json_payload['payload']
            if isinstance(json_payload, dict):
                json_payload = [json_payload]
        else:
            json_payload = [json_payload]
    # Use only the latest message, truncated to roughly max_char_msg characters
    # on a word boundary.
    message = json_payload[-1]
    content = message['content']
    content_arr = content.split(" ")
    new_content_arr = content[:max_char_msg].split(" ")
    new_content_len = len(new_content_arr)
    arr = []
    for i in range(new_content_len):
        arr.append(content_arr[i])
    content = " ".join(arr)
    # "Jawab dengan Bahasa Indonesia" = "Answer in Indonesian".
    content = content + " Jawab dengan Bahasa Indonesia"
    try:
        json_request = {
            "model": "llama3.1",
            "prompt": content,
            "stream": False
        }
        r = requests.post("http://localhost:11434/api/generate", json=json_request)
        if r.status_code == 200:
            result = {
                "role": "assistant",
                "content": r.json()["response"]
            }
        else:
            result = {}, r.status_code
    except Exception:
        app.logger.exception("error")
        result = {"status": "error", "message": "Please try again"}, 405
    return result
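

# /speech handles both directions of audio: with an uploaded 'audio' file it runs
# Whisper transcription and returns the text; with a 'text' field it synthesizes
# speech via tts-1-hd and returns the generated MP3.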
@app.route('/speech', methods=['POST'])
def speech(text=""):
    if not text and 'text' not in request.form:
        # Speech-to-text: transcribe the uploaded audio file with Whisper.
        audio_file = request.files.get('audio')
        res = app.openai_client.audio.transcriptions.create(
            model="whisper-1",
            file=(audio_file.filename, audio_file.stream.read())
        )
        return {"status": "ok", "message": res.text}
    elif 'text' in request.form or text:
        # Text-to-speech: write the generated audio to the upload folder and serve it back.
        text = request.form['text'] if 'text' in request.form else text
        uu_id = str(uuid.uuid4())
        print(text)
        with app.openai_client.audio.speech.with_streaming_response.create(
            model="tts-1-hd",
            voice="echo",
            speed=0.8,
            input=text
        ) as res:
            res.stream_to_file(os.path.join(app.config['UPLOAD_FOLDER'], f"{uu_id}.mp3"))
        return download_file(f"{uu_id}.mp3")


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8348, debug=True, ssl_context=ssl)