# main.py

import logging
import os
import json
import re
import time
import uuid
import random
import openai
import requests
from openai import OpenAI
from flask import Flask, request, jsonify, send_from_directory, url_for
from convert import alpaca_to_chatgpt, csv_to_jsonl

app = Flask(__name__)
ssl = None
# ssl = ('/etc/ssl/sample.crt', '/etc/ssl/sample.pem')
# The API key must come from the environment; never hardcode secrets in source.
app.openai_key = os.environ.get("OPENAI_KEY", "")
app.openai_client = OpenAI(api_key=app.openai_key)
# logging.basicConfig(level=logging.DEBUG, filename='/jkt-disk-01/app/mms/chatgpt-apache/chatgpt.log', format='%(asctime)s %(message)s')
app.chat_messages = [
    {"role": "system",
     "content": "Please respond professionally and in a friendly manner, using the same language as the original request. Use emoji responsibly."}
]
app.translate_messages = [
    {"role": "system",
     "content": "Please translate using the requested language."}
]
app.suggest_messages = [
    {"role": "system",
     "content": "Please suggest reply messages based on the previous conversations and the user's request."}
]
app.recommend_messages = [
    {"role": "system",
     "content": "Give normalized total weight of each category in json based on headlines"}
]
app.summary_messages = [
    {"role": "system",
     "content": "Please summarize an article."}
]

UPLOAD_FOLDER = 'files'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

@app.route('/files/<name>')
def download_file(name):
    return send_from_directory(app.config["UPLOAD_FOLDER"], name)


@app.route('/', methods=['GET', 'POST'])
def test():
    return jsonify({"status": "0"})

def roulette() -> str:
    # Weighted model selection: roughly 80% of calls use gpt-4o-mini, 20% use gpt-4o.
    roulette_arr = [(80, "gpt-4o-mini"), (20, "gpt-4o")]
    rand_num = random.randrange(0, 100)
    model_name = ""
    n = 0
    j = 0
    while rand_num >= n:
        n += roulette_arr[j][0]
        model_name = roulette_arr[j][1]
        j += 1
    return model_name
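
# Usage sketch (illustrative, not part of the request flow): roulette() returns one of the
# model names above according to the weights, e.g.
#
#     model = roulette()   # -> "gpt-4o-mini" about 80% of the time, otherwise "gpt-4o"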

def prune_message(message: dict):
    # Keep only the 'role' and 'content' keys expected by the chat completions API.
    m = message.copy()
    for k in list(m.keys()):
        if k != 'role' and k != 'content':
            m.pop(k)
    return m

def recommend(headlines, category):
    chat_messages = app.recommend_messages.copy()
    try:
        # Prompt (Indonesian): weigh each category against the headlines, sum and
        # normalize the weights, and return the result as JSON.
        json_payload = {
            "role": "user",
            "content": f"""{headlines}
Berikan nilai berat masing-masing kategori, jumlahkan dan normalisasikan:
{category}
Berikan dalam bentuk json
"""
        }
        chat_messages.append(json_payload)
        time.sleep(3)
        json_response = app.openai_client.chat.completions.create(
            model="gpt-4o-mini",
            messages=chat_messages,
            response_format={"type": "json_object"}
        )
        result = {"status": "ok", "message": json.loads(json_response.choices[0].message.content)}
    except openai.APITimeoutError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": e.message}, 408
    except openai.NotFoundError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": json.loads(e.response.content)['error']['message']}, e.status_code
    except Exception:
        app.logger.exception("error")
        result = {"status": "error", "message": "Please try again"}, 405
    return result
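
# Example call (illustrative values only): recommend() is invoked from /gpt when the request
# body contains a "recommend" key; headlines and category come straight from that payload.
#
#     recommend(["Harga BBM naik", "Timnas lolos ke final"], ["ekonomi", "olahraga"])
#     # expected shape (model output varies):
#     # {"status": "ok", "message": {"ekonomi": 0.5, "olahraga": 0.5}}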

def vision(message, image_url=None, image_b64=None):
    chat_messages = app.chat_messages.copy()
    url = ""
    if image_url:
        url = f"{image_url}"
    elif image_b64:
        url = f"data:image/jpeg;base64,{image_b64}"
    try:
        json_payload = {
            "role": "user",
            "content": [
                {"type": "text", "text": message},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": url,
                    },
                },
            ],
        }
        chat_messages.append(json_payload)
        time.sleep(3)
        json_response = app.openai_client.chat.completions.create(
            model="gpt-4o",
            messages=chat_messages,
            max_tokens=500
        )
        result = {"role": "assistant", "content": json_response.choices[0].message.content}
    except openai.APITimeoutError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": e.message}, 408
    except openai.NotFoundError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": json.loads(e.response.content)['error']['message']}, e.status_code
    except Exception:
        app.logger.exception("error")
        result = {"status": "error", "message": "Please try again"}, 405
    return result
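
# Example call (illustrative): vision() accepts either a public image URL or a base64 string;
# the latter is wrapped into a data URL before being sent to gpt-4o.
#
#     vision("Ini gambar apa?", image_url="https://example.com/cat.jpg")
#     # -> {"role": "assistant", "content": "..."} on success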

@app.route('/gpt', methods=['POST'])
def gpt():
    assistant_id = ""
    assistant = None
    chat_messages = app.chat_messages.copy()
    chat_model = "gpt-4o-mini"
    use_video = False
    suggest = False
    summarize = False
    expression = False
    predict_q = 0
    max_char_msg = 500
    max_resp_token = 600
    category = []
    headlines = []
    image_url = ""
    num_choices = 1
    json_payload = request.get_json()
    if not json_payload:
        json_payload = []
    has_named_params = False
    app.logger.info("Request: " + str(json_payload))
    if isinstance(json_payload, dict):
        has_named_params = 'payload' in json_payload
        if 'payload' in json_payload:
            if 'predict_q' in json_payload:
                # clamp predict_q to the range 0..5
                predict_q = 5 if json_payload['predict_q'] > 4 else 0 if json_payload['predict_q'] < 1 else \
                    json_payload['predict_q']
            if 'num_choices' in json_payload:
                # clamp num_choices to the range 1..5
                num_choices = 5 if json_payload['num_choices'] > 4 else 1 if json_payload['num_choices'] < 2 else \
                    json_payload['num_choices']
            if 'use_video' in json_payload:
                use_video = json_payload['use_video'] == "1"
            if 'chat_model' in json_payload and 'assistant_id' not in json_payload:
                chat_model = json_payload['chat_model']
                max_resp_token = 2048
            if 'expression' in json_payload:
                expression = json_payload['expression'] == "1"
            if 'translate' in json_payload:
                chat_messages = app.translate_messages.copy()
                json_payload['payload'][-1]['content'] = json_payload['payload'][-1][
                    'content'] + f" (Translate to {json_payload['translate']})"
            elif 'suggest' in json_payload:
                suggest = json_payload['suggest'] == "1"
                if suggest:
                    chat_messages = app.suggest_messages.copy()
                else:
                    chat_messages = app.chat_messages.copy()
                json_payload['payload'][-1]['content'] = json_payload['payload'][-1][
                    'content'] + " What can I say to him/her?"
            elif 'summarize' in json_payload:
                summarize = json_payload['summarize'] == "1"
                if summarize:
                    chat_messages = app.summary_messages.copy()
                    max_char_msg = 2000
                    max_resp_token = 1000
                else:
                    chat_messages = app.chat_messages.copy()
                json_payload['payload'][-1]['content'] = "Please summarize this article:\n" + \
                    json_payload['payload'][-1]['content']
            elif 'assistant_id' in json_payload:
                assistant_id = json_payload['assistant_id']
                assistant = app.openai_client.beta.assistants.retrieve(assistant_id=assistant_id)
                chat_model = assistant.model
            else:
                chat_messages = app.chat_messages.copy()
            json_payload = json_payload['payload']
            if isinstance(json_payload, dict):
                json_payload = [json_payload]
        elif 'greeting' in json_payload:
            chat_messages = app.chat_messages.copy()
            company_name = json_payload['greeting']['company_name']
            timestamp = json_payload['greeting']['timestamp']
            # Ask (in Indonesian) whether the company name has an Islamic element; expects 'Ya' (yes) or 'Tidak' (no).
            islamic_message = f"Apakah Nama '{company_name}' terdapat unsur islami? Jawab dengan 'Ya' atau 'Tidak'"
            islam_messages = app.chat_messages.copy()
            islam_messages.append({
                "role": "user",
                "content": islamic_message
            })
            islamic_response = app.openai_client.chat.completions.create(model="gpt-4o-mini",
                                                                         messages=islam_messages,
                                                                         max_tokens=2, temperature=0.5)
            # Greeting prompt (Indonesian): write a chatbot greeting for the company at the given time,
            # without mentioning the time; if the name sounds Islamic, open with "Assalamu'alaikum...".
            if 'Ya' in islamic_response.choices[0].message.content:
                greeting_message = f"Buatkan respons chatbot berupa greeting dari chat perusahaan bernama {company_name} pada jam {timestamp}, tidak perlu mention waktu, dan jawab dengan 'Assalamu'alaikum...' terlebih dahulu"
            else:
                greeting_message = f"Buatkan respons chatbot berupa greeting dari chat perusahaan bernama {company_name} pada jam {timestamp}, tidak perlu mention waktu"
            json_payload = [
                {
                    "role": "user",
                    "content": greeting_message
                }
            ]
        elif 'recommend' in json_payload:
            headlines = json_payload['recommend']['headlines']
            category = json_payload['recommend']['category']
            return recommend(headlines, category)
        elif 'image_url' in json_payload:
            image = json_payload['image_url']
            message = json_payload["message"] if 'message' in json_payload else "Ini gambar apa?"
            return vision(message, image_url=image)
        elif 'image_b64' in json_payload:
            image = json_payload['image_b64']
            message = json_payload["message"] if 'message' in json_payload else "Ini gambar apa?"
            return vision(message, image_b64=image)
        else:
            app.logger.info("This request uses the old json format")
            chat_messages = app.chat_messages.copy()
            # old format sends a single message object; wrap it so the slicing below works
            json_payload = [json_payload]
    app.logger.info("Chat Messages:")
    app.logger.info(str(chat_messages))
    # keep only the last five messages of the conversation
    json_payload = json_payload[-5:]
    if assistant_id:
        chat_messages = []
    for message in json_payload:
        p_message = prune_message(message)
        if p_message['role'] == 'user':
            content = p_message['content'].lower()
        else:
            content = p_message['content']
        # truncate each message to roughly max_char_msg characters without cutting a word in half
        content_arr = content.split(" ")
        new_content_arr = content[:max_char_msg].split(" ")
        new_content_len = len(new_content_arr)
        arr = []
        for i in range(new_content_len):
            arr.append(content_arr[i])
        p_message['content'] = " ".join(arr)
        chat_messages.append(p_message)
    app.logger.info(chat_messages)
    result = {}
    try:
        n = num_choices
        # generic chat models are re-picked through the weighted roulette
        if "gpt-3.5-turbo" in chat_model or "gpt-4o-mini" in chat_model:
            chat_model = roulette()
        app.logger.info(f"Model used: {chat_model}")
        if assistant_id and not suggest:
            runs = app.openai_client.beta.threads.create_and_run_poll(
                assistant_id=assistant_id,
                thread={
                    "messages": chat_messages
                }
            )
            if runs.status != "completed":
                # "Sorry, I am busy right now. Please try again in a moment."
                result = {"role": "assistant", "content": "Maaf, saat ini saya sedang sibuk. Coba beberapa saat lagi."}
            else:
                messages = list(app.openai_client.beta.threads.messages.list(thread_id=runs.thread_id, run_id=runs.id))
                try:
                    message_content = messages[0].content[0].text
                    app.logger.info(message_content.value)
                    # strip file-citation markers such as 【4:0†source】 from the assistant answer
                    pattern = re.compile(r"【\d+:\d+†\(?source\)?】")
                    filtered_message = pattern.sub("", message_content.value)
                    result = {"role": "assistant", "content": filtered_message}
                except IndexError:
                    # "I currently do not have the information needed to answer your question."
                    result = {"role": "assistant", "content": "Saat ini saya tidak memiliki informasi yang diperlukan untuk menjawab pertanyaan Anda."}
        else:
            time.sleep(3)
            json_response = app.openai_client.chat.completions.create(model=chat_model,
                                                                      messages=chat_messages,
                                                                      max_tokens=max_resp_token, temperature=0.7, n=n)
            app.logger.info(json_response.choices[0].message)
            if has_named_params:
                if suggest:
                    choices = json_response.choices
                    messages = [i.message for i in choices]
                    json_formatted = []
                    for message in messages:
                        json_formatted.append({"role": "assistant", "content": message.content})
                    result = {"url": "", "message": json_formatted}
                else:
                    if use_video:
                        # TODO: to be implemented
                        result = {"url": url_for('download_file', name="test.mp4", _external=True),
                                  "message": {"role": "assistant", "content": json_response.choices[0].message.content}}
                    else:
                        result = {"role": "assistant", "content": json_response.choices[0].message.content}
            else:
                result = {"role": "assistant", "content": json_response.choices[0].message.content}
        if expression:
            exprr = expresso(text=result['content'])
            result['expression'] = exprr['expression']
        if predict_q:
            json_response_q = None
            if assistant_id:
                # Prompt (Indonesian): give `predict_q` random questions on the assistant's topic, as a JSON array.
                query_q = {
                    "role": "user",
                    "content": f"Berikan {predict_q} pertanyaan random yang akan saya ajukan sesuai topik asisten dalam bentuk json array"
                }
            else:
                # Prompt (Indonesian): give `predict_q` follow-up questions based on this conversation, as a JSON array.
                query_q = {
                    "role": "user",
                    "content": f"Berikan {predict_q} pertanyaan lain yang akan saya ajukan berdasarkan percakapan kali ini dalam bentuk json array"
                }
            chat_messages.append(prune_message(result))
            chat_messages.append(query_q)
            if assistant_id:
                runs = app.openai_client.beta.threads.create_and_run_poll(
                    assistant_id=assistant_id,
                    thread={
                        "messages": chat_messages
                    }
                )
                if runs.status == "completed":
                    messages = list(app.openai_client.beta.threads.messages.list(thread_id=runs.thread_id, run_id=runs.id))
                    message_content = messages[0].content[0].text
                    app.logger.info(message_content.value)
                    pattern = re.compile(r"【\d+:\d+†\(?source\)?】")
                    filtered_message = pattern.sub("", message_content.value)
                    # Prompt (Indonesian): extract the `predict_q` questions as a JSON array.
                    predict_q_arr = [
                        {
                            "role": "system",
                            "content": assistant.instructions
                        },
                        {
                            "role": "assistant",
                            "content": filtered_message
                        },
                        {
                            "role": "user",
                            "content": f"Ekstrak {predict_q} pertanyaan tersebut dalam bentuk json array"
                        }
                    ]
                    json_response_q = app.openai_client.chat.completions.create(
                        model=chat_model,
                        messages=predict_q_arr,
                        temperature=0.2,
                        response_format={"type": "json_object"}
                    )
            else:
                json_response_q = app.openai_client.chat.completions.create(model=chat_model,
                                                                            messages=chat_messages,
                                                                            max_tokens=max_resp_token,
                                                                            temperature=0.2,
                                                                            response_format={"type": "json_object"})
            if json_response_q:
                json_response_dict = json.loads(json_response_q.choices[0].message.content)
                if json_response_dict is not None:
                    if isinstance(json_response_dict, dict):
                        if len(json_response_dict) > 1:
                            # multiple keys: treat each value as one predicted question
                            qs = []
                            for q in json_response_dict.values():
                                qs.append(q)
                            json_response_dict = qs
                        else:
                            # single key: unwrap the array stored under it
                            try:
                                first_key = next(iter(json_response_dict))
                                json_response_dict = json_response_dict[first_key]
                            except StopIteration:
                                json_response_dict = []
                    elif isinstance(json_response_dict, str):
                        json_response_dict = [json_response_dict]
                    result["predict_q"] = json_response_dict
    except openai.APITimeoutError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": e.message}, 408
    except openai.NotFoundError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": json.loads(e.response.content)['error']['message']}, e.status_code
    except Exception:
        app.logger.exception("error")
        result = {"status": "error", "message": "Please try again"}, 405
    app.logger.info("Result: " + str(result))
    return json.dumps(result)
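
# Example request bodies accepted by /gpt (hypothetical values; shapes taken from the handler above):
#
#   New "named params" format:
#     {"payload": [{"role": "user", "content": "Halo"}], "predict_q": 3, "expression": "1"}
#   Assistant-backed chat:
#     {"payload": [{"role": "user", "content": "Jam buka?"}], "assistant_id": "asst_..."}
#   Special top-level keys handled before the chat call:
#     {"greeting": {"company_name": "Toko ABC", "timestamp": "09:00"}}
#     {"recommend": {"headlines": ["..."], "category": ["ekonomi", "olahraga"]}}
#     {"image_url": "https://example.com/foto.jpg", "message": "Ini gambar apa?"}
#   Old format: a bare list (or single dict) of chat messages.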

@app.route('/train', methods=['POST'])
def train():
    prev_model = "gpt-3.5-turbo"
    instructions = None
    if 'job_id' in request.form:
        return train_with_id(job_id=request.form['job_id'])
    elif 'train_file' in request.files:
        train_file = request.files['train_file']
        app.logger.info({"filename": train_file.filename})
        if 'instructions' in request.form:
            instructions = request.form['instructions']
        openai_file = None
        if train_file.filename.split('.')[1] == 'jsonl':
            openai_file = train_file.stream.read()
        elif train_file.filename.split('.')[1] == 'csv':
            openai_file = csv_to_jsonl(train_file.stream.read(), instructions)
        elif train_file.filename.split('.')[1] == 'json':
            openai_file = alpaca_to_chatgpt(train_file, instructions)
        if 'prev_model' in request.form:
            prev_model = request.form['prev_model']
        app.logger.info(f"Previous model: {prev_model}")
        if 'mock' not in request.form:
            f = app.openai_client.files.create(
                file=openai_file,
                purpose="fine-tune"
            )
            job = app.openai_client.fine_tuning.jobs.create(
                training_file=f.id,
                model=prev_model,
                hyperparameters={
                    "n_epochs": 5
                }
            )
            app.logger.info({"mock": "no", "status": job.status, "job_id": job.id})
            retval = {"status": job.status, "job_id": job.id}
            return retval
        else:
            app.logger.info({"mock": "yes", "status": "ok"})
            return {"status": "ok"}
    else:
        app.logger.error({"status": "error", "message": "Training file not found"})
        return {"status": "error", "message": "Training file not found"}
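
# Example call (hypothetical filenames/values), matching the form fields read above:
#
#     requests.post("http://localhost:8348/train",
#                   files={"train_file": open("dataset.jsonl", "rb")},
#                   data={"prev_model": "gpt-3.5-turbo"})
#
# To poll an existing fine-tune job instead, post only data={"job_id": "<job id>"}.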

def train_with_id(job_id):
    try:
        time.sleep(3)
        job = app.openai_client.fine_tuning.jobs.retrieve(job_id)
        if job.fine_tuned_model is None:
            app.logger.info({"job_id": job_id, "status": job.status})
            return {"status": job.status}
        else:
            app.logger.info({"job_id": job_id, "status": job.status, "model_name": job.fine_tuned_model})
            return {"status": job.status, "model_name": job.fine_tuned_model}
    except Exception:
        app.logger.exception("error")
        return {"status": "Could not find job from id"}

@app.route('/assistant/create', methods=['POST'])
def assistant_create():
    model_name = "gpt-4o-mini"
    assistant_name = "Assistant"
    assistant_ins = "Please respond professionally and in a friendly manner, using the same language as the original request."
    if request.is_json:
        request_form = request.json
    else:
        request_form = request.form.copy()
    assistant_name = request_form.pop('name', assistant_name)
    assistant_ins = request_form.pop('instructions', assistant_ins)
    model_name = request_form.pop('model_name', model_name)
    vector_store_id = request_form.pop('vector_store_id', "")
    file_batch_id = ""
    try:
        temperature = float(request_form.pop('temperature', 1.0))
        if temperature < 0.0:
            temperature = 0.0
        elif temperature > 1.0:
            temperature = 1.0
    except ValueError:
        temperature = 1.0
    tool_resources = {"tool_resources": {"file_search": {"vector_store_ids": [vector_store_id]}}} \
        if vector_store_id \
        else {}
    try:
        time.sleep(3)
        assistant = app.openai_client.beta.assistants.create(
            name=assistant_name,
            instructions=assistant_ins,
            model=model_name,
            tools=[{"type": "file_search"}],
            temperature=temperature,
            **tool_resources,
            **request_form
        )
        if 'attachment1' in request.files and not vector_store_id:
            resp_att = assistant_att()
            retval = {}
            if resp_att['status'] == 'completed':
                resp_upd = assistant_update(assistant.id, resp_att['vector_store_id'])
                assistant_updated = "1" if resp_upd['status'] == 'ok' else "0"
            else:
                assistant_updated = "0"
            if 'vector_store_id' in resp_att:
                retval['vector_store_id'] = resp_att['vector_store_id']
            if 'file_batch_id' in resp_att:
                retval['file_batch_id'] = resp_att['file_batch_id']
            retval['status'] = "ok"
            retval['assistant_id'] = assistant.id
            retval['assistant_updated'] = assistant_updated
            return retval
        else:
            return {"status": "ok", "assistant_id": assistant.id, "assistant_updated": "1" if vector_store_id else "0"}
    except ValueError:
        app.logger.exception("error")
        return {"status": "error",
                "message": "Failed to create assistant, please check whether your parameters are correct"}
    except openai.NotFoundError as e:
        app.logger.exception("error")
        return {"status": "error", "message": json.loads(e.response.content)['error']['message']}, e.status_code
    except Exception:
        app.logger.exception("error")
        return {"status": "error", "message": "Failed to create assistant, please try again"}, 405
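
# Example call (hypothetical values), using the form/JSON fields consumed above; any extra
# fields are passed straight through to assistants.create via **request_form:
#
#     requests.post("http://localhost:8348/assistant/create",
#                   data={"name": "Toko ABC Bot",
#                         "instructions": "Jawab pertanyaan pelanggan dengan ramah",
#                         "model_name": "gpt-4o-mini",
#                         "temperature": "0.7"},
#                   files={"attachment1": open("faq.pdf", "rb")})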

@app.route('/assistant/attachment', methods=['POST'])
def assistant_att():
    vector_store_id = request.form.get('vector_store_id', '')
    file_batch_id = request.form.get('file_batch_id', '')
    attachments: list[str] = []
    try:
        if not file_batch_id:
            if 'attachment1' not in request.files:
                return {"status": "error", "message": "No file for attachments"}
            else:
                has_attachments = True
                n = 1
                while has_attachments:
                    if f'attachment{n}' in request.files:
                        retf = app.openai_client.files.create(
                            file=(request.files[f'attachment{n}'].filename,
                                  request.files[f'attachment{n}'].read()),
                            purpose="assistants"
                        )
                        retf.filename = request.files[f'attachment{n}'].filename
                        attachments.append(retf.id)
                        n = n + 1
                    else:
                        has_attachments = False
                if vector_store_id:
                    vector_store = app.openai_client.beta.vector_stores.retrieve(vector_store_id=vector_store_id)
                else:
                    vector_store = app.openai_client.beta.vector_stores.create(
                        expires_after={
                            "anchor": "last_active_at",
                            "days": 365
                        }
                    )
                file_batch = app.openai_client.beta.vector_stores.file_batches.create_and_poll(
                    vector_store_id=vector_store.id,
                    file_ids=attachments
                )
                return {"status": file_batch.status, "vector_store_id": vector_store.id, "file_batch_id": file_batch.id}
        else:
            file_batch = app.openai_client.beta.vector_stores.file_batches.retrieve(file_batch_id,
                                                                                    vector_store_id=vector_store_id)
            return {"status": file_batch.status}
    except Exception:
        app.logger.exception("error")
        return {"status": "error", "message": "Upload attachment failed, please try again"}
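
# Example calls (hypothetical values). Files are sent as attachment1, attachment2, ...;
# posting file_batch_id instead polls the status of an existing batch:
#
#     requests.post("http://localhost:8348/assistant/attachment",
#                   files={"attachment1": open("faq.pdf", "rb")})
#     requests.post("http://localhost:8348/assistant/attachment",
#                   data={"vector_store_id": "vs_...", "file_batch_id": "vsfb_..."})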

@app.route('/assistant/attachment/update', methods=['POST'])
def assistant_attachment_update():
    pass

@app.route('/assistant/update', methods=['POST'])
def assistant_update(aid=None, vid=None):
    try:
        request_form = request.form.copy()
        if aid is not None and vid is not None:
            assistant_id = aid
            vector_store_id = vid
        else:
            assistant_id = request_form.pop('assistant_id')
            vector_store_id = request_form.pop('vector_store_id', None)
        kwargs = {"assistant_id": assistant_id}
        if vector_store_id is not None:
            kwargs['tool_resources'] = {"file_search": {"vector_store_ids": [vector_store_id]}}
        if 'name' in request_form:
            kwargs['name'] = request_form.pop('name')
        if 'instructions' in request_form:
            kwargs['instructions'] = request_form.pop('instructions')
        time.sleep(3)
        app.openai_client.beta.assistants.update(**kwargs)
        return {"status": "ok"}
    except Exception:
        app.logger.exception("error")
        return {"status": "error", "message": "Update assistant failed, please try again"}
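
# Example call (hypothetical IDs), matching the form fields popped above:
#
#     requests.post("http://localhost:8348/assistant/update",
#                   data={"assistant_id": "asst_...",
#                         "vector_store_id": "vs_...",
#                         "instructions": "Gunakan bahasa formal"})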

@app.route('/llama', methods=['POST'])
def llama():
    max_char_msg = 500
    max_resp_token = 600
    json_payload = request.get_json()
    if not json_payload:
        json_payload = []
    has_named_params = False
    if isinstance(json_payload, dict):
        has_named_params = 'payload' in json_payload
        if 'payload' in json_payload:
            json_payload = json_payload['payload']
            if isinstance(json_payload, dict):
                json_payload = [json_payload]
        else:
            json_payload = [json_payload]
    message = json_payload[-1]
    content = message['content']
    # truncate the message to roughly max_char_msg characters without cutting a word in half
    content_arr = content.split(" ")
    new_content_arr = content[:max_char_msg].split(" ")
    new_content_len = len(new_content_arr)
    arr = []
    for i in range(new_content_len):
        arr.append(content_arr[i])
    content = " ".join(arr)
    content = content + " Jawab dengan Bahasa Indonesia"  # i.e. "Answer in Indonesian"
    try:
        json_request = {
            "model": "llama3.1",
            "prompt": content,
            "stream": False
        }
        r = requests.post("http://localhost:11434/api/generate", json=json_request)
        if r.status_code == 200:
            result = {
                "role": "assistant",
                "content": r.json()["response"]
            }
        else:
            result = {}, r.status_code
    except Exception:
        app.logger.exception("error")
        result = {"status": "error", "message": "Please try again"}, 405
    return result
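
# Example request body (illustrative); /llama accepts the same payload shapes as /gpt and
# forwards only the last message to a local Ollama server on port 11434:
#
#     {"payload": [{"role": "user", "content": "Apa ibu kota Indonesia?"}]}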

@app.route('/speech', methods=['POST'])
def speech(text=""):
    time.sleep(3)
    if not text and 'text' not in request.form:
        # Speech-to-text: transcribe the uploaded audio file with Whisper.
        audio_file = request.files.get('audio')
        res = app.openai_client.audio.transcriptions.create(
            model="whisper-1",
            file=(audio_file.filename, audio_file.stream.read())
        )
        return {"status": "ok", "message": res.text}
    elif 'text' in request.form or text:
        # Text-to-speech: synthesize the text and return the generated mp3.
        text = request.form['text'] if 'text' in request.form else text
        uu_id = str(uuid.uuid4())
        app.logger.info(text)
        with app.openai_client.audio.speech.with_streaming_response.create(
            model="tts-1-hd",
            voice="echo",
            speed=0.8,
            input=text
        ) as res:
            res.stream_to_file(os.path.join(app.config['UPLOAD_FOLDER'], f"{uu_id}.mp3"))
        return download_file(f"{uu_id}.mp3")
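
# Example calls (hypothetical values). Sending an "audio" file transcribes it with Whisper;
# sending a "text" field synthesizes speech and returns the generated mp3:
#
#     requests.post("http://localhost:8348/speech", files={"audio": open("note.wav", "rb")})
#     requests.post("http://localhost:8348/speech", data={"text": "Selamat pagi"})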

@app.route('/expression', methods=['POST'])
def expresso(text=""):
    if not text:
        if 'text' in request.form:
            text = request.form['text']
        else:
            return {"status": "error", "message": "No text for expression"}
    try:
        response = app.openai_client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {
                    "role": "user",
                    "content": f"What is the closest expression of this text, choose between happy, sad, indifferent, fear, anger, surprise, or disgust, output json with key 'expression':\n\n{text}\n\n"
                }
            ],
            response_format={"type": "json_object"}
        )
        response_message = response.choices[0].message.content
        return json.loads(response_message)
    except Exception:
        return {"expression": "indifferent"}
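
# Example call (illustrative): classify the expression of a text snippet.
#
#     requests.post("http://localhost:8348/expression", data={"text": "Terima kasih banyak!"})
#     # expected shape (model output varies): {"expression": "happy"}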

# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8348, debug=True, ssl_context=ssl)

# See PyCharm help at https://www.jetbrains.com/help/pycharm/