# main.py
import datetime
import logging
import os
import json
import re
import time
import uuid
import random

import openai
import requests
from openai import OpenAI
from flask import Flask, request, jsonify, send_from_directory, url_for

from convert import alpaca_to_chatgpt, csv_to_jsonl

app = Flask(__name__)

ssl = None
# ssl = ('/etc/ssl/sample.crt', '/etc/ssl/sample.pem')

# Read the API key from the environment; never hard-code secrets in source.
app.openai_key = os.environ.get("OPENAI_KEY")
app.openai_client = OpenAI(api_key=app.openai_key)

# logging.basicConfig(level=logging.DEBUG, filename='/jkt-disk-01/app/mms/chatgpt-apache/chatgpt.log', format='%(asctime)s %(message)s')

# Base system prompts reused by the different endpoints.
app.chat_messages = [
    {"role": "system",
     "content": "Please respond professionally and in a friendly manner, using the same language as the original request. Use emoji responsibly."}
]
app.translate_messages = [
    {"role": "system",
     "content": "Please translate using the requested language."}
]
app.suggest_messages = [
    {"role": "system",
     "content": "Please suggest reply messages based on the previous conversations and the user's request."}
]
app.recommend_messages = [
    {"role": "system",
     "content": "Give normalized total weight of each category in json based on headlines"}
]
app.summary_messages = [
    {"role": "system",
     "content": "Please summarize an article."}
]

UPLOAD_FOLDER = 'files'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER


@app.route('/files/<name>')
def download_file(name):
    return send_from_directory(app.config["UPLOAD_FOLDER"], name)


@app.route('/', methods=['GET', 'POST'])
def test():
    return jsonify({"status": "0"})


def roulette() -> str:
    # Weighted random model selection: ~80% gpt-4o-mini, ~20% gpt-4o.
    roulette_arr = [(80, "gpt-4o-mini"), (20, "gpt-4o")]
    rand_num = random.randrange(0, 100)  # 0..99 so the weights sum to exactly 100
    model_name = ""
    n = 0
    j = 0
    while rand_num >= n:
        n += roulette_arr[j][0]
        model_name = roulette_arr[j][1]
        app.logger.debug(model_name)
        j += 1
    return model_name


def prune_message(message: dict):
    # Keep only the 'role' and 'content' keys of a chat message.
    m = message.copy()
    for k in list(m.keys()):
        if k != 'role' and k != 'content':
            m.pop(k)
    return m


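# recommend() is called from the /gpt route when the body carries a 'recommend' key.
# Illustrative arguments (values are examples only):
#   headlines = ["Harga BBM naik", "Timnas lolos ke final"]
#   category = ["ekonomi", "olahraga", "politik"]
# On success it returns {"status": "ok", "message": {...normalized weight per category...}}.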
def recommend(headlines, category):
    chat_messages = app.recommend_messages.copy()
    try:
        json_payload = {
            "role": "user",
            # Prompt (Indonesian): "Give the weight of each category, sum and normalize them, return as json."
            "content": f"""{headlines}
Berikan nilai berat masing-masing kategori, jumlahkan dan normalisasikan:
{category}
Berikan dalam bentuk json
"""
        }
        chat_messages.append(json_payload)
        time.sleep(3)
        json_response = app.openai_client.chat.completions.create(model="gpt-4o-mini",
                                                                  messages=chat_messages,
                                                                  response_format={"type": "json_object"})
        result = {"status": "ok", "message": json.loads(json_response.choices[0].message.content)}
    except openai.APITimeoutError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": e.message}, 408
    except openai.NotFoundError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": json.loads(e.response.content)['error']['message']}, e.status_code
    except Exception:
        app.logger.exception("error")
        result = {"status": "error", "message": "Please try again"}, 405
    return result


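# vision() is called from the /gpt route when the body carries 'image_url' or 'image_b64'.
# Illustrative call (the URL is an example only):
#   vision("Ini gambar apa?", image_url="https://example.com/foto.jpg")
# On success it returns {"role": "assistant", "content": "<description of the image>"}.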
def vision(message, image_url=None, image_b64=None):
    # Describe an image with gpt-4o, given either a URL or a base64-encoded JPEG.
    chat_messages = app.chat_messages.copy()
    url = ""
    if image_url:
        url = f"{image_url}"
    elif image_b64:
        url = f"data:image/jpeg;base64,{image_b64}"
    try:
        json_payload = {
            "role": "user",
            "content": [
                {"type": "text", "text": message},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": url,
                    },
                },
            ],
        }
        chat_messages.append(json_payload)
        time.sleep(3)
        json_response = app.openai_client.chat.completions.create(
            model="gpt-4o",
            messages=chat_messages,
            max_tokens=500
        )
        result = {"role": "assistant", "content": json_response.choices[0].message.content}
    except openai.APITimeoutError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": e.message}, 408
    except openai.NotFoundError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": json.loads(e.response.content)['error']['message']}, e.status_code
    except Exception:
        app.logger.exception("error")
        result = {"status": "error", "message": "Please try again"}, 405
    return result


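# suggest_schedule() handles /gpt requests that include a 'schedule' key. Illustrative inputs:
#   sched = [{"start_time": "2025-10-01 09:00", "end_time": "2025-10-01 10:00", "description": "Standup"}]
#   message = [{"role": "user", "content": "Kapan saya bisa rapat satu jam hari ini?"}]
# The current time and the schedule JSON are appended to the last user message before the call.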
def suggest_schedule(sched, message: list, assistant_id=None, timestamp=None):
    # Ask the model (or an assistant) to suggest a schedule slot; the current time and the
    # existing schedule are appended to the last user message ("Sekarang jam ..." = "It is now ...").
    result = {}
    if timestamp is None:
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
    message[-1]["content"] = f"{message[-1]['content']} Sekarang jam {timestamp}.\n\n{json.dumps(sched)}"
    try:
        if assistant_id:
            runs = app.openai_client.beta.threads.create_and_run_poll(
                assistant_id=assistant_id,
                thread={
                    "messages": message
                }
            )
            if runs.status == "completed":
                messages = list(app.openai_client.beta.threads.messages.list(thread_id=runs.thread_id, run_id=runs.id))
                message_content = messages[0].content[0].text
                app.logger.info(message_content.value)
                # Strip file-citation markers such as 【12:3†source】 from assistant output.
                pattern = re.compile(r"【\d+:\d+†\(?source\)?】")
                filtered_message = pattern.sub("", message_content.value)
                result = {"role": "assistant", "content": filtered_message}
        else:
            json_response = app.openai_client.chat.completions.create(model="gpt-4o",
                                                                      messages=message,
                                                                      temperature=0.7)
            response_message = json_response.choices[0].message.content
            result = {"role": "assistant", "content": response_message}
    except openai.APITimeoutError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": e.message}, 408
    except openai.NotFoundError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": json.loads(e.response.content)['error']['message']}, e.status_code
    except Exception:
        app.logger.exception("error")
        result = {"status": "error", "message": "Please try again"}, 405
    return result


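# convert_to_schedule() handles /gpt requests with "convert_schedule": "1". It asks the model to
# return a single JSON object shaped like:
#   {"start_time": "2025-10-01 11:00", "end_time": "2025-10-01 13:00", "description": "..."}
# (the shape comes from the instruction appended below; the values here are examples).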
def convert_to_schedule(message: list):
    # Convert the conversation into a JSON schedule entry. The appended instruction (Indonesian)
    # asks for: {"start_time": ..., "end_time": ..., "description": ...}.
    result = {}
    message[-1]["content"] = f'{message[-1]["content"]} Ubah menjadi format json seperti berikut:\n\n{{"start_time": "2025-10-01 11:00", "end_time": "2025-10-01 13:00", "description": "Deskripsi kegiatan" }}'
    try:
        json_response = app.openai_client.chat.completions.create(
            model="gpt-4o",
            messages=message,
            temperature=0.2,
            response_format={"type": "json_object"}
        )
        response_message = json_response.choices[0].message.content
        result = json.loads(response_message)
    except openai.APITimeoutError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": e.message}, 408
    except openai.NotFoundError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": json.loads(e.response.content)['error']['message']}, e.status_code
    except Exception:
        app.logger.exception("error")
        result = {"status": "error", "message": "Please try again"}, 405
    return result


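# Example JSON body for the /gpt route below. Field names are taken from the parsing logic in
# gpt(); the values are illustrative only, and every field except 'payload' is optional:
#   {
#       "payload": [{"role": "user", "content": "Halo, bisakah kamu membantu saya?"}],
#       "chat_model": "gpt-4o-mini",
#       "predict_q": 2,
#       "num_choices": 1,
#       "expression": "1"
#   }
# Alternative top-level keys handled by the same route: "translate", "suggest", "summarize",
# "assistant_id", "schedule", "convert_schedule", "greeting", "recommend", "image_url", "image_b64".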
@app.route('/gpt', methods=['POST'])
def gpt():
    assistant_id = ""
    assistant = None
    chat_messages = app.chat_messages.copy()
    chat_model = "gpt-4o-mini"
    use_video = False
    suggest = False
    summarize = False
    expression = False
    predict_q = 0
    max_char_msg = 500
    max_resp_token = 600
    category = []
    headlines = []
    image_url = ""
    num_choices = 1
    json_payload = request.get_json()
    if not json_payload:
        json_payload = []
    has_named_params = False
    app.logger.info("Request: " + str(json_payload))
    if isinstance(json_payload, dict):
        has_named_params = 'payload' in json_payload
        if 'payload' in json_payload:
            if 'predict_q' in json_payload:
                predict_q = 5 if json_payload['predict_q'] > 4 else 0 if json_payload['predict_q'] < 1 else \
                    json_payload['predict_q']
            if 'num_choices' in json_payload:
                num_choices = 5 if json_payload['num_choices'] > 4 else 1 if json_payload['num_choices'] < 2 else \
                    json_payload['num_choices']
            if 'use_video' in json_payload:
                use_video = json_payload['use_video'] == "1"
            if 'chat_model' in json_payload and 'assistant_id' not in json_payload:
                chat_model = json_payload['chat_model']
                max_resp_token = 2048
            if 'expression' in json_payload:
                expression = json_payload['expression'] == "1"
            if 'translate' in json_payload:
                chat_messages = app.translate_messages.copy()
                json_payload['payload'][-1]['content'] = json_payload['payload'][-1][
                    'content'] + f" (Translate to {json_payload['translate']})"
            elif 'suggest' in json_payload:
                suggest = json_payload['suggest'] == "1"
                if suggest:
                    chat_messages = app.suggest_messages.copy()
                else:
                    chat_messages = app.chat_messages.copy()
                json_payload['payload'][-1]['content'] = json_payload['payload'][-1][
                    'content'] + " What can I say to him/her?"
            elif 'summarize' in json_payload:
                summarize = json_payload['summarize'] == "1"
                if summarize:
                    chat_messages = app.summary_messages.copy()
                    max_char_msg = 2000
                    max_resp_token = 1000
                else:
                    chat_messages = app.chat_messages.copy()
                json_payload['payload'][-1]['content'] = "Please summarize this article:\n" + \
                    json_payload['payload'][-1]['content']
            elif 'assistant_id' in json_payload:
                assistant_id = json_payload['assistant_id']
                assistant = app.openai_client.beta.assistants.retrieve(assistant_id=assistant_id)
                chat_model = assistant.model
            else:
                chat_messages = app.chat_messages.copy()
            if 'schedule' in json_payload:
                timestamp = None
                if 'timestamp' in json_payload:
                    timestamp = json_payload["timestamp"]
                sched = json_payload["schedule"].copy()
                message = json_payload["payload"].copy()
                return suggest_schedule(sched, message, assistant_id, timestamp)
            elif 'convert_schedule' in json_payload:
                if json_payload['convert_schedule'] == "1":
                    return convert_to_schedule(json_payload['payload'].copy())
            json_payload = json_payload['payload']
            if isinstance(json_payload, dict):
                json_payload = [json_payload]
        elif 'greeting' in json_payload:
            # Build a time-of-day greeting for the company; if the company name sounds Islamic,
            # the greeting is asked to open with "Assalamu'alaikum".
            chat_messages = app.chat_messages.copy()
            company_name = json_payload['greeting']['company_name']
            timestamp = json_payload['greeting']['timestamp']
            islamic_message = f"Apakah Nama '{company_name}' terdapat unsur islami? Jawab dengan 'Ya' atau 'Tidak'"
            islam_messages = app.chat_messages.copy()
            islam_messages.append({
                "role": "user",
                "content": islamic_message
            })
            islamic_response = app.openai_client.chat.completions.create(model="gpt-4o-mini",
                                                                         messages=islam_messages,
                                                                         max_tokens=2, temperature=0.5)
            if 'Ya' in islamic_response.choices[0].message.content:
                greeting_message = f"Buatkan respons chatbot berupa greeting dari chat perusahaan bernama {company_name} pada jam {timestamp}, tidak perlu mention waktu, dan jawab dengan 'Assalamu'alaikum...' terlebih dahulu"
            else:
                greeting_message = f"Buatkan respons chatbot berupa greeting dari chat perusahaan bernama {company_name} pada jam {timestamp}, tidak perlu mention waktu"
            json_payload = [
                {
                    "role": "user",
                    "content": greeting_message
                }
            ]
        elif 'recommend' in json_payload:
            headlines = json_payload['recommend']['headlines']
            category = json_payload['recommend']['category']
            return recommend(headlines, category)
        elif 'image_url' in json_payload:
            image = json_payload['image_url']
            message = json_payload["message"] if 'message' in json_payload else "Ini gambar apa?"  # "What is this image?"
            return vision(message, image_url=image)
        elif 'image_b64' in json_payload:
            image = json_payload['image_b64']
            message = json_payload["message"] if 'message' in json_payload else "Ini gambar apa?"
            return vision(message, image_b64=image)  # pass the base64 payload, not image_url
        else:
            app.logger.info("This request uses the old json format")
            chat_messages = app.chat_messages.copy()
            # Old format: a single message object; wrap it so the slicing below works.
            json_payload = [json_payload]
    app.logger.info("Chat Messages:")
    app.logger.info(str(chat_messages))
    # Keep only the last five messages of the conversation.
    json_payload = json_payload[-5:]
    if assistant_id:
        chat_messages = []
    for message in json_payload:
        p_message = prune_message(message)
        if p_message['role'] == 'user':
            content = p_message['content'].lower()
        else:
            content = p_message['content']
        # Truncate each message to roughly max_char_msg characters on a word boundary.
        content_arr = content.split(" ")
        new_content_arr = content[:max_char_msg].split(" ")
        new_content_len = len(new_content_arr)
        arr = []
        for i in range(new_content_len):
            arr.append(content_arr[i])
        p_message['content'] = " ".join(arr)
        chat_messages.append(p_message)
    app.logger.info(chat_messages)
    result = {}
    try:
        n = num_choices
        # Route the cheaper default models through the roulette() weighting.
        if "gpt-3.5-turbo" in chat_model or "gpt-4o-mini" in chat_model:
            chat_model = roulette()
        app.logger.info(f"Model used: {chat_model}")
        if assistant_id and not suggest:
            runs = app.openai_client.beta.threads.create_and_run_poll(
                assistant_id=assistant_id,
                thread={
                    "messages": chat_messages
                }
            )
            if runs.status != "completed":
                # "Sorry, I am busy at the moment. Please try again later."
                result = {"role": "assistant", "content": "Maaf, saat ini saya sedang sibuk. Coba beberapa saat lagi."}
            else:
                messages = list(app.openai_client.beta.threads.messages.list(thread_id=runs.thread_id, run_id=runs.id))
                try:
                    message_content = messages[0].content[0].text
                    app.logger.info(message_content.value)
                    pattern = re.compile(r"【\d+:\d+†\(?source\)?】")
                    filtered_message = pattern.sub("", message_content.value)
                    result = {"role": "assistant", "content": filtered_message}
                except IndexError:
                    # "I currently do not have the information needed to answer your question."
                    result = {"role": "assistant", "content": "Saat ini saya tidak memiliki informasi yang diperlukan untuk menjawab pertanyaan Anda."}
        else:
            time.sleep(3)
            json_response = app.openai_client.chat.completions.create(model=chat_model,
                                                                      messages=chat_messages,
                                                                      max_tokens=max_resp_token, temperature=0.7, n=n)
            app.logger.info(json_response.choices[0].message)
            if has_named_params:
                if suggest:
                    choices = json_response.choices
                    messages = [i.message for i in choices]
                    json_formatted = []
                    for message in messages:
                        json_formatted.append({"role": "assistant", "content": message.content})
                    result = {"url": "", "message": json_formatted}
                else:
                    if use_video:
                        # TODO: to be implemented
                        result = {"url": url_for('download_file', name="test.mp4", _external=True),
                                  "message": {"role": "assistant", "content": json_response.choices[0].message.content}}
                    else:
                        result = {"role": "assistant", "content": json_response.choices[0].message.content}
            else:
                result = {"role": "assistant", "content": json_response.choices[0].message.content}
        if expression:
            exprr = expresso(text=result['content'])
            result['expression'] = exprr['expression']
        if predict_q:
            json_response_q = None
            if assistant_id:
                # "Give {predict_q} random questions I might ask about the assistant's topic, as a json array."
                query_q = {
                    "role": "user",
                    "content": f"Berikan {predict_q} pertanyaan random yang akan saya ajukan sesuai topik asisten dalam bentuk json array"
                }
            else:
                # "Give {predict_q} other questions I might ask based on this conversation, as a json array."
                query_q = {
                    "role": "user",
                    "content": f"Berikan {predict_q} pertanyaan lain yang akan saya ajukan berdasarkan percakapan kali ini dalam bentuk json array"
                }
            chat_messages.append(prune_message(result))
            chat_messages.append(query_q)
            if assistant_id:
                runs = app.openai_client.beta.threads.create_and_run_poll(
                    assistant_id=assistant_id,
                    thread={
                        "messages": chat_messages
                    }
                )
                if runs.status == "completed":
                    messages = list(app.openai_client.beta.threads.messages.list(thread_id=runs.thread_id, run_id=runs.id))
                    message_content = messages[0].content[0].text
                    app.logger.info(message_content.value)
                    pattern = re.compile(r"【\d+:\d+†\(?source\)?】")
                    filtered_message = pattern.sub("", message_content.value)
                    predict_q_arr = [
                        {
                            "role": "system",
                            "content": assistant.instructions
                        },
                        {
                            "role": "assistant",
                            "content": filtered_message
                        },
                        {
                            "role": "user",
                            "content": f"Ekstrak {predict_q} pertanyaan tersebut dalam bentuk json array"
                        }
                    ]
                    json_response_q = app.openai_client.chat.completions.create(
                        model=chat_model,
                        messages=predict_q_arr,
                        temperature=0.2,
                        response_format={"type": "json_object"}
                    )
            else:
                json_response_q = app.openai_client.chat.completions.create(model=chat_model,
                                                                            messages=chat_messages,
                                                                            max_tokens=max_resp_token,
                                                                            temperature=0.2,
                                                                            response_format={"type": "json_object"})
            if json_response_q:
                json_response_dict = json.loads(json_response_q.choices[0].message.content)
                if json_response_dict is not None:
                    # Normalize the JSON into a flat list of question strings.
                    if isinstance(json_response_dict, dict):
                        if len(json_response_dict) > 1:
                            qs = []
                            for q in json_response_dict.values():
                                qs.append(q)
                            json_response_dict = qs
                        else:
                            try:
                                first_key = next(iter(json_response_dict))
                                json_response_dict = json_response_dict[first_key]
                            except StopIteration:
                                json_response_dict = []
                    elif isinstance(json_response_dict, str):
                        json_response_dict = [json_response_dict]
                    result["predict_q"] = json_response_dict
    except openai.APITimeoutError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": e.message}, 408
    except openai.NotFoundError as e:
        app.logger.exception("error")
        result = {"status": "error", "message": json.loads(e.response.content)['error']['message']}, e.status_code
    except Exception:
        app.logger.exception("error")
        result = {"status": "error", "message": "Please try again"}, 405
    app.logger.info("Result: " + str(result))
    return json.dumps(result)


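# Example multipart form for the /train route below (field names follow the handler; the file
# name and model are illustrative):
#   train_file   -> a .jsonl, .csv, or Alpaca-style .json training file
#   prev_model   -> "gpt-3.5-turbo" (optional base model to fine-tune)
#   instructions -> optional system instructions used during conversion
#   mock         -> if present, skips the actual fine-tuning call
# Polling an existing job instead: send only job_id=<fine-tuning job id>.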
@app.route('/train', methods=['POST'])
def train():
    prev_model = "gpt-3.5-turbo"
    instructions = None
    if 'job_id' in request.form:
        return train_with_id(job_id=request.form['job_id'])
    elif 'train_file' in request.files:
        train_file = request.files['train_file']
        app.logger.info({"filename": train_file.filename})
        if 'instructions' in request.form:
            instructions = request.form['instructions']
        openai_file = None
        # Accept .jsonl as-is; convert .csv and Alpaca-style .json to the chat fine-tuning format.
        # Use the final extension so names such as "data.train.csv" are still recognized.
        extension = train_file.filename.split('.')[-1]
        if extension == 'jsonl':
            openai_file = train_file.stream.read()
        elif extension == 'csv':
            openai_file = csv_to_jsonl(train_file.stream.read(), instructions)
        elif extension == 'json':
            openai_file = alpaca_to_chatgpt(train_file, instructions)
        if 'prev_model' in request.form:
            prev_model = request.form['prev_model']
        app.logger.info(f"Previous model: {prev_model}")
        if 'mock' not in request.form:
            f = app.openai_client.files.create(
                file=openai_file,
                purpose="fine-tune"
            )
            job = app.openai_client.fine_tuning.jobs.create(
                training_file=f.id,
                model=prev_model,
                hyperparameters={
                    "n_epochs": 5
                }
            )
            app.logger.info({"mock": "no", "status": job.status, "job_id": job.id})
            retval = {"status": job.status, "job_id": job.id}
            return retval
        else:
            app.logger.info({"mock": "yes", "status": "ok"})
            return {"status": "ok"}
    else:
        app.logger.error({"status": "error", "message": "Training file not found"})
        return {"status": "error", "message": "Training file not found"}


def train_with_id(job_id):
    # Poll an existing fine-tuning job and report its status (and model name once finished).
    try:
        time.sleep(3)
        job = app.openai_client.fine_tuning.jobs.retrieve(job_id)
        if job.fine_tuned_model is None:
            app.logger.info({"job_id": job_id, "status": job.status})
            return {"status": job.status}
        else:
            app.logger.info({"job_id": job_id, "status": job.status, "model_name": job.fine_tuned_model})
            return {"status": job.status, "model_name": job.fine_tuned_model}
    except Exception:
        app.logger.exception("error")
        return {"status": "Could not find job from id"}


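# Example form fields for the /assistant/create route below (a JSON body is accepted too;
# values are illustrative):
#   name            -> "Customer Support Bot"
#   instructions    -> "Answer questions about our products."
#   model_name      -> "gpt-4o-mini"
#   temperature     -> 0.7 (clamped to 0.0-1.0)
#   vector_store_id -> optional existing vector store to attach
#   attachment1..N  -> optional files; uploaded into a new vector store when no vector_store_id is given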
@app.route('/assistant/create', methods=['POST'])
def assistant_create():
    model_name = "gpt-4o-mini"
    assistant_name = "Assistant"
    assistant_ins = "Please respond professionally and in a friendly manner, using the same language as the original request."
    if request.is_json:
        request_form = request.json
    else:
        request_form = request.form.copy()
    assistant_name = request_form.pop('name', assistant_name)
    assistant_ins = request_form.pop('instructions', assistant_ins)
    model_name = request_form.pop('model_name', model_name)
    vector_store_id = request_form.pop('vector_store_id', "")
    file_batch_id = ""
    try:
        # Clamp temperature to the 0.0-1.0 range; fall back to 1.0 on invalid input.
        temperature = float(request_form.pop('temperature', 1.0))
        if temperature < 0.0:
            temperature = 0.0
        elif temperature > 1.0:
            temperature = 1.0
    except ValueError:
        temperature = 1.0
    tool_resources = {"tool_resources": {"file_search": {"vector_store_ids": [vector_store_id]}}} \
        if vector_store_id \
        else {}
    try:
        time.sleep(3)
        assistant = app.openai_client.beta.assistants.create(
            name=assistant_name,
            instructions=assistant_ins,
            model=model_name,
            tools=[{"type": "file_search"}],
            temperature=temperature,
            **tool_resources,
            **request_form
        )
        if 'attachment1' in request.files and not vector_store_id:
            # Upload the attachments into a new vector store, then point the assistant at it.
            resp_att = assistant_att()
            retval = {}
            if resp_att['status'] == 'completed':
                resp_upd = assistant_update(assistant.id, resp_att['vector_store_id'])
                assistant_updated = "1" if resp_upd['status'] == 'ok' else "0"
            else:
                assistant_updated = "0"
            if 'vector_store_id' in resp_att:
                retval['vector_store_id'] = resp_att['vector_store_id']
            if 'file_batch_id' in resp_att:
                retval['file_batch_id'] = resp_att['file_batch_id']
            retval['status'] = "ok"
            retval['assistant_id'] = assistant.id
            retval['assistant_updated'] = assistant_updated
            return retval
        else:
            return {"status": "ok", "assistant_id": assistant.id, "assistant_updated": "1" if vector_store_id else "0"}
    except ValueError:
        app.logger.exception("error")
        return {"status": "error",
                "message": "Failed to create assistant, please check whether your parameters are correct"}
    except openai.NotFoundError as e:
        app.logger.exception("error")
        return {"status": "error", "message": json.loads(e.response.content)['error']['message']}, e.status_code
    except Exception:
        app.logger.exception("error")
        return {"status": "error", "message": "Failed to create assistant, please try again"}, 405


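# Example form fields for the /assistant/attachment route below (values are illustrative):
#   attachment1, attachment2, ... -> files to upload into a vector store
#   vector_store_id               -> optional: add the files to an existing store
#   file_batch_id                 -> optional: if given, only the status of that batch is returned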
@app.route('/assistant/attachment', methods=['POST'])
def assistant_att():
    vector_store_id = request.form.get('vector_store_id', '')
    file_batch_id = request.form.get('file_batch_id', '')
    attachments: list[str] = []
    try:
        if not file_batch_id:
            if 'attachment1' not in request.files:
                return {"status": "error", "message": "No file for attachments"}
            else:
                # Upload attachment1, attachment2, ... until a gap is found.
                has_attachments = True
                n = 1
                while has_attachments:
                    if f'attachment{n}' in request.files:
                        retf = app.openai_client.files.create(
                            file=(request.files[f'attachment{n}'].filename,
                                  request.files[f'attachment{n}'].read()),
                            purpose="assistants"
                        )
                        retf.filename = request.files[f'attachment{n}'].filename
                        attachments.append(retf.id)
                        n = n + 1
                    else:
                        has_attachments = False
                if vector_store_id:
                    vector_store = app.openai_client.beta.vector_stores.retrieve(vector_store_id=vector_store_id)
                else:
                    vector_store = app.openai_client.beta.vector_stores.create(
                        expires_after={
                            "anchor": "last_active_at",
                            "days": 365
                        }
                    )
                file_batch = app.openai_client.beta.vector_stores.file_batches.create_and_poll(
                    vector_store_id=vector_store.id,
                    file_ids=attachments
                )
                return {"status": file_batch.status, "vector_store_id": vector_store.id, "file_batch_id": file_batch.id}
        else:
            # A batch id was supplied: just report the status of that existing batch.
            file_batch = app.openai_client.beta.vector_stores.file_batches.retrieve(file_batch_id,
                                                                                    vector_store_id=vector_store_id)
            return {"status": file_batch.status}
    except Exception:
        app.logger.exception("error")
        return {"status": "error", "message": "Upload attachment failed, please try again"}


@app.route('/assistant/attachment/update', methods=['POST'])
def assistant_attachment_update():
    # Not implemented yet.
    pass


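# Example form fields for the /assistant/update route below (values are illustrative):
#   assistant_id    -> id of the assistant to update (required unless called internally with aid/vid)
#   vector_store_id -> optional vector store to attach for file_search
#   name            -> optional new name
#   instructions    -> optional new instructions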
@app.route('/assistant/update', methods=['POST'])
def assistant_update(aid=None, vid=None):
    try:
        request_form = request.form.copy()
        if aid is not None and vid is not None:
            assistant_id = aid
            vector_store_id = vid
        else:
            assistant_id = request_form.pop('assistant_id')
            vector_store_id = request_form.pop('vector_store_id', None)
        kwargs = {"assistant_id": assistant_id}
        if vector_store_id is not None:
            kwargs['tool_resources'] = {"file_search": {"vector_store_ids": [vector_store_id]}}
        if 'name' in request_form:
            kwargs['name'] = request_form.pop('name')
        if 'instructions' in request_form:
            kwargs['instructions'] = request_form.pop('instructions')
        time.sleep(3)
        app.openai_client.beta.assistants.update(**kwargs)
        return {"status": "ok"}
    except Exception:
        app.logger.exception("error")
        return {"status": "error", "message": "Update assistant failed, please try again"}


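# Example JSON body for the /llama route below, which proxies to a local Ollama server on
# localhost:11434 (values are illustrative):
#   {"payload": [{"role": "user", "content": "Apa ibu kota Indonesia?"}]}
# A bare message object or a list of messages is also accepted; only the last message is used.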
@app.route('/llama', methods=['POST'])
def llama():
    max_char_msg = 500
    max_resp_token = 600
    json_payload = request.get_json()
    if not json_payload:
        json_payload = []
    has_named_params = False
    if isinstance(json_payload, dict):
        has_named_params = 'payload' in json_payload
        if 'payload' in json_payload:
            json_payload = json_payload['payload']
            if isinstance(json_payload, dict):
                json_payload = [json_payload]
        else:
            json_payload = [json_payload]
    message = json_payload[-1]
    content = message['content']
    # Truncate the prompt to roughly max_char_msg characters on a word boundary.
    content_arr = content.split(" ")
    new_content_arr = content[:max_char_msg].split(" ")
    new_content_len = len(new_content_arr)
    arr = []
    for i in range(new_content_len):
        arr.append(content_arr[i])
    content = " ".join(arr)
    content = content + " Jawab dengan Bahasa Indonesia"  # "Answer in Indonesian"
    try:
        json_request = {
            "model": "llama3.1",
            "prompt": content,
            "stream": False
        }
        # Forward the prompt to the local Ollama generate API.
        r = requests.post("http://localhost:11434/api/generate", json=json_request)
        if r.status_code == 200:
            result = {
                "role": "assistant",
                "content": r.json()["response"]
            }
        else:
            result = {}, r.status_code
    except Exception:
        app.logger.exception("error")
        result = {"status": "error", "message": "Please try again"}, 405
    return result


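# The /speech route below works in two directions (form fields; values are illustrative):
#   audio -> an uploaded audio file, transcribed with whisper-1; returns {"status": "ok", "message": "<text>"}
#   text  -> a string synthesized with tts-1-hd; returns the generated MP3 via /files/<uuid>.mp3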
@app.route('/speech', methods=['POST'])
def speech(text=""):
    time.sleep(3)
    if not text and 'text' not in request.form:
        # No text supplied: transcribe the uploaded audio file with Whisper.
        audio_file = request.files.get('audio')
        res = app.openai_client.audio.transcriptions.create(
            model="whisper-1",
            file=(audio_file.filename, audio_file.stream.read())
        )
        return {"status": "ok", "message": res.text}
    elif 'text' in request.form or text:
        # Text supplied: synthesize speech and return the generated MP3.
        text = request.form['text'] if 'text' in request.form else text
        uu_id = str(uuid.uuid4())
        app.logger.info(text)
        with app.openai_client.audio.speech.with_streaming_response.create(
                model="tts-1-hd",
                voice="echo",
                speed=0.8,
                input=text
        ) as res:
            res.stream_to_file(os.path.join(app.config['UPLOAD_FOLDER'], f"{uu_id}.mp3"))
        return download_file(f"{uu_id}.mp3")


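# Example form field for the /expression route below (the value is illustrative):
#   text -> "I can't believe we won the championship!"
# Returns JSON such as {"expression": "happy"}; falls back to {"expression": "indifferent"} on errors.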
@app.route('/expression', methods=['POST'])
def expresso(text=""):
    # Classify the emotional expression of a text into one of seven labels.
    if not text:
        if 'text' in request.form:
            text = request.form['text']
        else:
            return {"status": "error", "message": "No text for expression"}
    try:
        response = app.openai_client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {
                    "role": "user",
                    "content": f"What is the closest expression of this text, choose between happy, sad, indifferent, fear, anger, surprise, or disgust, output json with key 'expression':\n\n{text}\n\n"
                }
            ],
            response_format={"type": "json_object"}
        )
        response_message = response.choices[0].message.content
        return json.loads(response_message)
    except Exception:
        # Fall back to a neutral label if the call or JSON parsing fails.
        return {"expression": "indifferent"}


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8348, debug=True, ssl_context=ssl)