main.py

import os
import json
import requests
from openai import OpenAI
from flask import Flask, request, jsonify, send_from_directory, url_for
from convert import alpaca_to_chatgpt, csv_to_jsonl

app = Flask(__name__)

ssl = None
# ssl = ('/etc/ssl/sample.crt', '/etc/ssl/sample.pem')

app.openai_key = os.environ.get("OPENAI_KEY", "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")
app.openai_client = OpenAI(api_key=app.openai_key)

# System prompts for the different chat modes; the /gpt flags select among them.
app.chat_messages = [
    {"role": "system",
     "content": "Please respond professionally and in a friendly manner, using the same language as the original request."}
]
app.translate_messages = [
    {"role": "system",
     "content": "Please translate using the requested language."}
]
app.suggest_messages = [
    {"role": "system",
     "content": "Please suggest reply messages based on the previous conversations and the user's request."}
]
app.recommend_messages = [
    {"role": "system",
     "content": "Give normalized total weight of each category in json based on headlines"}
]
app.summary_messages = [
    {"role": "system",
     "content": "Please summarize an article."}
]

UPLOAD_FOLDER = 'files'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
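

# Serve files from the local upload folder (e.g. the video URL returned by /gpt).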
@app.route('/files/<name>')
def download_file(name):
    return send_from_directory(app.config["UPLOAD_FOLDER"], name)


@app.route('/', methods=['GET', 'POST'])
def test():
    return jsonify({"status": "0"})
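

# Ask the model to weight each category against the given headlines, normalize the
# weights, and return the parsed JSON result.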
def recommend(headlines, category):
    chat_messages = app.recommend_messages.copy()
    try:
        # Prompt (Indonesian): give a weight for each category, sum and normalize, answer as JSON.
        json_payload = {
            "role": "user",
            "content": f"""{headlines}
Berikan nilai berat masing-masing kategori, jumlahkan dan normalisasikan:
{category}
Berikan dalam bentuk json
"""
        }
        chat_messages.append(json_payload)
        print(chat_messages)
        json_response = app.openai_client.chat.completions.create(
            model="gpt-3.5-turbo-1106",
            messages=chat_messages,
            response_format={"type": "json_object"}
        )
        print(json_response.choices[0].message.content)
        return json.loads(json_response.choices[0].message.content)
    except Exception as error_print:
        app.logger.error(error_print)
        return {}, 405
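

# Send a text prompt plus an image (public URL or base64-encoded JPEG) to the
# vision-capable model and return the assistant's reply.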
def vision(message, image_url=None, image_b64=None):
    chat_messages = app.chat_messages.copy()
    url = ""
    if image_url:
        url = f"{image_url}"
    elif image_b64:
        url = f"data:image/jpeg;base64,{image_b64}"
    try:
        json_payload = {
            "role": "user",
            "content": [
                {"type": "text", "text": message},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": url,
                    },
                },
            ],
        }
        chat_messages.append(json_payload)
        print(chat_messages)
        json_response = app.openai_client.chat.completions.create(
            model="gpt-4o",
            messages=chat_messages,
            max_tokens=500
        )
        return {"role": "assistant", "content": json_response.choices[0].message.content}
    except Exception as error_print:
        app.logger.error(error_print)
        return {}, 405
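

# Main chat endpoint. The body is either a bare message (or list of messages) or a dict
# with a 'payload' list plus optional flags: 'num_choices', 'use_video', 'chat_model',
# 'translate', 'suggest', 'summarize', or one of the special keys 'greeting', 'recommend',
# 'image_url' / 'image_b64'.
# Example body (hypothetical values):
# {"payload": [{"role": "user", "content": "Halo"}], "suggest": "1", "num_choices": 3}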
@app.route('/gpt', methods=['POST'])
def gpt():
    chat_messages = app.chat_messages.copy()
    chat_model = "gpt-3.5-turbo"
    use_video = False
    suggest = False
    summarize = False
    max_char_msg = 500
    max_resp_token = 600
    category = []
    headlines = []
    image_url = ""
    num_choices = 1
    json_payload = request.get_json()
    if not json_payload:
        json_payload = []
    has_named_params = False
    if isinstance(json_payload, dict):
        has_named_params = 'payload' in json_payload
        if 'payload' in json_payload:
            if 'num_choices' in json_payload:
                num_choices = 5 if json_payload['num_choices'] > 5 else json_payload['num_choices']
            if 'use_video' in json_payload:
                use_video = json_payload['use_video'] == "1"
            if 'chat_model' in json_payload:
                chat_model = json_payload['chat_model']
                max_resp_token = 2048
            if 'translate' in json_payload:
                chat_messages = app.translate_messages.copy()
                json_payload['payload'][-1]['content'] += f" (Translate to {json_payload['translate']})"
            elif 'suggest' in json_payload:
                suggest = json_payload['suggest'] == "1"
                if suggest:
                    chat_messages = app.suggest_messages.copy()
                else:
                    chat_messages = app.chat_messages.copy()
                json_payload['payload'][-1]['content'] += " What can I say to him/her?"
            elif 'summarize' in json_payload:
                summarize = json_payload['summarize'] == "1"
                if summarize:
                    chat_messages = app.summary_messages.copy()
                    max_char_msg = 2000
                    max_resp_token = 1000
                else:
                    chat_messages = app.chat_messages.copy()
                json_payload['payload'][-1]['content'] = ("Please summarize this article:\n"
                                                          + json_payload['payload'][-1]['content'])
            else:
                chat_messages = app.chat_messages.copy()
            json_payload = json_payload['payload']
            if isinstance(json_payload, dict):
                json_payload = [json_payload]
        elif 'greeting' in json_payload:
            chat_messages = app.chat_messages.copy()
            company_name = json_payload['greeting']['company_name']
            timestamp = json_payload['greeting']['timestamp']
            # Ask (in Indonesian) whether the company name has an Islamic element; answer 'Ya' or 'Tidak'.
            islamic_message = f"Apakah Nama '{company_name}' terdapat unsur islami? Jawab dengan 'Ya' atau 'Tidak'"
            islam_messages = app.chat_messages.copy()
            islam_messages.append({
                "role": "user",
                "content": islamic_message
            })
            islamic_response = app.openai_client.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=islam_messages,
                max_tokens=2, temperature=0.5)
            # Generate a greeting for the company at the given time; start with
            # "Assalamu'alaikum..." when the name was judged Islamic.
            if 'Ya' in islamic_response.choices[0].message.content:
                greeting_message = f"Buatkan respons chatbot berupa greeting dari chat perusahaan bernama {company_name} pada jam {timestamp}, tidak perlu mention waktu, dan jawab dengan 'Assalamu'alaikum...' terlebih dahulu"
            else:
                greeting_message = f"Buatkan respons chatbot berupa greeting dari chat perusahaan bernama {company_name} pada jam {timestamp}, tidak perlu mention waktu"
            json_payload = [
                {
                    "role": "user",
                    "content": greeting_message
                }
            ]
        elif 'recommend' in json_payload:
            headlines = json_payload['recommend']['headlines']
            category = json_payload['recommend']['category']
            return recommend(headlines, category)
        elif 'image_url' in json_payload:
            image = json_payload['image_url']
            # Default prompt (Indonesian): "What is this image?"
            message = json_payload["message"] if 'message' in json_payload else "Ini gambar apa?"
            return vision(message, image_url=image)
        elif 'image_b64' in json_payload:
            image = json_payload['image_b64']
            message = json_payload["message"] if 'message' in json_payload else "Ini gambar apa?"
            return vision(message, image_b64=image)
        else:
            chat_messages = app.chat_messages.copy()
            json_payload = [json_payload]
    # Keep only the five most recent messages and truncate each one to roughly
    # max_char_msg characters, rounded down to whole words.
    json_payload = json_payload[-5:]
    for message in json_payload:
        if message['role'] == 'user':
            content = message['content'].lower()
        else:
            content = message['content']
        content_arr = content.split(" ")
        new_content_arr = content[:max_char_msg].split(" ")
        new_content_len = len(new_content_arr)
        arr = []
        for i in range(new_content_len):
            arr.append(content_arr[i])
        message['content'] = " ".join(arr)
        chat_messages.append(message)
    app.logger.info(chat_messages)
    result = {}
    try:
        n = num_choices
        json_response = app.openai_client.chat.completions.create(
            model=chat_model,
            messages=chat_messages,
            max_tokens=max_resp_token, temperature=0.7, n=n)
        app.logger.info(json_response.choices[0].message)
        if has_named_params:
            if suggest:
                choices = json_response.choices
                messages = [i.message for i in choices]
                json_formatted = []
                for message in messages:
                    json_formatted.append({"role": "assistant", "content": message.content})
                result = {"url": "", "message": json_formatted}
            elif use_video:
                # TODO: to be implemented
                result = {"url": url_for('download_file', name="test.mp4", _external=True),
                          "message": {"role": "assistant", "content": json_response.choices[0].message.content}}
            else:
                result = {"role": "assistant", "content": json_response.choices[0].message.content}
        else:
            result = {"role": "assistant", "content": json_response.choices[0].message.content}
    except Exception as error_print:
        app.logger.error(error_print)
        result = {}, 405
    return result
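

# Fine-tuning endpoint: either polls an existing job ('job_id' form field) or uploads a
# training file (.jsonl, .csv, or Alpaca-style .json) and starts a new fine-tuning job.
# Passing a 'mock' form field skips the actual OpenAI calls.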
@app.route('/train', methods=['POST'])
def train():
    prev_model = "gpt-3.5-turbo"
    if 'job_id' in request.form:
        return train_with_id(job_id=request.form['job_id'])
    elif 'train_file' in request.files:
        train_file = request.files['train_file']
        app.logger.info({"filename": train_file.filename})
        openai_file = None
        extension = train_file.filename.split('.')[-1]
        if extension == 'jsonl':
            openai_file = train_file.stream.read()
        elif extension == 'csv':
            openai_file = csv_to_jsonl(train_file.stream.read())
        elif extension == 'json':
            openai_file = alpaca_to_chatgpt(train_file)
        if 'prev_model' in request.form:
            prev_model = request.form['prev_model']
            app.logger.info(f"Previous model: {prev_model}")
        if 'mock' not in request.form:
            f = app.openai_client.files.create(
                file=openai_file,
                purpose="fine-tune"
            )
            job = app.openai_client.fine_tuning.jobs.create(
                training_file=f.id,
                model=prev_model,
                hyperparameters={
                    "n_epochs": 5
                }
            )
            app.logger.info({"mock": "no", "status": job.status, "job_id": job.id})
            return {"status": job.status, "job_id": job.id}
        else:
            app.logger.info({"mock": "yes", "status": "ok"})
            return {"status": "ok"}
    else:
        app.logger.error({"status": "error", "message": "Training file not found"})
        return {"status": "error", "message": "Training file not found"}
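

# Look up a fine-tuning job by id and report its status, including the fine-tuned
# model name once the job has finished.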
def train_with_id(job_id):
    try:
        job = app.openai_client.fine_tuning.jobs.retrieve(job_id)
        if job.fine_tuned_model is None:
            app.logger.info({"job_id": job_id, "status": job.status})
            return {"status": job.status}
        else:
            app.logger.info({"job_id": job_id, "status": job.status, "model_name": job.fine_tuned_model})
            return {"status": job.status, "model_name": job.fine_tuned_model}
    except Exception as error_print:
        app.logger.error(error_print)
        return {"status": "Could not find job from id"}
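

# Chat endpoint backed by a local llama3 server at http://localhost:11434 (the default
# Ollama API port); replies are requested in Indonesian.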
@app.route('/llama', methods=['POST'])
def llama():
    max_char_msg = 500
    max_resp_token = 600
    json_payload = request.get_json()
    if not json_payload:
        json_payload = []
    has_named_params = False
    if isinstance(json_payload, dict):
        has_named_params = 'payload' in json_payload
        if 'payload' in json_payload:
            json_payload = json_payload['payload']
            if isinstance(json_payload, dict):
                json_payload = [json_payload]
        else:
            json_payload = [json_payload]
    # Use only the latest message, truncated to roughly max_char_msg characters (whole words).
    message = json_payload[-1]
    content = message['content']
    content_arr = content.split(" ")
    new_content_arr = content[:max_char_msg].split(" ")
    new_content_len = len(new_content_arr)
    arr = []
    for i in range(new_content_len):
        arr.append(content_arr[i])
    content = " ".join(arr)
    # "Jawab dengan Bahasa Indonesia" = "Answer in Indonesian".
    content = content + " Jawab dengan Bahasa Indonesia"
    try:
        json_request = {
            "model": "llama3",
            "prompt": content,
            "stream": False
        }
        r = requests.post("http://localhost:11434/api/generate", json=json_request)
        if r.status_code == 200:
            result = {
                "role": "assistant",
                "content": r.json()["response"]
            }
        else:
            result = {}, r.status_code
    except Exception as error_print:
        app.logger.error(error_print)
        result = {}, 405
    return result

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8348, debug=True, ssl_context=ssl)