main.py

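# Flask service exposing OpenAI-backed endpoints for chat, translation, reply
# suggestions, article summarization, headline-based category recommendation,
# image description (vision), and fine-tuning job management.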
import io
import os
import json

from openai import OpenAI
from flask import Flask, request, jsonify, send_from_directory, url_for

from convert import alpaca_to_chatgpt, csv_to_jsonl

app = Flask(__name__)

ssl = None
# ssl = ('/etc/ssl/sample.crt', '/etc/ssl/sample.pem')

app.openai_key = os.environ.get("OPENAI_KEY", "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")
app.openai_client = OpenAI(api_key=app.openai_key)

app.chat_messages = [
    {"role": "system",
     "content": "Please respond professionally and in a friendly manner, using the same language as the original request."}
]
app.translate_messages = [
    {"role": "system",
     "content": "Please translate using the requested language."}
]
app.suggest_messages = [
    {"role": "system",
     "content": "Please suggest reply messages based on the previous conversations and the user's request."}
]
app.recommend_messages = [
    {"role": "system",
     "content": "Give normalized total weight of each category in json based on headlines"}
]
app.summary_messages = [
    {"role": "system",
     "content": "Please summarize an article."}
]

UPLOAD_FOLDER = 'files'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
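
# Serve files from the upload folder (e.g. the video file referenced by the /gpt "use_video" response).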
@app.route('/files/<name>')
def download_file(name):
    return send_from_directory(app.config["UPLOAD_FOLDER"], name)
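
# Simple health-check endpoint.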
@app.route('/', methods=['GET', 'POST'])
def test():
    return jsonify({"status": "0"})
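
# Weight the given categories against the supplied headlines via the JSON-mode chat model
# and return the normalized weights parsed from the model's JSON reply.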
def recommend(headlines, category):
    chat_messages = app.recommend_messages.copy()
    try:
        # Prompt (Indonesian): "Give a weight for each category, sum them and normalize: ... Return the result as JSON".
        json_payload = {
            "role": "user",
            "content": f"""{headlines}
Berikan nilai berat masing-masing kategori, jumlahkan dan normalisasikan:
{category}
Berikan dalam bentuk json
"""
        }
        chat_messages.append(json_payload)
        print(chat_messages)
        json_response = app.openai_client.chat.completions.create(model="gpt-3.5-turbo-1106",
                                                                  messages=chat_messages,
                                                                  response_format={"type": "json_object"})
        print(json_response.choices[0].message.content)
        return json.loads(json_response.choices[0].message.content)
    except Exception as error_print:
        app.logger.error(error_print)
        return {}, 405
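
# Describe an image, supplied either as a public URL or as a base64-encoded JPEG,
# using the GPT-4 vision model.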
def vision(message, image_url=None, image_b64=None):
    chat_messages = app.chat_messages.copy()
    url = ""
    if image_url:
        url = f"{image_url}"
    elif image_b64:
        url = f"data:image/jpeg;base64,{image_b64}"
    try:
        json_payload = {
            "role": "user",
            "content": [
                {"type": "text", "text": message},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": url,
                    },
                },
            ],
        }
        chat_messages.append(json_payload)
        print(chat_messages)
        json_response = app.openai_client.chat.completions.create(
            model="gpt-4-vision-preview",
            messages=chat_messages,
            max_tokens=500
        )
        return {"role": "assistant", "content": json_response.choices[0].message.content}
    except Exception as error_print:
        app.logger.error(error_print)
        return {}, 405
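
# Main chat endpoint. Depending on the keys in the JSON body it handles plain chat,
# translation, reply suggestions, summarization, greetings, category recommendation,
# and image description, then forwards the (truncated) conversation to the chat model.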
@app.route('/gpt', methods=['POST'])
def gpt():
    chat_messages = app.chat_messages.copy()
    chat_model = "gpt-3.5-turbo"
    use_video = False
    suggest = False
    summarize = False
    max_char_msg = 500
    max_resp_token = 600
    category = []
    headlines = []
    image_url = ""
    num_choices = 1
    json_payload = request.get_json()
    if not json_payload:
        json_payload = []
    has_named_params = False
    if isinstance(json_payload, dict):
        has_named_params = 'payload' in json_payload
        if 'payload' in json_payload:
            if 'num_choices' in json_payload:
                num_choices = 5 if json_payload['num_choices'] > 5 else json_payload['num_choices']
            if 'use_video' in json_payload:
                use_video = json_payload['use_video'] == "1"
            if 'chat_model' in json_payload:
                chat_model = json_payload['chat_model']
                max_resp_token = 2048
            if 'translate' in json_payload:
                chat_messages = app.translate_messages.copy()
                json_payload['payload'][-1]['content'] = json_payload['payload'][-1][
                    'content'] + f" (Translate to {json_payload['translate']})"
            elif 'suggest' in json_payload:
                suggest = json_payload['suggest'] == "1"
                if suggest:
                    chat_messages = app.suggest_messages.copy()
                else:
                    chat_messages = app.chat_messages.copy()
                json_payload['payload'][-1]['content'] = json_payload['payload'][-1][
                    'content'] + " What can I say to him/her?"
            elif 'summarize' in json_payload:
                summarize = json_payload['summarize'] == "1"
                if summarize:
                    chat_messages = app.summary_messages.copy()
                    max_char_msg = 2000
                    max_resp_token = 4096
                else:
                    chat_messages = app.chat_messages.copy()
                json_payload['payload'][-1]['content'] = "Please summarize this article:\n" + \
                    json_payload['payload'][-1]['content']
            else:
                chat_messages = app.chat_messages.copy()
            json_payload = json_payload['payload']
            if isinstance(json_payload, dict):
                json_payload = [json_payload]
        elif 'greeting' in json_payload:
            chat_messages = app.chat_messages.copy()
            company_name = json_payload['greeting']['company_name']
            timestamp = json_payload['greeting']['timestamp']
            # Prompt (Indonesian): "Does the name '<company_name>' contain Islamic elements? Answer 'Ya' (yes) or 'Tidak' (no)."
            islamic_message = f"Apakah Nama '{company_name}' terdapat unsur islami? Jawab dengan 'Ya' atau 'Tidak'"
            islam_messages = app.chat_messages.copy()
            islam_messages.append({
                "role": "user",
                "content": islamic_message
            })
            islamic_response = app.openai_client.chat.completions.create(model="gpt-3.5-turbo",  # GPT-3.5 Turbo engine
                                                                         messages=islam_messages,
                                                                         max_tokens=2, temperature=0.5)
            if 'Ya' in islamic_response.choices[0].message.content:
                # Prompt (Indonesian): generate a greeting from the company chat at the given time, without
                # mentioning the time, opening with "Assalamu'alaikum...".
                greeting_message = f"Buatkan respons chatbot berupa greeting dari chat perusahaan bernama {company_name} pada jam {timestamp}, tidak perlu mention waktu, dan jawab dengan 'Assalamu'alaikum...' terlebih dahulu"
            else:
                # Same greeting prompt without the Islamic salutation.
                greeting_message = f"Buatkan respons chatbot berupa greeting dari chat perusahaan bernama {company_name} pada jam {timestamp}, tidak perlu mention waktu"
            json_payload = [
                {
                    "role": "user",
                    "content": greeting_message
                }
            ]
        elif 'recommend' in json_payload:
            headlines = json_payload['recommend']['headlines']
            category = json_payload['recommend']['category']
            return recommend(headlines, category)
        elif 'image_url' in json_payload:
            image = json_payload['image_url']
            # Default question (Indonesian): "Ini gambar apa?" ("What is this image?")
            message = json_payload["message"] if 'message' in json_payload else "Ini gambar apa?"
            return vision(message, image_url=image)
        elif 'image_b64' in json_payload:
            image = json_payload['image_b64']
            message = json_payload["message"] if 'message' in json_payload else "Ini gambar apa?"
            return vision(message, image_b64=image)  # pass the base64 payload, not the (empty) image_url
        else:
            chat_messages = app.chat_messages.copy()
            json_payload = [json_payload]
    # Keep only the last five messages and trim each one to roughly max_char_msg characters, on word boundaries.
    json_payload = json_payload[-5:]
    for message in json_payload:
        if message['role'] == 'user':
            content = message['content'].lower()
        else:
            content = message['content']
        content_arr = content.split(" ")
        new_content_arr = content[:max_char_msg].split(" ")
        new_content_len = len(new_content_arr)
        arr = []
        for i in range(new_content_len):
            arr.append(content_arr[i])
        message['content'] = " ".join(arr)
        chat_messages.append(message)
    app.logger.info(chat_messages)
    result = {}
    try:
        n = num_choices
        json_response = app.openai_client.chat.completions.create(model=chat_model,
                                                                  messages=chat_messages,
                                                                  max_tokens=max_resp_token, temperature=0.7, n=n)
        app.logger.info(json_response.choices[0].message)
        if has_named_params:
            if suggest:
                choices = json_response.choices
                messages = [i.message for i in choices]
                json_formatted = []
                for message in messages:
                    json_formatted.append({"role": "assistant", "content": message.content})
                result = {"url": "", "message": json_formatted}
            elif use_video:
                # TODO: to be implemented
                result = {"url": url_for('download_file', name="test.mp4", _external=True),
                          "message": {"role": "assistant", "content": json_response.choices[0].message.content}}
            else:
                result = {"role": "assistant", "content": json_response.choices[0].message.content}
        else:
            result = {"role": "assistant", "content": json_response.choices[0].message.content}
    except Exception as error_print:
        app.logger.error(error_print)
        result = {}, 405
    return result
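
# Start a fine-tuning job from an uploaded .jsonl/.csv/.json training file,
# or check an existing job when a job_id is supplied.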
@app.route('/train', methods=['POST'])
def train():
    prev_model = "gpt-3.5-turbo"
    if 'job_id' in request.form:
        return train_with_id(job_id=request.form['job_id'])
    elif 'train_file' in request.files:
        train_file = request.files['train_file']
        app.logger.info({"filename": train_file.filename})
        openai_file = None
        extension = train_file.filename.rsplit('.', 1)[-1]
        if extension == 'jsonl':
            openai_file = train_file.stream.read()
        elif extension == 'csv':
            openai_file = csv_to_jsonl(train_file.stream.read())
        elif extension == 'json':
            openai_file = alpaca_to_chatgpt(train_file)
        if 'prev_model' in request.form:
            prev_model = request.form['prev_model']
        app.logger.info(f"Previous model: {prev_model}")
        if 'mock' not in request.form:
            f = app.openai_client.files.create(
                file=openai_file,
                purpose="fine-tune"
            )
            job = app.openai_client.fine_tuning.jobs.create(
                training_file=f.id,
                model=prev_model,
                hyperparameters={
                    "n_epochs": 5
                }
            )
            app.logger.info({"status": job.status, "job_id": job.id})
            return {"status": job.status, "job_id": job.id}
        else:
            app.logger.info({"mock": "yes", "status": "ok"})
            return {"status": "ok"}
    else:
        app.logger.error({"status": "error", "message": "Training file not found"})
        return {"status": "error", "message": "Training file not found"}
def train_with_id(job_id):
    try:
        job = app.openai_client.fine_tuning.jobs.retrieve(job_id)
        if job.fine_tuned_model is None:
            app.logger.info({"job_id": job_id, "status": job.status})
            return {"status": job.status}
        else:
            app.logger.info({"job_id": job_id, "status": job.status, "model_name": job.fine_tuned_model})
            return {"status": job.status, "model_name": job.fine_tuned_model}
    except Exception as error_print:
        app.logger.error(error_print)
        return {"status": "Could not find job from id"}

# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8348, debug=True, ssl_context=ssl)

# See PyCharm help at https://www.jetbrains.com/help/pycharm/