import io
import os
import json
import requests
from openai import OpenAI
from flask import Flask, request, jsonify, send_from_directory, url_for

from convert import alpaca_to_chatgpt, csv_to_jsonl

app = Flask(__name__)

# TLS is off by default; set ssl to a (cert, key) path pair to serve HTTPS.
ssl = None
# ssl = ('/etc/ssl/sample.crt', '/etc/ssl/sample.pem')

# SECURITY FIX: the key must come from the environment only. The previous
# code shipped a literal "sk-..." key as the env-var fallback — a leaked
# secret that must be revoked. Never commit API keys as defaults.
app.openai_key = os.environ.get("OPENAI_KEY")
app.openai_client = OpenAI(api_key=app.openai_key)

# System prompts for each endpoint mode. Handlers copy() these before
# appending user turns so the shared templates are never mutated.
app.chat_messages = [
    {"role": "system",
     "content": "Please respond professionally and in a friendly manner, using the same language as the original request."}
]
app.translate_messages = [
    {"role": "system",
     "content": "Please translate using the requested language."}
]
app.suggest_messages = [
    {"role": "system",
     "content": "Please suggest reply messages based on the previous conversations and the user's request."}
]
app.recommend_messages = [
    {"role": "system",
     "content": "Give normalized total weight of each category in json based on headlines"
     }
]
app.summary_messages = [
    {"role": "system",
     "content": "Please summarize an article."
     }
]

# Directory served by the /files/<name> download route.
UPLOAD_FOLDER = 'files'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route('/files/<name>')
def download_file(name):
    """Serve a stored file from the upload folder by its bare name."""
    upload_dir = app.config["UPLOAD_FOLDER"]
    return send_from_directory(upload_dir, name)
@app.route('/', methods=['GET', 'POST'])
def test():
    """Health-check endpoint: always answers with status "0" (alive)."""
    payload = {"status": "0"}
    return jsonify(payload)
def recommend(headlines, category):
    """Ask the model for normalized per-category weights for the headlines.

    headlines -- the headline text(s) to score.
    category  -- the categories to weight (interpolated into the prompt).

    Returns the parsed JSON dict produced by the model, or ({}, 405) on
    any failure.
    """
    chat_messages = app.recommend_messages.copy()
    try:
        json_payload = {
            "role": "user",
            "content": f"""{headlines}
Berikan nilai berat masing-masing kategori, jumlahkan dan normalisasikan:
{category}
Berikan dalam bentuk json
"""
        }
        chat_messages.append(json_payload)
        app.logger.info(chat_messages)
        json_response = app.openai_client.chat.completions.create(
            model="gpt-3.5-turbo-1106",
            messages=chat_messages,
            # Force a JSON object so the json.loads below cannot choke on prose.
            response_format={"type": "json_object"}
        )
        app.logger.info(json_response.choices[0].message.content)
        return json.loads(json_response.choices[0].message.content)
    except Exception as error_print:
        app.logger.error(error_print)
        # BUG FIX: the original assigned `result = {}, 405` and fell off the
        # end, implicitly returning None. Return the error tuple instead.
        return {}, 405
def vision(message, image_url=None, image_b64=None):
    """Send a text + image prompt to the gpt-4o vision model.

    message   -- the text question about the image.
    image_url -- a publicly reachable image URL, OR
    image_b64 -- base64-encoded JPEG data (wrapped into a data URI).

    Returns an assistant message dict, or ({}, 405) on any failure.
    """
    chat_messages = app.chat_messages.copy()
    url = ""
    if image_url:
        url = f"{image_url}"
    elif image_b64:
        # Inline the image as a data URI, as the vision API expects.
        url = f"data:image/jpeg;base64,{image_b64}"
    try:
        json_payload = {
            "role": "user",
            "content": [
                {"type": "text", "text": message},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": url,
                    },
                },
            ],
        }
        chat_messages.append(json_payload)
        app.logger.info(chat_messages)
        json_response = app.openai_client.chat.completions.create(
            model="gpt-4o",
            messages=chat_messages,
            max_tokens=500
        )
        return {"role": "assistant", "content": json_response.choices[0].message.content}
    except Exception as error_print:
        app.logger.error(error_print)
        # BUG FIX: the original assigned `result = {}, 405` without returning
        # it, so callers received None on error. Return the tuple instead.
        return {}, 405
@app.route('/gpt', methods=['POST'])
def gpt():
    """Main chat endpoint.

    The JSON body is either a bare message dict / list of messages, or a dict
    with a 'payload' key plus named options (translate / suggest / summarize /
    predict_q / num_choices / use_video / chat_model), or one of the one-shot
    forms handled entirely here: 'greeting', 'recommend', 'image_url',
    'image_b64'.

    Returns an assistant message dict (possibly wrapped with extra fields for
    suggest / video / predict_q modes), or ({}, 405) on failure.
    """
    chat_messages = app.chat_messages.copy()
    chat_model = "gpt-3.5-turbo"
    use_video = False
    suggest = False
    summarize = False
    predict_q = 0          # number of follow-up questions to predict (0 = off)
    max_char_msg = 500     # per-message character budget before truncation
    max_resp_token = 600
    category = []
    headlines = []
    image_url = ""
    num_choices = 1
    json_payload = request.get_json()
    if not json_payload:
        json_payload = []
    has_named_params = False
    if isinstance(json_payload, dict):
        has_named_params = 'payload' in json_payload
        if 'payload' in json_payload:
            if 'predict_q' in json_payload:
                # Clamp to [0, 5].
                predict_q = 5 if json_payload['predict_q'] > 5 else 0 if json_payload['predict_q'] < 0 else json_payload['predict_q']
            if 'num_choices' in json_payload:
                # Clamp to [1, 5].
                num_choices = 5 if json_payload['num_choices'] > 5 else 1 if json_payload['num_choices'] < 1 else json_payload['num_choices']
            if 'use_video' in json_payload:
                use_video = json_payload['use_video'] == "1"
            if 'chat_model' in json_payload:
                # Caller-selected (e.g. fine-tuned) model gets a larger budget.
                chat_model = json_payload['chat_model']
                max_resp_token = 2048
            if 'translate' in json_payload:
                chat_messages = app.translate_messages.copy()
                json_payload['payload'][-1]['content'] = json_payload['payload'][-1][
                    'content'] + f" (Translate to {json_payload['translate']})"
            elif 'suggest' in json_payload:
                suggest = json_payload['suggest'] == "1"
                if suggest:
                    chat_messages = app.suggest_messages.copy()
                else:
                    chat_messages = app.chat_messages.copy()
                json_payload['payload'][-1]['content'] = json_payload['payload'][-1][
                    'content'] + f" What can I say to him/her?"
            elif 'summarize' in json_payload:
                summarize = json_payload['summarize'] == "1"
                if summarize:
                    chat_messages = app.summary_messages.copy()
                    # Articles are longer than chat turns; raise both budgets.
                    max_char_msg = 2000
                    max_resp_token = 1000
                else:
                    chat_messages = app.chat_messages.copy()
                json_payload['payload'][-1]['content'] = f"Please summarize this article:\n" + \
                    json_payload['payload'][-1]['content']
            else:
                chat_messages = app.chat_messages.copy()
            json_payload = json_payload['payload']
            if isinstance(json_payload, dict):
                json_payload = [json_payload]
        elif 'greeting' in json_payload:
            # One-shot company greeting; first ask the model whether the name
            # sounds Islamic to pick the appropriate salutation.
            chat_messages = app.chat_messages.copy()
            company_name = json_payload['greeting']['company_name']
            timestamp = json_payload['greeting']['timestamp']
            islamic_message = f"Apakah Nama '{company_name}' terdapat unsur islami? Jawab dengan 'Ya' atau 'Tidak'"
            islam_messages = app.chat_messages.copy()
            islam_messages.append({
                "role": "user",
                "content": islamic_message
            })
            islamic_response = app.openai_client.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=islam_messages,
                max_tokens=2, temperature=0.5)
            if 'Ya' in islamic_response.choices[0].message.content:
                greeting_message = f"Buatkan respons chatbot berupa greeting dari chat perusahaan bernama {company_name} pada jam {timestamp}, tidak perlu mention waktu, dan jawab dengan 'Assalamu'alaikum...' terlebih dahulu"
            else:
                greeting_message = f"Buatkan respons chatbot berupa greeting dari chat perusahaan bernama {company_name} pada jam {timestamp}, tidak perlu mention waktu"
            json_payload = [
                {
                    "role": "user",
                    "content": greeting_message
                }
            ]
        elif 'recommend' in json_payload:
            headlines = json_payload['recommend']['headlines']
            category = json_payload['recommend']['category']
            return recommend(headlines, category)
        elif 'image_url' in json_payload:
            image = json_payload['image_url']
            message = json_payload["message"] if 'message' in json_payload else "Ini gambar apa?"
            return vision(message, image_url=image)
        elif 'image_b64' in json_payload:
            image = json_payload['image_b64']
            message = json_payload["message"] if 'message' in json_payload else "Ini gambar apa?"
            # BUG FIX: previously passed the unrelated local `image_url`
            # (always "" here), so base64 uploads never reached the model.
            return vision(message, image_b64=image)
        else:
            chat_messages = app.chat_messages.copy()
            json_payload = [json_payload]
    # Keep only the last 5 turns; truncate each message to max_char_msg
    # characters on a word boundary (split the prefix to count whole words,
    # then take that many words from the full text).
    json_payload = json_payload[-5:]
    for message in json_payload:
        if message['role'] == 'user':
            content = message['content'].lower()
        else:
            content = message['content']
        content_arr = content.split(" ")
        new_content_arr = content[:max_char_msg].split(" ")
        new_content_len = len(new_content_arr)
        arr = []
        for i in range(new_content_len):
            arr.append(content_arr[i])
        message['content'] = " ".join(arr)
        chat_messages.append(message)
    app.logger.info(chat_messages)
    result = {}
    try:
        n = num_choices
        json_response = app.openai_client.chat.completions.create(
            model=chat_model,
            messages=chat_messages,
            max_tokens=max_resp_token, temperature=0.7, n=n)
        app.logger.info(json_response.choices[0].message)
        if has_named_params:
            if suggest:
                # Return every completion choice as a suggested reply.
                choices = json_response.choices
                messages = [i.message for i in choices]
                json_formatted = []
                for message in messages:
                    json_formatted.append({"role": "assistant", "content": message.content})
                result = {"url": "", "message": json_formatted}
            else:
                if use_video:
                    # TODO: to be implemented — currently a stub video URL.
                    result = {"url": url_for('download_file', name="test.mp4", _external=True),
                              "message": {"role": "assistant", "content": json_response.choices[0].message.content}}
                else:
                    result = {"role": "assistant", "content": json_response.choices[0].message.content}
            if predict_q:
                # Ask for predict_q likely follow-up questions as JSON.
                # NOTE(review): when suggest/use_video is set, `result` is not
                # a plain chat message yet is appended to chat_messages below —
                # confirm that is intended.
                query_q = {
                    "role": "user",
                    "content": f"Berikan {predict_q} pertanyaan lain yang akan saya ajukan berdasarkan percakapan kali ini dalam bentuk json array"
                }
                chat_messages.append(result)
                chat_messages.append(query_q)
                json_response_q = app.openai_client.chat.completions.create(
                    model=chat_model,
                    messages=chat_messages,
                    max_tokens=max_resp_token,
                    temperature=0.2, response_format={"type": "json_object"})
                json_response_dict = json.loads(json_response_q.choices[0].message.content)
                app.logger.info(json_response_dict)
                if json_response_dict is not None:
                    if isinstance(json_response_dict, dict):
                        # Unwrap {"questions": [...]}-style single-key wrappers.
                        first_key = next(iter(json_response_dict))
                        json_response_dict = json_response_dict[first_key]
                    elif isinstance(json_response_dict, str):
                        json_response_dict = [json_response_dict]
                    result["predict_q"] = json_response_dict
        else:
            result = {"role": "assistant", "content": json_response.choices[0].message.content}
    except Exception as error_print:
        app.logger.error(error_print)
        result = {}, 405
    return result
@app.route('/train', methods=['POST'])
def train():
    """Start a fine-tuning job, or poll an existing one.

    Form fields:
      job_id     -- if present, delegate to train_with_id and just poll.
      train_file -- training data: .jsonl (native), .csv (converted via
                    csv_to_jsonl) or .json (alpaca, via alpaca_to_chatgpt).
      prev_model -- base model to fine-tune (default "gpt-3.5-turbo").
      mock       -- if present, skip the actual OpenAI upload/job creation.
    """
    prev_model = "gpt-3.5-turbo"
    if 'job_id' in request.form:
        return train_with_id(job_id=request.form['job_id'])
    elif 'train_file' in request.files:
        train_file = request.files['train_file']
        app.logger.info({"filename": train_file.filename})
        openai_file = None
        # BUG FIX: split('.')[1] raised IndexError on dot-less names and
        # picked the wrong segment for names like "data.v2.jsonl"; rsplit
        # reliably yields the real extension.
        extension = train_file.filename.rsplit('.', 1)[-1].lower()
        if extension == 'jsonl':
            openai_file = train_file.stream.read()
        elif extension == 'csv':
            openai_file = csv_to_jsonl(train_file.stream.read())
        elif extension == 'json':
            openai_file = alpaca_to_chatgpt(train_file)
        if openai_file is None:
            # BUG FIX: an unsupported extension previously fell through and
            # crashed inside files.create(file=None); fail fast instead.
            app.logger.error({"status": "error", "message": "Unsupported training file type"})
            return {"status": "error", "message": "Unsupported training file type"}
        if 'prev_model' in request.form:
            prev_model = request.form['prev_model']
        app.logger.info(f"Previous model: {prev_model}")
        if 'mock' not in request.form:
            f = app.openai_client.files.create(
                file=openai_file,
                purpose="fine-tune"
            )
            job = app.openai_client.fine_tuning.jobs.create(
                training_file=f.id,
                model=prev_model,
                hyperparameters={
                    "n_epochs": 5
                }
            )
            app.logger.info({"mock": "no", "status": job.status, "job_id": job.id})
            return {"status": job.status, "job_id": job.id}
        else:
            app.logger.info({"mock": "yes", "status": "ok"})
            return {"status": "ok"}
    else:
        app.logger.error({"status": "error", "message": "Training file not found"})
        return {"status": "error", "message": "Training file not found"}
def train_with_id(job_id):
    """Poll a fine-tuning job and report its status (plus model name when done)."""
    try:
        job = app.openai_client.fine_tuning.jobs.retrieve(job_id)
    except Exception as error_print:
        app.logger.error(error_print)
        return {"status": "Could not find job from id"}
    model_name = job.fine_tuned_model
    if model_name is None:
        # Job still running: only a status is available yet.
        app.logger.info({"job_id": job_id, "status": job.status})
        return {"status": job.status}
    app.logger.info({"job_id": job_id, "status": job.status, "model_name": model_name})
    return {"status": job.status, "model_name": model_name}
@app.route('/llama', methods=['POST'])
def llama():
    """Proxy the last message to a local Ollama llama3 server.

    Accepts either a bare message dict / list of messages or {"payload": ...}.
    Only the last message is forwarded, truncated to max_char_msg characters
    on a word boundary, with an instruction to answer in Indonesian appended.

    Returns an assistant message dict, or ({}, status) on failure.
    """
    max_char_msg = 500
    max_resp_token = 600  # kept for parity with /gpt; unused by the Ollama call
    json_payload = request.get_json()
    if not json_payload:
        json_payload = []
    has_named_params = False
    if isinstance(json_payload, dict):
        has_named_params = 'payload' in json_payload
        if 'payload' in json_payload:
            json_payload = json_payload['payload']
            if isinstance(json_payload, dict):
                json_payload = [json_payload]
        else:
            json_payload = [json_payload]
    if not json_payload:
        # BUG FIX: an empty body previously crashed with an IndexError on
        # json_payload[-1] (HTTP 500); answer with the endpoint's own error
        # convention instead.
        return {}, 405
    message = json_payload[-1]
    content = message['content']
    # Word-boundary truncation, identical to the /gpt endpoint.
    content_arr = content.split(" ")
    new_content_arr = content[:max_char_msg].split(" ")
    new_content_len = len(new_content_arr)
    arr = []
    for i in range(new_content_len):
        arr.append(content_arr[i])
    content = " ".join(arr)
    content = content + " Jawab dengan Bahasa Indonesia"
    try:
        json_request = {
            "model": "llama3",
            "prompt": content,
            "stream": False
        }
        # Local Ollama server; streaming disabled so the full answer arrives at once.
        r = requests.post("http://localhost:11434/api/generate", json=json_request)
        if r.status_code == 200:
            result = {
                "role": "assistant",
                "content": r.json()["response"]
            }
        else:
            result = {}, r.status_code
    except Exception as error_print:
        app.logger.error(error_print)
        result = {}, 405
    return result
# Press the green button in the gutter to run the script.
# Dev entry point: binds all interfaces on port 8348 with the Flask debug
# server; ssl_context comes from the module-level `ssl` (None = plain HTTP).
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8348, debug=True, ssl_context=ssl)
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|