from flask import Flask, request, send_from_directory, render_template
import speech_recognition as sr
import openai
import os
import json
import tiktoken
from gtts import gTTS
import uuid
import subprocess
from pydub import AudioSegment
# from OpenSSL import SSL

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = "files"

# Absolute paths used when building subprocess arguments for Wav2Lip.
PROJECT_ABSPATH = os.path.abspath(".")
WAV2LIP_ABSPATH = "/home/maronakins/Documents/Wav2Lip"

# ALLOWED_EXTENSIONS = {'mp3', 'wav'}
# context = SSL.Context(SSL.TLSv1_2_METHOD)
# context.load_cert_chain('certificate.crt', 'private.key')

# Load the OpenAI API key from a local file.
# BUG FIX: readline() keeps the trailing newline, which corrupts the key
# sent in the Authorization header — strip surrounding whitespace.
with open('open_ai_key', encoding='utf-8') as f:
    openai.api_key = f.readline().strip()

# System prompt that seeds every conversation (Indonesian: "You are a kind
# assistant, but keep your answers short").
chat_messages = [
    {"role": "system", "content": "Kamu adalah asisten yang baik hati, tapi jawaban nya jangan panjang-panjang ya"}
]

# Tokenizer matching the chat model.
# NOTE(review): `encoding` appears unused in this file — confirm before removing.
encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
def verify_token(token):
    """Validate an authorization token.

    Placeholder implementation: every token is accepted. A real version
    would check *token* against a database or token service.
    """
    return True
@app.route('/', methods=['GET', 'POST'])
def index():
    """Serve the landing page on GET; reject every other method with 415."""
    if request.method != 'GET':
        return "Unsupported request", 415
    return render_template("index.html")
@app.route('/process', methods=['POST'])
def process_input():
    """Handle one chat turn: query ChatGPT, synthesize speech, optionally lip-sync.

    Expects a JSON body with:
      - "payload": list of chat messages ({"role": ..., "content": ...} dicts)
      - "use_video": "1" to also render a Wav2Lip video of the reply

    Returns a JSON object with the assistant reply text plus the generated
    audio filename and (possibly empty) video filename, or 415 when the
    request body is not JSON.
    """
    if not request.is_json:
        return "Unsupported request", 415
    msg_json = request.get_json(force=True)
    print(msg_json)

    # result = process_input(msg_json)
    # Verify the token
    # token = request.headers.get('Authorization')
    # if not verify_token(token):
    #     return 'Invalid token', 401

    # Unique id ties together the audio and video artifacts of this request.
    req_id = str(uuid.uuid4())
    use_video = msg_json["use_video"] == "1"
    print(msg_json)
    payload = msg_json["payload"]

    # Concatenate instead of append-in-a-loop; also avoids mutating the
    # shared system-prompt template.
    msg = chat_messages + list(payload)
    print(msg)

    completion = _chat_completion(msg)
    chat_response = completion.choices[0].message.content
    print(f'{completion["usage"]["prompt_tokens"]} prompt tokens counted by the OpenAI API.')
    print(f'{completion["usage"]["completion_tokens"]} completion tokens counted by the OpenAI API.')
    print(f'{completion["usage"]["total_tokens"]} total tokens counted by the OpenAI API.')
    print(f'ChatGPT: {chat_response}')

    # Synthesize the reply as Indonesian ('id') speech.
    audio_filename = f"chat-{req_id}.mp3"
    tts = gTTS(text=chat_response, lang='id', slow=False)
    tts.save(os.path.join(app.config["UPLOAD_FOLDER"], audio_filename))

    video_filename = ""
    if use_video:
        try:
            video_filename = _render_video(req_id, audio_filename)
        except Exception as e:
            # Best-effort: a Wav2Lip failure degrades the response to
            # audio-only rather than erroring the whole request.
            print(str(e))
            video_filename = ""

    return {"role": "assistant", "content": f'{chat_response}',
            "audio": audio_filename, "video": video_filename}


def _chat_completion(messages):
    """Call the chat API with *messages*, retrying on rate limits.

    BUG FIX: the original retried in a tight loop on RateLimitError;
    pause briefly between attempts so we don't hammer the API.
    """
    import time  # local import: only needed for the retry back-off
    while True:
        try:
            return openai.ChatCompletion.create(
                model="gpt-3.5-turbo",
                messages=messages,
                max_tokens=400
            )
        except openai.error.RateLimitError:
            time.sleep(1)


def _render_video(req_id, audio_filename):
    """Run Wav2Lip to lip-sync the TTS audio onto a still face video.

    Returns the generated video filename; raises CalledProcessError (or
    OSError) on failure — the caller handles errors best-effort.
    """
    video_filename = f"video-{req_id}.mp4"
    wavlip_python = os.path.join(WAV2LIP_ABSPATH, "venv", "bin", "python")
    wavlip_pyfile = os.path.join(WAV2LIP_ABSPATH, "inference.py")
    wavlip_checkpoint = os.path.join(WAV2LIP_ABSPATH, "checkpoints", "wav2lip_gan.pth")
    wavlip_temp = os.path.join(WAV2LIP_ABSPATH, "temp")
    wavlip_face = os.path.join(WAV2LIP_ABSPATH, "ganjarstill.mp4")
    wavlip_audio = os.path.join(PROJECT_ABSPATH, "files", audio_filename)
    wavlip_outfile = os.path.join(PROJECT_ABSPATH, "files", video_filename)
    # List form (shell=False) keeps arguments safe from shell interpretation.
    subprocess.run([wavlip_python, wavlip_pyfile,
                    "--checkpoint_path", wavlip_checkpoint,
                    "--temp", wavlip_temp,
                    "--face_det_batch_size", "64", "--resize_factor", "2",
                    "--face", wavlip_face,
                    "--audio", wavlip_audio,
                    "--outfile", wavlip_outfile], check=True)
    return video_filename
@app.route('/files/<filename>', methods=["GET"])
def files(filename):
    """Serve a generated audio/video artifact from the upload folder."""
    upload_dir = app.config["UPLOAD_FOLDER"]
    return send_from_directory(directory=upload_dir, path=filename)
if __name__ == '__main__':
    # Development server: listens on all interfaces, debug mode on.
    # Not suitable for production deployment.
    app.run(debug=True, host='0.0.0.0', port=46294)
    # app.run(debug=True, host='0.0.0.0', port=8080, ssl_context=context)