Update dependencies, openai to v1
This commit is contained in:
parent
2506df7f23
commit
1c52ccb0a9
3 changed files with 23 additions and 16 deletions
|
|
@@ -1,4 +1,4 @@
|
|||
import openai
|
||||
from openai import OpenAI
|
||||
import ffmpeg
|
||||
import os
|
||||
import discord
|
||||
|
|
@@ -12,9 +12,10 @@ Path("temp/").mkdir(parents=True, exist_ok=True)
|
|||
config = configparser.ConfigParser()
|
||||
config.read('config.ini')
|
||||
|
||||
openai.api_key = config['DEFAULT']['OpenAIKey'] # your OpenAI-API Key
|
||||
allowed_senders = config['DEFAULT']['AllowedDiscordSenders'].split(',') # list of allowed Discord accounts
|
||||
allowed_channel_names = config['DEFAULT']['AllowedDiscordChannels'].split(',') # allowed channel names
|
||||
openai = OpenAI(api_key=config['DEFAULT']['OpenAIKey']) # initiate with your openai key
|
||||
allowed_senders = config['DEFAULT']['AllowedDiscordSenders'].split(',') # list of allowed Discord accounts
|
||||
allowed_channel_names = config['DEFAULT']['AllowedDiscordChannels'].split(',') # allowed channel names
|
||||
|
||||
|
||||
class TranscribeClient(discord.Client):
|
||||
async def on_ready(self):
|
||||
|
|
@@ -35,13 +36,14 @@ class TranscribeClient(discord.Client):
|
|||
|
||||
# convert to mp3
|
||||
input = ffmpeg.input('temp/in')
|
||||
out = ffmpeg.output(input,'temp/out.mp3')
|
||||
out = ffmpeg.output(input, 'temp/out.mp3')
|
||||
ffmpeg.run(out, overwrite_output=True)
|
||||
audio_file = open("temp/out.mp3", "rb")
|
||||
|
||||
# transcribe using OAI Whisper
|
||||
transcript = openai.Audio.transcribe("whisper-1", audio_file)
|
||||
text = transcript['text']
|
||||
transcript = openai.audio.transcriptions.create(model="whisper-1", file=audio_file)
|
||||
|
||||
text = transcript.text
|
||||
|
||||
# respond with the transcription
|
||||
await message.channel.send(f'**{attachment.filename}:**')
|
||||
|
|
@@ -54,7 +56,7 @@ async def main():
|
|||
return
|
||||
|
||||
|
||||
if __name__=='__main__':
|
||||
if __name__ == '__main__':
|
||||
token = config['DEFAULT']['DiscordToken']
|
||||
intents = discord.Intents.default()
|
||||
intents.message_content = True
|
||||
|
|
|
|||
4
requirements.txt
Normal file
4
requirements.txt
Normal file
|
|
@@ -0,0 +1,4 @@
|
|||
discord.py==2.3.2
|
||||
ffmpeg_python==0.2.0
|
||||
openai==1.27.0
|
||||
python-telegram-bot==21.1.1
|
||||
|
|
@@ -13,9 +13,10 @@ Path("temp/").mkdir(parents=True, exist_ok=True)
|
|||
config = configparser.ConfigParser()
|
||||
config.read('config.ini')
|
||||
|
||||
openai.api_key = config['DEFAULT']['OpenAIKey']
|
||||
openai = OpenAI(api_key=config['DEFAULT']['OpenAIKey']) # initiate with your openai key
|
||||
allowed_senders = config['DEFAULT']['AllowedTelegramSenders'].split(',')
|
||||
|
||||
|
||||
async def on_msg(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
|
||||
message = update.message
|
||||
sender_id = message.from_user.id
|
||||
|
|
@@ -27,23 +28,23 @@ async def on_msg(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
|
|||
await f.download_to_drive('temp/in')
|
||||
|
||||
input = ffmpeg.input('temp/in')
|
||||
out = ffmpeg.output(input,'temp/out.mp3')
|
||||
out = ffmpeg.output(input, 'temp/out.mp3')
|
||||
ffmpeg.run(out, overwrite_output=True)
|
||||
audio_file = open("temp/out.mp3", "rb")
|
||||
|
||||
transcript = openai.Audio.transcribe("whisper-1", audio_file)
|
||||
text = transcript['text']
|
||||
transcript = openai.audio.transcriptions.create(model="whisper-1", file=audio_file)
|
||||
text = transcript.text
|
||||
for t in textwrap.wrap(text, 4000):
|
||||
await update.message.reply_text(t)
|
||||
|
||||
|
||||
async def main():
|
||||
return
|
||||
|
||||
|
||||
if __name__=='__main__':
|
||||
if __name__ == '__main__':
|
||||
token = config['DEFAULT']['TelegramToken']
|
||||
application = Application.builder().token(token).build()
|
||||
|
||||
application.add_handler(MessageHandler(filters=None,callback=on_msg))
|
||||
application.add_handler(MessageHandler(filters=None, callback=on_msg))
|
||||
application.run_polling()
|
||||
|
||||
|
|
|
|||
Loading…
Reference in a new issue