Update dependencies, openai to v1
This commit is contained in:
parent
2506df7f23
commit
1c52ccb0a9
3 changed files with 23 additions and 16 deletions
|
|
@ -1,4 +1,4 @@
|
|||
import openai
|
||||
from openai import OpenAI
|
||||
import ffmpeg
|
||||
import os
|
||||
import discord
|
||||
|
|
@ -12,10 +12,11 @@ Path("temp/").mkdir(parents=True, exist_ok=True)
|
|||
config = configparser.ConfigParser()
|
||||
config.read('config.ini')
|
||||
|
||||
openai.api_key = config['DEFAULT']['OpenAIKey'] # your OpenAI-API Key
|
||||
openai = OpenAI(api_key=config['DEFAULT']['OpenAIKey']) # initialize the OpenAI client with your API key
|
||||
allowed_senders = config['DEFAULT']['AllowedDiscordSenders'].split(',') # list of allowed Discord accounts
|
||||
allowed_channel_names = config['DEFAULT']['AllowedDiscordChannels'].split(',') # allowed channel names
|
||||
|
||||
|
||||
class TranscribeClient(discord.Client):
|
||||
async def on_ready(self):
|
||||
print(f'Logged on as {self.user}!')
|
||||
|
|
@ -40,8 +41,9 @@ class TranscribeClient(discord.Client):
|
|||
audio_file = open("temp/out.mp3", "rb")
|
||||
|
||||
# transcribe using OAI Whisper
|
||||
transcript = openai.Audio.transcribe("whisper-1", audio_file)
|
||||
text = transcript['text']
|
||||
transcript = openai.audio.transcriptions.create(model="whisper-1", file=audio_file)
|
||||
|
||||
text = transcript.text
|
||||
|
||||
# respond with the transcription
|
||||
await message.channel.send(f'**{attachment.filename}:**')
|
||||
|
|
|
|||
4
requirements.txt
Normal file
4
requirements.txt
Normal file
|
|
@ -0,0 +1,4 @@
|
|||
discord.py==2.3.2
|
||||
ffmpeg_python==0.2.0
|
||||
openai==1.27.0
|
||||
python-telegram-bot==21.1.1
|
||||
|
|
@ -13,9 +13,10 @@ Path("temp/").mkdir(parents=True, exist_ok=True)
|
|||
config = configparser.ConfigParser()
|
||||
config.read('config.ini')
|
||||
|
||||
openai.api_key = config['DEFAULT']['OpenAIKey']
|
||||
openai = OpenAI(api_key=config['DEFAULT']['OpenAIKey']) # initialize the OpenAI client with your API key
|
||||
allowed_senders = config['DEFAULT']['AllowedTelegramSenders'].split(',')
|
||||
|
||||
|
||||
async def on_msg(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
|
||||
message = update.message
|
||||
sender_id = message.from_user.id
|
||||
|
|
@ -31,11 +32,12 @@ async def on_msg(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
|
|||
ffmpeg.run(out, overwrite_output=True)
|
||||
audio_file = open("temp/out.mp3", "rb")
|
||||
|
||||
transcript = openai.Audio.transcribe("whisper-1", audio_file)
|
||||
text = transcript['text']
|
||||
transcript = openai.audio.transcriptions.create(model="whisper-1", file=audio_file)
|
||||
text = transcript.text
|
||||
for t in textwrap.wrap(text, 4000):
|
||||
await update.message.reply_text(t)
|
||||
|
||||
|
||||
async def main():
|
||||
return
|
||||
|
||||
|
|
@ -46,4 +48,3 @@ if __name__=='__main__':
|
|||
|
||||
application.add_handler(MessageHandler(filters=None, callback=on_msg))
|
||||
application.run_polling()
|
||||
|
||||
|
|
|
|||
Loading…
Reference in a new issue