Zhiguang Huo (Caleb)
Tuesday Nov 28th, 2023
# pip install --upgrade openai
import os
from openai import OpenAI

# SECURITY: never hard-code an API key in source — a committed key is leaked
# and must be revoked. Read it from the environment instead.
# Setup instructions: https://platform.openai.com/docs/quickstart?context=python
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
# Minimal chat-completion call: a single user message, default settings.
joke_prompt = [{"role": "user", "content": "Tell me a joke!"}]
completion = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=joke_prompt,
)
# The reply text lives in the first (and here only) choice.
print(completion.choices[0].message.content)
## Sure, here you go:
##
## Why don't scientists trust atoms?
##
## Because they make up EVERYTHING!
https://platform.openai.com/docs/models
New OpenAI accounts receive free trial credits, valid for 3 months after account creation.
Tokens can be thought of as pieces of words. Before the API processes the prompts, the input is broken down into tokens.
# Same joke request, but a system message sets the user's persona;
# the model tailors its answer accordingly (here: a programming joke).
persona_messages = [
    {"role": "system", "content": "I am a biostatistician, I am learning programming using python."},
    {"role": "user", "content": "Tell me a joke!"},
]
completion = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=persona_messages,
)
print(completion.choices[0].message.content)
## Sure, here's a programming joke for you:
##
## Why do programmers prefer dark mode?
##
## Because the light attracts bugs!
?client.chat.completions.create
# temperature controls sampling randomness (range 0-2); 1.5 makes the
# model's output noticeably more varied between runs.
user_turn = {"role": "user", "content": "Tell me a joke!"}
completion = client.chat.completions.create(
    model="gpt-3.5-turbo",
    temperature=1.5,
    messages=[user_turn],
)
print(completion.choices[0].message.content)
## Why don't scientists trust atoms?
##
## Because they make up everything!
# Request one 1024x1024 standard-quality image from DALL-E 3.
image_params = dict(
    model="dall-e-3",
    prompt="an orange cat driving a car in space",
    size="1024x1024",
    quality="standard",
    n=1,
)
response = client.images.generate(**image_params)
# URL of the generated image (hosted temporarily by OpenAI).
image_url = response.data[0].url
image_url
## 'https://oaidalleapiprodscus.blob.core.windows.net/private/org-Qvud1DXqJ6sIny1VfoCPNQAc/user-3qbiZ0O2GBsVFtX6h9jTtfYn/img-UWL9BkiGcPei5FkKz3vXkpL7.png?st=2023-11-28T14%3A16%3A29Z&se=2023-11-28T16%3A16%3A29Z&sp=r&sv=2021-08-06&sr=b&rscd=inline&rsct=image/png&skoid=6aaadede-4fb3-4698-a8f6-684d7786b067&sktid=a48cca56-e6da-484e-a814-9c849652bcb3&skt=2023-11-27T20%3A49%3A34Z&ske=2023-11-28T20%3A49%3A34Z&sks=b&skv=2021-08-06&sig=/5i1GUqWQUH3PKlNMq5Y47Qhv6kD/NWfScQ5JplKp6E%3D'
## <IPython.core.display.Image object>
## (-0.5, 1023.5, 1023.5, -0.5)
# Synthesize speech from English text with the TTS endpoint ("alloy" voice).
response = client.audio.speech.create(
    model="tts-1",
    voice="alloy",
    input="Today is a wonderful day to learn programming basics for Biostatistics!"
)
speech_file_path = '/Users/zhuo/Desktop/speech.mp3'
# write_to_file replaces the deprecated stream_to_file helper; the response
# body is fully buffered in memory, so the read() below still works.
response.write_to_file(speech_file_path)

from IPython.display import Audio
# Play the synthesized audio inline in the notebook.
Audio(response.read(), autoplay=True)
?client.audio.speech.create
# Text-to-speech with the "echo" voice, this time with Chinese input text.
chinese_text = "今天是学习生物统计学编程基础的好日子!"
response = client.audio.speech.create(
    model="tts-1",
    voice="echo",
    input=chinese_text,
)
# Play the synthesized audio inline in the notebook.
Audio(response.read(), autoplay=True)
# Transcribe the audio file we just synthesized with Whisper.
speech_file_path = '/Users/zhuo/Desktop/speech.mp3'
# Use a context manager so the file handle is closed even if the API call
# fails (the original leaked the open handle).
with open(speech_file_path, "rb") as audio_file:
    transcript = client.audio.transcriptions.create(
        model="whisper-1",
        file=audio_file
    )
# Recognized text of the recording.
transcript.text
You may need to launch Jupyter Notebook from a terminal using an admin account.
First open a terminal with the proper conda environment activated,
then start Jupyter Notebook as admin (otherwise microphone/file access may be denied):
/Users/zhuo/anaconda3/envs/py311/bin/jupyter_mac.command --allow-root
import pyaudio
import wave

# Audio recording parameters
FORMAT = pyaudio.paInt16        # 16-bit signed samples
CHANNELS = 1                    # mono
RATE = 44100                    # samples per second
CHUNK = 1024                    # frames read per buffer
RECORD_SECONDS = 5
# The wave module writes a WAV container, so the file must have a .wav
# extension (the original saved WAV bytes into a file named .mp3, which
# mislabels the format).
WAVE_OUTPUT_FILENAME = '/Users/zhuo/Desktop/speech2.wav'

audio = pyaudio.PyAudio()
# Query the sample width before the PyAudio instance is terminated below.
SAMPLE_WIDTH = audio.get_sample_size(FORMAT)

# Start recording from the default input device.
stream = audio.open(format=FORMAT, channels=CHANNELS,
                    rate=RATE, input=True,
                    frames_per_buffer=CHUNK)
print("Recording...")
try:
    frames = [stream.read(CHUNK)
              for _ in range(int(RATE / CHUNK * RECORD_SECONDS))]
finally:
    # Always release the audio device, even if reading fails mid-way.
    stream.stop_stream()
    stream.close()
    audio.terminate()
print("Finished recording.")

# Save the recorded data as a WAV file; the context manager guarantees
# the file is flushed and closed.
with wave.open(WAVE_OUTPUT_FILENAME, 'wb') as wf:
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(SAMPLE_WIDTH)
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))

# Transcribe the recording with Whisper; `with` closes the handle even if
# the API call raises (the original leaked it).
speech_file_path = WAVE_OUTPUT_FILENAME
with open(speech_file_path, "rb") as audio_file:
    transcript = client.audio.transcriptions.create(
        model="whisper-1",
        file=audio_file
    )
# Recognized text of the recording.
transcript.text