From b1f9236bb6ea0a89193477b3bec93f3efb095dec Mon Sep 17 00:00:00 2001 From: Ray Smets Date: Thu, 23 Nov 2023 01:21:43 -0800 Subject: [PATCH 01/14] [Setup] script. (#1) --- README.md | 7 +++++++ setup.sh | 16 ++++++++++++++++ 2 files changed, 23 insertions(+) create mode 100755 setup.sh diff --git a/README.md b/README.md index c10bdcb..1c9b150 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,13 @@ Make a new voice in Eleven and get the voice id of that voice using their [get v export ELEVENLABS_VOICE_ID= ``` +### Setup Script + +Alternatively, one can use the `setup.sh` script to facilitate getting the shell envs ready to rock by updating the API key values in `setup.sh` and run. + +_Note: may have to manually run `source source venv/bin/activate` afterwards depending on shell env._ + + ## Run it! In on terminal, run the webcam capture: diff --git a/setup.sh b/setup.sh new file mode 100755 index 0000000..bab016d --- /dev/null +++ b/setup.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +# create a virtual environment +python3 -m pip install virtualenv +python3 -m virtualenv venv + +# source the virtual environment +source venv/bin/activate + +# install the dependencies +pip install -r requirements.txt + +# set the environment variables +export ELEVENLABS_VOICE_ID= +export OPENAI_API_KEY= +export ELEVENLABS_API_KEY= \ No newline at end of file From 4ab05a4b1d13dab4e047e000e78d9c897d02467d Mon Sep 17 00:00:00 2001 From: Ray Smets Date: Thu, 23 Nov 2023 01:22:52 -0800 Subject: [PATCH 02/14] [Narrator] prompt to describe the image like David Attenborough for increased complex descriptors. (#2) --- narrator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narrator.py b/narrator.py index cd086f7..845158f 100644 --- a/narrator.py +++ b/narrator.py @@ -43,7 +43,7 @@ def generate_new_line(base64_image): { "role": "user", "content": [ - {"type": "text", "text": "Describe this image"}, + {"type": "text", "text": "Describe this image as if you David Attenborough"}, { "type": "image_url", "image_url": f"data:image/jpeg;base64,{base64_image}", From 1bb728ada311c0892ac18f61718e6538279a3192 Mon Sep 17 00:00:00 2001 From: Ray Smets Date: Thu, 23 Nov 2023 01:45:28 -0800 Subject: [PATCH 03/14] [Narrator] fix --- narrator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narrator.py b/narrator.py index 845158f..7eca4f5 100644 --- a/narrator.py +++ b/narrator.py @@ -43,7 +43,7 @@ def generate_new_line(base64_image): { "role": "user", "content": [ - {"type": "text", "text": "Describe this image as if you David Attenborough"}, + {"type": "text", "text": "Describe this image as if you are David Attenborough"}, { "type": "image_url", "image_url": f"data:image/jpeg;base64,{base64_image}", From a4847a83450bd6a3a8fc03f19679e8e15e52fce0 Mon Sep 17 00:00:00 2001 From: Ray Smets Date: Thu, 23 Nov 2023 15:06:17 -0800 Subject: [PATCH 04/14] [Narrator] streaming (#3) --- .gitignore | 3 ++- README.md | 23 ++++++++++++++++------- narrator.py | 36 ++++++++++++++++++++++++++++-------- setup.sh | 4 +++- 4 files changed, 49 insertions(+), 17 deletions(-) diff --git a/.gitignore b/.gitignore index 4d9cf0b..825f964 100644 --- a/.gitignore +++ b/.gitignore @@ -2,4 +2,5 @@ /venv /narration /frames/* -!/frames/.gitkeep \ No newline at end of file +!/frames/.gitkeep +.env \ No newline at end of file diff --git a/README.md b/README.md index 1c9b150..b2c7f25 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,9 @@ -# David Attenborough narrates your life. +# David Attenborough narrates your life. 
https://twitter.com/charliebholtz/status/1724815159590293764 ## Want to make your own AI app? + Check out [Replicate](https://replicate.com). We make it easy to run machine learning models with an API. ## Setup @@ -20,33 +21,41 @@ Then, install the dependencies: Make a [Replicate](https://replicate.com), [OpenAI](https://beta.openai.com/), and [ElevenLabs](https://elevenlabs.io) account and set your tokens: -``` +```bash export OPENAI_API_KEY= export ELEVENLABS_API_KEY= ``` Make a new voice in Eleven and get the voice id of that voice using their [get voices](https://elevenlabs.io/docs/api-reference/voices) API, or by clicking the flask icon next to the voice in the VoiceLab tab. -``` +```bash export ELEVENLABS_VOICE_ID= ``` -### Setup Script +### Streaming -Alternatively, one can use the `setup.sh` script to facilitate getting the shell envs ready to rock by updating the API key values in `setup.sh` and run. +If you would like the speech to start quicker via a streaming manner set the environment variable to enable. The concession is that the audio snippet is not saved in the `/narration` directory. + +```bash +export ELEVENLABS_STREAMING=true +``` + +### Script + +Alternative to running the commands above individually, one can use the `setup.sh` script to facilitate getting the two required shell envs ready to rock by updating the environment variable values in `setup.sh` and executing the script. _Note: may have to manually run `source source venv/bin/activate` afterwards depending on shell env._ - ## Run it! In on terminal, run the webcam capture: + ```bash python capture.py ``` + In another terminal, run the narrator: ```bash python narrator.py ``` - diff --git a/narrator.py b/narrator.py index 7eca4f5..d33da74 100644 --- a/narrator.py +++ b/narrator.py @@ -1,16 +1,24 @@ -import os -from openai import OpenAI import base64 -import json -import time -import simpleaudio as sa import errno -from elevenlabs import generate, play, set_api_key, voices +import json +import os +import time + +import simpleaudio as sa +from elevenlabs import generate, play, set_api_key, stream, voices +from openai import OpenAI client = OpenAI() set_api_key(os.environ.get("ELEVENLABS_API_KEY")) + +# This code initializes the variable 'isStreaming' based on the value of the environment variable 'ELEVENLABS_STREAMIMAGES'. +# If the value of 'ELEVENLABS_STREAMIMAGES' is "true", then 'isStreaming' is set to True. +# Otherwise, 'isStreaming' is set to False. 
+isStreaming = os.environ.get("ELEVENLABS_STREAMING", "false") == "true" + + def encode_image(image_path): while True: try: @@ -25,7 +33,16 @@ def encode_image(image_path): def play_audio(text): - audio = generate(text, voice=os.environ.get("ELEVENLABS_VOICE_ID")) + audio = generate( + text, + voice=os.environ.get("ELEVENLABS_VOICE_ID"), + model="eleven_turbo_v2", + stream=isStreaming, + ) + + if isStreaming: + stream(audio) + return unique_id = base64.urlsafe_b64encode(os.urandom(30)).decode("utf-8").rstrip("=") dir_path = os.path.join("narration", unique_id) @@ -43,7 +60,10 @@ def generate_new_line(base64_image): { "role": "user", "content": [ - {"type": "text", "text": "Describe this image as if you are David Attenborough"}, + { + "type": "text", + "text": "Describe this image as if you are David Attenborough", + }, { "type": "image_url", "image_url": f"data:image/jpeg;base64,{base64_image}", diff --git a/setup.sh b/setup.sh index bab016d..823a544 100755 --- a/setup.sh +++ b/setup.sh @@ -13,4 +13,6 @@ pip install -r requirements.txt # set the environment variables export ELEVENLABS_VOICE_ID= export OPENAI_API_KEY= -export ELEVENLABS_API_KEY= \ No newline at end of file +export ELEVENLABS_API_KEY= + +export ELEVENLABS_STREAMING=false From 0a65b08cd8b99106e4019217fc4e2d0470047c42 Mon Sep 17 00:00:00 2001 From: Ray Smets Date: Fri, 24 Nov 2023 09:07:14 -0800 Subject: [PATCH 05/14] [PhotoBooth] mode (#4) * spacebar * trunk * cleanup --- .gitignore | 2 +- capture.py | 11 +++--- narrator.py | 91 +++++++++++++++++++++++++++++++++--------------- requirements.txt | 3 +- 4 files changed, 71 insertions(+), 36 deletions(-) diff --git a/.gitignore b/.gitignore index 825f964..6667455 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,4 @@ /narration /frames/* !/frames/.gitkeep -.env \ No newline at end of file +.trunk \ No newline at end of file diff --git a/capture.py b/capture.py index bc9845c..71349bd 100644 --- a/capture.py +++ b/capture.py @@ -1,8 +1,9 @@ -import cv2 -import time -from PIL import Image -import numpy as np import os +import time + +import cv2 +import numpy as np +from PIL import Image # Folder folder = "frames" @@ -30,7 +31,7 @@ while True: # Resize the image max_size = 250 ratio = max_size / max(pil_img.size) - new_size = tuple([int(x*ratio) for x in pil_img.size]) + new_size = tuple([int(x * ratio) for x in pil_img.size]) resized_img = pil_img.resize(new_size, Image.LANCZOS) # Convert the PIL image back to an OpenCV image diff --git a/narrator.py b/narrator.py index d33da74..3a652c3 100644 --- a/narrator.py +++ b/narrator.py @@ -1,22 +1,43 @@ import base64 import errno -import json import os import time -import simpleaudio as sa -from elevenlabs import generate, play, set_api_key, stream, voices +from elevenlabs import generate, play, set_api_key, stream from openai import OpenAI +from pynput import ( # Using pynput to listen for a keypress instead of native keyboard module which was requiring admin privileges + keyboard, +) client = OpenAI() set_api_key(os.environ.get("ELEVENLABS_API_KEY")) - -# This code initializes the variable 'isStreaming' based on the value of the environment variable 'ELEVENLABS_STREAMIMAGES'. -# If the value of 'ELEVENLABS_STREAMIMAGES' is "true", then 'isStreaming' is set to True. -# Otherwise, 'isStreaming' is set to False. 
+# Initializes the variables based their respective environment variable values, defaulting to false isStreaming = os.environ.get("ELEVENLABS_STREAMING", "false") == "true" +isPhotoBooth = os.environ.get("PHOTOBOOTH_MODE", "false") == "true" + +script = [] +narrator = "Sir David Attenborough" + + +def on_press(key): + if key == keyboard.Key.space: + # When space bar is pressed, run the main function which analyzes the image and generates the audio + _main() + + +def on_release(key): + if key == keyboard.Key.esc: + # Stop listener + return False + + +# Create a listener +listener = keyboard.Listener(on_press=on_press, on_release=on_release) + +# Start the listener +listener.start() def encode_image(image_path): @@ -41,9 +62,11 @@ def play_audio(text): ) if isStreaming: + # Stream the audio for more real-time responsiveness stream(audio) return + # Save the audio to a file and play it unique_id = base64.urlsafe_b64encode(os.urandom(30)).decode("utf-8").rstrip("=") dir_path = os.path.join("narration", unique_id) os.makedirs(dir_path, exist_ok=True) @@ -62,7 +85,7 @@ def generate_new_line(base64_image): "content": [ { "type": "text", - "text": "Describe this image as if you are David Attenborough", + "text": f"Describe this image as if you are {narrator}", }, { "type": "image_url", @@ -79,8 +102,8 @@ def analyze_image(base64_image, script): messages=[ { "role": "system", - "content": """ - You are Sir David Attenborough. Narrate the picture of the human as if it is a nature documentary. + "content": f""" + You are {narrator}. Narrate the picture of the human as if it is a nature documentary. Make it snarky and funny. Don't repeat yourself. Make it short. If I do anything remotely interesting, make a big deal about it! """, }, @@ -93,30 +116,40 @@ def analyze_image(base64_image, script): return response_text +def _main(): + global script + + # path to your image + image_path = os.path.join(os.getcwd(), "./frames/frame.jpg") + + # getting the base64 encoding + base64_image = encode_image(image_path) + + # analyze posture + print(f"👀 {narrator} is watching...") + analysis = analyze_image(base64_image, script=script) + + print("🎙️ David says:") + print(analysis) + + play_audio(analysis) + + script = script + [{"role": "assistant", "content": analysis}] + + def main(): - script = [] - while True: - # path to your image - image_path = os.path.join(os.getcwd(), "./frames/frame.jpg") + if isPhotoBooth: + pass + else: + _main() - # getting the base64 encoding - base64_image = encode_image(image_path) + # wait for 5 seconds + time.sleep(5) - # analyze posture - print("👀 David is watching...") - analysis = analyze_image(base64_image, script=script) - - print("🎙️ David says:") - print(analysis) - - play_audio(analysis) - - script = script + [{"role": "assistant", "content": analysis}] - - # wait for 5 seconds - time.sleep(5) +if isPhotoBooth: + print(f"Press the spacebar to trigger {narrator}") if __name__ == "__main__": main() diff --git a/requirements.txt b/requirements.txt index 12cae1c..0f145b6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -28,6 +28,7 @@ pure-eval==0.2.2 pydantic==2.4.2 pydantic_core==2.10.1 Pygments==2.16.1 +pynput==1.7.6 requests==2.31.0 simpleaudio==1.0.4 six==1.16.0 @@ -38,4 +39,4 @@ traitlets==5.13.0 typing_extensions==4.8.0 urllib3==2.0.7 wcwidth==0.2.10 -websockets==12.0 +websockets==12.0 \ No newline at end of file From e074efb9428e852327b7e5b820881aeae8f80aa2 Mon Sep 17 00:00:00 2001 From: Ray Smets Date: Fri, 24 Nov 2023 09:14:36 -0800 Subject: [PATCH 06/14] 
[REAME] updated with photo booth mode information. --- README.md | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index b2c7f25..ee102f5 100644 --- a/README.md +++ b/README.md @@ -32,14 +32,6 @@ Make a new voice in Eleven and get the voice id of that voice using their [get v export ELEVENLABS_VOICE_ID= ``` -### Streaming - -If you would like the speech to start quicker via a streaming manner set the environment variable to enable. The concession is that the audio snippet is not saved in the `/narration` directory. - -```bash -export ELEVENLABS_STREAMING=true -``` - ### Script Alternative to running the commands above individually, one can use the `setup.sh` script to facilitate getting the two required shell envs ready to rock by updating the environment variable values in `setup.sh` and executing the script. @@ -59,3 +51,21 @@ In another terminal, run the narrator: ```bash python narrator.py ``` + +## Options + +### Streaming + +If you would like the speech to start quicker via a streaming manner set the environment variable to enable. The concession is that the audio snippet is not saved in the `/narration` directory. + +```bash +export ELEVENLABS_STREAMING=true +``` + +### PhotoBooth + +The default behavior of this app will continually analyze images. If you would like to use in a mode more similar to a photo booth, set the environment variable. In this mode, the image will only be analyzed when the spacebar key is pressed. + +```bash +export PHOTOBOOTH_MODE=true +``` From 604c2b562401d479719fb60fa0601d6fd9b42549 Mon Sep 17 00:00:00 2001 From: Ray Smets Date: Fri, 24 Nov 2023 09:37:29 -0800 Subject: [PATCH 07/14] [dotenv] module added and README updated with information on how to use. Also, small README format refactor. (#5) --- .env.example | 8 ++++++++ README.md | 20 +++++++++++++------- narrator.py | 4 ++++ requirements.txt | 1 + setup.sh | 9 ++------- 5 files changed, 28 insertions(+), 14 deletions(-) create mode 100644 .env.example diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..2bf14e2 --- /dev/null +++ b/.env.example @@ -0,0 +1,8 @@ +# Required variables: +export OPENAI_API_KEY= +export ELEVENLABS_API_KEY= +export ELEVENLABS_VOICE_ID= + +# Optional variables: +export ELEVENLABS_STREAMING= +export PHOTOBOOTH_MODE= \ No newline at end of file diff --git a/README.md b/README.md index ee102f5..70259e1 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# David Attenborough narrates your life. +# David Attenborough narrates your life https://twitter.com/charliebholtz/status/1724815159590293764 @@ -32,12 +32,6 @@ Make a new voice in Eleven and get the voice id of that voice using their [get v export ELEVENLABS_VOICE_ID= ``` -### Script - -Alternative to running the commands above individually, one can use the `setup.sh` script to facilitate getting the two required shell envs ready to rock by updating the environment variable values in `setup.sh` and executing the script. - -_Note: may have to manually run `source source venv/bin/activate` afterwards depending on shell env._ - ## Run it! In on terminal, run the webcam capture: @@ -54,6 +48,18 @@ python narrator.py ## Options +### Setup + +#### Script + +Alternative to running the [Setup](#setup) commands above individually, one can use the `setup.sh` script to facilitate getting the two required shell envs ready to rock. 
+ +_Note: may have to manually run `source source venv/bin/activate` afterwards depending on shell env._ + +#### Dotenv + +One can set the environment variables via the `.env` file, which is read every time the process starts. It is recommended to copy the `.env.example` file and rename to `.env`. + ### Streaming If you would like the speech to start quicker via a streaming manner set the environment variable to enable. The concession is that the audio snippet is not saved in the `/narration` directory. diff --git a/narrator.py b/narrator.py index 3a652c3..8cc9add 100644 --- a/narrator.py +++ b/narrator.py @@ -3,12 +3,16 @@ import errno import os import time +from dotenv import load_dotenv from elevenlabs import generate, play, set_api_key, stream from openai import OpenAI from pynput import ( # Using pynput to listen for a keypress instead of native keyboard module which was requiring admin privileges keyboard, ) +# import environment variables from .env file +load_dotenv() + client = OpenAI() set_api_key(os.environ.get("ELEVENLABS_API_KEY")) diff --git a/requirements.txt b/requirements.txt index 0f145b6..d914e19 100644 --- a/requirements.txt +++ b/requirements.txt @@ -29,6 +29,7 @@ pydantic==2.4.2 pydantic_core==2.10.1 Pygments==2.16.1 pynput==1.7.6 +python-dotenv==1.0.0 requests==2.31.0 simpleaudio==1.0.4 six==1.16.0 diff --git a/setup.sh b/setup.sh index 823a544..c93c250 100755 --- a/setup.sh +++ b/setup.sh @@ -4,15 +4,10 @@ python3 -m pip install virtualenv python3 -m virtualenv venv -# source the virtual environment +# source the virtual environment to install dependencies source venv/bin/activate # install the dependencies pip install -r requirements.txt -# set the environment variables -export ELEVENLABS_VOICE_ID= -export OPENAI_API_KEY= -export ELEVENLABS_API_KEY= - -export ELEVENLABS_STREAMING=false +echo -e "\n\n\nSetup complete. Run $(source venv/bin/activate) to activate the virtual environment.\n\nAlso, please ensure your environment variables are set correctly in the .env file." From d3d540577ede80e16661c73aeac27366a63b1e5a Mon Sep 17 00:00:00 2001 From: Ray Smets Date: Fri, 24 Nov 2023 09:47:24 -0800 Subject: [PATCH 08/14] [Narrator] small refactor for clarity. 
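This refactor moves the keyboard-listener wiring below the functions it triggers without changing what it does; pynput is used instead of the native keyboard module because the latter required admin privileges. For reference, a minimal standalone sketch of the pynput pattern involved (the print placeholder and the final join are illustrative only, not part of the repository code):

```python
from pynput import keyboard  # pynput avoids the admin privileges the keyboard module needed


def on_press(key):
    if key == keyboard.Key.space:
        print("spacebar pressed")  # narrator.py triggers the image analysis here


def on_release(key):
    if key == keyboard.Key.esc:
        return False  # returning False stops the listener


# The listener runs on its own thread, so start() does not block the main loop.
listener = keyboard.Listener(on_press=on_press, on_release=on_release)
listener.start()
listener.join()  # for this standalone sketch, wait until Esc is pressed
```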
--- narrator.py | 37 ++++++++++++++++++------------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/narrator.py b/narrator.py index 8cc9add..776ae66 100644 --- a/narrator.py +++ b/narrator.py @@ -25,25 +25,6 @@ script = [] narrator = "Sir David Attenborough" -def on_press(key): - if key == keyboard.Key.space: - # When space bar is pressed, run the main function which analyzes the image and generates the audio - _main() - - -def on_release(key): - if key == keyboard.Key.esc: - # Stop listener - return False - - -# Create a listener -listener = keyboard.Listener(on_press=on_press, on_release=on_release) - -# Start the listener -listener.start() - - def encode_image(image_path): while True: try: @@ -152,6 +133,24 @@ def main(): time.sleep(5) +def on_press(key): + if key == keyboard.Key.space: + # When space bar is pressed, run the main function which analyzes the image and generates the audio + _main() + + +def on_release(key): + if key == keyboard.Key.esc: + # Stop listener + return False + + +# Create a listener +listener = keyboard.Listener(on_press=on_press, on_release=on_release) + +# Start the listener +listener.start() + if isPhotoBooth: print(f"Press the spacebar to trigger {narrator}") From 5cedbec10fc0e7da31f44fe02584d239248e6038 Mon Sep 17 00:00:00 2001 From: Ray Smets Date: Fri, 24 Nov 2023 10:00:32 -0800 Subject: [PATCH 09/14] [Image] file saved along side the corresponding audio file. --- narrator.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/narrator.py b/narrator.py index 776ae66..1bd83d8 100644 --- a/narrator.py +++ b/narrator.py @@ -51,7 +51,7 @@ def play_audio(text): stream(audio) return - # Save the audio to a file and play it + # Save the audio to a file unique_id = base64.urlsafe_b64encode(os.urandom(30)).decode("utf-8").rstrip("=") dir_path = os.path.join("narration", unique_id) os.makedirs(dir_path, exist_ok=True) @@ -60,6 +60,12 @@ def play_audio(text): with open(file_path, "wb") as f: f.write(audio) + # Copy the image analyzed to the same directory as the audio file + image_path = os.path.join(os.getcwd(), "./frames/frame.jpg") + new_image_path = os.path.join(dir_path, "image.jpg") + os.system(f"cp {image_path} {new_image_path}") + + # Play the audio play(audio) From ac703e9ebb36c1c4c4050c862327c534a587492a Mon Sep 17 00:00:00 2001 From: Ray Smets Date: Fri, 24 Nov 2023 10:09:03 -0800 Subject: [PATCH 10/14] [shutil] used thanks to trunk calling out security implication of the native os cp command via the script. 
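In other words, the shell-based copy is swapped for an in-process copy so that no file path is ever interpolated into a shell command. A rough sketch of the change (the `narration/example` destination is illustrative only):

```python
import os
import shutil

image_path = "frames/frame.jpg"  # frame written by capture.py
dir_path = "narration/example"   # illustrative destination directory
os.makedirs(dir_path, exist_ok=True)
new_image_path = os.path.join(dir_path, "image.jpg")

# Before: interpolating paths into a shell command, which trunk flagged
# os.system(f"cp {image_path} {new_image_path}")

# After: copy in-process, with no shell involved
shutil.copy(image_path, new_image_path)
```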
--- narrator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/narrator.py b/narrator.py index 1bd83d8..589aaf3 100644 --- a/narrator.py +++ b/narrator.py @@ -1,6 +1,7 @@ import base64 import errno import os +import shutil import time from dotenv import load_dotenv @@ -63,9 +64,8 @@ def play_audio(text): # Copy the image analyzed to the same directory as the audio file image_path = os.path.join(os.getcwd(), "./frames/frame.jpg") new_image_path = os.path.join(dir_path, "image.jpg") - os.system(f"cp {image_path} {new_image_path}") + shutil.copy(image_path, new_image_path) - # Play the audio play(audio) From b0770fd3ad90b81cd9af5bf07220629d38a9d0e6 Mon Sep 17 00:00:00 2001 From: Ray Smets Date: Fri, 24 Nov 2023 10:27:15 -0800 Subject: [PATCH 11/14] readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 70259e1..3caacd2 100644 --- a/README.md +++ b/README.md @@ -62,7 +62,7 @@ One can set the environment variables via the `.env` file, which is read every t ### Streaming -If you would like the speech to start quicker via a streaming manner set the environment variable to enable. The concession is that the audio snippet is not saved in the `/narration` directory. +If you would like the speech to start quicker via a streaming manner set the environment variable to enable. The concession is that the audio and corresponding image is not saved in the `/narration` directory. ```bash export ELEVENLABS_STREAMING=true From c3e86d8d146d151ece487dda2148b2a568f3b53c Mon Sep 17 00:00:00 2001 From: Ray Smets Date: Fri, 24 Nov 2023 10:29:23 -0800 Subject: [PATCH 12/14] readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3caacd2..3ff2da2 100644 --- a/README.md +++ b/README.md @@ -54,7 +54,7 @@ python narrator.py Alternative to running the [Setup](#setup) commands above individually, one can use the `setup.sh` script to facilitate getting the two required shell envs ready to rock. -_Note: may have to manually run `source source venv/bin/activate` afterwards depending on shell env._ +_Note: will have to run `source source venv/bin/activate` afterwards to activate the virtual env._ #### Dotenv From cee2b62b63125cb9e64d7fc4f59d390599589873 Mon Sep 17 00:00:00 2001 From: Ray Smets Date: Fri, 24 Nov 2023 11:04:01 -0800 Subject: [PATCH 13/14] Fix/imagesave (#7) * [Image] file saved along side the corresponding audio file (#6) * [Image] file saved along side the corresponding audio file. * [shutil] used thanks to trunk calling out security implication of the native os cp command via the script. * [README] small update to streaming section to mention image file is not saved either. * [README] small setup.sh wording refactor. 
* [Fix] narrator when streaming is enabled regardign image save logic --- narrator.py | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/narrator.py b/narrator.py index 589aaf3..89c184a 100644 --- a/narrator.py +++ b/narrator.py @@ -39,7 +39,7 @@ def encode_image(image_path): time.sleep(0.1) -def play_audio(text): +def play_audio(text, dir_path=None): audio = generate( text, voice=os.environ.get("ELEVENLABS_VOICE_ID"), @@ -52,10 +52,7 @@ def play_audio(text): stream(audio) return - # Save the audio to a file - unique_id = base64.urlsafe_b64encode(os.urandom(30)).decode("utf-8").rstrip("=") - dir_path = os.path.join("narration", unique_id) - os.makedirs(dir_path, exist_ok=True) + # Save the audio file to the directory file_path = os.path.join(dir_path, "audio.wav") with open(file_path, "wb") as f: @@ -113,17 +110,30 @@ def _main(): # path to your image image_path = os.path.join(os.getcwd(), "./frames/frame.jpg") + dir_path = None + if not isStreaming: + # create a unique directory to store the audio and image + unique_id = base64.urlsafe_b64encode(os.urandom(30)).decode("utf-8").rstrip("=") + dir_path = os.path.join("narration", unique_id) + os.makedirs(dir_path, exist_ok=True) + + # copy the image to the directory + new_image_path = os.path.join(dir_path, "image.jpg") + shutil.copy(image_path, new_image_path) + image_path = new_image_path + # getting the base64 encoding base64_image = encode_image(image_path) - # analyze posture + # analyze the image print(f"👀 {narrator} is watching...") analysis = analyze_image(base64_image, script=script) - print("🎙️ David says:") + print(f"🎙️ {narrator} says:") print(analysis) - play_audio(analysis) + # generate and play audio + play_audio(analysis, dir_path) script = script + [{"role": "assistant", "content": analysis}] From d57184945207319b3dfb6e59ed2106a05c7f3902 Mon Sep 17 00:00:00 2001 From: Ray Smets Date: Fri, 24 Nov 2023 11:15:58 -0800 Subject: [PATCH 14/14] Fix/imagesave (#9) * [Image] file saved along side the corresponding audio file (#6) * [Image] file saved along side the corresponding audio file. * [shutil] used thanks to trunk calling out security implication of the native os cp command via the script. * [README] small update to streaming section to mention image file is not saved either. * [README] small setup.sh wording refactor. * [Fix] narrator when streaming is enabled regarding image save logic (#8) * [Fix] narrator when streaming is enabled regardign image save logic * [Fix] imagesave. * cleanup
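
Taken together, the Fix/imagesave patches make the non-streaming path create the narration directory up front, copy the analyzed frame into it, and only then write the audio next to it. A condensed sketch of that flow, using the hypothetical helper name `prepare_narration_dir` (not the exact repository code):

```python
import base64
import os
import shutil


def prepare_narration_dir(frame_path="frames/frame.jpg"):
    # Create a unique directory under narration/ for this clip,
    # mirroring the behaviour added in the Fix/imagesave patches.
    unique_id = base64.urlsafe_b64encode(os.urandom(30)).decode("utf-8").rstrip("=")
    dir_path = os.path.join("narration", unique_id)
    os.makedirs(dir_path, exist_ok=True)

    # Keep the analyzed frame alongside the audio that will narrate it.
    shutil.copy(frame_path, os.path.join(dir_path, "image.jpg"))
    return dir_path
```

When streaming is enabled, `dir_path` stays `None` and nothing is written to disk, matching the early return in `play_audio`.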