Adding leageoflegends
parent 1174275b5b
commit 95f6778cf0

@@ -0,0 +1,11 @@
+{
+    "files.exclude": {
+        "**/.git": true,
+        "**/.svn": true,
+        "**/.hg": true,
+        "**/CVS": true,
+        "**/.DS_Store": true,
+        "**/Thumbs.db": true
+    },
+    "hide-files.files": []
+}

@@ -0,0 +1,31 @@
+import os
+import time
+import pyautogui
+from PIL import Image
+
+# Folder
+folder = "frames"
+frames_dir = os.path.join(os.getcwd(), folder)
+os.makedirs(frames_dir, exist_ok=True)
+
+while True:
+
+    # Wait for Chrome to activate
+    time.sleep(2)
+
+    # Take a screenshot
+    screenshot = pyautogui.screenshot()
+
+    # Convert screenshot to RGB (JPEG does not support RGBA)
+    screenshot_rgb = screenshot.convert('RGB')
+
+    # Optional: Resize the image
+    max_size = 250
+    ratio = max_size / max(screenshot_rgb.size)
+    new_size = tuple([int(x * ratio) for x in screenshot_rgb.size])
+    resized_img = screenshot_rgb.resize(new_size, Image.LANCZOS)
+
+    # Save the frame as an image file
+    print("📸 Captured Chrome tab. Saving frame.")
+    path = f"{frames_dir}/frame.jpg"
+    resized_img.save(path)

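A note on the capture loop above: despite the "Captured Chrome tab" message, pyautogui.screenshot() grabs the entire screen. If only the browser window should be captured, pyautogui accepts an optional region argument; the sketch below is not part of this commit and its coordinates are placeholders.

# Hypothetical variant, not part of this commit: capture only a fixed screen
# region instead of the full screen. The coordinates are placeholders.
import os
import pyautogui

os.makedirs("frames", exist_ok=True)
region = (0, 0, 1280, 720)  # left, top, width, height of the area to grab
screenshot = pyautogui.screenshot(region=region)
screenshot.convert("RGB").save(os.path.join("frames", "frame.jpg"))
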
@@ -0,0 +1,26 @@
+import os
+import time
+from playsound import playsound
+
+def find_latest_folder(base_path):
+    all_folders = [os.path.join(base_path, d) for d in os.listdir(base_path) if os.path.isdir(os.path.join(base_path, d))]
+    latest_folder = max(all_folders, key=os.path.getmtime)
+    return latest_folder
+
+def main():
+    base_path = 'narration'  # Base path to the narration folder
+    last_played_folder = None
+
+    while True:
+        latest_folder = find_latest_folder(base_path)
+        audio_file = os.path.join(latest_folder, 'audio.wav')
+
+        if os.path.exists(audio_file) and latest_folder != last_played_folder:
+            print(f"Playing audio from {audio_file}")
+            playsound(audio_file)
+            last_played_folder = latest_folder
+        # After the audio finishes, wait for a short period before checking for new folders
+        # time.sleep(2)
+
+if __name__ == "__main__":
+    main()

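One caveat with the player loop above: find_latest_folder() takes max() over the subfolders of narration, so it raises FileNotFoundError when that folder does not exist yet and ValueError while it is still empty. A guard like the following (not part of the commit, names illustrative) could run once before the main loop starts.

# Hypothetical guard, not part of this commit: block until the narration
# folder exists and contains at least one subfolder to play from.
import os
import time

def wait_for_first_narration(base_path="narration", poll_seconds=1.0):
    while True:
        if os.path.isdir(base_path) and any(
            os.path.isdir(os.path.join(base_path, entry))
            for entry in os.listdir(base_path)
        ):
            return
        time.sleep(poll_seconds)
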
@@ -0,0 +1,111 @@
+import os
+from openai import OpenAI
+import base64
+import json
+import time
+import simpleaudio as sa
+import errno
+from elevenlabs import generate, play, set_api_key, voices
+from playsound import playsound
+
+client = OpenAI()
+
+set_api_key(os.environ.get("ELEVENLABS_API_KEY"))
+
+def encode_image(image_path):
+    while True:
+        try:
+            with open(image_path, "rb") as image_file:
+                return base64.b64encode(image_file.read()).decode("utf-8")
+        except IOError as e:
+            if e.errno != errno.EACCES:
+                # Not a "file in use" error, re-raise
+                raise
+            # File is being written to, wait a bit and retry
+            time.sleep(0.1)
+
+
+def play_audio(text):
+    audio = generate(text, voice=os.environ.get("ELEVENLABS_VOICE_ID"))
+
+    unique_id = base64.urlsafe_b64encode(os.urandom(30)).decode("utf-8").rstrip("=")
+    dir_path = os.path.join("narration", unique_id)
+    os.makedirs(dir_path, exist_ok=True)
+    file_path = os.path.join(dir_path, "audio.wav")
+
+    with open(file_path, "wb") as f:
+        f.write(audio)
+
+    # play(audio)
+    # playsound(file_path)
+
+
+def generate_new_line(base64_image):
+    return [
+        {
+            "role": "user",
+            "content": [
+                {"type": "text", "text": "Describe this image"},
+                {
+                    "type": "image_url",
+                    "image_url": f"data:image/jpeg;base64,{base64_image}",
+                },
+            ],
+        },
+    ]
+
+
+def analyze_image(base64_image, script):
+    response = client.chat.completions.create(
+        model="gpt-4-vision-preview",
+        messages=[
+            {
+                "role": "system",
+                "content": """
+                You are real commentator, express more.
+                Narrate with intensity.
+                You are watching a game.
+                Don't repeat yourself. Comment in one line.
+                Focus on the minute details.
+                Mention the individual by what the text near the head says.
+                Focus on what's happenning in the game.
+                Comments not more than 20 words.
+                Don't explain about the game, but focus on activities in the game.
+                """,
+            },
+        ]
+        + script
+        + generate_new_line(base64_image),
+        max_tokens=500,
+    )
+    response_text = response.choices[0].message.content
+    return response_text
+
+
+def main():
+    script = []
+
+    while True:
+        # path to your image
+        image_path = os.path.join(os.getcwd(), "./frames/frame.jpg")
+
+        # getting the base64 encoding
+        base64_image = encode_image(image_path)
+
+        # analyze posture
+        print("👀 Mervin Praison is watching...")
+        analysis = analyze_image(base64_image, script=script)
+
+        print("🎙️ Mervin Praison says:")
+        print(analysis)
+
+        play_audio(analysis)
+
+        script = script + [{"role": "assistant", "content": analysis}]
+
+        # wait for 5 seconds
+        # time.sleep(1)
+
+
+if __name__ == "__main__":
+    main()

@@ -6,6 +6,7 @@ import time
 import simpleaudio as sa
 import errno
 from elevenlabs import generate, play, set_api_key, voices
+from playsound import playsound

 client = OpenAI()


@@ -36,6 +37,7 @@ def play_audio(text):
         f.write(audio)

     play(audio)
+    playsound(file_path)


 def generate_new_line(base64_image):

@@ -84,10 +86,10 @@ def main():
         base64_image = encode_image(image_path)

         # analyze posture
-        print("👀 David is watching...")
+        print("👀 Mervin Praison is watching...")
         analysis = analyze_image(base64_image, script=script)

-        print("🎙️ David says:")
+        print("🎙️ Mervin Praison says:")
         print(analysis)

         play_audio(analysis)

@@ -39,3 +39,5 @@ typing_extensions==4.8.0
 urllib3==2.0.7
 wcwidth==0.2.10
 websockets==12.0
+playsound
+pyautogui

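Read together, the new scripts appear to form a simple pipeline: the capture loop keeps overwriting frames/frame.jpg with a resized screenshot, the narrator loop base64-encodes that frame, asks gpt-4-vision-preview for a one-line comment, and writes ElevenLabs audio to narration/<unique_id>/audio.wav, and the player loop plays whichever narration folder is newest. The script file names are not visible in this diff, so the names below (capture.py, narrator.py, player.py) are assumptions; a minimal launcher sketch, under those assumptions, could start all three loops from one terminal.

# Hypothetical launcher; the actual script names are not visible in this diff.
import subprocess
import sys

SCRIPTS = ["capture.py", "narrator.py", "player.py"]  # assumed file names

processes = [subprocess.Popen([sys.executable, name]) for name in SCRIPTS]
try:
    for process in processes:
        process.wait()
except KeyboardInterrupt:
    # Stop all three loops together on Ctrl+C.
    for process in processes:
        process.terminate()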