Python ML Hand Gesture Control GUI 🖐 | Control Your PC with Gestures (No CV2/No Mediapipe)
Demo:
Click the video below 👇👇👇
Features:
- Hand gestures mapped to PC actions (launch apps, show desktop, lock screen)
- Modern GUI with dark mode
- Works with a trained ML model or a random simulation fallback
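To run it, install the dependencies first. These are the standard PyPI package names for the imports used below; imageio also needs the imageio-ffmpeg plugin to read from a webcam, and the Notepad / Win-key actions assume Windows:

pip install customtkinter imageio imageio-ffmpeg numpy pyautogui pillow tensorflow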
Code:
import customtkinter as ctk
import imageio
import threading
import numpy as np
import pyautogui
from PIL import Image, ImageTk
import tensorflow as tf
import os
import random
import subprocess  # used to launch apps without blocking the camera loop
# ===== GUI Settings =====
ctk.set_appearance_mode("dark")
ctk.set_default_color_theme("blue")
class GestureApp(ctk.CTk):
    def __init__(self):
        super().__init__()
        self.title("Fuzzu ML Hand Gesture Control (No CV2/No Mediapipe)")
        self.geometry("500x500")
        self.resizable(False, False)

        # Camera state
        self.camera = None
        self.gesture_enabled = False

        # Title
        self.label = ctk.CTkLabel(self, text="🖐 Hand Gesture Control",
                                  font=("Arial", 28, "bold"))
        self.label.pack(pady=15)

        # Video area
        self.video_label = ctk.CTkLabel(self, text="")
        self.video_label.pack(pady=10)

        # Buttons
        self.toggle_btn = ctk.CTkButton(self, text="Enable Gesture Control",
                                        command=self.toggle_gesture,
                                        width=200, height=50, fg_color="green")
        self.toggle_btn.pack(pady=10)

        self.quit_btn = ctk.CTkButton(self, text="Exit App", command=self.destroy,
                                      width=200, height=40, fg_color="red")
        self.quit_btn.pack(pady=10)

        # Load model (optional: replace with your gesture_model.h5)
        self.model = None
        if os.path.exists("gesture_model.h5"):
            self.model = tf.keras.models.load_model("gesture_model.h5")
    def toggle_gesture(self):
        if not self.gesture_enabled:
            self.gesture_enabled = True
            self.toggle_btn.configure(text="Disable Gesture Control", fg_color="orange")
            self.camera = imageio.get_reader("<video0>")  # default webcam (needs imageio-ffmpeg)
            threading.Thread(target=self.run_camera, daemon=True).start()
        else:
            self.gesture_enabled = False
            self.toggle_btn.configure(text="Enable Gesture Control", fg_color="green")
            if self.camera:
                self.camera.close()
    def run_camera(self):
        try:
            for frame in self.camera:
                if not self.gesture_enabled:
                    break
                img = Image.fromarray(frame)
                # Tkinter is not thread-safe: hand the frame to the main
                # thread with after() instead of touching widgets here
                self.after(0, self.show_frame, img)
                # Detect gesture (dummy / model-based)
                gesture = self.detect_gesture(frame)
                self.perform_action(gesture)
        except (RuntimeError, OSError):
            pass  # reader was closed while the loop was still iterating

    def show_frame(self, img):
        imgtk = ImageTk.PhotoImage(image=img)
        self.video_label.imgtk = imgtk  # keep a reference so Tk doesn't GC the image
        self.video_label.configure(image=imgtk)
    def detect_gesture(self, frame):
        """Replace this with real ML predictions."""
        if self.model:
            resized = np.array(Image.fromarray(frame).resize((64, 64))) / 255.0
            prediction = np.argmax(self.model.predict(resized.reshape(1, 64, 64, 3), verbose=0))
            gestures = ["OPEN_PALM", "ONE_FINGER", "TWO_FINGER", "FIST"]
            return gestures[prediction]
        else:
            # Random simulation if no model is present. Beware: this fires a
            # (possibly destructive) action on most frames, so the app may
            # lock the screen or exit almost immediately.
            return random.choice(["OPEN_PALM", "ONE_FINGER", "TWO_FINGER", "FIST", None])
    def perform_action(self, gesture):
        if gesture == "OPEN_PALM":
            print("🖐 Open palm → Exit App")
            self.after(0, self.destroy)  # destroy from the main thread
        elif gesture == "ONE_FINGER":
            print("☝ One finger → Open Notepad")
            subprocess.Popen(["notepad.exe"])  # non-blocking, unlike os.system
        elif gesture == "TWO_FINGER":
            print("✌ Two fingers → Show Desktop")
            pyautogui.hotkey('win', 'd')
        elif gesture == "FIST":
            print("✊ Fist → Lock Screen")
            pyautogui.hotkey('win', 'l')
if __name__ == "__main__":
    app = GestureApp()
    app.mainloop()
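If you have training data, you can create the gesture_model.h5 file the app looks for. The snippet below is only an illustrative sketch: the architecture is arbitrary, and the only hard requirements from detect_gesture are a (64, 64, 3) input scaled to [0, 1] and a 4-class softmax output ordered OPEN_PALM, ONE_FINGER, TWO_FINGER, FIST. You still need your own labeled webcam frames to train it.

import tensorflow as tf

# Illustrative only: any architecture works as long as the input is
# (64, 64, 3) floats in [0, 1] and the output has 4 classes ordered
# OPEN_PALM, ONE_FINGER, TWO_FINGER, FIST.
model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(64, 64, 3)),
    tf.keras.layers.Conv2D(16, 3, activation="relu"),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Conv2D(32, 3, activation="relu"),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(64, activation="relu"),
    tf.keras.layers.Dense(4, activation="softmax"),
])
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])

# model.fit(x_train, y_train, epochs=10)  # x_train: (N, 64, 64, 3), y_train: (N,) class ids
model.save("gesture_model.h5")  # save next to the GUI script

Sparse categorical cross-entropy is used so the labels can be plain integer class ids (0 to 3) rather than one-hot vectors.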