"""Gradio app: detect the dominant facial emotion in an uploaded image.

Loads a pre-trained Keras CNN (architecture from ``emotiondetector.json``,
weights from ``emotiondetectorweights.weights.h5``) once at startup, finds a
face with an OpenCV Haar cascade, and classifies the face crop into one of
seven emotion labels.
"""

import cv2
import numpy as np
import gradio as gr
from keras._tf_keras.keras.models import model_from_json
from tensorflow import keras

# Class index -> human-readable emotion name (must match training order).
labels = {0: 'Angry', 1: 'Disgust', 2: 'Fear', 3: 'Happy',
          4: 'Neutral', 5: 'Sad', 6: 'Surprise'}


def load_model():
    """Reconstruct the model from its JSON architecture and load weights.

    Returns:
        A ready-to-use Keras model.
    """
    # Context manager guarantees the file handle is closed even on error.
    with open("emotiondetector.json", "r") as json_file:
        model_json = json_file.read()
    model = model_from_json(model_json)
    model.load_weights("emotiondetectorweights.weights.h5")
    return model


# Load the model and cascade ONCE at startup — the original reloaded both on
# every inference call, adding seconds of latency per request.
model = load_model()
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')


def extract_features(image):
    """Preprocess a face crop into the model's expected input tensor.

    Args:
        image: RGB uint8 array (any size) containing a face crop.

    Returns:
        Float array of shape (1, 48, 48, 1), scaled to [0, 1].
    """
    # Gradio supplies RGB arrays — BGR2GRAY would swap the R/B luminance
    # coefficients and skew the grayscale values fed to the model.
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)  # Convert to grayscale
    image = cv2.resize(image, (48, 48))              # Model expects 48x48
    feature = np.array(image).reshape(1, 48, 48, 1)  # Add batch/channel dims
    return feature / 255.0


def detect_emotion(image):
    """Classify the emotion of the first detected face in *image*.

    Args:
        image: RGB uint8 numpy array from the Gradio image input.

    Returns:
        dict with ``label`` and ``Confidence`` keys on success, or the
        string ``"No face detected"`` when the cascade finds no face.
    """
    faces = face_cascade.detectMultiScale(image, 1.3, 5)
    if len(faces) == 0:
        return "No face detected"  # Handle no-face case

    # Only the first detected face is classified.
    (p, q, r, s) = faces[0]
    face_image = image[q:q + s, p:p + r]
    face_features = extract_features(face_image)

    prediction = model.predict(face_features)
    prediction_label = labels[prediction.argmax()]
    confidence = np.max(prediction) * 100
    return {"label": prediction_label, "Confidence": f"{confidence:.2f}%"}


# Gradio interface definition
interface = gr.Interface(
    fn=detect_emotion,
    inputs="image",
    outputs="text",
    description="Emotion Detection with Your Model",
    # Gradio expects "never"/"auto"/"manual" here, not a bool; "manual"
    # lets users flag (report) problematic predictions, as intended.
    allow_flagging="manual",
)

if __name__ == "__main__":
    interface.launch(share=True)