nowimsoham committed on
Commit
3b1a288
·
1 Parent(s): b1cf75c
Files changed (3) hide show
  1. .gitignore +8 -0
  2. requirements.txt +10 -0
  3. secmain.py +55 -0
.gitignore ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ flagged/
2
+ *.pt
3
+ *.png
4
+ *.jpg
5
+ *.h5
6
+ *.json
7
+ *.ipynb
8
+ gradio_cached_examples/
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ numpy==1.26.4
2
+ opencv-python==4.9.0.80
3
+ tensorflow==2.16.1
4
+ keras==3.3.3
5
+ pandas==2.2.2
6
+ matplotlib==3.8.2
7
+ tqdm==4.66.1
8
+ flask
9
+ pillow
10
+ gradio
secmain.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+ import gradio as gr
4
+ from keras._tf_keras.keras.models import model_from_json
5
+ from tensorflow import keras # Assuming TensorFlow backend for your model
6
+
7
# Function to load the model (architecture from JSON, weights from HDF5)
def load_model():
    """Rebuild the emotion-detection model from its saved files.

    Reads the network architecture from ``emotiondetector.json`` and the
    trained weights from ``emotiondetectorweights.weights.h5``, both expected
    in the current working directory.

    Returns:
        The reconstructed Keras model, ready for inference.
    """
    # Context manager guarantees the file handle is closed even if the read
    # raises (the original open()/read()/close() leaked the handle on error).
    with open("emotiondetector.json", "r") as json_file:
        model_json = json_file.read()
    model = model_from_json(model_json)
    model.load_weights("emotiondetectorweights.weights.h5")
    return model
15
+
16
+
17
# Function to extract features from an image
def extract_features(image):
    """Turn a BGR face crop into a normalized model input tensor.

    The crop is grayscaled, resized to 48x48, reshaped to the model's
    (1, 48, 48, 1) input layout, and scaled into [0, 1].
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)   # single-channel input
    resized = cv2.resize(gray, (48, 48))             # model's expected size
    batch = np.asarray(resized).reshape(1, 48, 48, 1)
    return batch / 255.0                             # normalize pixel values
23
+
24
# Function to detect emotions in an image
def detect_emotion(image):
    """Detect the dominant emotion on the first face found in *image*.

    Args:
        image: BGR image array as delivered by the Gradio image input.

    Returns:
        The predicted emotion label (a key of the module-level ``labels``
        mapping), or the string "No face detected" when no face is found.
    """
    # Load the model and Haar cascade ONCE and reuse them on later calls —
    # the original reloaded both from disk on every inference, which is slow.
    if not hasattr(detect_emotion, "_model"):
        detect_emotion._model = load_model()
        detect_emotion._cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

    faces = detect_emotion._cascade.detectMultiScale(image, 1.3, 5)
    if len(faces) == 0:
        return "No face detected"  # Handle no face case

    # Only the first detected face is classified, as in the original.
    (p, q, r, s) = faces[0]  # (x, y, width, height) of the face rectangle
    face_features = extract_features(image[q:q + s, p:p + r])
    prediction = detect_emotion._model.predict(face_features)
    return labels[prediction.argmax()]
39
+
40
# Define emotion labels (class index -> name; order must match the model's
# training-time class order — TODO confirm against the training notebook)
labels = {0: 'angry', 1: 'disgust', 2: 'fear', 3: 'happy',
          4: 'neutral', 5: 'sad', 6: 'surprise'}

# Gradio interface definition
interface = gr.Interface(
    fn=detect_emotion,
    inputs="image",
    outputs="text",
    description="Emotion Detection with Your Model",
    # allow_flagging expects "never"/"auto"/"manual", not a boolean.
    # "manual" shows the flag button, matching the original intent of
    # letting users report issues.
    allow_flagging="manual",
)

# Launch the Gradio app on Hugging Face Spaces
interface.launch(share=True)  # Set "share=True" for public deployment
55
+