annaferrari02 committed on
Commit 5958fad · verified · 1 Parent(s): c9b0379

Upload 8 files

.gitattributes CHANGED
@@ -1,35 +1,35 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text

+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,3 @@
+ ---
+ license: mit
+ ---
multiclass_model.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea235b1b90083e084e441f294cf7a960d3fc17573b33323fdd68949fa8da460c
+ size 1935
pca_lda_params.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:671fb557f0f5451e42f06b5d2f8d5fe7410d1310cf443f0ba1c077555766f4aa
+ size 95234738
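
Both .pkl files above are Git LFS pointers; the actual binaries are the artifacts that train.py saves and script.py loads. A minimal sketch of what they contain once pulled locally (the interactive session and relative paths are assumptions, not part of the commit):

import pickle

# Artifacts saved by train.py (paths assume the repository root)
with open("multiclass_model.pkl", "rb") as f:
    svm_model = pickle.load(f)          # fitted sklearn.svm.SVC on the reduced features

with open("pca_lda_params.pkl", "rb") as f:
    combined_params = pickle.load(f)    # dict produced by fit_pca_lda_transformer

# Keys stored by fit_pca_lda_transformer (see utils/utils.py)
print(combined_params["n_pca_components"], combined_params["n_lda_components"])
print(type(combined_params["pca_params"]["pca_model"]))  # sklearn PCA
print(type(combined_params["lda_model"]))                 # LinearDiscriminantAnalysis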
script.py ADDED
@@ -0,0 +1,82 @@
+ """
+ Inference script
+ Version combining baseline structure with enhanced features
+ """
+
+ import os
+ import pickle
+ import cv2
+ import pandas as pd
+ import numpy as np
+ from utils.utils import extract_features_from_image, apply_pca_lda_transform
+
+
+ def run_inference(TEST_IMAGE_PATH, svm_model, pca_params, SUBMISSION_CSV_SAVE_PATH):
+     """
+     Run inference on test images
+
+     Args:
+         TEST_IMAGE_PATH: Path to test images (/tmp/data/test_images)
+         svm_model: Trained SVM model
+         pca_params: Dictionary containing the saved PCA+LDA transformation parameters
+         SUBMISSION_CSV_SAVE_PATH: Path to save submission.csv
+     """
+
+     # Load test images
+     test_images = os.listdir(TEST_IMAGE_PATH)
+     test_images.sort()
+
+     # Extract features from all test images
+     image_feature_list = []
+
+     for test_image in test_images:
+         path_to_image = os.path.join(TEST_IMAGE_PATH, test_image)
+
+         image = cv2.imread(path_to_image)
+
+         # Extract features (using enhanced features by default)
+         image_features = extract_features_from_image(image)
+
+         image_feature_list.append(image_features)
+
+     features_array = np.array(image_feature_list)
+
+     # Apply PCA+LDA transformation using saved parameters
+     features_reduced = apply_pca_lda_transform(features_array, pca_params)
+
+     # Run predictions
+     predictions = svm_model.predict(features_reduced)
+
+     # Create submission CSV
+     df_predictions = pd.DataFrame({
+         "file_name": test_images,
+         "category_id": predictions
+     })
+
+     df_predictions.to_csv(SUBMISSION_CSV_SAVE_PATH, index=False)
+
+
+ if __name__ == "__main__":
+
+     # Paths
+     current_directory = os.path.dirname(os.path.abspath(__file__))
+     TEST_IMAGE_PATH = "/tmp/data/test_images"
+
+     MODEL_NAME = "multiclass_model.pkl"
+     MODEL_PATH = os.path.join(current_directory, MODEL_NAME)
+
+     PCA_LDA_PARAMS_NAME = "pca_lda_params.pkl"
+     PCA_LDA_PARAMS_PATH = os.path.join(current_directory, PCA_LDA_PARAMS_NAME)
+
+     SUBMISSION_CSV_SAVE_PATH = os.path.join(current_directory, "submission.csv")
+
+     # Load trained SVM model
+     with open(MODEL_PATH, 'rb') as file:
+         svm_model = pickle.load(file)
+
+     # Load PCA+LDA parameters
+     with open(PCA_LDA_PARAMS_PATH, 'rb') as file:
+         pca_params = pickle.load(file)
+
+     # Run inference
+     run_inference(TEST_IMAGE_PATH, svm_model, pca_params, SUBMISSION_CSV_SAVE_PATH)
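
run_inference can also be called outside the __main__ block, e.g. for a local smoke test. The sketch below is illustrative only (placeholder paths, artifacts assumed to sit next to the script); it is not part of the committed file:

import pickle
from script import run_inference

with open("multiclass_model.pkl", "rb") as f:
    svm_model = pickle.load(f)
with open("pca_lda_params.pkl", "rb") as f:
    pca_params = pickle.load(f)

# Writes a CSV with columns file_name, category_id (one row per test image)
run_inference("./test_images", svm_model, pca_params, "./submission.csv")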
train.py ADDED
@@ -0,0 +1,160 @@
+ """
+ Training script for surgical instrument classification
+ """
+
+ import os
+ import pickle
+ import cv2
+ import pandas as pd
+ import numpy as np
+ from utils.utils import extract_features_from_image, fit_pca_lda_transformer, train_svm_model
+
+
+ def train_and_save_model(base_path, images_folder, gt_csv, save_dir, n_components=100):
+     """
+     Complete training pipeline that saves everything needed for submission
+
+     Args:
+         base_path: Base directory path
+         images_folder: Folder name containing images
+         gt_csv: Ground truth CSV filename
+         save_dir: Directory to save model artifacts
+         n_components: Number of PCA components
+     """
+
+     print("="*80)
+     print("TRAINING SURGICAL INSTRUMENT CLASSIFIER")
+     print("="*80)
+
+     # Setup paths
+     PATH_TO_GT = os.path.join(base_path, gt_csv)
+     PATH_TO_IMAGES = os.path.join(base_path, images_folder)
+
+     print(f"\nConfiguration:")
+     print(f" Ground Truth: {PATH_TO_GT}")
+     print(f" Images: {PATH_TO_IMAGES}")
+     print(f" PCA Components: {n_components}")
+     print(f" Save directory: {save_dir}")
+
+     # Load ground truth
+     df = pd.read_csv(PATH_TO_GT)
+     print(f"\nLoaded {len(df)} training samples")
+     print(f"\nLabel distribution:")
+     print(df['category_id'].value_counts().sort_index())
+
+     # Extract features
+     print(f"\n{'='*80}")
+     print("STEP 1: FEATURE EXTRACTION")
+     print("="*80)
+
+     features = []
+     labels = []
+
+     for i in range(len(df)):
+         if i % 500 == 0:
+             print(f" Processing {i}/{len(df)}...")
+
+         image_name = df.iloc[i]["file_name"]
+         label = df.iloc[i]["category_id"]
+
+         path_to_image = os.path.join(PATH_TO_IMAGES, image_name)
+
+         try:
+             image = cv2.imread(path_to_image)
+             if image is None:
+                 print(f" Warning: Could not read {image_name}, skipping...")
+                 continue
+
+             # Extract features with enhanced configuration
+             image_features = extract_features_from_image(image)
+
+             features.append(image_features)
+             labels.append(label)
+
+         except Exception as e:
+             print(f" Error processing {image_name}: {e}")
+             continue
+
+     features_array = np.array(features)
+     labels_array = np.array(labels)
+
+     print(f"\nFeature extraction complete!")
+     print(f" Features shape: {features_array.shape}")
+     print(f" Labels shape: {labels_array.shape}")
+     print(f" Feature dimension: {features_array.shape[1]}")
+
+     # Apply PCA+LDA
+     print(f"\n{'='*80}")
+     print("STEP 2: HYBRID DIMENSIONALITY REDUCTION (PCA → LDA)")
+     print("="*80)
+
+     combined_params, features_reduced = fit_pca_lda_transformer(
+         features_array,
+         labels_array,
+         n_pca_components=n_components
+     )
+
+     print(f"\n Final dimension: {features_reduced.shape[1]}")
+     print(f" Compression ratio: {features_array.shape[1] / features_reduced.shape[1]:.1f}x")
+
+     # Train SVM
+     print(f"\n{'='*80}")
+     print("STEP 3: TRAINING SVM CLASSIFIER")
+     print("="*80)
+
+     train_results = train_svm_model(features_reduced, labels_array)
+
+     svm_model = train_results['model']
+
+     print(f"\nTraining complete!")
+     print(f" Support vectors: {len(svm_model.support_)}")
+
+     # Save model artifacts
+     print(f"\n{'='*80}")
+     print("STEP 4: SAVING MODEL ARTIFACTS")
+     print("="*80)
+
+     os.makedirs(save_dir, exist_ok=True)
+
+     # Save SVM model
+     model_path = os.path.join(save_dir, "multiclass_model.pkl")
+     with open(model_path, "wb") as f:
+         pickle.dump(svm_model, f)
+     print(f" ✓ Saved SVM model: {model_path}")
+
+     # Save combined PCA+LDA parameters
+     params_path = os.path.join(save_dir, "pca_lda_params.pkl")
+     with open(params_path, "wb") as f:
+         pickle.dump(combined_params, f)
+     print(f" ✓ Saved PCA+LDA params: {params_path}")
+
+     print(f"\n{'='*80}")
+     print("TRAINING COMPLETE!")
+     print("="*80)
+     print(f"\nFinal Results:")
+     print(f" Train Accuracy: {train_results['train_accuracy']:.4f}")
+     print(f" Test Accuracy: {train_results['test_accuracy']:.4f}")
+     print(f" Test F1-score: {train_results['test_f1']:.4f}")
+     print(f"\nFiles saved to: {save_dir}")
+     print(f"\nNext steps:")
+     print(f" 1. Create a 'utils' folder in your HuggingFace repository")
+     print(f" 2. Copy utils.py into the 'utils' folder")
+     print(f" 3. Copy script.py, multiclass_model.pkl, and pca_lda_params.pkl to the repository root")
+     print(f" 4. Create an empty __init__.py file in the 'utils' folder")
+     print(f" 5. Submit to competition!")
+
+
+ if __name__ == "__main__":
+
+     BASE_PATH = "C:/Users/anna2/ISM/ANNA/phase1a2"
+     IMAGES_FOLDER = "C:/Users/anna2/ISM/Images"
+     GT_CSV = "C:/Users/anna2/ISM/Baselines/phase_1a/gt_for_classification_multiclass_from_filenames_0_index.csv"
+
+     SAVE_DIR = "C:/Users/anna2/ISM/ANNA/phase1a2/submission"
+
+     # Number of PCA components (can be adjusted)
+     N_COMPONENTS = 250
+
+     # Train and save
+     train_and_save_model(BASE_PATH, IMAGES_FOLDER, GT_CSV, SAVE_DIR, N_COMPONENTS)
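
train_and_save_model reads only the file_name and category_id columns from the ground-truth CSV. That CSV is not part of this commit; the rows below are purely illustrative of the expected layout:

import pandas as pd

# Hypothetical example file; column names are taken from the df accesses in train.py
gt = pd.DataFrame({
    "file_name": ["img_0001.png", "img_0002.png"],
    "category_id": [0, 3],
})
gt.to_csv("gt_example.csv", index=False)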
utils/__pycache__/utils.cpython-312.pyc ADDED
Binary file (14.6 kB).
 
utils/utils.py ADDED
@@ -0,0 +1,370 @@
+ """
+ Utility functions for surgical instrument classification
+ """
+
+ import cv2
+ import numpy as np
+ from skimage.feature import graycomatrix, graycoprops, local_binary_pattern, hog
+ from sklearn.decomposition import PCA
+ from sklearn.svm import SVC
+ from sklearn.model_selection import train_test_split
+ from sklearn.metrics import accuracy_score, f1_score
+ from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
+
+
+ def preprocess_image(image):
+     """
+     Apply CLAHE preprocessing for better contrast
+     (Contrast Limited Adaptive Histogram Equalization)
+     """
+     # Convert to LAB color space (separates lightness, L, from the color channels)
+     lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
+     l, a, b = cv2.split(lab)
+
+     # Apply CLAHE to the L channel: the image is split into an 8x8 grid and the
+     # contrast enhancement is applied per tile instead of to the full image
+     clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
+     l = clahe.apply(l)
+
+     # Merge the enhanced lightness channel back with the A and B channels
+     enhanced = cv2.merge([l, a, b])
+     enhanced = cv2.cvtColor(enhanced, cv2.COLOR_LAB2BGR)  # back to BGR for later use
+
+     return enhanced
+
+
+ # Same as the baseline code, which works well, so it is kept:
+ # it computes normalized color histograms for the three color channels
+ def rgb_histogram(image, bins=256):
+     """Extract RGB histogram features"""
+     hist_features = []
+     for i in range(3):  # RGB channels
+         hist, _ = np.histogram(image[:, :, i], bins=bins, range=(0, 256), density=True)
+         hist_features.append(hist)
+     return np.concatenate(hist_features)
+
+
+ def hu_moments(image):
+     """Extract Hu moment features (expects BGR input);
+     provides shape descriptors that are invariant
+     to position, size and rotation"""
+     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # grayscale (works on 1 channel)
+     moments = cv2.moments(gray)
+     hu_moments = cv2.HuMoments(moments).flatten()
+     return hu_moments
+
+
+ def glcm_features(image, distances=[1], angles=[0], levels=256, symmetric=True, normed=True):
+     """Extract GLCM texture features;
+     captures texture information by considering the spatial
+     relationship between pixel intensities. Works well alongside the RGB and Hu features"""
+     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+     glcm = graycomatrix(gray, distances=distances, angles=angles, levels=levels,
+                         symmetric=symmetric, normed=normed)
+     contrast = graycoprops(glcm, 'contrast').flatten()
+     dissimilarity = graycoprops(glcm, 'dissimilarity').flatten()
+     homogeneity = graycoprops(glcm, 'homogeneity').flatten()
+     energy = graycoprops(glcm, 'energy').flatten()
+     correlation = graycoprops(glcm, 'correlation').flatten()
+     asm = graycoprops(glcm, 'ASM').flatten()
+     return np.concatenate([contrast, dissimilarity, homogeneity, energy, correlation, asm])
+
+
+ def local_binary_pattern_features(image, P=8, R=1):
+     """Extract Local Binary Pattern features; robust to lighting changes
+     when combined with the RGB, Hu and GLCM features"""
+     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+     lbp = local_binary_pattern(gray, P, R, method='uniform')
+     (hist, _) = np.histogram(lbp.ravel(), bins=np.arange(0, P + 3),
+                              range=(0, P + 2), density=True)
+     return hist  # feature vector representing the texture of the image
+
+
+ def hog_features(image, orientations=12, pixels_per_cell=(8, 8), cells_per_block=(2, 2)):
+     """
+     Extract HOG (Histogram of Oriented Gradients) features
+     Great for capturing shape and edge information in surgical instruments
+     """
+     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+
+     # Resize to standard size for consistency
+     gray_resized = cv2.resize(gray, (256, 256))
+
+     hog_features_vector = hog(
+         gray_resized,
+         orientations=orientations,
+         pixels_per_cell=pixels_per_cell,
+         cells_per_block=cells_per_block,
+         block_norm='L2-Hys',
+         feature_vector=True
+     )
+
+     # Returns a vector capturing local edge directions and shape information,
+     # useful for detecting instruments, objects, or structural patterns
+     return hog_features_vector
+
+
+ def luv_histogram(image, bins=32):
+     """
+     Extract histogram in LUV color space (lightness and chromatic components
+     instead of BGR); LUV is perceptually uniform and better suited for surgical imaging
+     """
+     luv = cv2.cvtColor(image, cv2.COLOR_BGR2LUV)
+     hist_features = []
+     for i in range(3):
+         hist, _ = np.histogram(luv[:, :, i], bins=bins, range=(0, 256), density=True)
+         hist_features.append(hist)
+     return np.concatenate(hist_features)
+
+
+ def gabor_features(image, frequencies=[0.1, 0.2, 0.3],
+                    orientations=[0, 45, 90, 135]):
+     """
+     Extract Gabor filter features (Gabor kernels):
+     texture descriptors that deal well with different scales and orientations
+     """
+     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # uses intensity, not color
+     features = []
+
+     for freq in frequencies:
+         for theta in orientations:
+             theta_rad = theta * np.pi / 180
+             kernel = cv2.getGaborKernel((21, 21), 5, theta_rad,
+                                         10.0 / freq, 0.5, 0)
+             filtered = cv2.filter2D(gray, cv2.CV_32F, kernel)
+             features.append(np.mean(filtered))
+             features.append(np.std(filtered))
+
+     return np.array(features)
+
+
+ def extract_features_from_image(image):
+     """
+     Extract enhanced features from image
+     Uses baseline features + HOG + LUV histogram + Gabor for better performance
+
+     Args:
+         image: Input image (BGR format from cv2.imread)
+
+     Returns:
+         Feature vector as numpy array
+     """
+     # Preprocess image first
+     image = preprocess_image(image)
+
+     # Baseline features
+     hist_features = rgb_histogram(image)
+     hu_features = hu_moments(image)
+     glcm_features_vector = glcm_features(image)
+     lbp_features = local_binary_pattern_features(image)
+
+     # Enhanced features that add discriminative power for complex images
+     hog_feat = hog_features(image)
+     luv_hist = luv_histogram(image)
+     gabor_feat = gabor_features(image)
+
+     # Concatenate all features (produces a single vector)
+     image_features = np.concatenate([
+         hist_features,
+         hu_features,
+         glcm_features_vector,
+         lbp_features,
+         hog_feat,
+         luv_hist,
+         gabor_feat
+     ])
+
+     return image_features  # comprehensive numerical representation of the image
+
+
+ def fit_pca_transformer(data, num_components):
+     """
+     Fit a PCA transformer on training data
+
+     Args:
+         data: Training data (n_samples, n_features)
+         num_components: Number of PCA components to keep
+
+     Returns:
+         pca_params: Dictionary containing PCA parameters
+         data_reduced: PCA-transformed data
+     """
+     # Standardize the data
+     mean = np.mean(data, axis=0)
+     std = np.std(data, axis=0)
+
+     # Avoid division by zero
+     std[std == 0] = 1.0
+
+     data_standardized = (data - mean) / std
+
+     # Fit PCA using sklearn
+     pca_model = PCA(n_components=num_components)
+     data_reduced = pca_model.fit_transform(data_standardized)
+
+     # Create params dictionary
+     pca_params = {
+         'pca_model': pca_model,
+         'mean': mean,
+         'std': std,
+         'num_components': num_components,
+         'feature_dim': data.shape[1],
+         'explained_variance_ratio': pca_model.explained_variance_ratio_,
+         'cumulative_variance': np.cumsum(pca_model.explained_variance_ratio_)
+     }
+
+     return pca_params, data_reduced
+
+
+ def apply_pca_transform(data, pca_params):
+     """
+     Apply saved PCA transformation to new data
+     CRITICAL: this uses the saved mean/std/PCA from training
+
+     Args:
+         data: New data to transform (n_samples, n_features)
+         pca_params: Dictionary from fit_pca_transformer
+
+     Returns:
+         Transformed data
+     """
+     # Standardize using training mean/std
+     data_standardized = (data - pca_params['mean']) / pca_params['std']
+
+     # Apply PCA transformation: projects new data onto the same principal
+     # components computed from the training data
+     data_reduced = pca_params['pca_model'].transform(data_standardized)
+
+     return data_reduced
+
+
+ def train_svm_model(features, labels, test_size=0.2, kernel='rbf', C=1.0, gamma='scale'):
+     """
+     Train an SVM model and return both the model and performance metrics
+
+     Args:
+         features: Feature matrix (n_samples, n_features)
+         labels: Label array (n_samples,)
+         test_size: Proportion for test split
+         kernel: SVM kernel type
+         C: SVM regularization parameter
+         gamma: Kernel coefficient ('scale', 'auto', or float value)
+
+     Returns:
+         Dictionary containing model and metrics
+     """
+     # Check if labels are one-hot encoded
+     if labels.ndim > 1 and labels.shape[1] > 1:
+         labels = np.argmax(labels, axis=1)
+
+     # Split data
+     X_train, X_test, y_train, y_test = train_test_split(
+         features, labels, test_size=test_size, random_state=42, stratify=labels
+     )
+
+     # Train SVM
+     svm_model = SVC(kernel=kernel, C=C, gamma=gamma, random_state=42)
+     svm_model.fit(X_train, y_train)
+
+     # Evaluate
+     y_train_pred = svm_model.predict(X_train)
+     y_test_pred = svm_model.predict(X_test)
+
+     train_accuracy = accuracy_score(y_train, y_train_pred)
+     test_accuracy = accuracy_score(y_test, y_test_pred)
+     test_f1 = f1_score(y_test, y_test_pred, average='macro')
+
+     print(f'Train Accuracy: {train_accuracy:.4f}')
+     print(f'Test Accuracy: {test_accuracy:.4f}')
+     print(f'Test F1-score: {test_f1:.4f}')
+
+     results = {
+         'model': svm_model,
+         'train_accuracy': train_accuracy,
+         'test_accuracy': test_accuracy,
+         'test_f1': test_f1
+     }
+
+     return results
+
+
+ def fit_pca_lda_transformer(data, labels, n_pca_components=250):
+     """
+     Two-stage dimensionality reduction: PCA then LDA
+
+     Args:
+         data: Training data (n_samples, n_features)
+         labels: Class labels (n_samples,)
+         n_pca_components: Number of PCA components (default 250)
+
+     Returns:
+         combined_params: Dictionary containing both PCA and LDA parameters
+         data_reduced: Transformed data
+     """
+     print(f"\n{'='*80}")
+     print("FITTING HYBRID PCA+LDA TRANSFORMER")
+     print("="*80)
+
+     # Stage 1: PCA
+     print("\nStage 1: PCA for noise reduction and variance preservation")
+     pca_params, data_pca_reduced = fit_pca_transformer(data, n_pca_components)
+
+     print(f" ✓ PCA reduced from {data.shape[1]} to {n_pca_components} dimensions")
+     print(f" ✓ PCA explained variance: {pca_params['cumulative_variance'][-1]:.4f}")
+
+     # Stage 2: LDA on PCA-reduced features
+     print("\nStage 2: LDA for class separability maximization")
+
+     n_classes = len(np.unique(labels))
+     max_lda_components = n_classes - 1
+
+     print(f" Number of classes: {n_classes}")
+     print(f" Maximum LDA components: {max_lda_components}")
+
+     # Fit LDA (no additional standardization needed, PCA output is already standardized)
+     lda_model = LinearDiscriminantAnalysis()
+     data_final = lda_model.fit_transform(data_pca_reduced, labels)
+
+     print(f" ✓ LDA reduced from {n_pca_components} to {data_final.shape[1]} dimensions")
+     print(f" ✓ Total compression: {data.shape[1]} → {n_pca_components} → {data_final.shape[1]}")
+
+     # Calculate LDA explained variance
+     lda_explained_variance = lda_model.explained_variance_ratio_
+     print(f" ✓ LDA explained variance: {np.sum(lda_explained_variance):.4f}")
+
+     # Combine parameters
+     combined_params = {
+         'pca_params': pca_params,
+         'lda_model': lda_model,
+         'n_pca_components': n_pca_components,
+         'n_lda_components': data_final.shape[1],
+         'n_classes': n_classes,
+         'original_feature_dim': data.shape[1],
+         'lda_explained_variance_ratio': lda_explained_variance
+     }
+
+     return combined_params, data_final
+
+
+ def apply_pca_lda_transform(data, combined_params):
+     """
+     Apply saved PCA+LDA transformation to new data
+
+     Args:
+         data: New data to transform (n_samples, n_features)
+         combined_params: Dictionary from fit_pca_lda_transformer
+
+     Returns:
+         Transformed data
+     """
+     # Stage 1: Apply PCA transformation
+     data_pca_reduced = apply_pca_transform(data, combined_params['pca_params'])
+
+     # Stage 2: Apply LDA transformation
+     data_final = combined_params['lda_model'].transform(data_pca_reduced)
+
+     return data_final
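
As a quick consistency check of the fit/apply pair defined above, the two-stage PCA → LDA reduction can be exercised on synthetic data. This is only a sketch with random features and labels, not part of the committed code:

import numpy as np
from utils.utils import fit_pca_lda_transformer, apply_pca_lda_transform

rng = np.random.default_rng(0)
X_train = rng.normal(size=(200, 500))    # 200 samples, 500 raw features
y_train = rng.integers(0, 5, size=200)   # 5 classes -> LDA keeps at most 4 dimensions
X_new = rng.normal(size=(10, 500))

params, X_train_reduced = fit_pca_lda_transformer(X_train, y_train, n_pca_components=50)
X_new_reduced = apply_pca_lda_transform(X_new, params)

print(X_train_reduced.shape)  # (200, 4)
print(X_new_reduced.shape)    # (10, 4)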