# NOTE(review): the lines "Spaces: / Sleeping / Sleeping" were a Hugging Face
# Spaces page-scrape artifact, not source code; removed.
| """ | |
| π Two-Spiral Neural Network Classifier β Streamlit App | |
| ======================================================== | |
| Interactive exploration of learning non-linear decision boundaries | |
| using shallow neural networks on the classic Two-Spiral problem. | |
| """ | |
| import numpy as np | |
| import matplotlib.pyplot as plt | |
| import matplotlib.colors as mcolors | |
| import streamlit as st | |
| import time, io | |
# --------------------------------------------------------------
# CONFIGURATION & PAGE SETUP
# --------------------------------------------------------------
st.set_page_config(
    page_title="π Two-Spiral NN Classifier",
    page_icon="π",
    layout="wide",
)

# Custom CSS for a polished dark-gradient UI.
# NOTE(review): unsafe_allow_html is required to inject a raw <style> block.
st.markdown("""
<style>
/* Main background */
.stApp {
    background: linear-gradient(135deg, #0f0c29 0%, #302b63 50%, #24243e 100%);
}
/* Sidebar */
section[data-testid="stSidebar"] {
    background: rgba(15, 12, 41, 0.92);
}
/* Card-like containers */
div[data-testid="stVerticalBlock"] > div {
    border-radius: 12px;
}
/* Headers */
h1, h2, h3 {
    color: #e0e0ff !important;
}
/* Metric labels */
[data-testid="stMetricLabel"] {
    color: #b0b0e0 !important;
}
[data-testid="stMetricValue"] {
    color: #ffffff !important;
}
</style>
""", unsafe_allow_html=True)
| # ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ | |
| # UTILITY FUNCTIONS | |
| # ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ | |
def generate_two_spirals(n_points=200, noise=0.5, n_turns=2, seed=42):
    """Build the classic interleaved two-spiral dataset.

    Returns ``(X, y)`` where ``X`` has shape ``(2 * n_points, 2)`` and
    ``y`` holds labels 0 (first arm) and 1 (second arm).
    """
    rand = np.random.RandomState(seed)
    angles = np.linspace(0, n_turns * 2 * np.pi, n_points)
    radii = angles  # radius grows linearly with the angle
    # First arm (class 0): noisy points along the spiral.
    # NB: the four randn draws happen in this exact order to stay
    # reproducible for a given seed.
    arm0 = np.column_stack([
        radii * np.cos(angles) + rand.randn(n_points) * noise,
        radii * np.sin(angles) + rand.randn(n_points) * noise,
    ])
    # Second arm (class 1): point-reflection of the first through the origin.
    arm1 = np.column_stack([
        -radii * np.cos(angles) + rand.randn(n_points) * noise,
        -radii * np.sin(angles) + rand.randn(n_points) * noise,
    ])
    X = np.vstack([arm0, arm1])
    y = np.hstack([np.zeros(n_points), np.ones(n_points)])
    return X, y
| class ShallowNN: | |
| """A simple NumPy-based shallow Neural Network (1-2 hidden layers).""" | |
| def __init__(self, input_size, hidden_size, output_size=1, | |
| activation="tanh", learning_rate=0.01): | |
| self.input_size = input_size | |
| self.hidden_size = hidden_size | |
| self.output_size = output_size | |
| self.activation = activation | |
| self.lr = learning_rate | |
| self._init_weights() | |
| # ββ weight initialisation ββββββββββββββββββββββββββββββββββ | |
| def _init_weights(self): | |
| scale = np.sqrt(2.0 / self.input_size) | |
| self.W1 = np.random.randn(self.input_size, self.hidden_size) * scale | |
| self.b1 = np.zeros((1, self.hidden_size)) | |
| scale2 = np.sqrt(2.0 / self.hidden_size) | |
| self.W2 = np.random.randn(self.hidden_size, self.output_size) * scale2 | |
| self.b2 = np.zeros((1, self.output_size)) | |
| # ββ activation helpers βββββββββββββββββββββββββββββββββββββ | |
| def _activate(self, z): | |
| if self.activation == "tanh": | |
| return np.tanh(z) | |
| elif self.activation == "relu": | |
| return np.maximum(0, z) | |
| elif self.activation == "sigmoid": | |
| return 1.0 / (1.0 + np.exp(-np.clip(z, -500, 500))) | |
| return np.tanh(z) | |
| def _activate_deriv(self, z): | |
| if self.activation == "tanh": | |
| t = np.tanh(z) | |
| return 1 - t ** 2 | |
| elif self.activation == "relu": | |
| return (z > 0).astype(float) | |
| elif self.activation == "sigmoid": | |
| s = 1.0 / (1.0 + np.exp(-np.clip(z, -500, 500))) | |
| return s * (1 - s) | |
| t = np.tanh(z) | |
| return 1 - t ** 2 | |
| def _sigmoid(z): | |
| return 1.0 / (1.0 + np.exp(-np.clip(z, -500, 500))) | |
| # ββ forward / backward βββββββββββββββββββββββββββββββββββββ | |
| def forward(self, X): | |
| self.z1 = X @ self.W1 + self.b1 | |
| self.a1 = self._activate(self.z1) | |
| self.z2 = self.a1 @ self.W2 + self.b2 | |
| self.a2 = self._sigmoid(self.z2) | |
| return self.a2 | |
| def _loss(self, y_true, y_pred): | |
| eps = 1e-8 | |
| y_pred = np.clip(y_pred, eps, 1 - eps) | |
| return -np.mean(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred)) | |
| def backward(self, X, y_true, y_pred): | |
| m = X.shape[0] | |
| dz2 = y_pred - y_true.reshape(-1, 1) | |
| dW2 = (self.a1.T @ dz2) / m | |
| db2 = np.sum(dz2, axis=0, keepdims=True) / m | |
| dz1 = (dz2 @ self.W2.T) * self._activate_deriv(self.z1) | |
| dW1 = (X.T @ dz1) / m | |
| db1 = np.sum(dz1, axis=0, keepdims=True) / m | |
| self.W2 -= self.lr * dW2 | |
| self.b2 -= self.lr * db2 | |
| self.W1 -= self.lr * dW1 | |
| self.b1 -= self.lr * db1 | |
| # ββ training loop ββββββββββββββββββββββββββββββββββββββββββ | |
| def train(self, X, y, epochs=1000, log_every=100): | |
| losses, accs = [], [] | |
| for ep in range(1, epochs + 1): | |
| y_pred = self.forward(X) | |
| loss = self._loss(y, y_pred) | |
| self.backward(X, y, y_pred) | |
| if ep % log_every == 0 or ep == 1: | |
| acc = self.accuracy(X, y) | |
| losses.append(loss) | |
| accs.append(acc) | |
| return losses, accs | |
| def predict(self, X): | |
| return (self.forward(X) >= 0.5).astype(int).flatten() | |
| def accuracy(self, X, y): | |
| return np.mean(self.predict(X) == y) * 100 | |
def plot_dataset(X, y, title="Two-Spiral Dataset", ax=None):
    """Scatter-plot the two spiral classes on ``ax`` (created if None)."""
    if ax is None:
        fig, ax = plt.subplots(figsize=(6, 6))
    # One (colour, label) pair per class, indexed by class id.
    styling = (('#E74C3C', 'Spiral 0'), ('#3498DB', 'Spiral 1'))
    for cls, (colour, label) in enumerate(styling):
        pts = X[y == cls]
        ax.scatter(pts[:, 0], pts[:, 1], c=colour, label=label,
                   alpha=0.8, s=20, edgecolors='white', linewidth=0.3)
    ax.set_title(title, fontsize=13, fontweight='bold', pad=10)
    ax.set_xlabel('$x_1$')
    ax.set_ylabel('$x_2$')
    ax.legend(fontsize=9)
    ax.set_aspect('equal')
    ax.grid(True, alpha=0.25)
    return ax
def plot_decision_boundary(nn, X, y, title="Decision Boundary", ax=None):
    """Shade the classifier's predicted regions, then overlay the data."""
    if ax is None:
        fig, ax = plt.subplots(figsize=(6, 6))
    step = 0.25  # grid resolution in data units
    x_lo, x_hi = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_lo, y_hi = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_lo, x_hi, step),
                         np.arange(y_lo, y_hi, step))
    # Classify every grid node, then reshape back to the mesh.
    preds = nn.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    bg_cmap = mcolors.LinearSegmentedColormap.from_list(
        "bg", ["#FADBD8", "#D6EAF8"], N=2)
    ax.contourf(xx, yy, preds, alpha=0.4, cmap=bg_cmap, levels=1)
    ax.contour(xx, yy, preds, colors='gray', linewidths=0.5, levels=1)
    plot_dataset(X, y, title=title, ax=ax)
    return ax
# --------------------------------------------------------------
# SIDEBAR - HYPER-PARAMETERS
# --------------------------------------------------------------
with st.sidebar:
    st.markdown("## βοΈ Hyper-parameters")
    st.markdown("---")
    # Dataset controls; st.slider args are (label, min, max, default, step).
    n_points = st.slider("Points per spiral", 50, 500, 200, 50)
    noise = st.slider("Noise Ο", 0.1, 1.5, 0.4, 0.1)
    n_turns = st.slider("Spiral turns", 1, 4, 2, 1)
    seed = st.number_input("Random seed", value=42, step=1)
    st.markdown("---")
    # Model / optimisation controls.
    hidden_size = st.slider("Hidden-layer neurons", 8, 256, 64, 8)
    activation = st.selectbox("Activation", ["tanh", "relu", "sigmoid"])
    learning_rate = st.select_slider("Learning rate",
        options=[0.001, 0.005, 0.01, 0.05, 0.1, 0.5], value=0.01)
    epochs = st.slider("Epochs", 500, 10000, 3000, 500)
    st.markdown("---")
    # Training only runs when this button is pressed (Streamlit reruns
    # the whole script on every interaction).
    run_btn = st.button("π Train network", use_container_width=True)
# --------------------------------------------------------------
# MAIN AREA
# --------------------------------------------------------------
st.markdown("# π Two-Spiral Neural Network Classifier")
st.markdown("""
> **Explore** how a *shallow neural network* learns highly non-linear decision
> boundaries on the classic **Two-Spiral Problem** introduced by
> Lang & Witbrock (1988). Adjust hyper-parameters in the sidebar and click
> **Train network** to watch the model learn.
""")
# Generate data (regenerated on every rerun; cheap enough not to cache).
X, y = generate_two_spirals(n_points, noise, n_turns, int(seed))
# Normalise features to zero mean / unit variance; the model trains and
# predicts in this normalised space (X_norm), plots of raw data use X.
X_mean = X.mean(axis=0)
X_std = X.std(axis=0)
X_norm = (X - X_mean) / X_std
tab_data, tab_train, tab_analysis = st.tabs(
    ["π Dataset", "ποΈ Training", "π¬ Activation Analysis"])
# -- TAB 1 - Dataset -------------------------------------------
with tab_data:
    col1, col2 = st.columns(2)
    with col1:
        # Dark-themed scatter plot of the raw (un-normalised) data.
        fig1, ax1 = plt.subplots(figsize=(6, 6), facecolor='#1a1a2e')
        ax1.set_facecolor('#1a1a2e')
        ax1.tick_params(colors='white'); ax1.xaxis.label.set_color('white')
        ax1.yaxis.label.set_color('white'); ax1.title.set_color('white')
        for spine in ax1.spines.values(): spine.set_color('#444')
        plot_dataset(X, y, "Two-Spiral Dataset", ax=ax1)
        # Re-style the legend created inside plot_dataset for dark mode.
        ax1.legend(facecolor='#2a2a4e', edgecolor='#444', labelcolor='white')
        st.pyplot(fig1)
    with col2:
        # Summary statistics of the generated dataset.
        st.markdown("### π Dataset statistics")
        st.metric("Total samples", f"{len(y)}")
        st.metric("Class 0", f"{int((y==0).sum())}")
        st.metric("Class 1", f"{int((y==1).sum())}")
        st.metric("Feature range (xβ)",
                  f"[{X[:,0].min():.2f}, {X[:,0].max():.2f}]")
        st.metric("Feature range (xβ)",
                  f"[{X[:,1].min():.2f}, {X[:,1].max():.2f}]")
        st.info("The two spirals are **completely interleaved** β "
                "no linear boundary can separate them.")
# -- TAB 2 - Training ------------------------------------------
with tab_train:
    if run_btn:
        # Seed NumPy so weight initialisation is reproducible per run.
        np.random.seed(int(seed))
        nn = ShallowNN(2, hidden_size, activation=activation,
                       learning_rate=learning_rate)
        # Log roughly 50 points over the run regardless of epoch count.
        log_every = max(1, epochs // 50)
        progress_bar = st.progress(0, text="Training β¦")
        metric_col1, metric_col2 = st.columns(2)
        loss_placeholder = metric_col1.empty()
        acc_placeholder = metric_col2.empty()
        losses, accs = [], []
        # Training loop is inlined (rather than calling nn.train) so the
        # progress bar and live metrics can refresh at each logging step.
        for ep in range(1, epochs + 1):
            y_pred = nn.forward(X_norm)
            loss = nn._loss(y, y_pred)
            nn.backward(X_norm, y, y_pred)
            if ep % log_every == 0 or ep == 1:
                acc = nn.accuracy(X_norm, y)
                losses.append(loss)
                accs.append(acc)
                progress_bar.progress(ep / epochs,
                    text=f"Epoch {ep}/{epochs} β Loss {loss:.4f} β Acc {acc:.1f}%")
                loss_placeholder.metric("Loss", f"{loss:.4f}")
                acc_placeholder.metric("Accuracy", f"{acc:.1f}%")
        progress_bar.empty()
        st.success(f"β Training finished β **Final accuracy: {accs[-1]:.1f}%**")
        # Charts: loss curve, accuracy curve, learned decision boundary.
        # NOTE(review): figures are never plt.close()d; consider closing
        # after st.pyplot to avoid matplotlib figure accumulation.
        col_loss, col_acc, col_boundary = st.columns(3)
        with col_loss:
            fig_l, ax_l = plt.subplots(figsize=(5, 4), facecolor='#1a1a2e')
            ax_l.set_facecolor('#1a1a2e')
            ax_l.plot(losses, color='#E74C3C', linewidth=1.5)
            ax_l.set_title("Loss", color='white', fontweight='bold')
            ax_l.set_xlabel("log step", color='white')
            ax_l.tick_params(colors='white')
            for sp in ax_l.spines.values(): sp.set_color('#444')
            st.pyplot(fig_l)
        with col_acc:
            fig_a, ax_a = plt.subplots(figsize=(5, 4), facecolor='#1a1a2e')
            ax_a.set_facecolor('#1a1a2e')
            ax_a.plot(accs, color='#2ECC71', linewidth=1.5)
            ax_a.set_title("Accuracy (%)", color='white', fontweight='bold')
            ax_a.set_xlabel("log step", color='white')
            ax_a.tick_params(colors='white')
            for sp in ax_a.spines.values(): sp.set_color('#444')
            st.pyplot(fig_a)
        with col_boundary:
            fig_b, ax_b = plt.subplots(figsize=(5, 4), facecolor='#1a1a2e')
            ax_b.set_facecolor('#1a1a2e')
            ax_b.tick_params(colors='white'); ax_b.xaxis.label.set_color('white')
            ax_b.yaxis.label.set_color('white'); ax_b.title.set_color('white')
            for sp in ax_b.spines.values(): sp.set_color('#444')
            # Boundary is drawn in normalised coordinates (the model's
            # input space), matching what the network was trained on.
            plot_decision_boundary(nn, X_norm, y, "Decision Boundary", ax=ax_b)
            ax_b.legend(facecolor='#2a2a4e', edgecolor='#444', labelcolor='white')
            st.pyplot(fig_b)
    else:
        st.info("π Click **Train network** in the sidebar to start.")
# -- TAB 3 - Activation analysis -------------------------------
with tab_analysis:
    st.markdown("### π¬ Comparing activation functions")
    st.markdown("Train the same architecture with **tanh**, **relu**, and "
                "**sigmoid** to see which one separates the spirals best.")
    if st.button("βΆοΈ Run comparison", use_container_width=True):
        acts = ["tanh", "relu", "sigmoid"]
        results = {}
        for act in acts:
            # Reset the seed before each model so all three start from
            # identically-drawn initial weights (fair comparison).
            np.random.seed(int(seed))
            _nn = ShallowNN(2, hidden_size, activation=act,
                            learning_rate=learning_rate)
            _losses, _accs = _nn.train(X_norm, y, epochs=epochs,
                                       log_every=max(1, epochs // 50))
            results[act] = {"nn": _nn, "losses": _losses, "accs": _accs}
        # One decision-boundary plot per activation, side by side.
        cols = st.columns(3)
        for idx, act in enumerate(acts):
            with cols[idx]:
                fig_c, ax_c = plt.subplots(figsize=(5, 5), facecolor='#1a1a2e')
                ax_c.set_facecolor('#1a1a2e')
                ax_c.tick_params(colors='white')
                ax_c.xaxis.label.set_color('white')
                ax_c.yaxis.label.set_color('white')
                ax_c.title.set_color('white')
                for sp in ax_c.spines.values(): sp.set_color('#444')
                # Title carries the final training accuracy for that model.
                plot_decision_boundary(results[act]["nn"], X_norm, y,
                    f"{act} β {results[act]['accs'][-1]:.1f}%", ax=ax_c)
                ax_c.legend(facecolor='#2a2a4e', edgecolor='#444',
                            labelcolor='white')
                st.pyplot(fig_c)
    else:
        st.info("Click **Run comparison** to start the analysis.")
# --------------------------------------------------------------
# Footer
st.markdown("---")
st.caption("Built with β€οΈ using Streamlit Β· Two-Spiral classification experiment")