The process of building and training a neural network involves defining the network structure, initializing parameters, computing predictions via forward propagation, measuring error with a loss function, computing gradients through backpropagation, and updating parameters using gradient descent. By integrating these steps, we will build and train a simple neural network classifier from scratch using Python and NumPy.

Our goal is to train a network that can classify data points belonging to one of two classes based on two input features. This is a classic binary classification task, perfect for illustrating the core training loop.

## Setting the Stage: Imports and Data

First, we need our primary tool for numerical computation, NumPy, and a library for visualization, such as Plotly, to see our data and results.

```python
import numpy as np
import json  # For embedding Plotly JSON

# Set random seed for reproducibility
np.random.seed(42)
```

Next, let's generate some simple synthetic data. We'll create two distinct clusters of points in a 2D plane, representing our two classes (labeled 0 and 1).

```python
def generate_data(n_samples=100, noise=0.1):
    """Generates two distinct clusters of data points."""
    # Class 0: centered around (1, 1)
    X0 = np.random.randn(n_samples // 2, 2) * noise + np.array([1, 1])
    Y0 = np.zeros((n_samples // 2, 1))

    # Class 1: centered around (-1, -1)
    X1 = np.random.randn(n_samples // 2, 2) * noise + np.array([-1, -1])
    Y1 = np.ones((n_samples // 2, 1))

    X = np.vstack((X0, X1))
    Y = np.vstack((Y0, Y1))

    # Shuffle the data
    permutation = np.random.permutation(n_samples)
    X = X[permutation]
    Y = Y[permutation]

    return X, Y

# Generate data
X_train, Y_train = generate_data(n_samples=200, noise=0.2)

# Let's visualize the data
trace0 = {
    "type": "scatter",
    "mode": "markers",
    "x": X_train[Y_train.flatten() == 0, 0].tolist(),
    "y": X_train[Y_train.flatten() == 0, 1].tolist(),
    "name": "Class 0",
    "marker": {"color": "#fa5252", "size": 8}  # red
}
trace1 = {
    "type": "scatter",
    "mode": "markers",
    "x": X_train[Y_train.flatten() == 1, 0].tolist(),
    "y": X_train[Y_train.flatten() == 1, 1].tolist(),
    "name": "Class 1",
    "marker": {"color": "#4c6ef5", "size": 8}  # blue
}
layout = {
    "title": {"text": "Synthetic Classification Data"},
    "xaxis": {"title": "Feature 1"},
    "yaxis": {"title": "Feature 2"},
    "width": 600,
    "height": 400,
    "showlegend": True,
    "plot_bgcolor": "#e9ecef"
}
```

*Figure: "Synthetic Classification Data" scatter plot (Feature 1 vs. Feature 2), with Class 0 points in red and Class 1 points in blue, each forming its own cluster.*
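If Plotly itself is available, the trace and layout dicts above can be rendered directly rather than embedded as JSON. A minimal sketch, assuming the `plotly` package is installed:

```python
# Optional: render the scatter plot interactively (assumes plotly is installed).
import plotly.graph_objects as go

fig = go.Figure(data=[trace0, trace1], layout=layout)
fig.show()
```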
The synthetic dataset contains two classes, visually separated in a 2D feature space. Our network should learn to draw a boundary between them.

## Network Architecture

We'll define a simple feedforward network:

- **Input layer:** 2 neurons (matching our 2 features).
- **Hidden layer:** 4 neurons with the ReLU activation function.
- **Output layer:** 1 neuron with the sigmoid activation function (suitable for binary classification, outputting a probability between 0 and 1).

Let's define the layer sizes:

```python
n_input = X_train.shape[1]  # Number of features = 2
n_hidden = 4
n_output = 1
```

## Parameter Initialization

We need weights ($W$) and biases ($b$) for the connections between the input and hidden layer ($W_1, b_1$) and between the hidden and output layer ($W_2, b_2$). We'll initialize the weights with small random numbers (scaled by 0.01 to prevent overly large initial values) and the biases to zero.

```python
def initialize_parameters(n_in, n_hid, n_out):
    """Initializes weights and biases."""
    W1 = np.random.randn(n_in, n_hid) * 0.01
    b1 = np.zeros((1, n_hid))
    W2 = np.random.randn(n_hid, n_out) * 0.01
    b2 = np.zeros((1, n_out))

    parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
    return parameters

parameters = initialize_parameters(n_input, n_hidden, n_output)
print("Initial W1 shape:", parameters["W1"].shape)
print("Initial b1 shape:", parameters["b1"].shape)
print("Initial W2 shape:", parameters["W2"].shape)
print("Initial b2 shape:", parameters["b2"].shape)
```
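The fixed 0.01 scaling is fine for this shallow network, but deeper ReLU networks usually use a variance-preserving scheme instead. As an alternative (not used in the rest of this section), here is a sketch of He initialization, which scales each weight matrix by $\sqrt{2 / n_{\text{in}}}$; the function name `initialize_parameters_he` is our own:

```python
# Sketch: He initialization, commonly paired with ReLU hidden layers.
def initialize_parameters_he(n_in, n_hid, n_out):
    W1 = np.random.randn(n_in, n_hid) * np.sqrt(2.0 / n_in)
    b1 = np.zeros((1, n_hid))
    W2 = np.random.randn(n_hid, n_out) * np.sqrt(2.0 / n_hid)
    b2 = np.zeros((1, n_out))
    return {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
```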
## Building Blocks: Activation, Forward/Backward Pass, Loss

Now, let's implement the core functions we discussed in previous sections.

**Activation Functions:**

```python
def sigmoid(Z):
    """Sigmoid activation function."""
    A = 1 / (1 + np.exp(-Z))
    return A

def relu(Z):
    """ReLU activation function."""
    A = np.maximum(0, Z)
    return A
```
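One practical caveat: for large negative `Z`, `np.exp(-Z)` can overflow and emit a runtime warning (the output still saturates correctly). If that matters for your inputs, a numerically stable variant evaluates an algebraically equivalent form on each sign. This is a sketch assuming `Z` is a NumPy array; `sigmoid_stable` is our own name:

```python
def sigmoid_stable(Z):
    """Sigmoid that avoids overflow in np.exp for large |Z| (sketch)."""
    A = np.empty_like(Z, dtype=float)
    pos = Z >= 0
    A[pos] = 1 / (1 + np.exp(-Z[pos]))  # exp(-Z) is safe when Z >= 0
    expZ = np.exp(Z[~pos])              # exp(Z) is safe when Z < 0
    A[~pos] = expZ / (1 + expZ)         # equal to 1 / (1 + exp(-Z))
    return A
```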
**Forward Propagation:** This function takes the input data $X$ and the network parameters, performs the linear transformations, and applies the activation functions layer by layer, returning the final prediction $A_2$ and the intermediate values (a cache) needed for backpropagation.

```python
def forward_propagation(X, parameters):
    """Performs the forward pass."""
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]

    # Layer 1 (Hidden)
    Z1 = np.dot(X, W1) + b1
    A1 = relu(Z1)  # Using ReLU for hidden layer

    # Layer 2 (Output)
    Z2 = np.dot(A1, W2) + b2
    A2 = sigmoid(Z2)  # Using Sigmoid for output layer (binary classification)

    cache = {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2}
    return A2, cache
```
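Before training, a quick sanity check confirms that the shapes line up and that the untrained network outputs probabilities near 0.5 (with weights scaled by 0.01, the pre-activations are close to zero). The exact values depend on the random initialization:

```python
# Sanity check on the untrained network (illustrative).
A2_check, _ = forward_propagation(X_train, parameters)
print(A2_check.shape)        # (200, 1): one probability per example
print(A2_check[:3].ravel())  # values close to 0.5 before training
```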
**Loss Function:** We'll use Binary Cross-Entropy loss, suitable for binary classification problems where the output is a probability:

$$
L = - \frac{1}{m} \sum_{i=1}^{m} \left[ y^{(i)} \log(a^{(i)}) + (1 - y^{(i)}) \log(1 - a^{(i)}) \right]
$$

```python
def compute_loss(A2, Y):
    """Computes the Binary Cross-Entropy loss."""
    m = Y.shape[0]  # Number of examples
    # Add a small epsilon to prevent log(0)
    epsilon = 1e-8
    loss = - (1 / m) * np.sum(Y * np.log(A2 + epsilon) + (1 - Y) * np.log(1 - A2 + epsilon))
    loss = np.squeeze(loss)  # Ensure loss is a scalar
    return loss
```
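To make the formula concrete, consider three hand-picked predictions: probabilities 0.9, 0.2, and 0.7 against labels 1, 0, and 1. The per-example terms are $-\log(0.9) \approx 0.105$, $-\log(0.8) \approx 0.223$, and $-\log(0.7) \approx 0.357$, so the mean loss is about 0.228 (values here are illustrative, not from the training set):

```python
# Hand-checkable example: mean of -log(0.9), -log(0.8), -log(0.7) ≈ 0.2284
A2_demo = np.array([[0.9], [0.2], [0.7]])
Y_demo = np.array([[1.0], [0.0], [1.0]])
print(compute_loss(A2_demo, Y_demo))  # ≈ 0.2284
```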
**Backward Propagation:** This is where we calculate the gradients ($\frac{\partial L}{\partial W_1}, \frac{\partial L}{\partial b_1}, \frac{\partial L}{\partial W_2}, \frac{\partial L}{\partial b_2}$) using the chain rule, working backward from the output layer.

```python
def backward_propagation(parameters, cache, X, Y):
    """Performs the backward pass to calculate gradients."""
    m = X.shape[0]
    W1 = parameters["W1"]
    W2 = parameters["W2"]
    A1 = cache["A1"]
    A2 = cache["A2"]
    Z1 = cache["Z1"]

    # Output Layer Gradients
    dZ2 = A2 - Y  # Derivative of BCE loss w.r.t. Z2
    dW2 = (1 / m) * np.dot(A1.T, dZ2)
    db2 = (1 / m) * np.sum(dZ2, axis=0, keepdims=True)

    # Hidden Layer Gradients
    dA1 = np.dot(dZ2, W2.T)
    # Gradient of ReLU: 1 if Z1 > 0, else 0
    dZ1 = dA1 * (Z1 > 0)
    dW1 = (1 / m) * np.dot(X.T, dZ1)
    db1 = (1 / m) * np.sum(dZ1, axis=0, keepdims=True)

    gradients = {"dW1": dW1, "db1": db1, "dW2": dW2, "db2": db2}
    return gradients
```
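Backpropagation is easy to get subtly wrong, so it is worth comparing one or two analytic gradient entries against a central-difference estimate before training. The sketch below assumes the functions defined above; the helper name `numerical_grad` is our own:

```python
def numerical_grad(parameters, X, Y, key, i, j, eps=1e-6):
    """Central-difference estimate of dL/d(parameters[key][i, j]) (sketch)."""
    p_plus = {k: v.copy() for k, v in parameters.items()}
    p_minus = {k: v.copy() for k, v in parameters.items()}
    p_plus[key][i, j] += eps
    p_minus[key][i, j] -= eps
    loss_plus = compute_loss(forward_propagation(X, p_plus)[0], Y)
    loss_minus = compute_loss(forward_propagation(X, p_minus)[0], Y)
    return (loss_plus - loss_minus) / (2 * eps)

# Compare analytic vs. numerical gradient for a single weight:
A2_gc, cache_gc = forward_propagation(X_train, parameters)
grads_gc = backward_propagation(parameters, cache_gc, X_train, Y_train)
print(grads_gc["dW2"][0, 0], numerical_grad(parameters, X_train, Y_train, "W2", 0, 0))
# The two numbers should agree to several decimal places.
```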
**Parameter Update:** Apply the gradient descent rule: $W = W - \alpha \frac{\partial L}{\partial W}$, $b = b - \alpha \frac{\partial L}{\partial b}$.

```python
def update_parameters(parameters, gradients, learning_rate):
    """Updates parameters using gradient descent."""
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]

    dW1 = gradients["dW1"]
    db1 = gradients["db1"]
    dW2 = gradients["dW2"]
    db2 = gradients["db2"]

    # Update rules
    W1 = W1 - learning_rate * dW1
    b1 = b1 - learning_rate * db1
    W2 = W2 - learning_rate * dW2
    b2 = b2 - learning_rate * db2

    parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
    return parameters
```

## The Training Loop

Now we assemble everything into the main training loop. We'll iterate many times (epochs) over the entire dataset, performing forward propagation, computing the loss, performing backpropagation, and updating the parameters in each iteration.

```python
def train_network(X, Y, n_hidden, num_epochs=10000, learning_rate=0.1, print_loss=True):
    """Builds and trains the neural network."""
    n_input = X.shape[1]
    n_output = Y.shape[1]
    losses = []

    # 1. Initialize parameters
    parameters = initialize_parameters(n_input, n_hidden, n_output)

    # 2. Training loop (gradient descent)
    for i in range(num_epochs):
        # 3. Forward propagation
        A2, cache = forward_propagation(X, parameters)

        # 4. Compute loss
        loss = compute_loss(A2, Y)
        losses.append(loss)

        # 5. Backward propagation
        gradients = backward_propagation(parameters, cache, X, Y)

        # 6. Update parameters
        parameters = update_parameters(parameters, gradients, learning_rate)

        # Print the loss every 1000 epochs
        if print_loss and i % 1000 == 0:
            print(f"Loss after epoch {i}: {loss:.4f}")

    if print_loss:
        print(f"Final loss after {num_epochs} epochs: {loss:.4f}")

    return parameters, losses

# --- Train the model ---
trained_parameters, training_losses = train_network(
    X_train, Y_train, n_hidden, num_epochs=20000, learning_rate=0.5
)
```

## Monitoring Training Progress

A standard way to monitor training is to plot the loss over epochs. We expect the loss to decrease as the network learns.

```python
# Plotting the loss curve
epochs = list(range(len(training_losses)))
loss_trace = {
    "type": "scatter",
    "mode": "lines",
    "x": epochs,
    "y": training_losses,
    "name": "Training Loss",
    "line": {"color": "#7048e8"}  # violet
}
loss_layout = {
    "title": {"text": "Training Loss Over Epochs"},
    "xaxis": {"title": "Epoch"},
    "yaxis": {"title": "Binary Cross-Entropy Loss"},
    "width": 600,
    "height": 400,
    "showlegend": False,
    "yaxis_range": [0, max(training_losses) * 1.1]  # Adjust y-axis slightly
}
```

*Figure: "Training Loss Over Epochs" line chart: the binary cross-entropy loss falls from roughly 0.69 at epoch 0 to about 0.013 by epoch 19,000.*

The training loss decreases significantly over epochs, indicating that the network is learning to minimize the prediction error.

## Evaluating the Model

Let's evaluate performance by calculating the accuracy on the training set. We'll make predictions using the final trained parameters and compare them to the true labels. A threshold of 0.5 is commonly used for binary classification with a sigmoid output.

```python
def predict(parameters, X):
    """Makes predictions using the trained parameters."""
    A2, _ = forward_propagation(X, parameters)
    predictions = (A2 > 0.5).astype(int)  # Threshold at 0.5
    return predictions

# Make predictions on the training set
predictions = predict(trained_parameters, X_train)

# Calculate accuracy
accuracy = np.mean(predictions == Y_train) * 100
print(f"Training Accuracy: {accuracy:.2f}%")
```

You should see a high accuracy (likely close to 100% for this simple dataset), confirming that the network learned to classify the data points correctly.
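Training accuracy alone can be flattering. Since `generate_data` can draw fresh samples from the same distribution, a quick held-out check costs one extra call. A minimal sketch:

```python
# Sketch: evaluate on fresh data from the same distribution.
X_test, Y_test = generate_data(n_samples=100, noise=0.2)
test_predictions = predict(trained_parameters, X_test)
test_accuracy = np.mean(test_predictions == Y_test) * 100
print(f"Test Accuracy: {test_accuracy:.2f}%")
```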
"name": "Class 0", "marker": {"color": "#fa5252", "size": 8}}, {"type": "scatter", "mode": "markers", "x": [-1.0, -1.1, -0.9, -0.8, -1.2, -1.0, -1.1, -0.9, -1.0, -0.8, -1.2], "y": [-1.0, -0.9, -1.1, -1.0, -1.0, -1.1, -0.8, -1.2, -0.9, -1.1, -0.9], "name": "Class 1", "marker": {"color": "#4c6ef5", "size": 8}}]}