Building Your Foundation: From Python Basics to Neural Networks
Prerequisites: Lectures 1-3 completed | Basic computer literacy
By the end of this lab, you will be able to:
- Create and manipulate matrices with NumPy as the building blocks of neural network layers
- Approximate derivatives numerically with finite differences and compare them to analytical results
- Implement a single sigmoid neuron and a small two-layer neural network with forward and backward passes
- Train the network on the XOR problem and visualize its training loss and predictions
Primary Workspace: All your coding will be done in this interactive Google Colab notebook. The notebook contains detailed instructions, code templates, and exercises.
🚀 Open Lab 1 Google Colab Notebook
# Example: Creating and manipulating matrices
import numpy as np
import matplotlib.pyplot as plt
# Create matrices representing neural network weights
weights = np.random.randn(3, 2) # 3 inputs, 2 outputs
inputs = np.array([[1, 2, 3], [4, 5, 6]]) # 2 samples
# Matrix multiplication (core of neural networks)
output = np.dot(inputs, weights)
print(f"Output shape: {output.shape}")
print(f"Output values:\n{output}")
# Example: Computing derivatives numerically
def compute_derivative(func, x, h=1e-5):
    """Compute derivative using finite differences"""
    return (func(x + h) - func(x - h)) / (2 * h)

# Define a simple function
def quadratic(x):
    return x**2 + 2*x + 1
# Compute derivative at x = 3
x_point = 3
derivative = compute_derivative(quadratic, x_point)
analytical = 2*x_point + 2 # We know d/dx(x²+2x+1) = 2x+2
print(f"Numerical derivative: {derivative:.6f}")
print(f"Analytical derivative: {analytical}")
print(f"Difference: {abs(derivative - analytical):.8f}")
# Example: Single neuron implementation
class Neuron:
    def __init__(self, num_inputs):
        # Initialize weights and bias randomly
        self.weights = np.random.randn(num_inputs)
        self.bias = np.random.randn()

    def sigmoid(self, x):
        """Sigmoid activation function"""
        return 1 / (1 + np.exp(-np.clip(x, -500, 500)))

    def forward(self, inputs):
        """Forward pass through the neuron"""
        # Weighted sum + bias
        weighted_sum = np.dot(inputs, self.weights) + self.bias
        # Apply activation function
        output = self.sigmoid(weighted_sum)
        return output, weighted_sum
# Create and test a neuron
neuron = Neuron(3) # 3 inputs
test_inputs = np.array([0.5, 0.3, 0.8])
output, weighted_sum = neuron.forward(test_inputs)
print(f"Inputs: {test_inputs}")
print(f"Weights: {neuron.weights}")
print(f"Bias: {neuron.bias:.3f}")
print(f"Weighted sum: {weighted_sum:.3f}")
print(f"Final output: {output:.3f}")
# Example: Simple neural network structure
class NeuralNetwork:
    def __init__(self, input_size, hidden_size, output_size):
        # Initialize weights with small random values, biases with zeros
        self.W1 = np.random.randn(input_size, hidden_size) * 0.1
        self.b1 = np.zeros((1, hidden_size))
        self.W2 = np.random.randn(hidden_size, output_size) * 0.1
        self.b2 = np.zeros((1, output_size))
        # Store intermediate values for backpropagation
        self.cache = {}

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-np.clip(x, -500, 500)))

    def sigmoid_derivative(self, x):
        # Note: x is the sigmoid *output* a = sigmoid(z), so a * (1 - a) is d(sigmoid)/dz
        return x * (1 - x)

    def forward(self, X):
        """Forward propagation"""
        # Hidden layer
        self.cache['z1'] = np.dot(X, self.W1) + self.b1
        self.cache['a1'] = self.sigmoid(self.cache['z1'])
        # Output layer
        self.cache['z2'] = np.dot(self.cache['a1'], self.W2) + self.b2
        self.cache['a2'] = self.sigmoid(self.cache['z2'])
        return self.cache['a2']

    def backward(self, X, y, learning_rate=0.1):
        """Backpropagation"""
        m = X.shape[0]  # Number of samples
        # Output layer gradients
        # (a2 - y) is the standard error signal for a sigmoid output with cross-entropy loss;
        # the MSE printed during training is used only to monitor progress
        dz2 = self.cache['a2'] - y
        dW2 = (1/m) * np.dot(self.cache['a1'].T, dz2)
        db2 = (1/m) * np.sum(dz2, axis=0, keepdims=True)
        # Hidden layer gradients
        da1 = np.dot(dz2, self.W2.T)
        dz1 = da1 * self.sigmoid_derivative(self.cache['a1'])
        dW1 = (1/m) * np.dot(X.T, dz1)
        db1 = (1/m) * np.sum(dz1, axis=0, keepdims=True)
        # Update weights and biases (gradient descent step)
        self.W2 -= learning_rate * dW2
        self.b2 -= learning_rate * db2
        self.W1 -= learning_rate * dW1
        self.b1 -= learning_rate * db1

    def train(self, X, y, epochs=1000, learning_rate=0.1):
        """Training loop"""
        losses = []
        for epoch in range(epochs):
            # Forward pass
            output = self.forward(X)
            # Calculate loss (Mean Squared Error)
            loss = np.mean((output - y)**2)
            losses.append(loss)
            # Backward pass
            self.backward(X, y, learning_rate)
            # Print progress
            if epoch % 100 == 0:
                print(f"Epoch {epoch}, Loss: {loss:.6f}")
        return losses
# Example usage:
# Create XOR dataset
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [0]])
# Create and train network
nn = NeuralNetwork(2, 4, 1) # 2 inputs, 4 hidden, 1 output
losses = nn.train(X, y, epochs=5000, learning_rate=1.0)
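After training, it is worth checking whether the network has actually learned XOR. A quick sanity check (results depend on the random initialization; if the loss plateaus near 0.25, re-run the training cell):

# Inspect the trained network's predictions on the four XOR inputs
final_predictions = nn.forward(X)
print("Raw outputs: ", np.round(final_predictions.flatten(), 3))
print("Thresholded: ", (final_predictions.flatten() > 0.5).astype(int))  # should match [0, 1, 1, 0]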
# Example: Visualization code
def plot_training_progress(losses):
    """Plot training loss over time"""
    plt.figure(figsize=(10, 6))
    plt.plot(losses)
    plt.title('Neural Network Training Progress')
    plt.xlabel('Epoch')
    plt.ylabel('Loss (Mean Squared Error)')
    plt.grid(True, alpha=0.3)
    plt.show()

def plot_predictions(nn, X, y):
    """Plot network predictions vs actual"""
    predictions = nn.forward(X)
    plt.figure(figsize=(12, 4))
    # Plot 1: Actual vs Predicted
    plt.subplot(1, 2, 1)
    plt.scatter(range(len(y)), y.flatten(), label='Actual', alpha=0.7)
    plt.scatter(range(len(predictions)), predictions.flatten(), label='Predicted', alpha=0.7)
    plt.title('Actual vs Predicted Values')
    plt.xlabel('Sample')
    plt.ylabel('Output')
    plt.legend()
    plt.grid(True, alpha=0.3)
    # Plot 2: Error analysis
    plt.subplot(1, 2, 2)
    errors = np.abs(y.flatten() - predictions.flatten())
    plt.bar(range(len(errors)), errors)
    plt.title('Prediction Errors')
    plt.xlabel('Sample')
    plt.ylabel('Absolute Error')
    plt.grid(True, alpha=0.3)
    plt.tight_layout()
    plt.show()
# Print results
predictions = nn.forward(X)  # recompute predictions on the training data
print("Input -> Expected | Predicted | Error")
print("-" * 40)
for input_val, expected, predicted in zip(X, y.flatten(), predictions.flatten()):
    error = abs(expected - predicted)
    print(f"{input_val} -> {expected:.1f} | {predicted:.3f} | {error:.3f}")
| Component | Excellent (90-100%) | Good (75-89%) | Needs Improvement (60-74%) | Points |
|---|---|---|---|---|
| Code Implementation | All exercises completed correctly, clean code, proper comments | Most exercises correct, minor issues, adequate documentation | Some exercises incorrect, poor documentation | 40 pts |
| Neural Network | Network trains successfully, good performance, proper implementation | Network works with minor issues, acceptable performance | Network has significant issues or poor performance | 30 pts |
| Visualizations | Clear, informative plots with proper labels and analysis | Good plots with minor labeling issues | Basic plots, missing labels or poor presentation | 15 pts |
| Analysis & Reflection | Insightful analysis, demonstrates deep understanding | Good analysis, shows understanding of key concepts | Basic analysis, limited understanding demonstrated | 15 pts |
Troubleshooting common issues:
- Colab disconnects or the runtime restarts. Solution: Runtime → Reconnect. Save your work frequently!
- Shape mismatch errors in matrix operations. Solution: Check array shapes using .shape. Use np.reshape() if needed (see the quick sketch below).
- The network is not learning (loss stays flat). Solution: Try different learning rates (0.01, 0.1, 1.0). Check your gradient calculations.
- Overflow warnings from np.exp(). Solution: Use np.clip() to prevent extreme values in exponential functions.
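As an illustration of the shape-checking advice above (the toy arrays here are not part of the lab template):

# Toy example: diagnose a shape mismatch before calling np.dot
a = np.random.randn(4, 2)
b = np.random.randn(4, 1)
print(a.shape, b.shape)        # (4, 2) (4, 1)
# np.dot(b, a) would raise an error: inner dimensions 1 and 4 do not match
print(np.dot(b.T, a).shape)    # (1, 2): transposing b aligns the inner dimensions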
Mathematical Foundations in Code
Advanced optimization techniques, gradient descent variants, and deeper mathematical foundations, implemented in Python.
Based on Lectures 4-6 | Duration: 2 hours | Focus: Optimization & Advanced Math
Created by Dr. Daya Shankar
Dean, Woxsen University | Founder, VaidyaAI
🌐 Personal Website | 🏥 VaidyaAI | 🎓 Woxsen University
Need help? Email: dayashankar.ai@gmail.com | Office Hours: Mon-Fri 2-4 PM