🇮🇳 Indian Context
Gradient descent on real estate pricing, stock market optimization, and SIP returns!
Foundation
import numpy as np
print("=" * 55)
print("DERIVATIVES IN ML")
print("=" * 55)
# Numerical derivative (central-difference approximation)
def numerical_derivative(f, x, h=1e-7):
    return (f(x + h) - f(x - h)) / (2 * h)
# Power Rule: d/dx[x³] = 3x²
f = lambda x: x**3
x = 2.0
analytical = 3 * x**2
numerical = numerical_derivative(f, x)
print(f"\nf(x) = x³ at x = {x}")
print(f" Analytical: f'(x) = 3x² = {analytical}")
print(f" Numerical: f'(x) ≈ {numerical:.6f}")
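# Added sketch (not in the original): how the step size h affects the
# central-difference estimate -- accuracy improves as h shrinks, until
# floating-point cancellation starts to dominate.
for h in [1e-2, 1e-5, 1e-7, 1e-10]:
    approx = (f(x + h) - f(x - h)) / (2 * h)
    print(f" h={h:.0e}: f'(2) ≈ {approx:.8f} (error {abs(approx - analytical):.2e})")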
# Sigmoid Derivative
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
def sigmoid_derivative(x):
    s = sigmoid(x)
    return s * (1 - s)
print("\n" + "=" * 55)
print("SIGMOID DERIVATIVE: σ'(x) = σ(x)(1 - σ(x))")
print("=" * 55)
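# Quick cross-check (added, not in the original): the closed-form derivative
# should agree with the numerical_derivative helper defined above.
x0 = 0.7
print(f" check at x={x0}: analytical σ'={sigmoid_derivative(x0):.6f}, numerical ≈ {numerical_derivative(sigmoid, x0):.6f}")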
for x in [-3, -1, 0, 1, 3]:
    s = sigmoid(x)
    sd = sigmoid_derivative(x)
    print(f" x={x:2}: σ(x)={s:.4f}, σ'(x)={sd:.4f}")
print("\n💡 Notice: σ'(0) = 0.25 is the maximum!")
print(" For |x| > 3, the gradient nearly vanishes → the 'vanishing gradient problem'")
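# Added sketch (assumption, not in the original): stacking sigmoid layers
# multiplies their derivatives during backprop, so even in the best case
# (every derivative at its maximum of 0.25, weight terms ignored) the
# gradient reaching early layers shrinks geometrically.
grad = 1.0
for depth in range(1, 6):
    grad *= sigmoid_derivative(0.0)
    print(f" after {depth} sigmoid layer(s): gradient factor ≈ {grad:.6f}")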
# Chain Rule: ∂L/∂w = ∂L/∂a · ∂a/∂z · ∂z/∂w (the heart of backpropagation)
print("\n" + "=" * 55)
print("CHAIN RULE: BACKPROPAGATION PREVIEW")
print("=" * 55)
# Loss = (y - σ(wx + b))²
w, x, b, y = 0.5, 2.0, 0.1, 1.0
z = w * x + b
a = sigmoid(z)
loss = (y - a) ** 2
# Backprop chain
dL_da = -2 * (y - a)
da_dz = sigmoid_derivative(z)
dz_dw = x
dL_dw = dL_da * da_dz * dz_dw
print(f" Forward: z = wx+b = {z}")
print(f" a = σ(z) = {a:.4f}")
print(f" L = (y-a)² = {loss:.4f}")
print(f"\n Backward (Chain Rule):")
print(f" ∂L/∂a = {dL_da:.4f}")
print(f" ∂a/∂z = {da_dz:.4f}")
print(f" ∂z/∂w = {dz_dw}")
print(f" ∂L/∂w = {dL_da:.4f} × {da_dz:.4f} × {dz_dw} = {dL_dw:.6f}")
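# Added sketch (assumption, not in the original): one gradient-descent update
# using the gradient above. ∂L/∂b = ∂L/∂a · ∂a/∂z since ∂z/∂b = 1; the
# learning rate 0.5 is an arbitrary choice for illustration.
dL_db = dL_da * da_dz
lr = 0.5
w_new, b_new = w - lr * dL_dw, b - lr * dL_db
a_new = sigmoid(w_new * x + b_new)
loss_new = (y - a_new) ** 2
print(f"\n One gradient-descent step (lr={lr}):")
print(f" w: {w} → {w_new:.4f}, b: {b} → {b_new:.4f}")
print(f" Loss: {loss:.4f} → {loss_new:.4f} (lower, as expected)")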