import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# from latexify import latexify
# latexify(columns = 2)
%matplotlib inline
%config InlineBackend.figure_format = "retina"
Meshgrid
ML
import ipywidgets as widgets
from ipywidgets import interactive
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=1000, noise=0.1, random_state=0)

plt.figure(figsize=(8, 6))
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
plt.show()
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=100, random_state=0)
rf.fit(X, y)
RandomForestClassifier(random_state=0)
# Decision surface
plt.figure(figsize=(8, 6))
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.viridis)
ax = plt.gca()
xlim = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
ylim = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5

# Create grid to evaluate model
x_lin = np.linspace(xlim[0], xlim[1], 30)
y_lin = np.linspace(ylim[0], ylim[1], 30)

XX, YY = np.meshgrid(x_lin, y_lin)
xy = np.vstack([XX.ravel(), YY.ravel()]).T
Z = rf.predict_proba(xy)[:, 1].reshape(XX.shape)

# Plot decision boundary
ax.contourf(XX, YY, Z, cmap=plt.cm.viridis, alpha=0.2)
plt.colorbar()
plt.show()
X_arr = np.array([1, 2, 3, 4])
Y_arr = np.array([5, 6, 7])

XX, YY = np.meshgrid(X_arr, Y_arr)
XX
array([[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4]])
YY
array([[5, 5, 5, 5],
[6, 6, 6, 6],
[7, 7, 7, 7]])
XX.shape, YY.shape
((3, 4), (3, 4))
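Notice the structure: every row of `XX` is a copy of `X_arr`, and row `i` of `YY` is filled with `Y_arr[i]`. As a hedged aside (not part of the original cells; `XX_b` and `YY_b` are names introduced here only for this check), the same grids can be built with plain NumPy broadcasting:

# Sanity check: meshgrid is equivalent to broadcasting the two 1-D arrays
XX_b = np.broadcast_to(X_arr, (len(Y_arr), len(X_arr)))           # each row repeats X_arr
YY_b = np.broadcast_to(Y_arr[:, None], (len(Y_arr), len(X_arr)))  # each column repeats Y_arr
np.array_equal(XX_b, XX), np.array_equal(YY_b, YY)

Both comparisons should come back `True`.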
out = {}
count = 0
for i in range(XX.shape[0]):
    for j in range(XX.shape[1]):
        count = count + 1
        out[count] = {"i": i, "j": j, "XX": XX[i, j], "YY": YY[i, j]}
pd.DataFrame(out).T
|    | i | j | XX | YY |
|----|---|---|----|----|
| 1  | 0 | 0 | 1  | 5  |
| 2  | 0 | 1 | 2  | 5  |
| 3  | 0 | 2 | 3  | 5  |
| 4  | 0 | 3 | 4  | 5  |
| 5  | 1 | 0 | 1  | 6  |
| 6  | 1 | 1 | 2  | 6  |
| 7  | 1 | 2 | 3  | 6  |
| 8  | 1 | 3 | 4  | 6  |
| 9  | 2 | 0 | 1  | 7  |
| 10 | 2 | 1 | 2  | 7  |
| 11 | 2 | 2 | 3  | 7  |
| 12 | 2 | 3 | 4  | 7  |
XX[0], YY[0]
(array([1, 2, 3, 4]), array([5, 5, 5, 5]))
plt.figure(figsize=(6, 4))
for i in range(XX.shape[0]):
    plt.plot(XX[i], YY[i], 'o', label=i)
plt.legend();
plt.figure(figsize=(6, 4))
plt.plot(XX, YY, 'o')
plt.show()
Let’s see some Plots and their Contours
- \[ \color{red}{F(x, y) = \sin(x)\cos(y)}, -\frac{3\pi}{2} \le x \le \frac{3\pi}{2}, -\pi \le y \le \pi \]
from mpl_toolkits.mplot3d import Axes3D
X1 = np.linspace(-3/2 * np.pi, 3/2 * np.pi, 100)
Y1 = np.linspace(-np.pi, np.pi, 100)
XX1, YY1 = np.meshgrid(X1, Y1)
F = np.sin(XX1) * np.cos(YY1)
plt.figure(figsize=(6, 3))
plt.contourf(XX1, YY1, F, cmap="viridis")
plt.colorbar()
plt.title(r"Contour Plot of $F(x, y) = \sin(x)\cos(y)$", fontsize=14)
plt.show()
fig = plt.figure(figsize=(8, 4))
ax4 = fig.add_subplot(111, projection="3d")
ax4.plot_surface(XX1, YY1, F, cmap="viridis")
ax4.set_title(r"Graph of $F(x, y) = \sin(x)\cos(y)$", fontsize=14)
plt.show()
xlim = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
ylim = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5

# Create grid to evaluate model
x_lin = np.linspace(xlim[0], xlim[1], 30)
y_lin = np.linspace(ylim[0], ylim[1], 30)
x_lin
array([-1.67150293, -1.52070662, -1.36991031, -1.219114 , -1.06831769,
-0.91752137, -0.76672506, -0.61592875, -0.46513244, -0.31433613,
-0.16353982, -0.01274351, 0.1380528 , 0.28884911, 0.43964542,
0.59044173, 0.74123804, 0.89203435, 1.04283066, 1.19362697,
1.34442328, 1.49521959, 1.6460159 , 1.79681221, 1.94760852,
2.09840483, 2.24920114, 2.39999745, 2.55079376, 2.70159007])
XX, YY = np.meshgrid(x_lin, y_lin)
def update_plot(i=0, j=2):
    x_point = XX[i, j]
    y_point = YY[i, j]

    plt.figure(figsize=(8, 6))
    plt.plot(XX, YY, 'o', alpha=0.1, color='k')
    plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.viridis)

    pred = rf.predict_proba([[x_point, y_point]])[:, 1]

    plt.scatter(x_point, y_point, s=100, c="r")
    plt.title(f"Prediction P(Class 1): {pred[0]:.2f}")

    plt.show()
update_plot(0, 0)
widget = interactive(update_plot, i=(0, XX.shape[0] - 1), j=(0, XX.shape[1] - 1))
display(widget)
XX[0], YY[:, 0]
(array([-1.67150293, -1.52070662, -1.36991031, -1.219114 , -1.06831769,
-0.91752137, -0.76672506, -0.61592875, -0.46513244, -0.31433613,
-0.16353982, -0.01274351, 0.1380528 , 0.28884911, 0.43964542,
0.59044173, 0.74123804, 0.89203435, 1.04283066, 1.19362697,
1.34442328, 1.49521959, 1.6460159 , 1.79681221, 1.94760852,
2.09840483, 2.24920114, 2.39999745, 2.55079376, 2.70159007]),
array([-1.23673767, -1.13313159, -1.02952551, -0.92591943, -0.82231335,
-0.71870727, -0.61510119, -0.51149511, -0.40788903, -0.30428295,
-0.20067687, -0.09707079, 0.00653529, 0.11014137, 0.21374745,
0.31735353, 0.42095961, 0.52456569, 0.62817177, 0.73177785,
0.83538393, 0.93899001, 1.04259609, 1.14620217, 1.24980825,
1.35341433, 1.45702041, 1.56062649, 1.66423257, 1.76783865]))
XX.shape
(30, 30)
from einops import rearrange, repeat, reduce
Usually when dealing with images, the shapes are of the form \((B \times C \times H \times W)\) where

- \(B\): Batch Size
- \(C\): Number of Image Channels
- \(H\): Image Height
- \(W\): Image Width

In order to flatten the image data into a linear array, \((B \times C \times H \times W) \to (B \times C \cdot H \cdot W)\), we may use

\[ \texttt{einops.rearrange(img, "b c h w -> b (c h w)")} \]
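To make that concrete, here is a minimal sketch (the dummy array `imgs` and its sizes are hypothetical, not data from this notebook): flattening a random \((B, C, H, W)\) batch with the pattern above gives shape \((B, C \cdot H \cdot W)\), and the reverse pattern restores the original layout.

# Hypothetical batch of 8 RGB images of size 32x32
imgs = np.random.rand(8, 3, 32, 32)
flat = rearrange(imgs, 'b c h w -> b (c h w)')                    # (8, 3072)
back = rearrange(flat, 'b (c h w) -> b c h w', c=3, h=32, w=32)   # (8, 3, 32, 32)
flat.shape, np.allclose(back, imgs)

The flattened shape should come out as `(8, 3072)`, and the round trip should return `True`.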
XX.shape
(30, 30)
XX.ravel().shape
(900,)
rearrange(XX, 'i j -> (i j) 1').shape, rearrange(XX, 'i j -> (i j)').shape
((900, 1), (900,))
rearrange(YY, 'i j -> (i j) 1').shape
(900, 1)
XX_flat = rearrange(XX, 'i j -> (i j) 1')
YY_flat = rearrange(YY, 'i j -> (i j) 1')
np.array([XX_flat, YY_flat]).shape
(2, 900, 1)
rearrange([XX_flat, YY_flat], 'f n 1 -> n f').shape
(900, 2)
X_feature = rearrange([XX_flat, YY_flat], 'f n 1 -> n f')
X_feature[:32]
array([[-1.67150293, -1.23673767],
[-1.52070662, -1.23673767],
[-1.36991031, -1.23673767],
[-1.219114 , -1.23673767],
[-1.06831769, -1.23673767],
[-0.91752137, -1.23673767],
[-0.76672506, -1.23673767],
[-0.61592875, -1.23673767],
[-0.46513244, -1.23673767],
[-0.31433613, -1.23673767],
[-0.16353982, -1.23673767],
[-0.01274351, -1.23673767],
[ 0.1380528 , -1.23673767],
[ 0.28884911, -1.23673767],
[ 0.43964542, -1.23673767],
[ 0.59044173, -1.23673767],
[ 0.74123804, -1.23673767],
[ 0.89203435, -1.23673767],
[ 1.04283066, -1.23673767],
[ 1.19362697, -1.23673767],
[ 1.34442328, -1.23673767],
[ 1.49521959, -1.23673767],
[ 1.6460159 , -1.23673767],
[ 1.79681221, -1.23673767],
[ 1.94760852, -1.23673767],
[ 2.09840483, -1.23673767],
[ 2.24920114, -1.23673767],
[ 2.39999745, -1.23673767],
[ 2.55079376, -1.23673767],
[ 2.70159007, -1.23673767],
[-1.67150293, -1.13313159],
[-1.52070662, -1.13313159]])
Z = rf.predict_proba(X_feature)[:, 1]
Z.shape
(900,)
plt.figure(figsize=(8, 6))
plt.scatter(XX_flat, YY_flat, c=Z, cmap=plt.cm.viridis)
plt.show()
Z[:10]
array([0.19, 0.19, 0.19, 0.19, 0.19, 0.19, 0.19, 0.19, 0.36, 0.73])
# Divide Z into k levels
k = 10
min_Z = Z.min()
max_Z = Z.max()

levels = np.linspace(min_Z, max_Z, k)
levels
array([0. , 0.11111111, 0.22222222, 0.33333333, 0.44444444,
0.55555556, 0.66666667, 0.77777778, 0.88888889, 1. ])
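Although the final contour plot below simply passes `levels=10`, the `levels` array computed here can also be used explicitly. A minimal sketch (assuming the same `XX_flat`, `YY_flat`, `Z`, and `levels` as above; `z_band` is a name introduced only for this illustration) assigns each grid point to one of the bands bounded by `levels` using `np.digitize`:

# Map each predicted probability to the index of the band it falls into
z_band = np.digitize(Z, levels[1:-1])   # integer band index in 0 .. k-2
plt.figure(figsize=(8, 6))
plt.scatter(XX_flat, YY_flat, c=z_band, cmap=plt.cm.viridis)
plt.colorbar()
plt.show()

This discretized scatter is essentially a hand-rolled version of what `contourf` does with the same level boundaries.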
# Create an image from Z
img = rearrange(Z, '(h w) -> h w', h=XX.shape[0])

plt.figure(figsize=(8, 6))
plt.imshow(img, cmap=plt.cm.viridis,
           extent=[XX.min(), XX.max(), YY.min(), YY.max()],
           origin='lower',
           interpolation='spline36')
plt.show()
plt.figure(figsize=(8, 6))
plt.contourf(XX, YY, Z.reshape(XX.shape), cmap=plt.cm.viridis, levels=10)
plt.colorbar()
plt.show()