import math

import matplotlib.pyplot as plt
import numpy as np
import torch
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
Taylor's Series — ML Tutorial
def f(x, y):
    """Simple quadratic bowl: f(x, y) = x^2 + y^2."""
    # NOTE(review): the original also assigned an identical lambda to `f`
    # immediately before this def; it was dead code (shadowed) and is removed.
    return x**2 + y**2

# Gradient of f w.r.t. both arguments: f_dash(x, y) -> (df/dx, df/dy).
f_dash = torch.func.grad(f, argnums=(0, 1))
f_dash  # notebook echo: <function __main__.f(x, y)>
def f2(argument):
    """Same bowl as `f`, but taking a single tensor [x, y]."""
    x, y = argument
    return x**2 + y**2

# grad w.r.t. the single vector argument gives the full gradient at once.
torch.func.grad(f2)(torch.tensor([1.0, 1.0]))  # notebook echo: tensor([2., 2.])
# Functional gradient at (1, 1): both partials are 2.
f_dash(torch.tensor(1.0), torch.tensor(1.0))  # notebook echo: (tensor(2.), tensor(2.))

# Same gradient via reverse-mode autograd and .backward().
x = torch.tensor(1.0, requires_grad=True)
y = torch.tensor(1.0, requires_grad=True)
print("Before backward: ", x.grad, y.grad)  # prints: None None
z = f(x, y)
z.backward()
print("After backward: ", x.grad, y.grad)   # prints: tensor(2.) tensor(2.)
# Plot cos(x) over two full periods; x_range/y_range are reused below.
f = lambda x: torch.cos(x)
x_range = torch.arange(-2 * np.pi, 2 * np.pi, 0.01)
y_range = f(x_range)
plt.plot(x_range, y_range)
def nth_order_appx(f, n, x0=0.0, verbose=False):
    """Build the n-th order Taylor approximation of `f` around `x0`.

    Parameters
    ----------
    f : callable taking a 0-dim torch tensor and returning a 0-dim tensor.
    n : int, order of the approximation (n >= 1).
    x0 : float or 0-dim tensor, the expansion point.
    verbose : if True, print each derivative value and the symbolic expansion.

    Returns
    -------
    g : callable evaluating the Taylor polynomial elementwise on a tensor of
        query points.
    """
    # as_tensor accepts both floats and tensors without re-wrapping; the
    # original torch.tensor(x0) emitted a UserWarning whenever a tensor
    # (e.g. the current GD iterate) was passed in.
    x0 = torch.as_tensor(x0)
    derivs = {1: torch.func.grad(f)}
    vals = {0: f(x0), 1: derivs[1](x0)}
    if verbose:
        print("f(x0) = {}".format(vals[0]))
        print("f'(x0) = {}".format(vals[1]))
    for i in range(2, n + 1):
        # The i-th derivative is the grad of the (i-1)-th derivative.
        derivs[i] = torch.func.grad(derivs[i - 1])
        vals[i] = derivs[i](x0)
        if verbose:
            d = "'" * i
            print("f{}(x0) = {}".format(d, vals[i]))

    def g(x):
        """Evaluate the Taylor polynomial at the points in `x`."""
        x_diff = x - x0
        str_rep = "f(x) = f(x0) + "
        out = vals[0].repeat(x.shape)
        for i in range(1, n + 1):
            str_rep += f"{vals[i]} * (x-{x0.item()})^{i} / {i}! + "
            # math.factorial instead of the non-public torch.math alias.
            out += vals[i] * x_diff**i / math.factorial(i)
        if verbose:
            print("--" * 40)
            print(str_rep)
        return out

    return g
# First-order expansion of cos at 0: the linear term vanishes (f'(0) = -0),
# so the approximation is the constant 1.
f = lambda x: torch.cos(x)
_ = nth_order_appx(f, 1, 0.0, verbose=True)(x_range)
# printed:
# f(x0) = 1.0
# f'(x0) = -0.0
# --------------------------------------------------------------------------------
# f(x) = f(x0) + -0.0 * (x-0.0)^1 / 1! +
# High-order (16th, 18th) approximations track cos over a wide range.
plt.plot(x_range, f(x_range), label="f(x) = cos(x)")
for i in range(16, 19, 2):
    plt.plot(x_range, nth_order_appx(f, i, 0.0)(x_range), label=f"order {i} appx")
plt.ylim(-2, 2)
# legend outside
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# Odd-order approximations of cos expanded near pi.
f = lambda x: torch.cos(x)
x0 = 3.14
plt.plot(x_range, f(x_range))
plt.scatter(x0, f(torch.tensor(x0)), c='r', label='x0')
for i in range(1, 11, 2):
    plt.plot(x_range, nth_order_appx(f, i, 3.14)(x_range), label=f"order {i}")
plt.ylim(-2, 2)
plt.legend()
# For a quadratic, the order-1 approximation at x0 = 2 is the tangent line.
f = lambda x: x**2 + 2
plt.plot(x_range, f(x_range))
x0 = 2.0
plt.scatter(x0, f(torch.tensor(x0)), c='r', label='x0')
plt.plot(x_range, nth_order_appx(f, 1, x0)(x_range), label=f"order 1")

# Zoomed view: near x0 the tangent and the function almost coincide.
plt.plot(x_range, f(x_range), label="f(x) = x^2 + 2", lw=3, alpha=0.5, ls='--')
plt.scatter(x0, f(torch.tensor(x0)), c='r', label='x0')
plt.plot(x_range, nth_order_appx(f, 1, x0)(x_range), label=f"order 1")
plt.xlim(1.5, 2.5)
plt.legend()
# Re-plot the quadratic with its tangent at x0 = 2 (same as the cell above).
f = lambda x: x**2 + 2
plt.plot(x_range, f(x_range))
x0 = 2.0
plt.scatter(x0, f(torch.tensor(x0)), c='r', label='x0')
plt.plot(x_range, nth_order_appx(f, 1, x0)(x_range), label=f"order 1")
# Project-local helpers for publication-quality matplotlib styling.
from latexify import latexify, format_axes

latexify(columns=2)
def plot_gd(alpha=0.1, iter=3):
    """Visualize gradient descent on the module-level `f`.

    Runs `iter` steps of x_{i+1} = x_i - alpha * f'(x_i) from x0 = 2.0,
    plotting f, each iterate, and the first-order Taylor approximation at
    that iterate. NOTE(review): relies on module-level `f`, `x_range`,
    `nth_order_appx`, and `format_axes`; `iter` shadows the builtin but is
    kept since callers pass it by keyword.
    """
    x0 = torch.tensor(2.0)
    xi = x0
    plt.plot(x_range, f(x_range), label=r"$f(x) = x^2 + 2$", lw=3, alpha=0.5, ls='--', color='k')
    for i in range(iter):
        plt.scatter(xi, f(xi), label=fr'$x_{i}$ = {xi.item():0.2f}', s=100, c=f"C{i}")
        with torch.no_grad():
            appx = nth_order_appx(f, 1, xi)(x_range)
        plt.plot(x_range, appx, label=fr"order 1 appx. at $x=x_{i}$", c=f"C{i}", alpha=0.5)
        # One gradient-descent update.
        xi = xi - alpha * torch.func.grad(f)(xi)

    plt.xlim(-2.5, 2.5)
    plt.ylim(0, 8)
    # legend outside plot
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    format_axes(plt.gca())
# Generate the GD figures for several learning rates.
# (Each call originally emitted a repeated UserWarning from
# `x0 = torch.tensor(x0)` inside nth_order_appx re-wrapping the iterate
# tensor; harmless — the copy was only read.)
plot_gd(alpha=0.1, iter=5)
plt.savefig("../figures/mml/gd-lr-0.1.pdf", bbox_inches="tight")

plot_gd(alpha=0.8, iter=4)
plt.savefig("../figures/mml/gd-lr-0.8.pdf", bbox_inches="tight")

plot_gd(alpha=1.01, iter=4)   # lr > 1 on this quadratic: iterates diverge
plt.savefig("../figures/mml/gd-lr-1.01.pdf", bbox_inches="tight")

plot_gd(alpha=0.01, iter=4)   # very small lr: slow progress
plt.savefig("../figures/mml/gd-lr-0.01.pdf", bbox_inches="tight")