From 2af7fe5a8eb3831941135ad547d333ce0c576c1a Mon Sep 17 00:00:00 2001 From: sanghui_ilu Date: Mon, 2 Dec 2024 18:39:38 +0800 Subject: [PATCH] add kan model link #IAVGEO add kan model --- dl/kan/README.md | 22 + dl/kan/__init__.py | 0 .../.ipynb_checkpoints/KANLayer-checkpoint.py | 364 +++ .../.ipynb_checkpoints/LBFGS-checkpoint.py | 493 +++ .../kan/.ipynb_checkpoints/MLP-checkpoint.py | 361 +++ .../.ipynb_checkpoints/MultKAN-checkpoint.py | 2805 +++++++++++++++++ .../Symbolic_KANLayer-checkpoint.py | 270 ++ .../.ipynb_checkpoints/__init__-checkpoint.py | 3 + .../.ipynb_checkpoints/compiler-checkpoint.py | 498 +++ .../experiment-checkpoint.py | 55 + .../.ipynb_checkpoints/feynman-checkpoint.py | 739 +++++ .../hypothesis-checkpoint.py | 695 ++++ .../.ipynb_checkpoints/spline-checkpoint.py | 144 + .../.ipynb_checkpoints/utils-checkpoint.py | 594 ++++ dl/kan/kan/KANLayer.py | 364 +++ dl/kan/kan/LBFGS.py | 493 +++ dl/kan/kan/MLP.py | 361 +++ dl/kan/kan/MultKAN.py | 2805 +++++++++++++++++ dl/kan/kan/Symbolic_KANLayer.py | 270 ++ dl/kan/kan/__init__.py | 3 + dl/kan/kan/assets/img/mult_symbol.png | Bin 0 -> 6392 bytes dl/kan/kan/assets/img/sum_symbol.png | Bin 0 -> 6210 bytes dl/kan/kan/compiler.py | 498 +++ dl/kan/kan/experiment.py | 55 + dl/kan/kan/experiments/experiment1.ipynb | 338 ++ dl/kan/kan/feynman.py | 739 +++++ dl/kan/kan/hypothesis.py | 695 ++++ dl/kan/kan/spline.py | 147 + dl/kan/kan/utils.py | 594 ++++ dl/kan/requirements.txt | 8 + dl/kan/run_train.sh | 1 + dl/kan/train_kan.py | 39 + 32 files changed, 14453 insertions(+) create mode 100644 dl/kan/README.md create mode 100644 dl/kan/__init__.py create mode 100644 dl/kan/kan/.ipynb_checkpoints/KANLayer-checkpoint.py create mode 100644 dl/kan/kan/.ipynb_checkpoints/LBFGS-checkpoint.py create mode 100644 dl/kan/kan/.ipynb_checkpoints/MLP-checkpoint.py create mode 100644 dl/kan/kan/.ipynb_checkpoints/MultKAN-checkpoint.py create mode 100644 dl/kan/kan/.ipynb_checkpoints/Symbolic_KANLayer-checkpoint.py create mode 
100644 dl/kan/kan/.ipynb_checkpoints/__init__-checkpoint.py create mode 100644 dl/kan/kan/.ipynb_checkpoints/compiler-checkpoint.py create mode 100644 dl/kan/kan/.ipynb_checkpoints/experiment-checkpoint.py create mode 100644 dl/kan/kan/.ipynb_checkpoints/feynman-checkpoint.py create mode 100644 dl/kan/kan/.ipynb_checkpoints/hypothesis-checkpoint.py create mode 100644 dl/kan/kan/.ipynb_checkpoints/spline-checkpoint.py create mode 100644 dl/kan/kan/.ipynb_checkpoints/utils-checkpoint.py create mode 100644 dl/kan/kan/KANLayer.py create mode 100644 dl/kan/kan/LBFGS.py create mode 100644 dl/kan/kan/MLP.py create mode 100644 dl/kan/kan/MultKAN.py create mode 100644 dl/kan/kan/Symbolic_KANLayer.py create mode 100644 dl/kan/kan/__init__.py create mode 100644 dl/kan/kan/assets/img/mult_symbol.png create mode 100644 dl/kan/kan/assets/img/sum_symbol.png create mode 100644 dl/kan/kan/compiler.py create mode 100644 dl/kan/kan/experiment.py create mode 100644 dl/kan/kan/experiments/experiment1.ipynb create mode 100644 dl/kan/kan/feynman.py create mode 100644 dl/kan/kan/hypothesis.py create mode 100644 dl/kan/kan/spline.py create mode 100644 dl/kan/kan/utils.py create mode 100644 dl/kan/requirements.txt create mode 100644 dl/kan/run_train.sh create mode 100644 dl/kan/train_kan.py diff --git a/dl/kan/README.md b/dl/kan/README.md new file mode 100644 index 000000000..9c0af347d --- /dev/null +++ b/dl/kan/README.md @@ -0,0 +1,22 @@ +# KAN + +## Model description +Kolmogorov-Arnold Networks (KANs) are promising alternatives of Multi-Layer Perceptrons (MLPs). KANs have strong mathematical foundations just like MLPs: MLPs are based on the universal approximation theorem, while KANs are based on Kolmogorov-Arnold representation theorem. KANs and MLPs are dual: KANs have activation functions on edges, while MLPs have activation functions on nodes. This simple change makes KANs better (sometimes much better!) than MLPs in terms of both model accuracy and interpretability. 
+ + +## Run +```shell +$ bash ./run_train.sh + +``` + +## Result +| Model | Training speed | +|-------------|------------------| +| KAN | 6490 samples/sec | + + +## Reference + +- [pykan](https://github.com/KindXiaoming/pykan) + diff --git a/dl/kan/__init__.py b/dl/kan/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/dl/kan/kan/.ipynb_checkpoints/KANLayer-checkpoint.py b/dl/kan/kan/.ipynb_checkpoints/KANLayer-checkpoint.py new file mode 100644 index 000000000..b880bfe8b --- /dev/null +++ b/dl/kan/kan/.ipynb_checkpoints/KANLayer-checkpoint.py @@ -0,0 +1,364 @@ +import torch +import torch.nn as nn +import numpy as np +from .spline import * +from .utils import sparse_mask + + +class KANLayer(nn.Module): + """ + KANLayer class + + + Attributes: + ----------- + in_dim: int + input dimension + out_dim: int + output dimension + num: int + the number of grid intervals + k: int + the piecewise polynomial order of splines + noise_scale: float + spline scale at initialization + coef: 2D torch.tensor + coefficients of B-spline bases + scale_base_mu: float + magnitude of the residual function b(x) is drawn from N(mu, sigma^2), mu = sigma_base_mu + scale_base_sigma: float + magnitude of the residual function b(x) is drawn from N(mu, sigma^2), mu = sigma_base_sigma + scale_sp: float + mangitude of the spline function spline(x) + base_fun: fun + residual function b(x) + mask: 1D torch.float + mask of spline functions. setting some element of the mask to zero means setting the corresponding activation to zero function. + grid_eps: float in [0,1] + a hyperparameter used in update_grid_from_samples. When grid_eps = 1, the grid is uniform; when grid_eps = 0, the grid is partitioned using percentiles of samples. 0 < grid_eps < 1 interpolates between the two extremes. 
+ the id of activation functions that are locked + device: str + device + """ + + def __init__(self, in_dim=3, out_dim=2, num=5, k=3, noise_scale=0.5, scale_base_mu=0.0, scale_base_sigma=1.0, scale_sp=1.0, base_fun=torch.nn.SiLU(), grid_eps=0.02, grid_range=[-1, 1], sp_trainable=True, sb_trainable=True, save_plot_data = True, device='cpu', sparse_init=False): + '''' + initialize a KANLayer + + Args: + ----- + in_dim : int + input dimension. Default: 2. + out_dim : int + output dimension. Default: 3. + num : int + the number of grid intervals = G. Default: 5. + k : int + the order of piecewise polynomial. Default: 3. + noise_scale : float + the scale of noise injected at initialization. Default: 0.1. + scale_base_mu : float + the scale of the residual function b(x) is intialized to be N(scale_base_mu, scale_base_sigma^2). + scale_base_sigma : float + the scale of the residual function b(x) is intialized to be N(scale_base_mu, scale_base_sigma^2). + scale_sp : float + the scale of the base function spline(x). + base_fun : function + residual function b(x). Default: torch.nn.SiLU() + grid_eps : float + When grid_eps = 1, the grid is uniform; when grid_eps = 0, the grid is partitioned using percentiles of samples. 0 < grid_eps < 1 interpolates between the two extremes. + grid_range : list/np.array of shape (2,) + setting the range of grids. Default: [-1,1]. + sp_trainable : bool + If true, scale_sp is trainable + sb_trainable : bool + If true, scale_base is trainable + device : str + device + sparse_init : bool + if sparse_init = True, sparse initialization is applied. 
+ + Returns: + -------- + self + + Example + ------- + >>> from kan.KANLayer import * + >>> model = KANLayer(in_dim=3, out_dim=5) + >>> (model.in_dim, model.out_dim) + ''' + super(KANLayer, self).__init__() + # size + self.out_dim = out_dim + self.in_dim = in_dim + self.num = num + self.k = k + + grid = torch.linspace(grid_range[0], grid_range[1], steps=num + 1)[None,:].expand(self.in_dim, num+1) + grid = extend_grid(grid, k_extend=k) + self.grid = torch.nn.Parameter(grid).requires_grad_(False) + noises = (torch.rand(self.num+1, self.in_dim, self.out_dim) - 1/2) * noise_scale / num + + self.coef = torch.nn.Parameter(curve2coef(self.grid[:,k:-k].permute(1,0), noises, self.grid, k)) + + if sparse_init: + self.mask = torch.nn.Parameter(sparse_mask(in_dim, out_dim)).requires_grad_(False) + else: + self.mask = torch.nn.Parameter(torch.ones(in_dim, out_dim)).requires_grad_(False) + + self.scale_base = torch.nn.Parameter(scale_base_mu * 1 / np.sqrt(in_dim) + \ + scale_base_sigma * (torch.rand(in_dim, out_dim)*2-1) * 1/np.sqrt(in_dim)).requires_grad_(sb_trainable) + self.scale_sp = torch.nn.Parameter(torch.ones(in_dim, out_dim) * scale_sp * 1 / np.sqrt(in_dim) * self.mask).requires_grad_(sp_trainable) # make scale trainable + self.base_fun = base_fun + + + self.grid_eps = grid_eps + + self.to(device) + + def to(self, device): + super(KANLayer, self).to(device) + self.device = device + return self + + def forward(self, x): + ''' + KANLayer forward given input x + + Args: + ----- + x : 2D torch.float + inputs, shape (number of samples, input dimension) + + Returns: + -------- + y : 2D torch.float + outputs, shape (number of samples, output dimension) + preacts : 3D torch.float + fan out x into activations, shape (number of sampels, output dimension, input dimension) + postacts : 3D torch.float + the outputs of activation functions with preacts as inputs + postspline : 3D torch.float + the outputs of spline functions with preacts as inputs + + Example + ------- + >>> from 
kan.KANLayer import * + >>> model = KANLayer(in_dim=3, out_dim=5) + >>> x = torch.normal(0,1,size=(100,3)) + >>> y, preacts, postacts, postspline = model(x) + >>> y.shape, preacts.shape, postacts.shape, postspline.shape + ''' + batch = x.shape[0] + preacts = x[:,None,:].clone().expand(batch, self.out_dim, self.in_dim) + + base = self.base_fun(x) # (batch, in_dim) + y = coef2curve(x_eval=x, grid=self.grid, coef=self.coef, k=self.k) + + postspline = y.clone().permute(0,2,1) + + y = self.scale_base[None,:,:] * base[:,:,None] + self.scale_sp[None,:,:] * y + y = self.mask[None,:,:] * y + + postacts = y.clone().permute(0,2,1) + + y = torch.sum(y, dim=1) + return y, preacts, postacts, postspline + + def update_grid_from_samples(self, x, mode='sample'): + ''' + update grid from samples + + Args: + ----- + x : 2D torch.float + inputs, shape (number of samples, input dimension) + + Returns: + -------- + None + + Example + ------- + >>> model = KANLayer(in_dim=1, out_dim=1, num=5, k=3) + >>> print(model.grid.data) + >>> x = torch.linspace(-3,3,steps=100)[:,None] + >>> model.update_grid_from_samples(x) + >>> print(model.grid.data) + ''' + + batch = x.shape[0] + #x = torch.einsum('ij,k->ikj', x, torch.ones(self.out_dim, ).to(self.device)).reshape(batch, self.size).permute(1, 0) + x_pos = torch.sort(x, dim=0)[0] + y_eval = coef2curve(x_pos, self.grid, self.coef, self.k) + num_interval = self.grid.shape[1] - 1 - 2*self.k + + def get_grid(num_interval): + ids = [int(batch / num_interval * i) for i in range(num_interval)] + [-1] + grid_adaptive = x_pos[ids, :].permute(1,0) + margin = 0.00 + h = (grid_adaptive[:,[-1]] - grid_adaptive[:,[0]] + 2 * margin)/num_interval + grid_uniform = grid_adaptive[:,[0]] - margin + h * torch.arange(num_interval+1,)[None, :].to(x.device) + grid = self.grid_eps * grid_uniform + (1 - self.grid_eps) * grid_adaptive + return grid + + + grid = get_grid(num_interval) + + if mode == 'grid': + sample_grid = get_grid(2*num_interval) + x_pos = 
sample_grid.permute(1,0) + y_eval = coef2curve(x_pos, self.grid, self.coef, self.k) + + self.grid.data = extend_grid(grid, k_extend=self.k) + #print('x_pos 2', x_pos.shape) + #print('y_eval 2', y_eval.shape) + self.coef.data = curve2coef(x_pos, y_eval, self.grid, self.k) + + def initialize_grid_from_parent(self, parent, x, mode='sample'): + ''' + update grid from a parent KANLayer & samples + + Args: + ----- + parent : KANLayer + a parent KANLayer (whose grid is usually coarser than the current model) + x : 2D torch.float + inputs, shape (number of samples, input dimension) + + Returns: + -------- + None + + Example + ------- + >>> batch = 100 + >>> parent_model = KANLayer(in_dim=1, out_dim=1, num=5, k=3) + >>> print(parent_model.grid.data) + >>> model = KANLayer(in_dim=1, out_dim=1, num=10, k=3) + >>> x = torch.normal(0,1,size=(batch, 1)) + >>> model.initialize_grid_from_parent(parent_model, x) + >>> print(model.grid.data) + ''' + + batch = x.shape[0] + + # shrink grid + x_pos = torch.sort(x, dim=0)[0] + y_eval = coef2curve(x_pos, parent.grid, parent.coef, parent.k) + num_interval = self.grid.shape[1] - 1 - 2*self.k + + + ''' + # based on samples + def get_grid(num_interval): + ids = [int(batch / num_interval * i) for i in range(num_interval)] + [-1] + grid_adaptive = x_pos[ids, :].permute(1,0) + h = (grid_adaptive[:,[-1]] - grid_adaptive[:,[0]])/num_interval + grid_uniform = grid_adaptive[:,[0]] + h * torch.arange(num_interval+1,)[None, :].to(x.device) + grid = self.grid_eps * grid_uniform + (1 - self.grid_eps) * grid_adaptive + return grid''' + + #print('p', parent.grid) + # based on interpolating parent grid + def get_grid(num_interval): + x_pos = parent.grid[:,parent.k:-parent.k] + #print('x_pos', x_pos) + sp2 = KANLayer(in_dim=1, out_dim=self.in_dim,k=1,num=x_pos.shape[1]-1,scale_base_mu=0.0, scale_base_sigma=0.0).to(x.device) + + #print('sp2_grid', sp2.grid[:,sp2.k:-sp2.k].permute(1,0).expand(-1,self.in_dim)) + #print('sp2_coef_shape', sp2.coef.shape) + 
sp2_coef = curve2coef(sp2.grid[:,sp2.k:-sp2.k].permute(1,0).expand(-1,self.in_dim), x_pos.permute(1,0).unsqueeze(dim=2), sp2.grid[:,:], k=1).permute(1,0,2) + shp = sp2_coef.shape + #sp2_coef = torch.cat([torch.zeros(shp[0], shp[1], 1), sp2_coef, torch.zeros(shp[0], shp[1], 1)], dim=2) + #print('sp2_coef',sp2_coef) + #print(sp2.coef.shape) + sp2.coef.data = sp2_coef + percentile = torch.linspace(-1,1,self.num+1).to(self.device) + grid = sp2(percentile.unsqueeze(dim=1))[0].permute(1,0) + #print('c', grid) + return grid + + grid = get_grid(num_interval) + + if mode == 'grid': + sample_grid = get_grid(2*num_interval) + x_pos = sample_grid.permute(1,0) + y_eval = coef2curve(x_pos, parent.grid, parent.coef, parent.k) + + grid = extend_grid(grid, k_extend=self.k) + self.grid.data = grid + self.coef.data = curve2coef(x_pos, y_eval, self.grid, self.k) + + def get_subset(self, in_id, out_id): + ''' + get a smaller KANLayer from a larger KANLayer (used for pruning) + + Args: + ----- + in_id : list + id of selected input neurons + out_id : list + id of selected output neurons + + Returns: + -------- + spb : KANLayer + + Example + ------- + >>> kanlayer_large = KANLayer(in_dim=10, out_dim=10, num=5, k=3) + >>> kanlayer_small = kanlayer_large.get_subset([0,9],[1,2,3]) + >>> kanlayer_small.in_dim, kanlayer_small.out_dim + (2, 3) + ''' + spb = KANLayer(len(in_id), len(out_id), self.num, self.k, base_fun=self.base_fun) + spb.grid.data = self.grid[in_id] + spb.coef.data = self.coef[in_id][:,out_id] + spb.scale_base.data = self.scale_base[in_id][:,out_id] + spb.scale_sp.data = self.scale_sp[in_id][:,out_id] + spb.mask.data = self.mask[in_id][:,out_id] + + spb.in_dim = len(in_id) + spb.out_dim = len(out_id) + return spb + + + def swap(self, i1, i2, mode='in'): + ''' + swap the i1 neuron with the i2 neuron in input (if mode == 'in') or output (if mode == 'out') + + Args: + ----- + i1 : int + i2 : int + mode : str + mode = 'in' or 'out' + + Returns: + -------- + None + + Example + 
------- + >>> from kan.KANLayer import * + >>> model = KANLayer(in_dim=2, out_dim=2, num=5, k=3) + >>> print(model.coef) + >>> model.swap(0,1,mode='in') + >>> print(model.coef) + ''' + with torch.no_grad(): + def swap_(data, i1, i2, mode='in'): + if mode == 'in': + data[i1], data[i2] = data[i2].clone(), data[i1].clone() + elif mode == 'out': + data[:,i1], data[:,i2] = data[:,i2].clone(), data[:,i1].clone() + + if mode == 'in': + swap_(self.grid.data, i1, i2, mode='in') + swap_(self.coef.data, i1, i2, mode=mode) + swap_(self.scale_base.data, i1, i2, mode=mode) + swap_(self.scale_sp.data, i1, i2, mode=mode) + swap_(self.mask.data, i1, i2, mode=mode) + diff --git a/dl/kan/kan/.ipynb_checkpoints/LBFGS-checkpoint.py b/dl/kan/kan/.ipynb_checkpoints/LBFGS-checkpoint.py new file mode 100644 index 000000000..212477f23 --- /dev/null +++ b/dl/kan/kan/.ipynb_checkpoints/LBFGS-checkpoint.py @@ -0,0 +1,493 @@ +import torch +from functools import reduce +from torch.optim import Optimizer + +__all__ = ['LBFGS'] + +def _cubic_interpolate(x1, f1, g1, x2, f2, g2, bounds=None): + # ported from https://github.com/torch/optim/blob/master/polyinterp.lua + # Compute bounds of interpolation area + if bounds is not None: + xmin_bound, xmax_bound = bounds + else: + xmin_bound, xmax_bound = (x1, x2) if x1 <= x2 else (x2, x1) + + # Code for most common case: cubic interpolation of 2 points + # w/ function and derivative values for both + # Solution in this case (where x2 is the farthest point): + # d1 = g1 + g2 - 3*(f1-f2)/(x1-x2); + # d2 = sqrt(d1^2 - g1*g2); + # min_pos = x2 - (x2 - x1)*((g2 + d2 - d1)/(g2 - g1 + 2*d2)); + # t_new = min(max(min_pos,xmin_bound),xmax_bound); + d1 = g1 + g2 - 3 * (f1 - f2) / (x1 - x2) + d2_square = d1**2 - g1 * g2 + if d2_square >= 0: + d2 = d2_square.sqrt() + if x1 <= x2: + min_pos = x2 - (x2 - x1) * ((g2 + d2 - d1) / (g2 - g1 + 2 * d2)) + else: + min_pos = x1 - (x1 - x2) * ((g1 + d2 - d1) / (g1 - g2 + 2 * d2)) + return min(max(min_pos, xmin_bound), 
xmax_bound) + else: + return (xmin_bound + xmax_bound) / 2. + + +def _strong_wolfe(obj_func, + x, + t, + d, + f, + g, + gtd, + c1=1e-4, + c2=0.9, + tolerance_change=1e-9, + max_ls=25): + # ported from https://github.com/torch/optim/blob/master/lswolfe.lua + d_norm = d.abs().max() + g = g.clone(memory_format=torch.contiguous_format) + # evaluate objective and gradient using initial step + f_new, g_new = obj_func(x, t, d) + ls_func_evals = 1 + gtd_new = g_new.dot(d) + + # bracket an interval containing a point satisfying the Wolfe criteria + t_prev, f_prev, g_prev, gtd_prev = 0, f, g, gtd + done = False + ls_iter = 0 + while ls_iter < max_ls: + # check conditions + #print(f_prev, f_new, g_new) + if f_new > (f + c1 * t * gtd) or (ls_iter > 1 and f_new >= f_prev): + bracket = [t_prev, t] + bracket_f = [f_prev, f_new] + bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)] + bracket_gtd = [gtd_prev, gtd_new] + break + + if abs(gtd_new) <= -c2 * gtd: + bracket = [t] + bracket_f = [f_new] + bracket_g = [g_new] + done = True + break + + if gtd_new >= 0: + bracket = [t_prev, t] + bracket_f = [f_prev, f_new] + bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)] + bracket_gtd = [gtd_prev, gtd_new] + break + + # interpolate + min_step = t + 0.01 * (t - t_prev) + max_step = t * 10 + tmp = t + t = _cubic_interpolate( + t_prev, + f_prev, + gtd_prev, + t, + f_new, + gtd_new, + bounds=(min_step, max_step)) + + # next step + t_prev = tmp + f_prev = f_new + g_prev = g_new.clone(memory_format=torch.contiguous_format) + gtd_prev = gtd_new + f_new, g_new = obj_func(x, t, d) + ls_func_evals += 1 + gtd_new = g_new.dot(d) + ls_iter += 1 + + + # reached max number of iterations? + if ls_iter == max_ls: + bracket = [0, t] + bracket_f = [f, f_new] + bracket_g = [g, g_new] + + # zoom phase: we now have a point satisfying the criteria, or + # a bracket around it. 
We refine the bracket until we find the + # exact point satisfying the criteria + insuf_progress = False + # find high and low points in bracket + low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[-1] else (1, 0) + while not done and ls_iter < max_ls: + # line-search bracket is so small + if abs(bracket[1] - bracket[0]) * d_norm < tolerance_change: + break + + # compute new trial value + t = _cubic_interpolate(bracket[0], bracket_f[0], bracket_gtd[0], + bracket[1], bracket_f[1], bracket_gtd[1]) + + # test that we are making sufficient progress: + # in case `t` is so close to boundary, we mark that we are making + # insufficient progress, and if + # + we have made insufficient progress in the last step, or + # + `t` is at one of the boundary, + # we will move `t` to a position which is `0.1 * len(bracket)` + # away from the nearest boundary point. + eps = 0.1 * (max(bracket) - min(bracket)) + if min(max(bracket) - t, t - min(bracket)) < eps: + # interpolation close to boundary + if insuf_progress or t >= max(bracket) or t <= min(bracket): + # evaluate at 0.1 away from boundary + if abs(t - max(bracket)) < abs(t - min(bracket)): + t = max(bracket) - eps + else: + t = min(bracket) + eps + insuf_progress = False + else: + insuf_progress = True + else: + insuf_progress = False + + # Evaluate new point + f_new, g_new = obj_func(x, t, d) + ls_func_evals += 1 + gtd_new = g_new.dot(d) + ls_iter += 1 + + if f_new > (f + c1 * t * gtd) or f_new >= bracket_f[low_pos]: + # Armijo condition not satisfied or not lower than lowest point + bracket[high_pos] = t + bracket_f[high_pos] = f_new + bracket_g[high_pos] = g_new.clone(memory_format=torch.contiguous_format) + bracket_gtd[high_pos] = gtd_new + low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[1] else (1, 0) + else: + if abs(gtd_new) <= -c2 * gtd: + # Wolfe conditions satisfied + done = True + elif gtd_new * (bracket[high_pos] - bracket[low_pos]) >= 0: + # old low becomes new high + bracket[high_pos] = 
bracket[low_pos] + bracket_f[high_pos] = bracket_f[low_pos] + bracket_g[high_pos] = bracket_g[low_pos] + bracket_gtd[high_pos] = bracket_gtd[low_pos] + + # new point becomes new low + bracket[low_pos] = t + bracket_f[low_pos] = f_new + bracket_g[low_pos] = g_new.clone(memory_format=torch.contiguous_format) + bracket_gtd[low_pos] = gtd_new + + #print(bracket) + if len(bracket) == 1: + t = bracket[0] + f_new = bracket_f[0] + g_new = bracket_g[0] + else: + t = bracket[low_pos] + f_new = bracket_f[low_pos] + g_new = bracket_g[low_pos] + return f_new, g_new, t, ls_func_evals + + + +class LBFGS(Optimizer): + """Implements L-BFGS algorithm. + + Heavily inspired by `minFunc + `_. + + .. warning:: + This optimizer doesn't support per-parameter options and parameter + groups (there can be only one). + + .. warning:: + Right now all parameters have to be on a single device. This will be + improved in the future. + + .. note:: + This is a very memory intensive optimizer (it requires additional + ``param_bytes * (history_size + 1)`` bytes). If it doesn't fit in memory + try reducing the history size, or use a different algorithm. + + Args: + lr (float): learning rate (default: 1) + max_iter (int): maximal number of iterations per optimization step + (default: 20) + max_eval (int): maximal number of function evaluations per optimization + step (default: max_iter * 1.25). + tolerance_grad (float): termination tolerance on first order optimality + (default: 1e-7). + tolerance_change (float): termination tolerance on function + value/parameter changes (default: 1e-9). + history_size (int): update history size (default: 100). + line_search_fn (str): either 'strong_wolfe' or None (default: None). 
+ """ + + def __init__(self, + params, + lr=1, + max_iter=20, + max_eval=None, + tolerance_grad=1e-7, + tolerance_change=1e-9, + tolerance_ys=1e-32, + history_size=100, + line_search_fn=None): + if max_eval is None: + max_eval = max_iter * 5 // 4 + defaults = dict( + lr=lr, + max_iter=max_iter, + max_eval=max_eval, + tolerance_grad=tolerance_grad, + tolerance_change=tolerance_change, + tolerance_ys=tolerance_ys, + history_size=history_size, + line_search_fn=line_search_fn) + super().__init__(params, defaults) + + if len(self.param_groups) != 1: + raise ValueError("LBFGS doesn't support per-parameter options " + "(parameter groups)") + + self._params = self.param_groups[0]['params'] + self._numel_cache = None + + def _numel(self): + if self._numel_cache is None: + self._numel_cache = reduce(lambda total, p: total + p.numel(), self._params, 0) + return self._numel_cache + + def _gather_flat_grad(self): + views = [] + for p in self._params: + if p.grad is None: + view = p.new(p.numel()).zero_() + elif p.grad.is_sparse: + view = p.grad.to_dense().view(-1) + else: + view = p.grad.view(-1) + views.append(view) + device = views[0].device + return torch.cat(views, dim=0) + + def _add_grad(self, step_size, update): + offset = 0 + for p in self._params: + numel = p.numel() + # view as to avoid deprecated pointwise semantics + p.add_(update[offset:offset + numel].view_as(p), alpha=step_size) + offset += numel + assert offset == self._numel() + + def _clone_param(self): + return [p.clone(memory_format=torch.contiguous_format) for p in self._params] + + def _set_param(self, params_data): + for p, pdata in zip(self._params, params_data): + p.copy_(pdata) + + def _directional_evaluate(self, closure, x, t, d): + self._add_grad(t, d) + loss = float(closure()) + flat_grad = self._gather_flat_grad() + self._set_param(x) + return loss, flat_grad + + + @torch.no_grad() + def step(self, closure): + """Perform a single optimization step. 
+ + Args: + closure (Callable): A closure that reevaluates the model + and returns the loss. + """ + + torch.manual_seed(0) + + assert len(self.param_groups) == 1 + + # Make sure the closure is always called with grad enabled + closure = torch.enable_grad()(closure) + + group = self.param_groups[0] + lr = group['lr'] + max_iter = group['max_iter'] + max_eval = group['max_eval'] + tolerance_grad = group['tolerance_grad'] + tolerance_change = group['tolerance_change'] + tolerance_ys = group['tolerance_ys'] + line_search_fn = group['line_search_fn'] + history_size = group['history_size'] + + # NOTE: LBFGS has only global state, but we register it as state for + # the first param, because this helps with casting in load_state_dict + state = self.state[self._params[0]] + state.setdefault('func_evals', 0) + state.setdefault('n_iter', 0) + + # evaluate initial f(x) and df/dx + orig_loss = closure() + loss = float(orig_loss) + current_evals = 1 + state['func_evals'] += 1 + + flat_grad = self._gather_flat_grad() + opt_cond = flat_grad.abs().max() <= tolerance_grad + + # optimal condition + if opt_cond: + return orig_loss + + # tensors cached in state (for tracing) + d = state.get('d') + t = state.get('t') + old_dirs = state.get('old_dirs') + old_stps = state.get('old_stps') + ro = state.get('ro') + H_diag = state.get('H_diag') + prev_flat_grad = state.get('prev_flat_grad') + prev_loss = state.get('prev_loss') + + n_iter = 0 + # optimize for a max of max_iter iterations + while n_iter < max_iter: + # keep track of nb of iterations + n_iter += 1 + state['n_iter'] += 1 + + ############################################################ + # compute gradient descent direction + ############################################################ + if state['n_iter'] == 1: + d = flat_grad.neg() + old_dirs = [] + old_stps = [] + ro = [] + H_diag = 1 + else: + # do lbfgs update (update memory) + y = flat_grad.sub(prev_flat_grad) + s = d.mul(t) + ys = y.dot(s) # y*s + if ys > tolerance_ys: + # 
updating memory + if len(old_dirs) == history_size: + # shift history by one (limited-memory) + old_dirs.pop(0) + old_stps.pop(0) + ro.pop(0) + + # store new direction/step + old_dirs.append(y) + old_stps.append(s) + ro.append(1. / ys) + + # update scale of initial Hessian approximation + H_diag = ys / y.dot(y) # (y*y) + + # compute the approximate (L-BFGS) inverse Hessian + # multiplied by the gradient + num_old = len(old_dirs) + + if 'al' not in state: + state['al'] = [None] * history_size + al = state['al'] + + # iteration in L-BFGS loop collapsed to use just one buffer + q = flat_grad.neg() + for i in range(num_old - 1, -1, -1): + al[i] = old_stps[i].dot(q) * ro[i] + q.add_(old_dirs[i], alpha=-al[i]) + + # multiply by initial Hessian + # r/d is the final direction + d = r = torch.mul(q, H_diag) + for i in range(num_old): + be_i = old_dirs[i].dot(r) * ro[i] + r.add_(old_stps[i], alpha=al[i] - be_i) + + if prev_flat_grad is None: + prev_flat_grad = flat_grad.clone(memory_format=torch.contiguous_format) + else: + prev_flat_grad.copy_(flat_grad) + prev_loss = loss + + ############################################################ + # compute step length + ############################################################ + # reset initial guess for step size + if state['n_iter'] == 1: + t = min(1., 1. 
/ flat_grad.abs().sum()) * lr + else: + t = lr + + # directional derivative + gtd = flat_grad.dot(d) # g * d + + # directional derivative is below tolerance + if gtd > -tolerance_change: + break + + # optional line search: user function + ls_func_evals = 0 + if line_search_fn is not None: + # perform line search, using user function + if line_search_fn != "strong_wolfe": + raise RuntimeError("only 'strong_wolfe' is supported") + else: + x_init = self._clone_param() + + def obj_func(x, t, d): + return self._directional_evaluate(closure, x, t, d) + loss, flat_grad, t, ls_func_evals = _strong_wolfe( + obj_func, x_init, t, d, loss, flat_grad, gtd) + self._add_grad(t, d) + opt_cond = flat_grad.abs().max() <= tolerance_grad + else: + # no line search, simply move with fixed-step + self._add_grad(t, d) + if n_iter != max_iter: + # re-evaluate function only if not in last iteration + # the reason we do this: in a stochastic setting, + # no use to re-evaluate that function here + with torch.enable_grad(): + loss = float(closure()) + flat_grad = self._gather_flat_grad() + opt_cond = flat_grad.abs().max() <= tolerance_grad + ls_func_evals = 1 + + # update func eval + current_evals += ls_func_evals + state['func_evals'] += ls_func_evals + + ############################################################ + # check conditions + ############################################################ + if n_iter == max_iter: + break + + if current_evals >= max_eval: + break + + # optimal condition + if opt_cond: + break + + # lack of progress + if d.mul(t).abs().max() <= tolerance_change: + break + + if abs(loss - prev_loss) < tolerance_change: + break + + state['d'] = d + state['t'] = t + state['old_dirs'] = old_dirs + state['old_stps'] = old_stps + state['ro'] = ro + state['H_diag'] = H_diag + state['prev_flat_grad'] = prev_flat_grad + state['prev_loss'] = prev_loss + + return orig_loss diff --git a/dl/kan/kan/.ipynb_checkpoints/MLP-checkpoint.py 
import torch
import torch.nn as nn
import numpy as np

# NOTE: matplotlib, tqdm and the package-local LBFGS / hypothesis modules are
# imported lazily inside the methods that need them, so the core model can be
# constructed and evaluated in minimal / headless environments.

seed = 0
torch.manual_seed(seed)


class MLP(nn.Module):
    """Plain multi-layer perceptron that mirrors the KAN training API.

    Besides the usual forward pass, the model can cache per-layer
    activations and scale statistics (``save_act``) and compute
    attribution ("feature importance") scores for nodes and edges.
    """

    def __init__(self, width, act='silu', save_act=True, seed=0, device='cpu'):
        """Build the network.

        Args:
            width: list of layer sizes, e.g. ``[2, 5, 1]`` (inputs, hidden..., outputs).
            act: activation name; only 'silu' is currently supported
                 (the parameter is kept for API compatibility).
            save_act: if True, forward passes cache activations and scale stats.
            seed: RNG seed used for weight initialization.
            device: torch device string.
        """
        super(MLP, self).__init__()

        torch.manual_seed(seed)

        self.width = width
        self.depth = depth = len(width) - 1
        self.linears = nn.ModuleList(
            [nn.Linear(width[i], width[i + 1]) for i in range(depth)]
        )

        # only SiLU is wired up for now
        self.act_fun = torch.nn.SiLU()
        self.save_act = save_act
        self.acts = None          # populated by forward() when save_act is on

        self.cache_data = None    # last input seen by forward()

        self.device = device
        self.to(device)

    def to(self, device):
        """Move the model to ``device`` and remember the device string."""
        super(MLP, self).to(device)
        self.device = device
        return self

    def get_act(self, x=None):
        """Run a forward pass purely to (re)populate the activation caches.

        Falls back to ``self.cache_data`` when no input is given; raises if
        neither an input nor cached data is available.
        """
        if isinstance(x, dict):
            x = x['train_input']
        if x is None:
            if self.cache_data is not None:
                x = self.cache_data
            else:
                raise Exception("missing input data x")
        save_act = self.save_act
        self.save_act = True      # force caching for this pass only
        self.forward(x)
        self.save_act = save_act

    @property
    def w(self):
        """List of weight matrices, one per linear layer."""
        return [self.linears[l].weight for l in range(self.depth)]

    def forward(self, x):
        """Forward pass; optionally caches activations and scale statistics.

        When ``save_act`` is True this fills ``self.acts`` (layer inputs),
        ``self.acts_scale`` (per-feature std) and ``self.wa_forward``
        (weights scaled by the std of their input, an "effective" edge strength).
        """
        # cache data
        self.cache_data = x

        self.acts = []
        self.acts_scale = []
        self.wa_forward = []
        self.a_forward = []

        for i in range(self.depth):
            if self.save_act:
                act = x.clone()
                act_scale = torch.std(x, dim=0)
                wa_forward = act_scale[None, :] * self.linears[i].weight
                self.acts.append(act)
                if i > 0:
                    self.acts_scale.append(act_scale)
                self.wa_forward.append(wa_forward)

            x = self.linears[i](x)
            if i < self.depth - 1:
                x = self.act_fun(x)
            elif self.save_act:
                # std of the raw network output
                self.acts_scale.append(torch.std(x, dim=0))

        return x

    def attribute(self):
        """Back-propagate attribution scores from outputs to inputs.

        Populates ``node_scores`` (one per layer, input-first),
        ``edge_scores`` and ``wa_backward``.
        """
        if self.acts is None:
            self.get_act()

        node_scores = []
        edge_scores = []

        # back propagate from the last layer, uniform output importance
        node_score = torch.ones(self.width[-1]).requires_grad_(True).to(self.device)
        node_scores.append(node_score)

        for l in range(self.depth, 0, -1):
            edge_score = torch.einsum(
                'ij,i->ij',
                torch.abs(self.wa_forward[l - 1]),
                node_score / (self.acts_scale[l - 1] + 1e-4),
            )
            edge_scores.append(edge_score)

            # this might be improper for MLPs (although reasonable for KANs)
            node_score = torch.sum(edge_score, dim=0) / torch.sqrt(
                torch.tensor(self.width[l - 1], device=self.device)
            )
            node_scores.append(node_score)

        self.node_scores = list(reversed(node_scores))
        self.edge_scores = list(reversed(edge_scores))
        self.wa_backward = self.edge_scores

    def plot(self, beta=3, scale=1., metric='w'):
        """Draw the network; edge opacity follows tanh(beta * |strength|).

        Args:
            beta: sharpness of the opacity mapping.
            scale: overall figure scale.
            metric: 'w' (raw weights), 'act' (forward-scaled weights)
                    or 'fa' (attribution scores).
        """
        import matplotlib.pyplot as plt  # lazy: plotting-only dependency

        if metric == 'fa':
            self.attribute()

        depth = self.depth
        y0 = 0.5
        fig, ax = plt.subplots(figsize=(3 * scale, 3 * y0 * depth * scale))
        shp = self.width

        min_spacing = 1 / max(self.width)
        for j in range(len(shp)):
            N = shp[j]
            for i in range(N):
                plt.scatter(1 / (2 * N) + i / N, j * y0,
                            s=min_spacing ** 2 * 5000 * scale ** 2, color='black')

        plt.ylim(-0.1 * y0, y0 * depth + 0.1 * y0)
        plt.xlim(-0.02, 1.02)

        linears = self.linears

        for ii in range(len(linears)):
            linear = linears[ii]
            p = linear.weight
            p_shp = p.shape

            if metric == 'w':
                pass
            elif metric == 'act':
                p = self.wa_forward[ii]
            elif metric == 'fa':
                p = self.wa_backward[ii]
            else:
                raise Exception('metric = \'{}\' not recognized. Choices are \'w\', \'act\', \'fa\'.'.format(metric))
            for i in range(p_shp[0]):
                for j in range(p_shp[1]):
                    plt.plot([1 / (2 * p_shp[0]) + i / p_shp[0], 1 / (2 * p_shp[1]) + j / p_shp[1]],
                             [y0 * (ii + 1), y0 * ii],
                             lw=0.5 * scale,
                             alpha=np.tanh(beta * np.abs(p[i, j].cpu().detach().numpy())),
                             color="blue" if p[i, j] > 0 else "red")

        ax.axis('off')

    def reg(self, reg_metric, lamb_l1, lamb_entropy):
        """L1 + entropy regularization on the chosen edge/node statistic.

        Args:
            reg_metric: 'w', 'act', 'fa' (2D edge stats) or 'a' (1D node stats).
            lamb_l1: weight of the L1 term.
            lamb_entropy: weight of the entropy term.

        Raises:
            Exception: if ``reg_metric`` is not one of the four choices
                (previously this surfaced as a confusing NameError).
        """
        if reg_metric == 'w':
            acts_scale = self.w
        elif reg_metric == 'act':
            acts_scale = self.wa_forward
        elif reg_metric == 'fa':
            acts_scale = self.wa_backward
        elif reg_metric == 'a':
            acts_scale = self.acts_scale
        else:
            raise Exception('reg_metric = \'{}\' not recognized. Choices are \'w\', \'act\', \'fa\', \'a\'.'.format(reg_metric))

        if len(acts_scale[0].shape) == 2:
            reg_ = 0.
            for vec in acts_scale:
                vec = torch.abs(vec)
                l1 = torch.sum(vec)
                # row/column-normalized distributions; +1 in the denominator
                # keeps the entropy term bounded for near-zero rows/columns
                p_row = vec / (torch.sum(vec, dim=1, keepdim=True) + 1)
                p_col = vec / (torch.sum(vec, dim=0, keepdim=True) + 1)
                entropy_row = - torch.mean(torch.sum(p_row * torch.log2(p_row + 1e-4), dim=1))
                entropy_col = - torch.mean(torch.sum(p_col * torch.log2(p_col + 1e-4), dim=0))
                reg_ += lamb_l1 * l1 + lamb_entropy * (entropy_row + entropy_col)

        elif len(acts_scale[0].shape) == 1:
            reg_ = 0.
            for vec in acts_scale:
                vec = torch.abs(vec)
                l1 = torch.sum(vec)
                p = vec / (torch.sum(vec) + 1)
                entropy = - torch.sum(p * torch.log2(p + 1e-4))
                reg_ += lamb_l1 * l1 + lamb_entropy * entropy

        return reg_

    def get_reg(self, reg_metric, lamb_l1, lamb_entropy):
        """Alias for :meth:`reg`, kept for API parity with KAN."""
        return self.reg(reg_metric, lamb_l1, lamb_entropy)

    def fit(self, dataset, opt="LBFGS", steps=100, log=1, lamb=0., lamb_l1=1., lamb_entropy=2., loss_fn=None, lr=1., batch=-1,
            metrics=None, in_vars=None, out_vars=None, beta=3, device='cpu', reg_metric='w', display_metrics=None):
        """Train the model with Adam or LBFGS.

        Args:
            dataset: dict with 'train_input', 'train_label', 'test_input', 'test_label'.
            opt: "Adam" or "LBFGS".
            steps: number of optimization steps.
            log: progress-bar description update interval.
            lamb: overall regularization strength (0 disables activation caching).
            lamb_l1 / lamb_entropy: regularization sub-weights, see :meth:`reg`.
            loss_fn: loss callable; defaults to MSE.
            lr: learning rate.
            batch: batch size; -1 means full batch.
            metrics: optional list of callables recorded by __name__ each step.
            reg_metric: statistic used for regularization, see :meth:`reg`.
            display_metrics: metric names to show in the progress bar.

        Returns:
            dict with per-step 'train_loss', 'test_loss' (RMSE), 'reg', and
            one entry per extra metric.
        """
        from tqdm import tqdm       # lazy: progress-bar dependency
        from .LBFGS import LBFGS    # lazy: package-local optimizer

        if lamb > 0. and not self.save_act:
            print('setting lamb=0. If you want to set lamb > 0, set save_act=True')

        old_save_act = self.save_act
        if lamb == 0.:
            self.save_act = False

        pbar = tqdm(range(steps), desc='description', ncols=100)

        if loss_fn is None:
            loss_fn = loss_fn_eval = lambda x, y: torch.mean((x - y) ** 2)
        else:
            loss_fn = loss_fn_eval = loss_fn

        if opt == "Adam":
            optimizer = torch.optim.Adam(self.parameters(), lr=lr)
        elif opt == "LBFGS":
            optimizer = LBFGS(self.parameters(), lr=lr, history_size=10, line_search_fn="strong_wolfe", tolerance_grad=1e-32, tolerance_change=1e-32, tolerance_ys=1e-32)

        results = {'train_loss': [], 'test_loss': [], 'reg': []}
        if metrics is not None:
            for metric in metrics:
                results[metric.__name__] = []

        n_train = dataset['train_input'].shape[0]
        n_test = dataset['test_input'].shape[0]
        if batch == -1 or batch > n_train:
            batch_size = n_train
            batch_size_test = n_test
        else:
            batch_size = batch
            batch_size_test = batch

        # closure-shared state (replaces the former module-level globals)
        train_loss = None
        reg_ = None
        train_id = None

        def closure():
            nonlocal train_loss, reg_
            optimizer.zero_grad()
            pred = self.forward(dataset['train_input'][train_id].to(self.device))
            train_loss = loss_fn(pred, dataset['train_label'][train_id].to(self.device))
            if self.save_act:
                if reg_metric == 'fa':
                    self.attribute()
                reg_ = self.get_reg(reg_metric, lamb_l1, lamb_entropy)
            else:
                reg_ = torch.tensor(0.)
            objective = train_loss + lamb * reg_
            objective.backward()
            return objective

        for step_i in pbar:
            # re-enable activation caching on the final step so the caches
            # reflect the trained model
            if step_i == steps - 1 and old_save_act:
                self.save_act = True

            train_id = np.random.choice(n_train, batch_size, replace=False)
            test_id = np.random.choice(n_test, batch_size_test, replace=False)

            if opt == "LBFGS":
                optimizer.step(closure)

            if opt == "Adam":
                pred = self.forward(dataset['train_input'][train_id].to(self.device))
                train_loss = loss_fn(pred, dataset['train_label'][train_id].to(self.device))
                if self.save_act:
                    reg_ = self.get_reg(reg_metric, lamb_l1, lamb_entropy)
                else:
                    reg_ = torch.tensor(0.)
                loss = train_loss + lamb * reg_
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            test_loss = loss_fn_eval(self.forward(dataset['test_input'][test_id].to(self.device)),
                                     dataset['test_label'][test_id].to(self.device))

            if metrics is not None:
                for metric in metrics:
                    results[metric.__name__].append(metric().item())

            results['train_loss'].append(torch.sqrt(train_loss).cpu().detach().numpy())
            results['test_loss'].append(torch.sqrt(test_loss).cpu().detach().numpy())
            results['reg'].append(reg_.cpu().detach().numpy())

            if step_i % log == 0:
                if display_metrics is None:
                    pbar.set_description("| train_loss: %.2e | test_loss: %.2e | reg: %.2e | " % (
                        torch.sqrt(train_loss).cpu().detach().numpy(),
                        torch.sqrt(test_loss).cpu().detach().numpy(),
                        reg_.cpu().detach().numpy()))
                else:
                    string = ''
                    data = ()
                    for metric in display_metrics:
                        string += f' {metric}: %.2e |'
                        if metric not in results:
                            raise Exception(f'{metric} not recognized')
                        data += (results[metric][-1],)
                    pbar.set_description(string % data)

        return results

    @property
    def connection_cost(self):
        """Sum over all edges of |weight| * distance between the unit
        positions used by :meth:`plot`; minimized by :meth:`auto_swap`."""
        with torch.no_grad():
            cc = 0.
            for linear in self.linears:
                t = torch.abs(linear.weight)

                def get_coordinate(n):
                    # evenly spaced positions in (0, 1), matching plot()
                    return torch.linspace(0, 1, steps=n + 1, device=self.device)[:n] + 1 / (2 * n)

                # NOTE(review): rows of `weight` are output units; the naming
                # below follows the original layout convention — confirm
                # against plot() if positions ever change.
                row_dim = t.shape[0]
                x_row = get_coordinate(row_dim)

                col_dim = t.shape[1]
                x_col = get_coordinate(col_dim)

                dist = torch.abs(x_row[:, None] - x_col[None, :])
                cc += torch.sum(dist * t)

            return cc

    def swap(self, l, i1, i2):
        """Swap neurons ``i1`` and ``i2`` in layer ``l``: incoming weight
        rows and bias entries of layer l-1, and outgoing weight columns of
        layer l. In-place on the parameter ``.data``."""
        def swap_row(data, i1, i2):
            data[i1], data[i2] = data[i2].clone(), data[i1].clone()

        def swap_col(data, i1, i2):
            data[:, i1], data[:, i2] = data[:, i2].clone(), data[:, i1].clone()

        swap_row(self.linears[l - 1].weight.data, i1, i2)
        swap_row(self.linears[l - 1].bias.data, i1, i2)
        swap_col(self.linears[l].weight.data, i1, i2)

    def auto_swap_l(self, l):
        """Greedy per-neuron permutation of layer ``l`` that lowers
        :attr:`connection_cost`."""
        num = self.width[l]
        for i in range(num):
            ccs = []
            for j in range(num):
                # trial swap, measure cost, swap back
                self.swap(l, i, j)
                self.get_act()
                self.attribute()
                ccs.append(self.connection_cost.detach().clone())
                self.swap(l, i, j)
            best = torch.argmin(torch.tensor(ccs))
            self.swap(l, i, best)

    def auto_swap(self):
        """Apply :meth:`auto_swap_l` to every hidden layer."""
        for l in range(1, self.depth):
            self.auto_swap_l(l)

    def tree(self, x=None, in_var=None, style='tree', sym_th=1e-3, sep_th=1e-1, skip_sep_test=False, verbose=False):
        """Plot the tree decomposition of the learned function.

        Falls back to ``self.cache_data`` when no input is given.
        """
        # fix: plot_tree was referenced but never imported in this module
        from .hypothesis import plot_tree

        if x is None:
            x = self.cache_data
        plot_tree(self, x, in_var=in_var, style=style, sym_th=sym_th,
                  sep_th=sep_th, skip_sep_test=skip_sep_test, verbose=verbose)
+import copy +#from .MultKANLayer import MultKANLayer +import pandas as pd +from sympy.printing import latex +from sympy import * +import sympy +import yaml +from .spline import curve2coef +from .utils import SYMBOLIC_LIB +from .hypothesis import plot_tree + +class MultKAN(nn.Module): + ''' + KAN class + + Attributes: + ----------- + grid : int + the number of grid intervals + k : int + spline order + act_fun : a list of KANLayers + symbolic_fun: a list of Symbolic_KANLayer + depth : int + depth of KAN + width : list + number of neurons in each layer. + Without multiplication nodes, [2,5,5,3] means 2D inputs, 3D outputs, with 2 layers of 5 hidden neurons. + With multiplication nodes, [2,[5,3],[5,1],3] means besides the [2,5,53] KAN, there are 3 (1) mul nodes in layer 1 (2). + mult_arity : int, or list of int lists + multiplication arity for each multiplication node (the number of numbers to be multiplied) + grid : int + the number of grid intervals + k : int + the order of piecewise polynomial + base_fun : fun + residual function b(x). an activation function phi(x) = sb_scale * b(x) + sp_scale * spline(x) + symbolic_fun : a list of Symbolic_KANLayer + Symbolic_KANLayers + symbolic_enabled : bool + If False, the symbolic front is not computed (to save time). Default: True. 
+ width_in : list + The number of input neurons for each layer + width_out : list + The number of output neurons for each layer + base_fun_name : str + The base function b(x) + grip_eps : float + The parameter that interpolates between uniform grid and adaptive grid (based on sample quantile) + node_bias : a list of 1D torch.float + node_scale : a list of 1D torch.float + subnode_bias : a list of 1D torch.float + subnode_scale : a list of 1D torch.float + symbolic_enabled : bool + when symbolic_enabled = False, the symbolic branch (symbolic_fun) will be ignored in computation (set to zero) + affine_trainable : bool + indicate whether affine parameters are trainable (node_bias, node_scale, subnode_bias, subnode_scale) + sp_trainable : bool + indicate whether the overall magnitude of splines is trainable + sb_trainable : bool + indicate whether the overall magnitude of base function is trainable + save_act : bool + indicate whether intermediate activations are saved in forward pass + node_scores : None or list of 1D torch.float + node attribution score + edge_scores : None or list of 2D torch.float + edge attribution score + subnode_scores : None or list of 1D torch.float + subnode attribution score + cache_data : None or 2D torch.float + cached input data + acts : None or a list of 2D torch.float + activations on nodes + auto_save : bool + indicate whether to automatically save a checkpoint once the model is modified + state_id : int + the state of the model (used to save checkpoint) + ckpt_path : str + the folder to store checkpoints + round : int + the number of times rewind() has been called + device : str + ''' + def __init__(self, width=None, grid=3, k=3, mult_arity = 2, noise_scale=0.3, scale_base_mu=0.0, scale_base_sigma=1.0, base_fun='silu', symbolic_enabled=True, affine_trainable=False, grid_eps=0.02, grid_range=[-1, 1], sp_trainable=True, sb_trainable=True, seed=1, save_act=True, sparse_init=False, auto_save=True, first_init=True, ckpt_path='./model', 
state_id=0, round=0, device='cpu'): + ''' + initalize a KAN model + + Args: + ----- + width : list of int + Without multiplication nodes: :math:`[n_0, n_1, .., n_{L-1}]` specify the number of neurons in each layer (including inputs/outputs) + With multiplication nodes: :math:`[[n_0,m_0=0], [n_1,m_1], .., [n_{L-1},m_{L-1}]]` specify the number of addition/multiplication nodes in each layer (including inputs/outputs) + grid : int + number of grid intervals. Default: 3. + k : int + order of piecewise polynomial. Default: 3. + mult_arity : int, or list of int lists + multiplication arity for each multiplication node (the number of numbers to be multiplied) + noise_scale : float + initial injected noise to spline. + base_fun : str + the residual function b(x). Default: 'silu' + symbolic_enabled : bool + compute (True) or skip (False) symbolic computations (for efficiency). By default: True. + affine_trainable : bool + affine parameters are updated or not. Affine parameters include node_scale, node_bias, subnode_scale, subnode_bias + grid_eps : float + When grid_eps = 1, the grid is uniform; when grid_eps = 0, the grid is partitioned using percentiles of samples. 0 < grid_eps < 1 interpolates between the two extremes. + grid_range : list/np.array of shape (2,)) + setting the range of grids. Default: [-1,1]. This argument is not important if fit(update_grid=True) (by default updata_grid=True) + sp_trainable : bool + If true, scale_sp is trainable. Default: True. + sb_trainable : bool + If true, scale_base is trainable. Default: True. + device : str + device + seed : int + random seed + save_act : bool + indicate whether intermediate activations are saved in forward pass + sparse_init : bool + sparse initialization (True) or normal dense initialization. Default: False. 
+ auto_save : bool + indicate whether to automatically save a checkpoint once the model is modified + state_id : int + the state of the model (used to save checkpoint) + ckpt_path : str + the folder to store checkpoints. Default: './model' + round : int + the number of times rewind() has been called + device : str + + Returns: + -------- + self + + Example + ------- + >>> from kan import * + >>> model = KAN(width=[2,5,1], grid=5, k=3, seed=0) + checkpoint directory created: ./model + saving model version 0.0 + ''' + super(MultKAN, self).__init__() + + torch.manual_seed(seed) + np.random.seed(seed) + random.seed(seed) + + ### initializeing the numerical front ### + + self.act_fun = [] + self.depth = len(width) - 1 + + #print('haha1', width) + for i in range(len(width)): + #print(type(width[i]), type(width[i]) == int) + if type(width[i]) == int or type(width[i]) == np.int64: + width[i] = [width[i],0] + + #print('haha2', width) + + self.width = width + + # if mult_arity is just a scalar, we extend it to a list of lists + # e.g, mult_arity = [[2,3],[4]] means that in the first hidden layer, 2 mult ops have arity 2 and 3, respectively; + # in the second hidden layer, 1 mult op has arity 4. + if isinstance(mult_arity, int): + self.mult_homo = True # when homo is True, parallelization is possible + else: + self.mult_homo = False # when home if False, for loop is required. + self.mult_arity = mult_arity + + width_in = self.width_in + width_out = self.width_out + + self.base_fun_name = base_fun + if base_fun == 'silu': + base_fun = torch.nn.SiLU() + elif base_fun == 'identity': + base_fun = torch.nn.Identity() + elif base_fun == 'zero': + base_fun = lambda x: x*0. 
+ + self.grid_eps = grid_eps + self.grid_range = grid_range + + + for l in range(self.depth): + # splines + if isinstance(grid, list): + grid_l = grid[l] + else: + grid_l = grid + + if isinstance(k, list): + k_l = k[l] + else: + k_l = k + + + sp_batch = KANLayer(in_dim=width_in[l], out_dim=width_out[l+1], num=grid_l, k=k_l, noise_scale=noise_scale, scale_base_mu=scale_base_mu, scale_base_sigma=scale_base_sigma, scale_sp=1., base_fun=base_fun, grid_eps=grid_eps, grid_range=grid_range, sp_trainable=sp_trainable, sb_trainable=sb_trainable, sparse_init=sparse_init) + self.act_fun.append(sp_batch) + + self.node_bias = [] + self.node_scale = [] + self.subnode_bias = [] + self.subnode_scale = [] + + globals()['self.node_bias_0'] = torch.nn.Parameter(torch.zeros(3,1)).requires_grad_(False) + exec('self.node_bias_0' + " = torch.nn.Parameter(torch.zeros(3,1)).requires_grad_(False)") + + for l in range(self.depth): + exec(f'self.node_bias_{l} = torch.nn.Parameter(torch.zeros(width_in[l+1])).requires_grad_(affine_trainable)') + exec(f'self.node_scale_{l} = torch.nn.Parameter(torch.ones(width_in[l+1])).requires_grad_(affine_trainable)') + exec(f'self.subnode_bias_{l} = torch.nn.Parameter(torch.zeros(width_out[l+1])).requires_grad_(affine_trainable)') + exec(f'self.subnode_scale_{l} = torch.nn.Parameter(torch.ones(width_out[l+1])).requires_grad_(affine_trainable)') + exec(f'self.node_bias.append(self.node_bias_{l})') + exec(f'self.node_scale.append(self.node_scale_{l})') + exec(f'self.subnode_bias.append(self.subnode_bias_{l})') + exec(f'self.subnode_scale.append(self.subnode_scale_{l})') + + + self.act_fun = nn.ModuleList(self.act_fun) + + self.grid = grid + self.k = k + self.base_fun = base_fun + + ### initializing the symbolic front ### + self.symbolic_fun = [] + for l in range(self.depth): + sb_batch = Symbolic_KANLayer(in_dim=width_in[l], out_dim=width_out[l+1]) + self.symbolic_fun.append(sb_batch) + + self.symbolic_fun = nn.ModuleList(self.symbolic_fun) + 
self.symbolic_enabled = symbolic_enabled + self.affine_trainable = affine_trainable + self.sp_trainable = sp_trainable + self.sb_trainable = sb_trainable + + self.save_act = save_act + + self.node_scores = None + self.edge_scores = None + self.subnode_scores = None + + self.cache_data = None + self.acts = None + + self.auto_save = auto_save + self.state_id = 0 + self.ckpt_path = ckpt_path + self.round = round + + self.device = device + self.to(device) + + if auto_save: + if first_init: + if not os.path.exists(ckpt_path): + # Create the directory + os.makedirs(ckpt_path) + print(f"checkpoint directory created: {ckpt_path}") + print('saving model version 0.0') + + history_path = self.ckpt_path+'/history.txt' + with open(history_path, 'w') as file: + file.write(f'### Round {self.round} ###' + '\n') + file.write('init => 0.0' + '\n') + self.saveckpt(path=self.ckpt_path+'/'+'0.0') + else: + self.state_id = state_id + + self.input_id = torch.arange(self.width_in[0],) + + def to(self, device): + ''' + move the model to device + + Args: + ----- + device : str or device + + Returns: + -------- + self + + Example + ------- + >>> from kan import * + >>> device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + >>> model = KAN(width=[2,5,1], grid=5, k=3, seed=0) + >>> model.to(device) + ''' + super(MultKAN, self).to(device) + self.device = device + + for kanlayer in self.act_fun: + kanlayer.to(device) + + for symbolic_kanlayer in self.symbolic_fun: + symbolic_kanlayer.to(device) + + return self + + @property + def width_in(self): + ''' + The number of input nodes for each layer + ''' + width = self.width + width_in = [width[l][0]+width[l][1] for l in range(len(width))] + return width_in + + @property + def width_out(self): + ''' + The number of output subnodes for each layer + ''' + width = self.width + if self.mult_homo == True: + width_out = [width[l][0]+self.mult_arity*width[l][1] for l in range(len(width))] + else: + width_out = 
[width[l][0]+int(np.sum(self.mult_arity[l])) for l in range(len(width))] + return width_out + + @property + def n_sum(self): + ''' + The number of addition nodes for each layer + ''' + width = self.width + n_sum = [width[l][0] for l in range(1,len(width)-1)] + return n_sum + + @property + def n_mult(self): + ''' + The number of multiplication nodes for each layer + ''' + width = self.width + n_mult = [width[l][1] for l in range(1,len(width)-1)] + return n_mult + + @property + def feature_score(self): + ''' + attribution scores for inputs + ''' + self.attribute() + if self.node_scores == None: + return None + else: + return self.node_scores[0] + + def initialize_from_another_model(self, another_model, x): + ''' + initialize from another model of the same width, but their 'grid' parameter can be different. + Note this is equivalent to refine() when we don't want to keep another_model + + Args: + ----- + another_model : MultKAN + x : 2D torch.float + + Returns: + -------- + self + + Example + ------- + >>> from kan import * + >>> model1 = KAN(width=[2,5,1], grid=3) + >>> model2 = KAN(width=[2,5,1], grid=10) + >>> x = torch.rand(100,2) + >>> model2.initialize_from_another_model(model1, x) + ''' + another_model(x) # get activations + batch = x.shape[0] + + self.initialize_grid_from_another_model(another_model, x) + + for l in range(self.depth): + spb = self.act_fun[l] + #spb_parent = another_model.act_fun[l] + + # spb = spb_parent + preacts = another_model.spline_preacts[l] + postsplines = another_model.spline_postsplines[l] + self.act_fun[l].coef.data = curve2coef(preacts[:,0,:], postsplines.permute(0,2,1), spb.grid, k=spb.k) + self.act_fun[l].scale_base.data = another_model.act_fun[l].scale_base.data + self.act_fun[l].scale_sp.data = another_model.act_fun[l].scale_sp.data + self.act_fun[l].mask.data = another_model.act_fun[l].mask.data + + for l in range(self.depth): + self.node_bias[l].data = another_model.node_bias[l].data + self.node_scale[l].data = 
another_model.node_scale[l].data + + self.subnode_bias[l].data = another_model.subnode_bias[l].data + self.subnode_scale[l].data = another_model.subnode_scale[l].data + + for l in range(self.depth): + self.symbolic_fun[l] = another_model.symbolic_fun[l] + + return self.to(self.device) + + def log_history(self, method_name): + + if self.auto_save: + + # save to log file + #print(func.__name__) + with open(self.ckpt_path+'/history.txt', 'a') as file: + file.write(str(self.round)+'.'+str(self.state_id)+' => '+ method_name + ' => ' + str(self.round)+'.'+str(self.state_id+1) + '\n') + + # update state_id + self.state_id += 1 + + # save to ckpt + self.saveckpt(path=self.ckpt_path+'/'+str(self.round)+'.'+str(self.state_id)) + print('saving model version '+str(self.round)+'.'+str(self.state_id)) + + + def refine(self, new_grid): + ''' + grid refinement + + Args: + ----- + new_grid : init + the number of grid intervals after refinement + + Returns: + -------- + a refined model : MultKAN + + Example + ------- + >>> from kan import * + >>> device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + >>> model = KAN(width=[2,5,1], grid=5, k=3, seed=0) + >>> print(model.grid) + >>> x = torch.rand(100,2) + >>> model.get_act(x) + >>> model = model.refine(10) + >>> print(model.grid) + checkpoint directory created: ./model + saving model version 0.0 + 5 + saving model version 0.1 + 10 + ''' + + model_new = MultKAN(width=self.width, + grid=new_grid, + k=self.k, + mult_arity=self.mult_arity, + base_fun=self.base_fun_name, + symbolic_enabled=self.symbolic_enabled, + affine_trainable=self.affine_trainable, + grid_eps=self.grid_eps, + grid_range=self.grid_range, + sp_trainable=self.sp_trainable, + sb_trainable=self.sb_trainable, + ckpt_path=self.ckpt_path, + auto_save=True, + first_init=False, + state_id=self.state_id, + round=self.round, + device=self.device) + + model_new.initialize_from_another_model(self, self.cache_data) + model_new.cache_data = self.cache_data + 
model_new.grid = new_grid + + self.log_history('refine') + model_new.state_id += 1 + + return model_new.to(self.device) + + + def saveckpt(self, path='model'): + ''' + save the current model to files (configuration file and state file) + + Args: + ----- + path : str + the path where checkpoints are saved + + Returns: + -------- + None + + Example + ------- + >>> from kan import * + >>> device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + >>> model = KAN(width=[2,5,1], grid=5, k=3, seed=0) + >>> model.saveckpt('./mark') + # There will be three files appearing in the current folder: mark_cache_data, mark_config.yml, mark_state + ''' + + model = self + + dic = dict( + width = model.width, + grid = model.grid, + k = model.k, + mult_arity = model.mult_arity, + base_fun_name = model.base_fun_name, + symbolic_enabled = model.symbolic_enabled, + affine_trainable = model.affine_trainable, + grid_eps = model.grid_eps, + grid_range = model.grid_range, + sp_trainable = model.sp_trainable, + sb_trainable = model.sb_trainable, + state_id = model.state_id, + auto_save = model.auto_save, + ckpt_path = model.ckpt_path, + round = model.round, + device = str(model.device) + ) + + for i in range (model.depth): + dic[f'symbolic.funs_name.{i}'] = model.symbolic_fun[i].funs_name + + with open(f'{path}_config.yml', 'w') as outfile: + yaml.dump(dic, outfile, default_flow_style=False) + + torch.save(model.state_dict(), f'{path}_state') + torch.save(model.cache_data, f'{path}_cache_data') + + @staticmethod + def loadckpt(path='model'): + ''' + load checkpoint from path + + Args: + ----- + path : str + the path where checkpoints are saved + + Returns: + -------- + MultKAN + + Example + ------- + >>> from kan import * + >>> model = KAN(width=[2,5,1], grid=5, k=3, seed=0) + >>> model.saveckpt('./mark') + >>> KAN.loadckpt('./mark') + ''' + with open(f'{path}_config.yml', 'r') as stream: + config = yaml.safe_load(stream) + + state = torch.load(f'{path}_state') + + model_load = 
MultKAN(width=config['width'], + grid=config['grid'], + k=config['k'], + mult_arity = config['mult_arity'], + base_fun=config['base_fun_name'], + symbolic_enabled=config['symbolic_enabled'], + affine_trainable=config['affine_trainable'], + grid_eps=config['grid_eps'], + grid_range=config['grid_range'], + sp_trainable=config['sp_trainable'], + sb_trainable=config['sb_trainable'], + state_id=config['state_id'], + auto_save=config['auto_save'], + first_init=False, + ckpt_path=config['ckpt_path'], + round = config['round']+1, + device = config['device']) + + model_load.load_state_dict(state) + model_load.cache_data = torch.load(f'{path}_cache_data') + + depth = len(model_load.width) - 1 + for l in range(depth): + out_dim = model_load.symbolic_fun[l].out_dim + in_dim = model_load.symbolic_fun[l].in_dim + funs_name = config[f'symbolic.funs_name.{l}'] + for j in range(out_dim): + for i in range(in_dim): + fun_name = funs_name[j][i] + model_load.symbolic_fun[l].funs_name[j][i] = fun_name + model_load.symbolic_fun[l].funs[j][i] = SYMBOLIC_LIB[fun_name][0] + model_load.symbolic_fun[l].funs_sympy[j][i] = SYMBOLIC_LIB[fun_name][1] + model_load.symbolic_fun[l].funs_avoid_singularity[j][i] = SYMBOLIC_LIB[fun_name][3] + return model_load + + def copy(self): + ''' + deepcopy + + Args: + ----- + path : str + the path where checkpoints are saved + + Returns: + -------- + MultKAN + + Example + ------- + >>> from kan import * + >>> model = KAN(width=[1,1], grid=5, k=3, seed=0) + >>> model2 = model.copy() + >>> model2.act_fun[0].coef.data *= 2 + >>> print(model2.act_fun[0].coef.data) + >>> print(model.act_fun[0].coef.data) + ''' + path='copy_temp' + self.saveckpt(path) + return KAN.loadckpt(path) + + def rewind(self, model_id): + ''' + rewind to an old version + + Args: + ----- + model_id : str + in format '{a}.{b}' where a is the round number, b is the version number in that round + + Returns: + -------- + MultKAN + + Example + ------- + Please refer to tutorials. 
API 12: Checkpoint, save & load model + ''' + self.round += 1 + self.state_id = model_id.split('.')[-1] + + history_path = self.ckpt_path+'/history.txt' + with open(history_path, 'a') as file: + file.write(f'### Round {self.round} ###' + '\n') + + self.saveckpt(path=self.ckpt_path+'/'+f'{self.round}.{self.state_id}') + + print('rewind to model version '+f'{self.round-1}.{self.state_id}'+', renamed as '+f'{self.round}.{self.state_id}') + + return MultKAN.loadckpt(path=self.ckpt_path+'/'+str(model_id)) + + + def checkout(self, model_id): + ''' + check out an old version + + Args: + ----- + model_id : str + in format '{a}.{b}' where a is the round number, b is the version number in that round + + Returns: + -------- + MultKAN + + Example + ------- + Same use as rewind, although checkout doesn't change states + ''' + return MultKAN.loadckpt(path=self.ckpt_path+'/'+str(model_id)) + + def update_grid_from_samples(self, x): + ''' + update grid from samples + + Args: + ----- + x : 2D torch.tensor + inputs + + Returns: + -------- + None + + Example + ------- + >>> from kan import * + >>> model = KAN(width=[1,1], grid=5, k=3, seed=0) + >>> print(model.act_fun[0].grid) + >>> x = torch.linspace(-10,10,steps=101)[:,None] + >>> model.update_grid_from_samples(x) + >>> print(model.act_fun[0].grid) + ''' + for l in range(self.depth): + self.get_act(x) + self.act_fun[l].update_grid_from_samples(self.acts[l]) + + def update_grid(self, x): + ''' + call update_grid_from_samples. 
    def initialize_grid_from_another_model(self, model, x):
        '''
        initialize grid from another model

        Args:
        -----
            model : MultKAN
                parent model
            x : 2D torch.tensor
                inputs

        Returns:
        --------
            None

        Example
        -------
        >>> from kan import *
        >>> model = KAN(width=[1,1], grid=5, k=3, seed=0)
        >>> print(model.act_fun[0].grid)
        >>> x = torch.linspace(-10,10,steps=101)[:,None]
        >>> model2 = KAN(width=[1,1], grid=10, k=3, seed=0)
        >>> model2.initialize_grid_from_another_model(model, x)
        >>> print(model2.act_fun[0].grid)
        '''
        # run the parent once so model.acts (per-layer inputs) is populated
        model(x)
        for l in range(self.depth):
            # each KANLayer adapts its grid to the parent layer's grid + activations
            self.act_fun[l].initialize_grid_from_parent(model.act_fun[l], model.acts[l])

    def forward(self, x, singularity_avoiding=False, y_th=10.):
        '''
        forward pass

        Args:
        -----
            x : 2D torch.tensor
                inputs
            singularity_avoiding : bool
                whether to avoid singularity for the symbolic branch
            y_th : float
                the threshold for singularity

        Returns:
        --------
            2D torch.tensor, the network output

        Example1
        --------
        >>> from kan import *
        >>> model = KAN(width=[2,5,1], grid=5, k=3, seed=0)
        >>> x = torch.rand(100,2)
        >>> model(x).shape

        Example2
        --------
        >>> from kan import *
        >>> model = KAN(width=[1,1], grid=5, k=3, seed=0)
        >>> x = torch.tensor([[1],[-0.01]])
        >>> model.fix_symbolic(0,0,0,'log',fit_params_bool=False)
        >>> print(model(x))
        >>> print(model(x, singularity_avoiding=True))
        >>> print(model(x, singularity_avoiding=True, y_th=1.))
        '''
        # keep only the (possibly pruned) input features
        x = x[:,self.input_id.long()]
        assert x.shape[1] == self.width_in[0]

        # cache data so later calls (plot/prune) can re-run the forward pass
        self.cache_data = x

        self.acts = []  # shape ([batch, n0], [batch, n1], ..., [batch, n_L])
        self.acts_premult = []
        self.spline_preacts = []
        self.spline_postsplines = []
        self.spline_postacts = []
        self.acts_scale = []
        self.acts_scale_spline = []
        self.subnode_actscale = []
        self.edge_actscale = []
        # self.neurons_scale = []

        self.acts.append(x)  # acts shape: (batch, width[l])

        for l in range(self.depth):

            # numerical (spline) branch of layer l
            x_numerical, preacts, postacts_numerical, postspline = self.act_fun[l](x)
            #print(preacts, postacts_numerical, postspline)

            if self.symbolic_enabled == True:
                # symbolic branch; its contribution is summed with the spline branch
                x_symbolic, postacts_symbolic = self.symbolic_fun[l](x, singularity_avoiding=singularity_avoiding, y_th=y_th)
            else:
                x_symbolic = 0.
                postacts_symbolic = 0.

            x = x_numerical + x_symbolic

            if self.save_act:
                # save subnode_scale (per-subnode std before the affine transform)
                self.subnode_actscale.append(torch.std(x, dim=0).detach())

            # subnode affine transform
            x = self.subnode_scale[l][None,:] * x + self.subnode_bias[l][None,:]

            if self.save_act:
                postacts = postacts_numerical + postacts_symbolic

                # self.neurons_scale.append(torch.mean(torch.abs(x), dim=0))
                #grid_reshape = self.act_fun[l].grid.reshape(self.width_out[l + 1], self.width_in[l], -1)
                input_range = torch.std(preacts, dim=0) + 0.1
                output_range_spline = torch.std(postacts_numerical, dim=0)  # for training, only penalize the spline part
                output_range = torch.std(postacts, dim=0)  # for visualization, include the contribution from both spline + symbolic
                # save edge_scale
                self.edge_actscale.append(output_range)

                self.acts_scale.append((output_range / input_range).detach())
                self.acts_scale_spline.append(output_range_spline / input_range)
                self.spline_preacts.append(preacts.detach())
                self.spline_postacts.append(postacts.detach())
                self.spline_postsplines.append(postspline.detach())

                self.acts_premult.append(x.detach())

            # multiplication: the last dim_mult node groups of the next layer are
            # products of consecutive subnodes rather than plain sums
            dim_sum = self.width[l+1][0]
            dim_mult = self.width[l+1][1]

            if self.mult_homo == True:
                # homogeneous arity: every mult node multiplies self.mult_arity subnodes,
                # done with strided slices, one factor per iteration
                for i in range(self.mult_arity-1):
                    if i == 0:
                        x_mult = x[:,dim_sum::self.mult_arity] * x[:,dim_sum+1::self.mult_arity]
                    else:
                        x_mult = x_mult * x[:,dim_sum+i+1::self.mult_arity]

            else:
                # heterogeneous arity: walk the subnodes of each mult node explicitly
                for j in range(dim_mult):
                    acml_id = dim_sum + np.sum(self.mult_arity[l+1][:j])
                    for i in range(self.mult_arity[l+1][j]-1):
                        if i == 0:
                            x_mult_j = x[:,[acml_id]] * x[:,[acml_id+1]]
                        else:
                            x_mult_j = x_mult_j * x[:,[acml_id+i+1]]

                    if j == 0:
                        x_mult = x_mult_j
                    else:
                        x_mult = torch.cat([x_mult, x_mult_j], dim=1)

            if self.width[l+1][1] > 0:
                # replace the multiplied subnodes by their products
                x = torch.cat([x[:,:dim_sum], x_mult], dim=1)

            # x = x + self.biases[l].weight
            # node affine transform
            x = self.node_scale[l][None,:] * x + self.node_bias[l][None,:]

            self.acts.append(x.detach())

        return x

    def set_mode(self, l, i, j, mode, mask_n=None):
        # Toggle which branch (numerical spline "n" / symbolic "s") is active on
        # edge (l,i,j) by writing the two masks.
        # NOTE index order: act_fun mask is [in][out], symbolic mask is [out, in].
        if mode == "s":
            mask_n = 0.;
            mask_s = 1.
        elif mode == "n":
            mask_n = 1.;
            mask_s = 0.
        elif mode == "sn" or mode == "ns":
            # both branches on; caller may supply a custom numerical mask value
            if mask_n == None:
                mask_n = 1.
            else:
                mask_n = mask_n
            mask_s = 1.
        else:
            # any other mode string switches the edge off entirely
            mask_n = 0.;
            mask_s = 0.

        self.act_fun[l].mask.data[i][j] = mask_n
        self.symbolic_fun[l].mask.data[j,i] = mask_s
    def fix_symbolic(self, l, i, j, fun_name, fit_params_bool=True, a_range=(-10, 10), b_range=(-10, 10), verbose=True, random=False, log_history=True):
        '''
        set (l,i,j) activation to be symbolic (specified by fun_name)

        Args:
        -----
            l : int
                layer index
            i : int
                input neuron index
            j : int
                output neuron index
            fun_name : str
                function name
            fit_params_bool : bool
                obtaining affine parameters through fitting (True) or setting default values (False)
            a_range : tuple
                sweeping range of a
            b_range : tuple
                sweeping range of b
            verbose : bool
                If True, more information is printed.
            random : bool
                initialize affine parameteres randomly or as [1,0,1,0]
            log_history : bool
                indicate whether to log history when the function is called

        Returns:
        --------
            None or r2 (coefficient of determination)

        Example 1
        ---------
        >>> # when fit_params_bool = False
        >>> model = KAN(width=[2,5,1], grid=5, k=3)
        >>> model.fix_symbolic(0,1,3,'sin',fit_params_bool=False)
        >>> print(model.act_fun[0].mask.reshape(2,5))
        >>> print(model.symbolic_fun[0].mask.reshape(2,5))

        Example 2
        ---------
        >>> # when fit_params_bool = True
        >>> model = KAN(width=[2,5,1], grid=5, k=3, noise_scale=1.)
        >>> x = torch.normal(0,1,size=(100,2))
        >>> model(x) # obtain activations (otherwise model does not have attributes acts)
        >>> model.fix_symbolic(0,1,3,'sin',fit_params_bool=True)
        >>> print(model.act_fun[0].mask.reshape(2,5))
        >>> print(model.symbolic_fun[0].mask.reshape(2,5))
        '''
        if not fit_params_bool:
            # just install the symbolic function with default affine params
            self.symbolic_fun[l].fix_symbolic(i, j, fun_name, verbose=verbose, random=random)
            r2 = None
        else:
            # fit affine params so fun(a*x+b)*c+d matches the recorded edge activation;
            # requires a prior forward pass (self.acts / self.spline_postacts populated)
            x = self.acts[l][:, i]
            mask = self.act_fun[l].mask
            y = self.spline_postacts[l][:, j, i]
            #y = self.postacts[l][:, j, i]
            r2 = self.symbolic_fun[l].fix_symbolic(i, j, fun_name, x, y, a_range=a_range, b_range=b_range, verbose=verbose)
            if mask[i,j] == 0:
                # edge already pruned away: report a hugely negative r2 so this
                # candidate never wins a symbolic search
                r2 = - 1e8
        # switch the edge to symbolic-only mode
        self.set_mode(l, i, j, mode="s")

        if log_history:
            self.log_history('fix_symbolic')
        return r2

    def unfix_symbolic(self, l, i, j, log_history=True):
        '''
        unfix the (l,i,j) activation function.
        '''
        # back to numerical-only mode and clear the recorded symbolic name
        self.set_mode(l, i, j, mode="n")
        self.symbolic_fun[l].funs_name[j][i] = "0"
        if log_history:
            self.log_history('unfix_symbolic')
    def unfix_symbolic_all(self, log_history=True):
        '''
        unfix all activation functions.
        '''
        # visit every edge (layer, input, output) and revert it to numerical mode
        for l in range(len(self.width) - 1):
            for i in range(self.width_in[l]):
                for j in range(self.width_out[l + 1]):
                    self.unfix_symbolic(l, i, j, log_history)

    def get_range(self, l, i, j, verbose=True):
        '''
        Get the input range and output range of the (l,i,j) activation

        Args:
        -----
            l : int
                layer index
            i : int
                input neuron index
            j : int
                output neuron index

        Returns:
        --------
            x_min : float
                minimum of input
            x_max : float
                maximum of input
            y_min : float
                minimum of output
            y_max : float
                maximum of output

        Example
        -------
        >>> model = KAN(width=[2,3,1], grid=5, k=3, noise_scale=1.)
        >>> x = torch.normal(0,1,size=(100,2))
        >>> model(x) # do a forward pass to obtain model.acts
        >>> model.get_range(0,0,0)
        '''
        # spline_preacts/postacts are indexed [batch, out_index, in_index],
        # recorded during forward() when save_act is on
        x = self.spline_preacts[l][:, j, i]
        y = self.spline_postacts[l][:, j, i]
        x_min = torch.min(x).cpu().detach().numpy()
        x_max = torch.max(x).cpu().detach().numpy()
        y_min = torch.min(y).cpu().detach().numpy()
        y_max = torch.max(y).cpu().detach().numpy()
        if verbose:
            print('x range: [' + '%.2f' % x_min, ',', '%.2f' % x_max, ']')
            print('y range: [' + '%.2f' % y_min, ',', '%.2f' % y_max, ']')
        return x_min, x_max, y_min, y_max
    def plot(self, folder="./figures", beta=3, metric='backward', scale=0.5, tick=False, sample=False, in_vars=None, out_vars=None, title=None, varscale=1.0):
        '''
        plot KAN

        Args:
        -----
            folder : str
                the folder to store pngs
            beta : float
                positive number. control the transparency of each activation. transparency = tanh(beta*l1).
            metric : str
                'forward_n', 'forward_u' or 'backward' — which score controls edge transparency
            scale : float
                control the size of the diagram
            tick : bool
                if True, annotate each activation subplot with its x/y range
            sample : bool
                if True, overlay the sample points on each activation curve
            in_vars: None or list of str
                the name(s) of input variables
            out_vars: None or list of str
                the name(s) of output variables
            title: None or str
                title
            varscale : float
                the size of input variables

        Returns:
        --------
            Figure

        Example
        -------
        >>> # see more interactive examples in demos
        >>> model = KAN(width=[2,3,1], grid=3, k=3, noise_scale=1.0)
        >>> x = torch.normal(0,1,size=(100,2))
        >>> model(x) # do a forward pass to obtain model.acts
        >>> model.plot()
        '''
        global Symbol

        if not self.save_act:
            print('cannot plot since data are not saved. Set save_act=True first.')

        # forward to obtain activations
        if self.acts == None:
            if self.cache_data == None:
                raise Exception('model hasn\'t seen any data yet.')
            self.forward(self.cache_data)

        if metric == 'backward':
            # backward metric needs attribution scores (self.edge_scores)
            self.attribute()

        if not os.path.exists(folder):
            os.makedirs(folder)
        # matplotlib.use('Agg')
        depth = len(self.width) - 1
        # Pass 1: render every edge activation to its own png in `folder`;
        # they are composited into the skeleton figure in a later pass.
        for l in range(depth):
            w_large = 2.0
            for i in range(self.width_in[l]):
                for j in range(self.width_out[l+1]):
                    # sort samples by input value so the curve is drawn left-to-right
                    rank = torch.argsort(self.acts[l][:, i])
                    fig, ax = plt.subplots(figsize=(w_large, w_large))

                    num = rank.shape[0]  # NOTE(review): unused

                    #print(self.width_in[l])
                    #print(self.width_out[l+1])
                    # color encodes which branch is active: purple = both,
                    # red = symbolic only, black = numerical only, white = dead edge
                    symbolic_mask = self.symbolic_fun[l].mask[j][i]
                    numeric_mask = self.act_fun[l].mask[i][j]
                    if symbolic_mask > 0. and numeric_mask > 0.:
                        color = 'purple'
                        alpha_mask = 1
                    if symbolic_mask > 0. and numeric_mask == 0.:
                        color = "red"
                        alpha_mask = 1
                    if symbolic_mask == 0. and numeric_mask > 0.:
                        color = "black"
                        alpha_mask = 1
                    if symbolic_mask == 0. and numeric_mask == 0.:
                        color = "white"
                        alpha_mask = 0

                    if tick == True:
                        ax.tick_params(axis="y", direction="in", pad=-22, labelsize=50)
                        ax.tick_params(axis="x", direction="in", pad=-15, labelsize=50)
                        x_min, x_max, y_min, y_max = self.get_range(l, i, j, verbose=False)
                        plt.xticks([x_min, x_max], ['%2.f' % x_min, '%2.f' % x_max])
                        plt.yticks([y_min, y_max], ['%2.f' % y_min, '%2.f' % y_max])
                    else:
                        plt.xticks([])
                        plt.yticks([])
                    if alpha_mask == 1:
                        plt.gca().patch.set_edgecolor('black')
                    else:
                        plt.gca().patch.set_edgecolor('white')
                    plt.gca().patch.set_linewidth(1.5)
                    # plt.axis('off')

                    plt.plot(self.acts[l][:, i][rank].cpu().detach().numpy(), self.spline_postacts[l][:, j, i][rank].cpu().detach().numpy(), color=color, lw=5)
                    if sample == True:
                        plt.scatter(self.acts[l][:, i][rank].cpu().detach().numpy(), self.spline_postacts[l][:, j, i][rank].cpu().detach().numpy(), color=color, s=400 * scale ** 2)
                    plt.gca().spines[:].set_color(color)

                    plt.savefig(f'{folder}/sp_{l}_{i}_{j}.png', bbox_inches="tight", dpi=400)
                    plt.close()

        def score2alpha(score):
            # squash a non-negative score into [0,1) transparency
            return np.tanh(beta * score)

        if metric == 'forward_n':
            scores = self.acts_scale
        elif metric == 'forward_u':
            scores = self.edge_actscale
        elif metric == 'backward':
            scores = self.edge_scores
        else:
            raise Exception(f'metric = \'{metric}\' not recognized')

        alpha = [score2alpha(score.cpu().detach().numpy()) for score in scores]

        # draw skeleton
        width = np.array(self.width)
        width_in = np.array(self.width_in)
        width_out = np.array(self.width_out)
        A = 1
        y0 = 0.3  # height: from input to pre-mult
        z0 = 0.1  # height: from pre-mult to post-mult (input of next layer)

        neuron_depth = len(width)
        min_spacing = A / np.maximum(np.max(width_out), 5)

        max_neuron = np.max(width_out)
        max_num_weights = np.max(width_in[:-1] * width_out[1:])
        y1 = 0.4 / np.maximum(max_num_weights, 5)  # size (height/width) of 1D function diagrams
        y2 = 0.15 / np.maximum(max_neuron, 5)  # size (height/width) of operations (sum and mult)

        fig, ax = plt.subplots(figsize=(10 * scale, 10 * scale * (neuron_depth - 1) * (y0+z0)))
        # fig, ax = plt.subplots(figsize=(5,5*(neuron_depth-1)*y0))

        # -- Transformation functions
        DC_to_FC = ax.transData.transform
        FC_to_NFC = fig.transFigure.inverted().transform
        # -- Take data coordinates and transform them to normalized figure coordinates
        DC_to_NFC = lambda x: FC_to_NFC(DC_to_FC(x))

        # plot scatters and lines
        for l in range(neuron_depth):

            n = width_in[l]

            # scatters (the node dots of layer l)
            for i in range(n):
                plt.scatter(1 / (2 * n) + i / n, l * (y0+z0), s=min_spacing ** 2 * 10000 * scale ** 2, color='black')

            # plot connections (input to pre-mult)
            for i in range(n):
                if l < neuron_depth - 1:
                    n_next = width_out[l+1]
                    N = n * n_next
                    for j in range(n_next):
                        id_ = i * n_next + j

                        # same color convention as the per-edge pngs above
                        symbol_mask = self.symbolic_fun[l].mask[j][i]
                        numerical_mask = self.act_fun[l].mask[i][j]
                        if symbol_mask == 1. and numerical_mask > 0.:
                            color = 'purple'
                            alpha_mask = 1.
                        if symbol_mask == 1. and numerical_mask == 0.:
                            color = "red"
                            alpha_mask = 1.
                        if symbol_mask == 0. and numerical_mask == 1.:
                            color = "black"
                            alpha_mask = 1.
                        if symbol_mask == 0. and numerical_mask == 0.:
                            color = "white"
                            alpha_mask = 0.

                        # two segments: node -> activation diagram, diagram -> subnode
                        plt.plot([1 / (2 * n) + i / n, 1 / (2 * N) + id_ / N], [l * (y0+z0), l * (y0+z0) + y0/2 - y1], color=color, lw=2 * scale, alpha=alpha[l][j][i] * alpha_mask)
                        plt.plot([1 / (2 * N) + id_ / N, 1 / (2 * n_next) + j / n_next], [l * (y0+z0) + y0/2 + y1, l * (y0+z0)+y0], color=color, lw=2 * scale, alpha=alpha[l][j][i] * alpha_mask)

            # plot connections (pre-mult to post-mult, post-mult = next-layer input)
            if l < neuron_depth - 1:
                n_in = width_out[l+1]
                n_out = width_in[l+1]
                mult_id = 0
                for i in range(n_in):
                    if i < width[l+1][0]:
                        # sum subnode maps 1:1 to a node
                        j = i
                    else:
                        # mult subnodes: group consecutive subnodes by arity onto one node
                        if i == width[l+1][0]:
                            if isinstance(self.mult_arity, int):
                                ma = self.mult_arity
                            else:
                                ma = self.mult_arity[l+1][mult_id]
                            current_mult_arity = ma
                        if current_mult_arity == 0:
                            mult_id += 1
                            if isinstance(self.mult_arity, int):
                                ma = self.mult_arity
                            else:
                                ma = self.mult_arity[l+1][mult_id]
                            current_mult_arity = ma
                        j = width[l+1][0] + mult_id
                        current_mult_arity -= 1
                        #j = (i-width[l+1][0])//self.mult_arity + width[l+1][0]
                    plt.plot([1 / (2 * n_in) + i / n_in, 1 / (2 * n_out) + j / n_out], [l * (y0+z0) + y0, (l+1) * (y0+z0)], color='black', lw=2 * scale)

        plt.xlim(0, 1)
        plt.ylim(-0.1 * (y0+z0), (neuron_depth - 1 + 0.1) * (y0+z0))

        plt.axis('off')

        # Pass 2: composite the per-edge pngs and the sum/mult operator icons
        # into the skeleton figure as inset axes.
        for l in range(neuron_depth - 1):
            # plot splines
            n = width_in[l]
            for i in range(n):
                n_next = width_out[l + 1]
                N = n * n_next
                for j in range(n_next):
                    id_ = i * n_next + j
                    im = plt.imread(f'{folder}/sp_{l}_{i}_{j}.png')
                    left = DC_to_NFC([1 / (2 * N) + id_ / N - y1, 0])[0]
                    right = DC_to_NFC([1 / (2 * N) + id_ / N + y1, 0])[0]
                    bottom = DC_to_NFC([0, l * (y0+z0) + y0/2 - y1])[1]
                    up = DC_to_NFC([0, l * (y0+z0) + y0/2 + y1])[1]
                    newax = fig.add_axes([left, bottom, right - left, up - bottom])
                    # newax = fig.add_axes([1/(2*N)+id_/N-y1, (l+1/2)*y0-y1, y1, y1], anchor='NE')
                    newax.imshow(im, alpha=alpha[l][j][i])
                    newax.axis('off')

            # plot sum symbols
            N = n = width_out[l+1]
            for j in range(n):
                id_ = j
                path = os.path.dirname(os.path.abspath(__file__)) + "/assets/img/sum_symbol.png"
                im = plt.imread(path)
                left = DC_to_NFC([1 / (2 * N) + id_ / N - y2, 0])[0]
                right = DC_to_NFC([1 / (2 * N) + id_ / N + y2, 0])[0]
                bottom = DC_to_NFC([0, l * (y0+z0) + y0 - y2])[1]
                up = DC_to_NFC([0, l * (y0+z0) + y0 + y2])[1]
                newax = fig.add_axes([left, bottom, right - left, up - bottom])
                newax.imshow(im)
                newax.axis('off')

            # plot mult symbols
            N = n = width_in[l+1]
            n_sum = width[l+1][0]
            n_mult = width[l+1][1]
            for j in range(n_mult):
                id_ = j + n_sum
                path = os.path.dirname(os.path.abspath(__file__)) + "/assets/img/mult_symbol.png"
                im = plt.imread(path)
                left = DC_to_NFC([1 / (2 * N) + id_ / N - y2, 0])[0]
                right = DC_to_NFC([1 / (2 * N) + id_ / N + y2, 0])[0]
                bottom = DC_to_NFC([0, (l+1) * (y0+z0) - y2])[1]
                up = DC_to_NFC([0, (l+1) * (y0+z0) + y2])[1]
                newax = fig.add_axes([left, bottom, right - left, up - bottom])
                newax.imshow(im)
                newax.axis('off')

        if in_vars != None:
            # label input nodes; sympy expressions are rendered as LaTeX
            n = self.width_in[0]
            for i in range(n):
                if isinstance(in_vars[i], sympy.Expr):
                    plt.gcf().get_axes()[0].text(1 / (2 * (n)) + i / (n), -0.1, f'${latex(in_vars[i])}$', fontsize=40 * scale * varscale, horizontalalignment='center', verticalalignment='center')
                else:
                    plt.gcf().get_axes()[0].text(1 / (2 * (n)) + i / (n), -0.1, in_vars[i], fontsize=40 * scale * varscale, horizontalalignment='center', verticalalignment='center')

        if out_vars != None:
            # label output nodes above the top layer
            n = self.width_in[-1]
            for i in range(n):
                if isinstance(out_vars[i], sympy.Expr):
                    plt.gcf().get_axes()[0].text(1 / (2 * (n)) + i / (n), (y0+z0) * (len(self.width) - 1) + 0.15, f'${latex(out_vars[i])}$', fontsize=40 * scale * varscale, horizontalalignment='center', verticalalignment='center')
                else:
                    plt.gcf().get_axes()[0].text(1 / (2 * (n)) + i / (n), (y0+z0) * (len(self.width) - 1) + 0.15, out_vars[i], fontsize=40 * scale * varscale, horizontalalignment='center', verticalalignment='center')

        if title != None:
            plt.gcf().get_axes()[0].text(0.5, (y0+z0) * (len(self.width) - 1) + 0.3, title, fontsize=40 * scale, horizontalalignment='center', verticalalignment='center')
None: + plt.gcf().get_axes()[0].text(0.5, (y0+z0) * (len(self.width) - 1) + 0.3, title, fontsize=40 * scale, horizontalalignment='center', verticalalignment='center') + + + def reg(self, reg_metric, lamb_l1, lamb_entropy, lamb_coef, lamb_coefdiff): + ''' + Get regularization + + Args: + ----- + reg_metric : the regularization metric + 'edge_forward_spline_n', 'edge_forward_spline_u', 'edge_forward_sum', 'edge_backward', 'node_backward' + lamb_l1 : float + l1 penalty strength + lamb_entropy : float + entropy penalty strength + lamb_coef : float + coefficient penalty strength + lamb_coefdiff : float + coefficient smoothness strength + + Returns: + -------- + reg_ : torch.float + + Example + ------- + >>> model = KAN(width=[2,3,1], grid=5, k=3, noise_scale=1.) + >>> x = torch.rand(100,2) + >>> model.get_act(x) + >>> model.reg('edge_forward_spline_n', 1.0, 2.0, 1.0, 1.0) + ''' + if reg_metric == 'edge_forward_spline_n': + acts_scale = self.acts_scale_spline + + elif reg_metric == 'edge_forward_sum': + acts_scale = self.acts_scale + + elif reg_metric == 'edge_forward_spline_u': + acts_scale = self.edge_actscale + + elif reg_metric == 'edge_backward': + acts_scale = self.edge_scores + + elif reg_metric == 'node_backward': + acts_scale = self.node_attribute_scores + + else: + raise Exception(f'reg_metric = {reg_metric} not recognized!') + + reg_ = 0. 
+ for i in range(len(acts_scale)): + vec = acts_scale[i] + + l1 = torch.sum(vec) + p_row = vec / (torch.sum(vec, dim=1, keepdim=True) + 1) + p_col = vec / (torch.sum(vec, dim=0, keepdim=True) + 1) + entropy_row = - torch.mean(torch.sum(p_row * torch.log2(p_row + 1e-4), dim=1)) + entropy_col = - torch.mean(torch.sum(p_col * torch.log2(p_col + 1e-4), dim=0)) + reg_ += lamb_l1 * l1 + lamb_entropy * (entropy_row + entropy_col) # both l1 and entropy + + # regularize coefficient to encourage spline to be zero + for i in range(len(self.act_fun)): + coeff_l1 = torch.sum(torch.mean(torch.abs(self.act_fun[i].coef), dim=1)) + coeff_diff_l1 = torch.sum(torch.mean(torch.abs(torch.diff(self.act_fun[i].coef)), dim=1)) + reg_ += lamb_coef * coeff_l1 + lamb_coefdiff * coeff_diff_l1 + + return reg_ + + def get_reg(self, reg_metric, lamb_l1, lamb_entropy, lamb_coef, lamb_coefdiff): + ''' + Get regularization. This seems unnecessary but in case a class wants to inherit this, it may want to rewrite get_reg, but not reg. 
+ ''' + return self.reg(reg_metric, lamb_l1, lamb_entropy, lamb_coef, lamb_coefdiff) + + def disable_symbolic_in_fit(self, lamb): + ''' + during fitting, disable symbolic if either is true (lamb = 0, none of symbolic functions is active) + ''' + old_save_act = self.save_act + if lamb == 0.: + self.save_act = False + + # skip symbolic if no symbolic is turned on + depth = len(self.symbolic_fun) + no_symbolic = True + for l in range(depth): + no_symbolic *= torch.sum(torch.abs(self.symbolic_fun[l].mask)) == 0 + + old_symbolic_enabled = self.symbolic_enabled + + if no_symbolic: + self.symbolic_enabled = False + + return old_save_act, old_symbolic_enabled + + def get_params(self): + ''' + Get parameters + ''' + return self.parameters() + + + def fit(self, dataset, opt="LBFGS", steps=100, log=1, lamb=0., lamb_l1=1., lamb_entropy=2., lamb_coef=0., lamb_coefdiff=0., update_grid=True, grid_update_num=10, loss_fn=None, lr=1.,start_grid_update_step=-1, stop_grid_update_step=50, batch=-1, + metrics=None, save_fig=False, in_vars=None, out_vars=None, beta=3, save_fig_freq=1, img_folder='./video', singularity_avoiding=False, y_th=1000., reg_metric='edge_forward_spline_n', display_metrics=None): + ''' + training + + Args: + ----- + dataset : dic + contains dataset['train_input'], dataset['train_label'], dataset['test_input'], dataset['test_label'] + opt : str + "LBFGS" or "Adam" + steps : int + training steps + log : int + logging frequency + lamb : float + overall penalty strength + lamb_l1 : float + l1 penalty strength + lamb_entropy : float + entropy penalty strength + lamb_coef : float + coefficient magnitude penalty strength + lamb_coefdiff : float + difference of nearby coefficits (smoothness) penalty strength + update_grid : bool + If True, update grid regularly before stop_grid_update_step + grid_update_num : int + the number of grid updates before stop_grid_update_step + start_grid_update_step : int + no grid updates before this training step + stop_grid_update_step : 
    def fit(self, dataset, opt="LBFGS", steps=100, log=1, lamb=0., lamb_l1=1., lamb_entropy=2., lamb_coef=0., lamb_coefdiff=0., update_grid=True, grid_update_num=10, loss_fn=None, lr=1.,start_grid_update_step=-1, stop_grid_update_step=50, batch=-1,
              metrics=None, save_fig=False, in_vars=None, out_vars=None, beta=3, save_fig_freq=1, img_folder='./video', singularity_avoiding=False, y_th=1000., reg_metric='edge_forward_spline_n', display_metrics=None):
        '''
        training

        Args:
        -----
            dataset : dic
                contains dataset['train_input'], dataset['train_label'], dataset['test_input'], dataset['test_label']
            opt : str
                "LBFGS" or "Adam"
            steps : int
                training steps
            log : int
                logging frequency
            lamb : float
                overall penalty strength
            lamb_l1 : float
                l1 penalty strength
            lamb_entropy : float
                entropy penalty strength
            lamb_coef : float
                coefficient magnitude penalty strength
            lamb_coefdiff : float
                difference of nearby coefficits (smoothness) penalty strength
            update_grid : bool
                If True, update grid regularly before stop_grid_update_step
            grid_update_num : int
                the number of grid updates before stop_grid_update_step
            start_grid_update_step : int
                no grid updates before this training step
            stop_grid_update_step : int
                no grid updates after this training step
            loss_fn : function
                loss function
            lr : float
                learning rate
            batch : int
                batch size, if -1 then full.
            save_fig_freq : int
                save figure every (save_fig_freq) steps
            singularity_avoiding : bool
                indicate whether to avoid singularity for the symbolic part
            y_th : float
                singularity threshold (anything above the threshold is considered singular and is softened in some ways)
            reg_metric : str
                regularization metric. Choose from {'edge_forward_spline_n', 'edge_forward_spline_u', 'edge_forward_sum', 'edge_backward', 'node_backward'}
            metrics : a list of metrics (as functions)
                the metrics to be computed in training
            display_metrics : a list of functions
                the metric to be displayed in tqdm progress bar

        Returns:
        --------
            results : dic
                results['train_loss'], 1D array of training losses (RMSE)
                results['test_loss'], 1D array of test losses (RMSE)
                results['reg'], 1D array of regularization
                other metrics specified in metrics

        Example
        -------
        >>> from kan import *
        >>> model = KAN(width=[2,5,1], grid=5, k=3, noise_scale=0.3, seed=2)
        >>> f = lambda x: torch.exp(torch.sin(torch.pi*x[:,[0]]) + x[:,[1]]**2)
        >>> dataset = create_dataset(f, n_var=2)
        >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001);
        >>> model.plot()
        # Most examples in toturals involve the fit() method. Please check them for useness.
        '''

        # NOTE(review): this only warns; lamb is NOT actually set to 0 here —
        # the regularizer would just be meaningless without save_act
        if lamb > 0. and not self.save_act:
            print('setting lamb=0. If you want to set lamb > 0, set self.save_act=True')

        # temporarily switch off act-saving / symbolic branch where possible;
        # old values are restored at the end of fit()
        old_save_act, old_symbolic_enabled = self.disable_symbolic_in_fit(lamb)

        pbar = tqdm(range(steps), desc='description', ncols=100)

        if loss_fn == None:
            loss_fn = loss_fn_eval = lambda x, y: torch.mean((x - y) ** 2)
        else:
            loss_fn = loss_fn_eval = loss_fn

        grid_update_freq = int(stop_grid_update_step / grid_update_num)

        if opt == "Adam":
            optimizer = torch.optim.Adam(self.get_params(), lr=lr)
        elif opt == "LBFGS":
            optimizer = LBFGS(self.get_params(), lr=lr, history_size=10, line_search_fn="strong_wolfe", tolerance_grad=1e-32, tolerance_change=1e-32, tolerance_ys=1e-32)

        results = {}
        results['train_loss'] = []
        results['test_loss'] = []
        results['reg'] = []
        if metrics != None:
            for i in range(len(metrics)):
                results[metrics[i].__name__] = []

        if batch == -1 or batch > dataset['train_input'].shape[0]:
            batch_size = dataset['train_input'].shape[0]
            batch_size_test = dataset['test_input'].shape[0]
        else:
            batch_size = batch
            batch_size_test = batch

        # globals so the LBFGS closure can report loss/reg back to the loop body
        global train_loss, reg_

        def closure():
            # LBFGS re-evaluates this closure possibly several times per step
            global train_loss, reg_
            optimizer.zero_grad()
            pred = self.forward(dataset['train_input'][train_id], singularity_avoiding=singularity_avoiding, y_th=y_th)
            train_loss = loss_fn(pred, dataset['train_label'][train_id])
            if self.save_act:
                if reg_metric == 'edge_backward':
                    self.attribute()
                if reg_metric == 'node_backward':
                    self.node_attribute()
                reg_ = self.get_reg(reg_metric, lamb_l1, lamb_entropy, lamb_coef, lamb_coefdiff)
            else:
                reg_ = torch.tensor(0.)
            objective = train_loss + lamb * reg_
            objective.backward()
            return objective

        if save_fig:
            if not os.path.exists(img_folder):
                os.makedirs(img_folder)

        for _ in pbar:

            # re-enable act saving on the last step so the final state is inspectable
            if _ == steps-1 and old_save_act:
                self.save_act = True

            # plotting needs saved activations; remember the flag to restore below
            if save_fig and _ % save_fig_freq == 0:
                save_act = self.save_act
                self.save_act = True

            train_id = np.random.choice(dataset['train_input'].shape[0], batch_size, replace=False)
            test_id = np.random.choice(dataset['test_input'].shape[0], batch_size_test, replace=False)

            # periodic grid refinement within [start_grid_update_step, stop_grid_update_step)
            if _ % grid_update_freq == 0 and _ < stop_grid_update_step and update_grid and _ >= start_grid_update_step:
                self.update_grid(dataset['train_input'][train_id])

            if opt == "LBFGS":
                optimizer.step(closure)

            if opt == "Adam":
                pred = self.forward(dataset['train_input'][train_id], singularity_avoiding=singularity_avoiding, y_th=y_th)
                train_loss = loss_fn(pred, dataset['train_label'][train_id])
                if self.save_act:
                    if reg_metric == 'edge_backward':
                        self.attribute()
                    if reg_metric == 'node_backward':
                        self.node_attribute()
                    reg_ = self.get_reg(reg_metric, lamb_l1, lamb_entropy, lamb_coef, lamb_coefdiff)
                else:
                    reg_ = torch.tensor(0.)
                loss = train_loss + lamb * reg_
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            test_loss = loss_fn_eval(self.forward(dataset['test_input'][test_id]), dataset['test_label'][test_id])

            if metrics != None:
                for i in range(len(metrics)):
                    results[metrics[i].__name__].append(metrics[i]().item())

            # losses are recorded as RMSE (sqrt of the MSE-style loss)
            results['train_loss'].append(torch.sqrt(train_loss).cpu().detach().numpy())
            results['test_loss'].append(torch.sqrt(test_loss).cpu().detach().numpy())
            results['reg'].append(reg_.cpu().detach().numpy())

            if _ % log == 0:
                if display_metrics == None:
                    pbar.set_description("| train_loss: %.2e | test_loss: %.2e | reg: %.2e | " % (torch.sqrt(train_loss).cpu().detach().numpy(), torch.sqrt(test_loss).cpu().detach().numpy(), reg_.cpu().detach().numpy()))
                else:
                    string = ''
                    data = ()
                    for metric in display_metrics:
                        string += f' {metric}: %.2e |'
                        try:
                            results[metric]
                        except:
                            raise Exception(f'{metric} not recognized')
                        data += (results[metric][-1],)
                    pbar.set_description(string % data)

            if save_fig and _ % save_fig_freq == 0:
                self.plot(folder=img_folder, in_vars=in_vars, out_vars=out_vars, title="Step {}".format(_), beta=beta)
                plt.savefig(img_folder + '/' + str(_) + '.jpg', bbox_inches='tight', dpi=200)
                plt.close()
                self.save_act = save_act

        self.log_history('fit')
        # revert back to original state
        self.symbolic_enabled = old_symbolic_enabled
        return results
    def prune_node(self, threshold=1e-2, mode="auto", active_neurons_id=None, log_history=True):
        '''
        pruning nodes

        Args:
        -----
            threshold : float
                if the attribution score of a neuron is below the threshold, it is considered dead and will be removed
            mode : str
                'auto' or 'manual'. with 'auto', nodes are automatically pruned using threshold. with 'manual', active_neurons_id should be passed in.

        Returns:
        --------
            pruned network : MultKAN

        Example
        -------
        >>> from kan import *
        >>> model = KAN(width=[2,5,1], grid=5, k=3, noise_scale=0.3, seed=2)
        >>> f = lambda x: torch.exp(torch.sin(torch.pi*x[:,[0]]) + x[:,[1]]**2)
        >>> dataset = create_dataset(f, n_var=2)
        >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001);
        >>> model = model.prune_node()
        >>> model.plot()
        '''
        if self.acts == None:
            # no forward pass recorded yet; run one on the cached data
            self.get_act()

        # masks/index lists per layer: "up" refers to node (post-mult) level,
        # "down" to subnode (pre-mult) level
        mask_up = [torch.ones(self.width_in[0], device=self.device)]
        mask_down = []
        active_neurons_up = [list(range(self.width_in[0]))]
        active_neurons_down = []
        num_sums = []
        num_mults = []
        mult_arities = [[]]

        if active_neurons_id != None:
            mode = "manual"

        for i in range(len(self.acts_scale) - 1):

            mult_arity = []

            if mode == "auto":
                # keep a node iff its attribution score clears the threshold
                self.attribute()
                overall_important_up = self.node_scores[i+1] > threshold

            elif mode == "manual":
                overall_important_up = torch.zeros(self.width_in[i + 1], dtype=torch.bool, device=self.device)
                overall_important_up[active_neurons_id[i]] = True

            # split survivors into sum nodes and mult nodes
            num_sum = torch.sum(overall_important_up[:self.width[i+1][0]])
            num_mult = torch.sum(overall_important_up[self.width[i+1][0]:])
            if self.mult_homo == True:
                # homogeneous arity: replicate each mult-node decision mult_arity times
                overall_important_down = torch.cat([overall_important_up[:self.width[i+1][0]], (overall_important_up[self.width[i+1][0]:][None,:].expand(self.mult_arity,-1)).T.reshape(-1,)], dim=0)
            else:
                # heterogeneous arity: expand each mult-node decision by its own arity
                overall_important_down = overall_important_up[:self.width[i+1][0]]
                for j in range(overall_important_up[self.width[i+1][0]:].shape[0]):
                    active_bool = overall_important_up[self.width[i+1][0]+j]
                    arity = self.mult_arity[i+1][j]
                    overall_important_down = torch.cat([overall_important_down, torch.tensor([active_bool]*arity).to(self.device)])
                    if active_bool:
                        mult_arity.append(arity)

            num_sums.append(num_sum.item())
            num_mults.append(num_mult.item())

            mask_up.append(overall_important_up.float())
            mask_down.append(overall_important_down.float())

            active_neurons_up.append(torch.where(overall_important_up == True)[0])
            active_neurons_down.append(torch.where(overall_important_down == True)[0])

            mult_arities.append(mult_arity)

        # the output layer is never pruned
        active_neurons_down.append(list(range(self.width_out[-1])))
        mask_down.append(torch.ones(self.width_out[-1], device=self.device))

        if self.mult_homo == False:
            mult_arities.append(self.mult_arity[-1])

        self.mask_up = mask_up
        self.mask_down = mask_down

        # update act_fun[l].mask up: zero out all edges of removed neurons in-place
        for l in range(len(self.acts_scale) - 1):
            for i in range(self.width_in[l + 1]):
                if i not in active_neurons_up[l + 1]:
                    self.remove_node(l + 1, i, mode='up',log_history=False)

            for i in range(self.width_out[l + 1]):
                if i not in active_neurons_down[l]:
                    self.remove_node(l + 1, i, mode='down',log_history=False)

        # build a fresh model with the same hyperparameters, then shrink its
        # layers to the surviving neurons
        model2 = MultKAN(copy.deepcopy(self.width), grid=self.grid, k=self.k, base_fun=self.base_fun_name, mult_arity=self.mult_arity, ckpt_path=self.ckpt_path, auto_save=True, first_init=False, state_id=self.state_id, round=self.round).to(self.device)
        model2.load_state_dict(self.state_dict())

        width_new = [self.width[0]]

        for i in range(len(self.acts_scale)):

            if i < len(self.acts_scale) - 1:
                num_sum = num_sums[i]
                num_mult = num_mults[i]
                # keep only the affine params of surviving nodes/subnodes
                model2.node_bias[i].data = model2.node_bias[i].data[active_neurons_up[i+1]]
                model2.node_scale[i].data = model2.node_scale[i].data[active_neurons_up[i+1]]
                model2.subnode_bias[i].data = model2.subnode_bias[i].data[active_neurons_down[i]]
                model2.subnode_scale[i].data = model2.subnode_scale[i].data[active_neurons_down[i]]
                model2.width[i+1] = [num_sum, num_mult]

                model2.act_fun[i].out_dim_sum = num_sum
                model2.act_fun[i].out_dim_mult = num_mult

                model2.symbolic_fun[i].out_dim_sum = num_sum
                model2.symbolic_fun[i].out_dim_mult = num_mult

                width_new.append([num_sum, num_mult])

            model2.act_fun[i] = model2.act_fun[i].get_subset(active_neurons_up[i], active_neurons_down[i])
            model2.symbolic_fun[i] = self.symbolic_fun[i].get_subset(active_neurons_up[i], active_neurons_down[i])

        model2.cache_data = self.cache_data
        model2.acts = None

        width_new.append(self.width[-1])
        model2.width = width_new

        if self.mult_homo == False:
            model2.mult_arity = mult_arities

        if log_history:
            self.log_history('prune_node')
        model2.state_id += 1

        return model2
self.symbolic_fun[i].get_subset(active_neurons_up[i], active_neurons_down[i]) + + model2.cache_data = self.cache_data + model2.acts = None + + width_new.append(self.width[-1]) + model2.width = width_new + + if self.mult_homo == False: + model2.mult_arity = mult_arities + + if log_history: + self.log_history('prune_node') + model2.state_id += 1 + + return model2 + + def prune_edge(self, threshold=3e-2, log_history=True): + ''' + pruning edges + + Args: + ----- + threshold : float + if the attribution score of an edge is below the threshold, it is considered dead and will be set to zero. + + Returns: + -------- + pruned network : MultKAN + + Example + ------- + >>> from kan import * + >>> model = KAN(width=[2,5,1], grid=5, k=3, noise_scale=0.3, seed=2) + >>> f = lambda x: torch.exp(torch.sin(torch.pi*x[:,[0]]) + x[:,[1]]**2) + >>> dataset = create_dataset(f, n_var=2) + >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001); + >>> model = model.prune_edge() + >>> model.plot() + ''' + if self.acts == None: + self.get_act() + + for i in range(len(self.width)-1): + #self.act_fun[i].mask.data = ((self.acts_scale[i] > threshold).permute(1,0)).float() + old_mask = self.act_fun[i].mask.data + self.act_fun[i].mask.data = ((self.edge_scores[i] > threshold).permute(1,0)*old_mask).float() + + if log_history: + self.log_history('fix_symbolic') + + def prune(self, node_th=1e-2, edge_th=3e-2): + ''' + prune (both nodes and edges) + + Args: + ----- + node_th : float + if the attribution score of a node is below node_th, it is considered dead and will be set to zero. + edge_th : float + if the attribution score of an edge is below node_th, it is considered dead and will be set to zero. 
+ + Returns: + -------- + pruned network : MultKAN + + Example + ------- + >>> from kan import * + >>> model = KAN(width=[2,5,1], grid=5, k=3, noise_scale=0.3, seed=2) + >>> f = lambda x: torch.exp(torch.sin(torch.pi*x[:,[0]]) + x[:,[1]]**2) + >>> dataset = create_dataset(f, n_var=2) + >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001); + >>> model = model.prune() + >>> model.plot() + ''' + if self.acts == None: + self.get_act() + + self = self.prune_node(node_th, log_history=False) + #self.prune_node(node_th, log_history=False) + self.forward(self.cache_data) + self.attribute() + self.prune_edge(edge_th, log_history=False) + self.log_history('prune') + return self + + def prune_input(self, threshold=1e-2, active_inputs=None, log_history=True): + ''' + prune inputs + + Args: + ----- + threshold : float + if the attribution score of the input feature is below threshold, it is considered irrelevant. + active_inputs : None or list + if a list is passed, the manual mode will disregard attribution score and prune as instructed. 
    def prune_input(self, threshold=1e-2, active_inputs=None, log_history=True):
        '''
        prune inputs

        Args:
        -----
            threshold : float
                if the attribution score of the input feature is below threshold, it is considered irrelevant.
            active_inputs : None or list
                if a list is passed, the manual mode will disregard attribution score and prune as instructed.

        Returns:
        --------
            pruned network : MultKAN

        Example1
        --------
        >>> # automatic
        >>> from kan import *
        >>> model = KAN(width=[3,5,1], grid=5, k=3, noise_scale=0.3, seed=2)
        >>> f = lambda x: 1 * x[:,[0]]**2 + 0.3 * x[:,[1]]**2 + 0.0 * x[:,[2]]**2
        >>> dataset = create_dataset(f, n_var=3)
        >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001);
        >>> model.plot()
        >>> model = model.prune_input()
        >>> model.plot()

        Example2
        --------
        >>> # automatic
        >>> from kan import *
        >>> model = KAN(width=[3,5,1], grid=5, k=3, noise_scale=0.3, seed=2)
        >>> f = lambda x: 1 * x[:,[0]]**2 + 0.3 * x[:,[1]]**2 + 0.0 * x[:,[2]]**2
        >>> dataset = create_dataset(f, n_var=3)
        >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001);
        >>> model.plot()
        >>> model = model.prune_input(active_inputs=[0,1])
        >>> model.plot()
        '''
        if active_inputs == None:
            # auto mode: keep inputs whose attribution score clears the threshold
            self.attribute()
            input_score = self.node_scores[0]
            input_mask = input_score > threshold
            print('keep:', input_mask.tolist())
            input_id = torch.where(input_mask==True)[0]

        else:
            # manual mode: caller dictates which inputs survive
            input_id = torch.tensor(active_inputs, dtype=torch.long).to(self.device)

        # clone the model, then restrict layer 0 to the surviving inputs
        model2 = MultKAN(copy.deepcopy(self.width), grid=self.grid, k=self.k, base_fun=self.base_fun, mult_arity=self.mult_arity, ckpt_path=self.ckpt_path, auto_save=True, first_init=False, state_id=self.state_id, round=self.round).to(self.device)
        model2.load_state_dict(self.state_dict())

        model2.act_fun[0] = model2.act_fun[0].get_subset(input_id, torch.arange(self.width_out[1]))
        model2.symbolic_fun[0] = self.symbolic_fun[0].get_subset(input_id, torch.arange(self.width_out[1]))

        model2.cache_data = self.cache_data
        model2.acts = None

        model2.width[0] = [len(input_id), 0]
        # forward() uses input_id to slice incoming data, so the pruned model
        # still accepts full-width inputs
        model2.input_id = input_id

        if log_history:
            self.log_history('prune_input')
        model2.state_id += 1

        return model2
self.act_fun[l].mask[i][j] = 0. + if log_history: + self.log_history('remove_edge') + + def remove_node(self, l ,i, mode='all', log_history=True): + ''' + remove neuron (l,i) (set the masks of all incoming and outgoing activation functions to zero) + ''' + if mode == 'down': + self.act_fun[l - 1].mask[:, i] = 0. + self.symbolic_fun[l - 1].mask[i, :] *= 0. + + elif mode == 'up': + self.act_fun[l].mask[i, :] = 0. + self.symbolic_fun[l].mask[:, i] *= 0. + + else: + self.remove_node(l, i, mode='up') + self.remove_node(l, i, mode='down') + + if log_history: + self.log_history('remove_node') + + + def attribute(self, l=None, i=None, out_score=None, plot=True): + ''' + get attribution scores + + Args: + ----- + l : None or int + layer index + i : None or int + neuron index + out_score : None or 1D torch.float + specify output scores + plot : bool + when plot = True, display the bar show + + Returns: + -------- + attribution scores + + Example + ------- + >>> from kan import * + >>> model = KAN(width=[3,5,1], grid=5, k=3, noise_scale=0.3, seed=2) + >>> f = lambda x: 1 * x[:,[0]]**2 + 0.3 * x[:,[1]]**2 + 0.0 * x[:,[2]]**2 + >>> dataset = create_dataset(f, n_var=3) + >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001); + >>> model.attribute() + >>> model.feature_score + ''' + # output (out_dim, in_dim) + + if l != None: + self.attribute() + out_score = self.node_scores[l] + + if self.acts == None: + self.get_act() + + def score_node2subnode(node_score, width, mult_arity, out_dim): + + assert np.sum(width) == node_score.shape[1] + if isinstance(mult_arity, int): + n_subnode = width[0] + mult_arity * width[1] + else: + n_subnode = width[0] + int(np.sum(mult_arity)) + + #subnode_score_leaf = torch.zeros(out_dim, n_subnode).requires_grad_(True) + #subnode_score = subnode_score_leaf.clone() + #subnode_score[:,:width[0]] = node_score[:,:width[0]] + subnode_score = node_score[:,:width[0]] + if isinstance(mult_arity, int): + #subnode_score[:,width[0]:] = 
node_score[:,width[0]:][:,:,None].expand(out_dim, node_score[width[0]:].shape[0], mult_arity).reshape(out_dim,-1) + subnode_score = torch.cat([subnode_score, node_score[:,width[0]:][:,:,None].expand(out_dim, node_score[:,width[0]:].shape[1], mult_arity).reshape(out_dim,-1)], dim=1) + else: + acml = width[0] + for i in range(len(mult_arity)): + #subnode_score[:, acml:acml+mult_arity[i]] = node_score[:, width[0]+i] + subnode_score = torch.cat([subnode_score, node_score[:, width[0]+i].expand(out_dim, mult_arity[i])], dim=1) + acml += mult_arity[i] + return subnode_score + + + node_scores = [] + subnode_scores = [] + edge_scores = [] + + l_query = l + if l == None: + l_end = self.depth + else: + l_end = l + + # back propagate from the queried layer + out_dim = self.width_in[l_end] + if out_score == None: + node_score = torch.eye(out_dim).requires_grad_(True) + else: + node_score = torch.diag(out_score).requires_grad_(True) + node_scores.append(node_score) + + device = self.act_fun[0].grid.device + + for l in range(l_end,0,-1): + + # node to subnode + if isinstance(self.mult_arity, int): + subnode_score = score_node2subnode(node_score, self.width[l], self.mult_arity, out_dim=out_dim) + else: + mult_arity = self.mult_arity[l] + #subnode_score = score_node2subnode(node_score, self.width[l], mult_arity) + subnode_score = score_node2subnode(node_score, self.width[l], mult_arity, out_dim=out_dim) + + subnode_scores.append(subnode_score) + # subnode to edge + #print(self.edge_actscale[l-1].device, subnode_score.device, self.subnode_actscale[l-1].device) + edge_score = torch.einsum('ij,ki,i->kij', self.edge_actscale[l-1], subnode_score.to(device), 1/(self.subnode_actscale[l-1]+1e-4)) + edge_scores.append(edge_score) + + # edge to node + node_score = torch.sum(edge_score, dim=1) + node_scores.append(node_score) + + self.node_scores_all = list(reversed(node_scores)) + self.edge_scores_all = list(reversed(edge_scores)) + self.subnode_scores_all = list(reversed(subnode_scores)) + 
+ self.node_scores = [torch.mean(l, dim=0) for l in self.node_scores_all] + self.edge_scores = [torch.mean(l, dim=0) for l in self.edge_scores_all] + self.subnode_scores = [torch.mean(l, dim=0) for l in self.subnode_scores_all] + + # return + if l_query != None: + if i == None: + return self.node_scores_all[0] + else: + + # plot + if plot: + in_dim = self.width_in[0] + plt.figure(figsize=(1*in_dim, 3)) + plt.bar(range(in_dim),self.node_scores_all[0][i].cpu().detach().numpy()) + plt.xticks(range(in_dim)); + + return self.node_scores_all[0][i] + + def node_attribute(self): + self.node_attribute_scores = [] + for l in range(1, self.depth+1): + node_attr = self.attribute(l) + self.node_attribute_scores.append(node_attr) + + def feature_interaction(self, l, neuron_th = 1e-2, feature_th = 1e-2): + ''' + get feature interaction + + Args: + ----- + l : int + layer index + neuron_th : float + threshold to determine whether a neuron is active + feature_th : float + threshold to determine whether a feature is active + + Returns: + -------- + dictionary + + Example + ------- + >>> from kan import * + >>> model = KAN(width=[3,5,1], grid=5, k=3, noise_scale=0.3, seed=2) + >>> f = lambda x: 1 * x[:,[0]]**2 + 0.3 * x[:,[1]]**2 + 0.0 * x[:,[2]]**2 + >>> dataset = create_dataset(f, n_var=3) + >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001); + >>> model.attribute() + >>> model.feature_interaction(1) + ''' + dic = {} + width = self.width_in[l] + + for i in range(width): + score = self.attribute(l,i,plot=False) + + if torch.max(score) > neuron_th: + features = tuple(torch.where(score > torch.max(score) * feature_th)[0].detach().numpy()) + if features in dic.keys(): + dic[features] += 1 + else: + dic[features] = 1 + + return dic + + def suggest_symbolic(self, l, i, j, a_range=(-10, 10), b_range=(-10, 10), lib=None, topk=5, verbose=True, r2_loss_fun=lambda x: np.log2(1+1e-5-x), c_loss_fun=lambda x: x, weight_simple = 0.8): + ''' + suggest symbolic function + + Args: + ----- + l 
: int + layer index + i : int + neuron index in layer l + j : int + neuron index in layer j + a_range : tuple + search range of a + b_range : tuple + search range of b + lib : list of str + library of candidate symbolic functions + topk : int + the number of top functions displayed + verbose : bool + if verbose = True, print more information + r2_loss_fun : functoon + function : r2 -> "bits" + c_loss_fun : fun + function : c -> 'bits' + weight_simple : float + the simplifty weight: the higher, more prefer simplicity over performance + + + Returns: + -------- + best_name (str), best_fun (function), best_r2 (float), best_c (float) + + Example + ------- + >>> from kan import * + >>> model = KAN(width=[2,1,1], grid=5, k=3, noise_scale=0.0, seed=0) + >>> f = lambda x: torch.exp(torch.sin(torch.pi*x[:,[0]])+x[:,[1]]**2) + >>> dataset = create_dataset(f, n_var=3) + >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001); + >>> model.suggest_symbolic(0,1,0) + ''' + r2s = [] + cs = [] + + if lib == None: + symbolic_lib = SYMBOLIC_LIB + else: + symbolic_lib = {} + for item in lib: + symbolic_lib[item] = SYMBOLIC_LIB[item] + + # getting r2 and complexities + for (name, content) in symbolic_lib.items(): + r2 = self.fix_symbolic(l, i, j, name, a_range=a_range, b_range=b_range, verbose=False, log_history=False) + if r2 == -1e8: # zero function + r2s.append(-1e8) + else: + r2s.append(r2.item()) + self.unfix_symbolic(l, i, j, log_history=False) + c = content[2] + cs.append(c) + + r2s = np.array(r2s) + cs = np.array(cs) + r2_loss = r2_loss_fun(r2s).astype('float') + cs_loss = c_loss_fun(cs) + + loss = weight_simple * cs_loss + (1-weight_simple) * r2_loss + + sorted_ids = np.argsort(loss)[:topk] + r2s = r2s[sorted_ids][:topk] + cs = cs[sorted_ids][:topk] + r2_loss = r2_loss[sorted_ids][:topk] + cs_loss = cs_loss[sorted_ids][:topk] + loss = loss[sorted_ids][:topk] + + topk = np.minimum(topk, len(symbolic_lib)) + + if verbose == True: + # print results in a dataframe + results = {} 
+ results['function'] = [list(symbolic_lib.items())[sorted_ids[i]][0] for i in range(topk)] + results['fitting r2'] = r2s[:topk] + results['r2 loss'] = r2_loss[:topk] + results['complexity'] = cs[:topk] + results['complexity loss'] = cs_loss[:topk] + results['total loss'] = loss[:topk] + + df = pd.DataFrame(results) + print(df) + + best_name = list(symbolic_lib.items())[sorted_ids[0]][0] + best_fun = list(symbolic_lib.items())[sorted_ids[0]][1] + best_r2 = r2s[0] + best_c = cs[0] + + return best_name, best_fun, best_r2, best_c; + + def auto_symbolic(self, a_range=(-10, 10), b_range=(-10, 10), lib=None, verbose=1, weight_simple = 0.8, r2_threshold=0.0): + ''' + automatic symbolic regression for all edges + + Args: + ----- + a_range : tuple + search range of a + b_range : tuple + search range of b + lib : list of str + library of candidate symbolic functions + verbose : int + larger verbosity => more verbosity + weight_simple : float + a weight that prioritizies simplicity (low complexity) over performance (high r2) - set to 0.0 to ignore complexity + r2_threshold : float + If r2 is below this threshold, the edge will not be fixed with any symbolic function - set to 0.0 to ignore this threshold + Returns: + -------- + None + + Example + ------- + >>> from kan import * + >>> model = KAN(width=[2,1,1], grid=5, k=3, noise_scale=0.0, seed=0) + >>> f = lambda x: torch.exp(torch.sin(torch.pi*x[:,[0]])+x[:,[1]]**2) + >>> dataset = create_dataset(f, n_var=3) + >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001); + >>> model.auto_symbolic() + ''' + for l in range(len(self.width_in) - 1): + for i in range(self.width_in[l]): + for j in range(self.width_out[l + 1]): + if self.symbolic_fun[l].mask[j, i] > 0. and self.act_fun[l].mask[i][j] == 0.: + print(f'skipping ({l},{i},{j}) since already symbolic') + elif self.symbolic_fun[l].mask[j, i] == 0. 
and self.act_fun[l].mask[i][j] == 0.: + self.fix_symbolic(l, i, j, '0', verbose=verbose > 1, log_history=False) + print(f'fixing ({l},{i},{j}) with 0') + else: + name, fun, r2, c = self.suggest_symbolic(l, i, j, a_range=a_range, b_range=b_range, lib=lib, verbose=False, weight_simple=weight_simple) + if r2 >= r2_threshold: + self.fix_symbolic(l, i, j, name, verbose=verbose > 1, log_history=False) + if verbose >= 1: + print(f'fixing ({l},{i},{j}) with {name}, r2={r2}, c={c}') + else: + print(f'For ({l},{i},{j}) the best fit was {name}, but r^2 = {r2} and this is lower than {r2_threshold}. This edge was omitted, keep training or try a different threshold.') + + self.log_history('auto_symbolic') + + def symbolic_formula(self, var=None, normalizer=None, output_normalizer = None): + ''' + get symbolic formula + + Args: + ----- + var : None or a list of sympy expression + input variables + normalizer : [mean, std] + output_normalizer : [mean, std] + + Returns: + -------- + None + + Example + ------- + >>> from kan import * + >>> model = KAN(width=[2,1,1], grid=5, k=3, noise_scale=0.0, seed=0) + >>> f = lambda x: torch.exp(torch.sin(torch.pi*x[:,[0]])+x[:,[1]]**2) + >>> dataset = create_dataset(f, n_var=3) + >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001); + >>> model.auto_symbolic() + >>> model.symbolic_formula()[0][0] + ''' + + symbolic_acts = [] + symbolic_acts_premult = [] + x = [] + + def ex_round(ex1, n_digit): + ex2 = ex1 + for a in sympy.preorder_traversal(ex1): + if isinstance(a, sympy.Float): + ex2 = ex2.subs(a, round(a, n_digit)) + return ex2 + + # define variables + if var == None: + for ii in range(1, self.width[0][0] + 1): + exec(f"x{ii} = sympy.Symbol('x_{ii}')") + exec(f"x.append(x{ii})") + elif isinstance(var[0], sympy.Expr): + x = var + else: + x = [sympy.symbols(var_) for var_ in var] + + x0 = x + + if normalizer != None: + mean = normalizer[0] + std = normalizer[1] + x = [(x[i] - mean[i]) / std[i] for i in range(len(x))] + + 
symbolic_acts.append(x) + + for l in range(len(self.width_in) - 1): + num_sum = self.width[l + 1][0] + num_mult = self.width[l + 1][1] + y = [] + for j in range(self.width_out[l + 1]): + yj = 0. + for i in range(self.width_in[l]): + a, b, c, d = self.symbolic_fun[l].affine[j, i] + sympy_fun = self.symbolic_fun[l].funs_sympy[j][i] + try: + yj += c * sympy_fun(a * x[i] + b) + d + except: + print('make sure all activations need to be converted to symbolic formulas first!') + return + yj = self.subnode_scale[l][j] * yj + self.subnode_bias[l][j] + if simplify == True: + y.append(sympy.simplify(yj)) + else: + y.append(yj) + + symbolic_acts_premult.append(y) + + mult = [] + for k in range(num_mult): + if isinstance(self.mult_arity, int): + mult_arity = self.mult_arity + else: + mult_arity = self.mult_arity[l+1][k] + for i in range(mult_arity-1): + if i == 0: + mult_k = y[num_sum+2*k] * y[num_sum+2*k+1] + else: + mult_k = mult_k * y[num_sum+2*k+i+1] + mult.append(mult_k) + + y = y[:num_sum] + mult + + for j in range(self.width_in[l+1]): + y[j] = self.node_scale[l][j] * y[j] + self.node_bias[l][j] + + x = y + symbolic_acts.append(x) + + if output_normalizer != None: + output_layer = symbolic_acts[-1] + means = output_normalizer[0] + stds = output_normalizer[1] + + assert len(output_layer) == len(means), 'output_normalizer does not match the output layer' + assert len(output_layer) == len(stds), 'output_normalizer does not match the output layer' + + output_layer = [(output_layer[i] * stds[i] + means[i]) for i in range(len(output_layer))] + symbolic_acts[-1] = output_layer + + + self.symbolic_acts = [[symbolic_acts[l][i] for i in range(len(symbolic_acts[l]))] for l in range(len(symbolic_acts))] + self.symbolic_acts_premult = [[symbolic_acts_premult[l][i] for i in range(len(symbolic_acts_premult[l]))] for l in range(len(symbolic_acts_premult))] + + out_dim = len(symbolic_acts[-1]) + #return [symbolic_acts[-1][i] for i in range(len(symbolic_acts[-1]))], x0 + + if simplify: + 
return [symbolic_acts[-1][i] for i in range(len(symbolic_acts[-1]))], x0 + else: + return [symbolic_acts[-1][i] for i in range(len(symbolic_acts[-1]))], x0 + + + def expand_depth(self): + ''' + expand network depth, add an indentity layer to the end. For usage, please refer to tutorials interp_3_KAN_compiler.ipynb. + + Args: + ----- + var : None or a list of sympy expression + input variables + normalizer : [mean, std] + output_normalizer : [mean, std] + + Returns: + -------- + None + ''' + self.depth += 1 + + # add kanlayer, set mask to zero + dim_out = self.width_in[-1] + layer = KANLayer(dim_out, dim_out, num=self.grid, k=self.k) + layer.mask *= 0. + self.act_fun.append(layer) + + self.width.append([dim_out, 0]) + self.mult_arity.append([]) + + # add symbolic_kanlayer set mask to one. fun = identity on diagonal and zero for off-diagonal + layer = Symbolic_KANLayer(dim_out, dim_out) + layer.mask += 1. + + for j in range(dim_out): + for i in range(dim_out): + if i == j: + layer.fix_symbolic(i,j,'x') + else: + layer.fix_symbolic(i,j,'0') + + self.symbolic_fun.append(layer) + + self.node_bias.append(torch.nn.Parameter(torch.zeros(dim_out,device=self.device)).requires_grad_(self.affine_trainable)) + self.node_scale.append(torch.nn.Parameter(torch.ones(dim_out,device=self.device)).requires_grad_(self.affine_trainable)) + self.subnode_bias.append(torch.nn.Parameter(torch.zeros(dim_out,device=self.device)).requires_grad_(self.affine_trainable)) + self.subnode_scale.append(torch.nn.Parameter(torch.ones(dim_out,device=self.device)).requires_grad_(self.affine_trainable)) + + def expand_width(self, layer_id, n_added_nodes, sum_bool=True, mult_arity=2): + ''' + expand network width. For usage, please refer to tutorials interp_3_KAN_compiler.ipynb. 
+ + Args: + ----- + layer_id : int + layer index + n_added_nodes : init + the number of added nodes + sum_bool : bool + if sum_bool == True, added nodes are addition nodes; otherwise multiplication nodes + mult_arity : init + multiplication arity (the number of numbers to be multiplied) + + Returns: + -------- + None + ''' + def _expand(layer_id, n_added_nodes, sum_bool=True, mult_arity=2, added_dim='out'): + l = layer_id + in_dim = self.symbolic_fun[l].in_dim + out_dim = self.symbolic_fun[l].out_dim + if sum_bool: + + if added_dim == 'out': + new = Symbolic_KANLayer(in_dim, out_dim + n_added_nodes) + old = self.symbolic_fun[l] + in_id = np.arange(in_dim) + out_id = np.arange(out_dim + n_added_nodes) + + for j in out_id: + for i in in_id: + new.fix_symbolic(i,j,'0') + new.mask += 1. + + for j in out_id: + for i in in_id: + if j > n_added_nodes-1: + new.funs[j][i] = old.funs[j-n_added_nodes][i] + new.funs_avoid_singularity[j][i] = old.funs_avoid_singularity[j-n_added_nodes][i] + new.funs_sympy[j][i] = old.funs_sympy[j-n_added_nodes][i] + new.funs_name[j][i] = old.funs_name[j-n_added_nodes][i] + new.affine.data[j][i] = old.affine.data[j-n_added_nodes][i] + + self.symbolic_fun[l] = new + self.act_fun[l] = KANLayer(in_dim, out_dim + n_added_nodes, num=self.grid, k=self.k) + self.act_fun[l].mask *= 0. 
+ + self.node_scale[l].data = torch.cat([torch.ones(n_added_nodes, device=self.device), self.node_scale[l].data]) + self.node_bias[l].data = torch.cat([torch.zeros(n_added_nodes, device=self.device), self.node_bias[l].data]) + self.subnode_scale[l].data = torch.cat([torch.ones(n_added_nodes, device=self.device), self.subnode_scale[l].data]) + self.subnode_bias[l].data = torch.cat([torch.zeros(n_added_nodes, device=self.device), self.subnode_bias[l].data]) + + + + if added_dim == 'in': + new = Symbolic_KANLayer(in_dim + n_added_nodes, out_dim) + old = self.symbolic_fun[l] + in_id = np.arange(in_dim + n_added_nodes) + out_id = np.arange(out_dim) + + for j in out_id: + for i in in_id: + new.fix_symbolic(i,j,'0') + new.mask += 1. + + for j in out_id: + for i in in_id: + if i > n_added_nodes-1: + new.funs[j][i] = old.funs[j][i-n_added_nodes] + new.funs_avoid_singularity[j][i] = old.funs_avoid_singularity[j][i-n_added_nodes] + new.funs_sympy[j][i] = old.funs_sympy[j][i-n_added_nodes] + new.funs_name[j][i] = old.funs_name[j][i-n_added_nodes] + new.affine.data[j][i] = old.affine.data[j][i-n_added_nodes] + + self.symbolic_fun[l] = new + self.act_fun[l] = KANLayer(in_dim + n_added_nodes, out_dim, num=self.grid, k=self.k) + self.act_fun[l].mask *= 0. + + + else: + + if isinstance(mult_arity, int): + mult_arity = [mult_arity] * n_added_nodes + + if added_dim == 'out': + n_added_subnodes = np.sum(mult_arity) + new = Symbolic_KANLayer(in_dim, out_dim + n_added_subnodes) + old = self.symbolic_fun[l] + in_id = np.arange(in_dim) + out_id = np.arange(out_dim + n_added_nodes) + + for j in out_id: + for i in in_id: + new.fix_symbolic(i,j,'0') + new.mask += 1. 
+ + for j in out_id: + for i in in_id: + if j < out_dim: + new.funs[j][i] = old.funs[j][i] + new.funs_avoid_singularity[j][i] = old.funs_avoid_singularity[j][i] + new.funs_sympy[j][i] = old.funs_sympy[j][i] + new.funs_name[j][i] = old.funs_name[j][i] + new.affine.data[j][i] = old.affine.data[j][i] + + self.symbolic_fun[l] = new + self.act_fun[l] = KANLayer(in_dim, out_dim + n_added_subnodes, num=self.grid, k=self.k) + self.act_fun[l].mask *= 0. + + self.node_scale[l].data = torch.cat([self.node_scale[l].data, torch.ones(n_added_nodes, device=self.device)]) + self.node_bias[l].data = torch.cat([self.node_bias[l].data, torch.zeros(n_added_nodes, device=self.device)]) + self.subnode_scale[l].data = torch.cat([self.subnode_scale[l].data, torch.ones(n_added_subnodes, device=self.device)]) + self.subnode_bias[l].data = torch.cat([self.subnode_bias[l].data, torch.zeros(n_added_subnodes, device=self.device)]) + + if added_dim == 'in': + new = Symbolic_KANLayer(in_dim + n_added_nodes, out_dim) + old = self.symbolic_fun[l] + in_id = np.arange(in_dim + n_added_nodes) + out_id = np.arange(out_dim) + + for j in out_id: + for i in in_id: + new.fix_symbolic(i,j,'0') + new.mask += 1. + + for j in out_id: + for i in in_id: + if i < in_dim: + new.funs[j][i] = old.funs[j][i] + new.funs_avoid_singularity[j][i] = old.funs_avoid_singularity[j][i] + new.funs_sympy[j][i] = old.funs_sympy[j][i] + new.funs_name[j][i] = old.funs_name[j][i] + new.affine.data[j][i] = old.affine.data[j][i] + + self.symbolic_fun[l] = new + self.act_fun[l] = KANLayer(in_dim + n_added_nodes, out_dim, num=self.grid, k=self.k) + self.act_fun[l].mask *= 0. 
+ + _expand(layer_id-1, n_added_nodes, sum_bool, mult_arity, added_dim='out') + _expand(layer_id, n_added_nodes, sum_bool, mult_arity, added_dim='in') + if sum_bool: + self.width[layer_id][0] += n_added_nodes + else: + if isinstance(mult_arity, int): + mult_arity = [mult_arity] * n_added_nodes + + self.width[layer_id][1] += n_added_nodes + self.mult_arity[layer_id] += mult_arity + + def perturb(self, mag=1.0, mode='non-intrusive'): + ''' + preturb a network. For usage, please refer to tutorials interp_3_KAN_compiler.ipynb. + + Args: + ----- + mag : float + perturbation magnitude + mode : str + pertubatation mode, choices = {'non-intrusive', 'all', 'minimal'} + + Returns: + -------- + None + ''' + perturb_bool = {} + + if mode == 'all': + perturb_bool['aa_a'] = True + perturb_bool['aa_i'] = True + perturb_bool['ai'] = True + perturb_bool['ia'] = True + perturb_bool['ii'] = True + elif mode == 'non-intrusive': + perturb_bool['aa_a'] = False + perturb_bool['aa_i'] = False + perturb_bool['ai'] = True + perturb_bool['ia'] = False + perturb_bool['ii'] = True + elif mode == 'minimal': + perturb_bool['aa_a'] = True + perturb_bool['aa_i'] = False + perturb_bool['ai'] = False + perturb_bool['ia'] = False + perturb_bool['ii'] = False + else: + raise Exception('mode not recognized, valid modes are \'all\', \'non-intrusive\', \'minimal\'.') + + for l in range(self.depth): + funs_name = self.symbolic_fun[l].funs_name + for j in range(self.width_out[l+1]): + for i in range(self.width_in[l]): + out_array = list(np.array(self.symbolic_fun[l].funs_name)[j]) + in_array = list(np.array(self.symbolic_fun[l].funs_name)[:,i]) + out_active = len([i for i, x in enumerate(out_array) if x != "0"]) > 0 + in_active = len([i for i, x in enumerate(in_array) if x != "0"]) > 0 + dic = {True: 'a', False: 'i'} + edge_type = dic[in_active] + dic[out_active] + + if l < self.depth - 1 or mode != 'non-intrusive': + + if edge_type == 'aa': + if self.symbolic_fun[l].funs_name[j][i] == '0': + edge_type += 
'_i' + else: + edge_type += '_a' + + if perturb_bool[edge_type]: + self.act_fun[l].mask.data[i][j] = mag + + if l == self.depth - 1 and mode == 'non-intrusive': + + self.act_fun[l].mask.data[i][j] = torch.tensor(1.) + self.act_fun[l].scale_base.data[i][j] = torch.tensor(0.) + self.act_fun[l].scale_sp.data[i][j] = torch.tensor(0.) + + self.get_act(self.cache_data) + + self.log_history('perturb') + + + def module(self, start_layer, chain): + ''' + specify network modules + + Args: + ----- + start_layer : int + the earliest layer of the module + chain : str + specify neurons in the module + + Returns: + -------- + None + ''' + #chain = '[-1]->[-1,-2]->[-1]->[-1]' + groups = chain.split('->') + n_total_layers = len(groups)//2 + #start_layer = 0 + + for l in range(n_total_layers): + current_layer = cl = start_layer + l + id_in = [int(i) for i in groups[2*l][1:-1].split(',')] + id_out = [int(i) for i in groups[2*l+1][1:-1].split(',')] + + in_dim = self.width_in[cl] + out_dim = self.width_out[cl+1] + id_in_other = list(set(range(in_dim)) - set(id_in)) + id_out_other = list(set(range(out_dim)) - set(id_out)) + self.act_fun[cl].mask.data[np.ix_(id_in_other,id_out)] = 0. + self.act_fun[cl].mask.data[np.ix_(id_in,id_out_other)] = 0. + self.symbolic_fun[cl].mask.data[np.ix_(id_out,id_in_other)] = 0. + self.symbolic_fun[cl].mask.data[np.ix_(id_out_other,id_in)] = 0. 
+ + self.log_history('module') + + def tree(self, x=None, in_var=None, style='tree', sym_th=1e-3, sep_th=1e-1, skip_sep_test=False, verbose=False): + ''' + turn KAN into a tree + ''' + if x == None: + x = self.cache_data + plot_tree(self, x, in_var=in_var, style=style, sym_th=sym_th, sep_th=sep_th, skip_sep_test=skip_sep_test, verbose=verbose) + + + def speed(self, compile=False): + ''' + turn on KAN's speed mode + ''' + self.symbolic_enabled=False + self.save_act=False + self.auto_save=False + if compile == True: + return torch.compile(self) + else: + return self + + def get_act(self, x=None): + ''' + collect intermidate activations + ''' + if isinstance(x, dict): + x = x['train_input'] + if x == None: + if self.cache_data != None: + x = self.cache_data + else: + raise Exception("missing input data x") + save_act = self.save_act + self.save_act = True + self.forward(x) + self.save_act = save_act + + def get_fun(self, l, i, j): + ''' + get function (l,i,j) + ''' + inputs = self.spline_preacts[l][:,j,i].cpu().detach().numpy() + outputs = self.spline_postacts[l][:,j,i].cpu().detach().numpy() + # they are not ordered yet + rank = np.argsort(inputs) + inputs = inputs[rank] + outputs = outputs[rank] + plt.figure(figsize=(3,3)) + plt.plot(inputs, outputs, marker="o") + return inputs, outputs + + + def history(self, k='all'): + ''' + get history + ''' + with open(self.ckpt_path+'/history.txt', 'r') as f: + data = f.readlines() + n_line = len(data) + if k == 'all': + k = n_line + + data = data[-k:] + for line in data: + print(line[:-1]) + @property + def n_edge(self): + ''' + the number of active edges + ''' + depth = len(self.act_fun) + complexity = 0 + for l in range(depth): + complexity += torch.sum(self.act_fun[l].mask > 0.) 
+ return complexity.item() + + def evaluate(self, dataset): + evaluation = {} + evaluation['test_loss'] = torch.sqrt(torch.mean((self.forward(dataset['test_input']) - dataset['test_label'])**2)).item() + evaluation['n_edge'] = self.n_edge + evaluation['n_grid'] = self.grid + # add other metrics (maybe accuracy) + return evaluation + + def swap(self, l, i1, i2, log_history=True): + + self.act_fun[l-1].swap(i1,i2,mode='out') + self.symbolic_fun[l-1].swap(i1,i2,mode='out') + self.act_fun[l].swap(i1,i2,mode='in') + self.symbolic_fun[l].swap(i1,i2,mode='in') + + def swap_(data, i1, i2): + data[i1], data[i2] = data[i2], data[i1] + + swap_(self.node_scale[l-1].data, i1, i2) + swap_(self.node_bias[l-1].data, i1, i2) + swap_(self.subnode_scale[l-1].data, i1, i2) + swap_(self.subnode_bias[l-1].data, i1, i2) + + if log_history: + self.log_history('swap') + + @property + def connection_cost(self): + + cc = 0. + for t in self.edge_scores: + + def get_coordinate(n): + return torch.linspace(0,1,steps=n+1, device=self.device)[:n] + 1/(2*n) + + in_dim = t.shape[0] + x_in = get_coordinate(in_dim) + + out_dim = t.shape[1] + x_out = get_coordinate(out_dim) + + dist = torch.abs(x_in[:,None] - x_out[None,:]) + cc += torch.sum(dist * t) + + return cc + + def auto_swap_l(self, l): + + num = self.width_in[1] + for i in range(num): + ccs = [] + for j in range(num): + self.swap(l,i,j,log_history=False) + self.get_act() + self.attribute() + cc = self.connection_cost.detach().clone() + ccs.append(cc) + self.swap(l,i,j,log_history=False) + j = torch.argmin(torch.tensor(ccs)) + self.swap(l,i,j,log_history=False) + + def auto_swap(self): + ''' + automatically swap neurons such as connection costs are minimized + ''' + depth = self.depth + for l in range(1, depth): + self.auto_swap_l(l) + + self.log_history('auto_swap') + +KAN = MultKAN diff --git a/dl/kan/kan/.ipynb_checkpoints/Symbolic_KANLayer-checkpoint.py b/dl/kan/kan/.ipynb_checkpoints/Symbolic_KANLayer-checkpoint.py new file mode 100644 
index 000000000..51baf0af5 --- /dev/null +++ b/dl/kan/kan/.ipynb_checkpoints/Symbolic_KANLayer-checkpoint.py @@ -0,0 +1,270 @@ +import torch +import torch.nn as nn +import numpy as np +import sympy +from .utils import * + + + +class Symbolic_KANLayer(nn.Module): + ''' + KANLayer class + + Attributes: + ----------- + in_dim : int + input dimension + out_dim : int + output dimension + funs : 2D array of torch functions (or lambda functions) + symbolic functions (torch) + funs_avoid_singularity : 2D array of torch functions (or lambda functions) with singularity avoiding + funs_name : 2D arry of str + names of symbolic functions + funs_sympy : 2D array of sympy functions (or lambda functions) + symbolic functions (sympy) + affine : 3D array of floats + affine transformations of inputs and outputs + ''' + def __init__(self, in_dim=3, out_dim=2, device='cpu'): + ''' + initialize a Symbolic_KANLayer (activation functions are initialized to be identity functions) + + Args: + ----- + in_dim : int + input dimension + out_dim : int + output dimension + device : str + device + + Returns: + -------- + self + + Example + ------- + >>> sb = Symbolic_KANLayer(in_dim=3, out_dim=3) + >>> len(sb.funs), len(sb.funs[0]) + ''' + super(Symbolic_KANLayer, self).__init__() + self.out_dim = out_dim + self.in_dim = in_dim + self.mask = torch.nn.Parameter(torch.zeros(out_dim, in_dim, device=device)).requires_grad_(False) + # torch + self.funs = [[lambda x: x*0. for i in range(self.in_dim)] for j in range(self.out_dim)] + self.funs_avoid_singularity = [[lambda x, y_th: ((), x*0.) for i in range(self.in_dim)] for j in range(self.out_dim)] + # name + self.funs_name = [['0' for i in range(self.in_dim)] for j in range(self.out_dim)] + # sympy + self.funs_sympy = [[lambda x: x*0. for i in range(self.in_dim)] for j in range(self.out_dim)] + ### make funs_name the only parameter, and make others as the properties of funs_name? 
+ + self.affine = torch.nn.Parameter(torch.zeros(out_dim, in_dim, 4, device=device)) + # c*f(a*x+b)+d + + self.device = device + self.to(device) + + def to(self, device): + ''' + move to device + ''' + super(Symbolic_KANLayer, self).to(device) + self.device = device + return self + + def forward(self, x, singularity_avoiding=False, y_th=10.): + ''' + forward + + Args: + ----- + x : 2D array + inputs, shape (batch, input dimension) + singularity_avoiding : bool + if True, funs_avoid_singularity is used; if False, funs is used. + y_th : float + the singularity threshold + + Returns: + -------- + y : 2D array + outputs, shape (batch, output dimension) + postacts : 3D array + activations after activation functions but before being summed on nodes + + Example + ------- + >>> sb = Symbolic_KANLayer(in_dim=3, out_dim=5) + >>> x = torch.normal(0,1,size=(100,3)) + >>> y, postacts = sb(x) + >>> y.shape, postacts.shape + (torch.Size([100, 5]), torch.Size([100, 5, 3])) + ''' + + batch = x.shape[0] + postacts = [] + + for i in range(self.in_dim): + postacts_ = [] + for j in range(self.out_dim): + if singularity_avoiding: + xij = self.affine[j,i,2]*self.funs_avoid_singularity[j][i](self.affine[j,i,0]*x[:,[i]]+self.affine[j,i,1], torch.tensor(y_th))[1]+self.affine[j,i,3] + else: + xij = self.affine[j,i,2]*self.funs[j][i](self.affine[j,i,0]*x[:,[i]]+self.affine[j,i,1])+self.affine[j,i,3] + postacts_.append(self.mask[j][i]*xij) + postacts.append(torch.stack(postacts_)) + + postacts = torch.stack(postacts) + postacts = postacts.permute(2,1,0,3)[:,:,:,0] + y = torch.sum(postacts, dim=2) + + return y, postacts + + + def get_subset(self, in_id, out_id): + ''' + get a smaller Symbolic_KANLayer from a larger Symbolic_KANLayer (used for pruning) + + Args: + ----- + in_id : list + id of selected input neurons + out_id : list + id of selected output neurons + + Returns: + -------- + spb : Symbolic_KANLayer + + Example + ------- + >>> sb_large = Symbolic_KANLayer(in_dim=10, out_dim=10) + >>> 
sb_small = sb_large.get_subset([0,9],[1,2,3]) + >>> sb_small.in_dim, sb_small.out_dim + ''' + sbb = Symbolic_KANLayer(self.in_dim, self.out_dim, device=self.device) + sbb.in_dim = len(in_id) + sbb.out_dim = len(out_id) + sbb.mask.data = self.mask.data[out_id][:,in_id] + sbb.funs = [[self.funs[j][i] for i in in_id] for j in out_id] + sbb.funs_avoid_singularity = [[self.funs_avoid_singularity[j][i] for i in in_id] for j in out_id] + sbb.funs_sympy = [[self.funs_sympy[j][i] for i in in_id] for j in out_id] + sbb.funs_name = [[self.funs_name[j][i] for i in in_id] for j in out_id] + sbb.affine.data = self.affine.data[out_id][:,in_id] + return sbb + + + def fix_symbolic(self, i, j, fun_name, x=None, y=None, random=False, a_range=(-10,10), b_range=(-10,10), verbose=True): + ''' + fix an activation function to be symbolic + + Args: + ----- + i : int + the id of input neuron + j : int + the id of output neuron + fun_name : str + the name of the symbolic functions + x : 1D array + preactivations + y : 1D array + postactivations + a_range : tuple + sweeping range of a + b_range : tuple + sweeping range of a + verbose : bool + print more information if True + + Returns: + -------- + r2 (coefficient of determination) + + Example 1 + --------- + >>> # when x & y are not provided. 
Affine parameters are set to a = 1, b = 0, c = 1, d = 0 + >>> sb = Symbolic_KANLayer(in_dim=3, out_dim=2) + >>> sb.fix_symbolic(2,1,'sin') + >>> print(sb.funs_name) + >>> print(sb.affine) + + Example 2 + --------- + >>> # when x & y are provided, fit_params() is called to find the best fit coefficients + >>> sb = Symbolic_KANLayer(in_dim=3, out_dim=2) + >>> batch = 100 + >>> x = torch.linspace(-1,1,steps=batch) + >>> noises = torch.normal(0,1,(batch,)) * 0.02 + >>> y = 5.0*torch.sin(3.0*x + 2.0) + 0.7 + noises + >>> sb.fix_symbolic(2,1,'sin',x,y) + >>> print(sb.funs_name) + >>> print(sb.affine[1,2,:].data) + ''' + if isinstance(fun_name,str): + fun = SYMBOLIC_LIB[fun_name][0] + fun_sympy = SYMBOLIC_LIB[fun_name][1] + fun_avoid_singularity = SYMBOLIC_LIB[fun_name][3] + self.funs_sympy[j][i] = fun_sympy + self.funs_name[j][i] = fun_name + + if x == None or y == None: + #initialzie from just fun + self.funs[j][i] = fun + self.funs_avoid_singularity[j][i] = fun_avoid_singularity + if random == False: + self.affine.data[j][i] = torch.tensor([1.,0.,1.,0.]) + else: + self.affine.data[j][i] = torch.rand(4,) * 2 - 1 + return None + else: + #initialize from x & y and fun + params, r2 = fit_params(x,y,fun, a_range=a_range, b_range=b_range, verbose=verbose, device=self.device) + self.funs[j][i] = fun + self.funs_avoid_singularity[j][i] = fun_avoid_singularity + self.affine.data[j][i] = params + return r2 + else: + # if fun_name itself is a function + fun = fun_name + fun_sympy = fun_name + self.funs_sympy[j][i] = fun_sympy + self.funs_name[j][i] = "anonymous" + + self.funs[j][i] = fun + self.funs_avoid_singularity[j][i] = fun + if random == False: + self.affine.data[j][i] = torch.tensor([1.,0.,1.,0.]) + else: + self.affine.data[j][i] = torch.rand(4,) * 2 - 1 + return None + + def swap(self, i1, i2, mode='in'): + ''' + swap the i1 neuron with the i2 neuron in input (if mode == 'in') or output (if mode == 'out') + ''' + with torch.no_grad(): + def swap_list_(data, i1, i2, 
from sympy import *
import sympy
import numpy as np
from kan.MultKAN import MultKAN
import torch

def next_nontrivial_operation(expr, scale=1, bias=0):
    '''
    remove the affine part of an expression

    Args:
    -----
        expr : sympy expression
        scale : float
        bias : float

    Returns:
    --------
        expr : sympy expression
        scale : float
        bias : float

    Example
    -------
    >>> from kan.compiler import *
    >>> from sympy import *
    >>> input_vars = a, b = symbols('a b')
    >>> expression = 3.14534242 * exp(sin(pi*a) + b**2) - 2.32345402
    >>> next_nontrivial_operation(expression)
    '''
    if expr.func == Add or expr.func == Mul:
        n_arg = len(expr.args)
        n_num = 0
        n_var_id = []
        n_num_id = []
        var_args = []
        for i in range(n_arg):
            is_number = expr.args[i].is_number
            n_num += is_number
            if not is_number:
                n_var_id.append(i)
                var_args.append(expr.args[i])
            else:
                n_num_id.append(i)
        if n_num > 0:
            # numeric arguments are folded into bias (Add) or scale (Mul),
            # then we recurse on the remaining non-numeric part
            if expr.func == Add:
                for i in range(n_num):
                    if i == 0:
                        bias = expr.args[n_num_id[i]]
                    else:
                        bias += expr.args[n_num_id[i]]
            if expr.func == Mul:
                for i in range(n_num):
                    if i == 0:
                        scale = expr.args[n_num_id[i]]
                    else:
                        scale *= expr.args[n_num_id[i]]
            return next_nontrivial_operation(expr.func(*var_args), scale, bias)
        else:
            return expr, scale, bias
    else:
        return expr, scale, bias


def expr2kan(input_variables, expr, grid=5, k=3, auto_save=False):
    '''
    compile a symbolic formula to a MultKAN

    Args:
    -----
        input_variables : a list of sympy symbols
        expr : sympy expression
        grid : int
            the number of grid intervals
        k : int
            spline order
        auto_save : bool
            if auto_save = True, models are automatically saved

    Returns:
    --------
        MultKAN

    Example
    -------
    >>> from kan.compiler import *
    >>> from sympy import *
    >>> input_vars = a, b = symbols('a b')
    >>> expression = exp(sin(pi*a) + b**2)
    >>> model = kanpiler(input_vars, expression)
    >>> x = torch.rand(100,2) * 2 - 1
    >>> model(x)
    >>> model.plot()
    '''
    class Node:
        # a KAN node in the expression tree; registers itself in the
        # closure-level `Nodes` table, keyed by depth
        def __init__(self, expr, mult_bool, depth, scale, bias, parent=None, mult_arity=None):
            self.expr = expr
            self.mult_bool = mult_bool
            if self.mult_bool:
                self.mult_arity = mult_arity
            self.depth = depth

            if len(Nodes) <= depth:
                Nodes.append([])
                index = 0
            else:
                index = len(Nodes[depth])

            Nodes[depth].append(self)

            self.index = index
            if parent is None:
                self.parent_index = None
            else:
                self.parent_index = parent.index
            self.child_index = []

            # update parent's child_index
            if parent is not None:
                parent.child_index.append(self.index)

            self.scale = scale
            self.bias = bias


    class SubNode:
        # a KAN subnode; registers itself in the closure-level `SubNodes` table
        def __init__(self, expr, depth, scale, bias, parent=None):
            self.expr = expr
            self.depth = depth

            if len(SubNodes) <= depth:
                SubNodes.append([])
                index = 0
            else:
                index = len(SubNodes[depth])

            SubNodes[depth].append(self)

            self.index = index
            self.parent_index = None # shape: (2,)
            self.child_index = [] # shape: (n, 2)

            # update parent's child_index
            parent.child_index.append(self.index)

            self.scale = scale
            self.bias = bias


    class Connection:
        # connection = activation function that connects a subnode to a node
        # in the next layer; y = c*fun(a*x+b)+d with affine = (a,b,c,d)
        def __init__(self, affine, fun, fun_name, parent=None, child=None, power_exponent=None):
            self.affine = affine #[1,0,1,0] # (a,b,c,d)
            self.fun = fun # y = c*fun(a*x+b)+d
            self.fun_name = fun_name
            self.parent_index = parent.index
            self.depth = parent.depth
            self.child_index = child.index
            self.power_exponent = power_exponent # if fun == Pow
            Connections[(self.depth,self.parent_index,self.child_index)] = self

    def create_node(expr, parent=None, n_layer=None):
        # recursively decompose `expr` into Node/SubNode/Connection objects;
        # when n_layer is given, Symbol leaves are padded with identity
        # connections down to depth n_layer so every input sits in the last layer
        expr, scale, bias = next_nontrivial_operation(expr)
        if parent is None:
            depth = 0
        else:
            depth = parent.depth

        if expr.func == Mul:
            mult_arity = len(expr.args)
            node = Node(expr, True, depth, scale, bias, parent=parent, mult_arity=mult_arity)
            # create mult_arity SubNodes
            for i in range(mult_arity):
                expr_i, scale, bias = next_nontrivial_operation(expr.args[i])
                subnode = SubNode(expr_i, node.depth+1, scale, bias, parent=node)
                if expr_i.func == Add:
                    for j in range(len(expr_i.args)):
                        expr_ij, scale, bias = next_nontrivial_operation(expr_i.args[j])
                        # expr_ij is impossible to be Add, should be Mul or 1D
                        if expr_ij.func == Mul:
                            new_node = create_node(expr_ij, parent=subnode, n_layer=n_layer)
                            # connection is a linear function
                            Connection([1,0,float(scale),float(bias)], lambda x: x, 'x', parent=subnode, child=new_node)
                        elif expr_ij.func == Symbol:
                            new_node = create_node(expr_ij, parent=subnode, n_layer=n_layer)
                            Connection([1,0,float(scale),float(bias)], lambda x: x, fun_name = 'x', parent=subnode, child=new_node)
                        else:
                            # 1D function case: recurse on the argument, connect via expr_ij.func
                            new_node = create_node(expr_ij.args[0], parent=subnode, n_layer=n_layer)
                            if expr_ij.func == Pow:
                                power_exponent = expr_ij.args[1]
                            else:
                                power_exponent = None
                            Connection([1,0,float(scale),float(bias)], expr_ij.func, fun_name = expr_ij.func, parent=subnode, child=new_node, power_exponent=power_exponent)

                elif expr_i.func == Mul:
                    new_node = create_node(expr_i, parent=subnode, n_layer=n_layer)
                    Connection([1,0,1,0], lambda x: x, fun_name = 'x', parent=subnode, child=new_node)

                elif expr_i.func == Symbol:
                    new_node = create_node(expr_i, parent=subnode, n_layer=n_layer)
                    Connection([1,0,1,0], lambda x: x, fun_name = 'x', parent=subnode, child=new_node)

                else:
                    # 1D function case
                    new_node = create_node(expr_i.args[0], parent=subnode, n_layer=n_layer)
                    if expr_i.func == Pow:
                        power_exponent = expr_i.args[1]
                    else:
                        power_exponent = None
                    Connection([1,0,1,0], expr_i.func, fun_name = expr_i.func, parent=subnode, child=new_node, power_exponent=power_exponent)

        elif expr.func == Add:

            node = Node(expr, False, depth, scale, bias, parent=parent)
            subnode = SubNode(expr, node.depth+1, 1, 0, parent=node)

            for i in range(len(expr.args)):
                expr_i, scale, bias = next_nontrivial_operation(expr.args[i])
                if expr_i.func == Mul:
                    new_node = create_node(expr_i, parent=subnode, n_layer=n_layer)
                    Connection([1,0,float(scale),float(bias)], lambda x: x, fun_name = 'x', parent=subnode, child=new_node)

                elif expr_i.func == Symbol:
                    new_node = create_node(expr_i, parent=subnode, n_layer=n_layer)
                    Connection([1,0,float(scale),float(bias)], lambda x: x, fun_name = 'x', parent=subnode, child=new_node)

                else:
                    # 1D function case
                    new_node = create_node(expr_i.args[0], parent=subnode, n_layer=n_layer)
                    if expr_i.func == Pow:
                        power_exponent = expr_i.args[1]
                    else:
                        power_exponent = None
                    Connection([1,0,float(scale),float(bias)], expr_i.func, fun_name = expr_i.func, parent=subnode, child=new_node, power_exponent=power_exponent)

        elif expr.func == Symbol:
            # expr is one of the input variables
            if n_layer is None:
                node = Node(expr, False, depth, scale, bias, parent=parent)
            else:
                node = Node(expr, False, depth, scale, bias, parent=parent)
                return_node = node
                # pad with identity connections so the leaf reaches the input layer
                for i in range(n_layer - depth):
                    subnode = SubNode(expr, node.depth+1, 1, 0, parent=node)
                    node = Node(expr, False, subnode.depth, 1, 0, parent=subnode)
                    Connection([1,0,1,0], lambda x: x, fun_name = 'x', parent=subnode, child=node)
                node = return_node

            Start_Nodes.append(node)

        else:
            # expr.func is a 1D function
            node = Node(expr, False, depth, scale, bias, parent=parent)
            expr_i, scale, bias = next_nontrivial_operation(expr.args[0])
            subnode = SubNode(expr_i, node.depth+1, 1, 0, parent=node)
            new_node = create_node(expr.args[0], parent=subnode, n_layer=n_layer)
            if expr.func == Pow:
                power_exponent = expr.args[1]
            else:
                power_exponent = None
            Connection([1,0,1,0], expr.func, fun_name = expr.func, parent=subnode, child=new_node, power_exponent=power_exponent)

        return node

    # first pass: only to measure the tree depth
    Nodes = [[]]
    SubNodes = [[]]
    Connections = {}
    Start_Nodes = []

    create_node(expr, n_layer=None)

    n_layer = len(Nodes) - 1

    # second pass: build the tree with leaves padded to the full depth
    Nodes = [[]]
    SubNodes = [[]]
    Connections = {}
    Start_Nodes = []

    create_node(expr, n_layer=n_layer)

    # move affine parameters in leaf nodes to connections
    for node in Start_Nodes:
        c = Connections[(node.depth,node.parent_index,node.index)]
        c.affine[0] = float(node.scale)
        c.affine[1] = float(node.bias)
        node.scale = 1.
        node.bias = 0.

    # map each leaf node to the index of its input variable
    node2var = []
    for node in Start_Nodes:
        for i in range(len(input_variables)):
            if node.expr == input_variables[i]:
                node2var.append(i)

    # count sum/mult nodes per layer
    n_mult = []
    n_sum = []
    for layer in Nodes:
        n_mult.append(0)
        n_sum.append(0)
        for node in layer:
            if node.mult_bool == True:
                n_mult[-1] += 1
            else:
                n_sum[-1] += 1

    # depth
    n_layer = len(Nodes) - 1

    # converter:
    #   tree node id -> kan node id (sum nodes first, then mult nodes)
    #   tree subnode id -> kan subnode id
    # note: tree depth counts top-down, KAN depth bottom-up (hence n_layer - depth)
    subnode_index_convert = {}
    node_index_convert = {}
    connection_index_convert = {}
    mult_arities = []
    for layer_id in range(n_layer+1):
        mult_arity = []
        i_sum = 0
        i_mult = 0
        for i in range(len(Nodes[layer_id])):
            node = Nodes[layer_id][i]
            if node.mult_bool == True:
                kan_node_id = n_sum[layer_id] + i_mult
                arity = len(node.child_index)
                for a in range(arity):
                    subnode = SubNodes[node.depth+1][node.child_index[a]]
                    kan_subnode_id = n_sum[layer_id] + np.sum(mult_arity) + a
                    subnode_index_convert[(subnode.depth,subnode.index)] = (int(n_layer-subnode.depth),int(kan_subnode_id))
                i_mult += 1
                mult_arity.append(arity)
            else:
                kan_node_id = i_sum
                if len(node.child_index) > 0:
                    subnode = SubNodes[node.depth+1][node.child_index[0]]
                    kan_subnode_id = i_sum
                    subnode_index_convert[(subnode.depth,subnode.index)] = (int(n_layer-subnode.depth),int(kan_subnode_id))
                i_sum += 1

            if layer_id == n_layer:
                # input layer: map to the input-variable index instead
                node_index_convert[(node.depth,node.index)] = (int(n_layer-node.depth),int(node2var[kan_node_id]))
            else:
                node_index_convert[(node.depth,node.index)] = (int(n_layer-node.depth),int(kan_node_id))

        # node:    depth (node.depth -> n_layer - node.depth)
        #          width (node.index -> kan_node_id)
        # subnode: depth (subnode.depth -> n_layer - subnode.depth)
        #          width (subnode.index -> kan_subnode_id)
        mult_arities.append(mult_arity)

    for index in list(Connections.keys()):
        depth, subnode_id, node_id = index
        _, kan_subnode_id = subnode_index_convert[(depth, subnode_id)]
        _, kan_node_id = node_index_convert[(depth, node_id)]
        connection_index_convert[(depth, subnode_id, node_id)] = (n_layer-depth, kan_subnode_id, kan_node_id)


    n_sum.reverse()
    n_mult.reverse()
    mult_arities.reverse()

    width = [[n_sum[i], n_mult[i]] for i in range(len(n_sum))]
    width[0][0] = len(input_variables)

    # TODO: allow passing other parameters (probably as a dictionary) in sf2kan, including grid, k, etc.
    model = MultKAN(width=width, mult_arity=mult_arities, grid=grid, k=k, auto_save=auto_save)

    # clean the graph: start from all-zero symbolic edges
    for l in range(model.depth):
        for i in range(model.width_in[l]):
            for j in range(model.width_out[l+1]):
                model.fix_symbolic(l,i,j,'0',fit_params_bool=False)

    # write node scales/biases into the model
    Nodes_flat = [x for xs in Nodes for x in xs]
    for node in Nodes_flat:
        kan_node_depth, kan_node_index = node_index_convert[(node.depth,node.index)]
        if kan_node_depth > 0:
            model.node_scale[kan_node_depth-1].data[kan_node_index] = float(node.scale)
            model.node_bias[kan_node_depth-1].data[kan_node_index] = float(node.bias)

    # write subnode scales/biases
    SubNodes_flat = [x for xs in SubNodes for x in xs]
    for subnode in SubNodes_flat:
        kan_subnode_depth, kan_subnode_index = subnode_index_convert[(subnode.depth,subnode.index)]
        model.subnode_scale[kan_subnode_depth].data[kan_subnode_index] = float(subnode.scale)
        model.subnode_bias[kan_subnode_depth].data[kan_subnode_index] = float(subnode.bias)

    # write connections as symbolic activation functions
    Connections_flat = list(Connections.values())

    for connection in Connections_flat:
        c_depth = connection.depth
        c_j = connection.parent_index
        c_i = connection.child_index
        kc_depth, kc_j, kc_i = connection_index_convert[(c_depth, c_j, c_i)]

        # translate the sympy function into a SYMBOLIC_LIB name
        fun_name = connection.fun_name
        if fun_name == 'x':
            kfun_name = 'x'
        elif fun_name == exp:
            kfun_name = 'exp'
        elif fun_name == sin:
            kfun_name = 'sin'
        elif fun_name == cos:
            kfun_name = 'cos'
        elif fun_name == tan:
            kfun_name = 'tan'
        elif fun_name == sqrt:
            kfun_name = 'sqrt'
        elif fun_name == log:
            kfun_name = 'log'
        elif fun_name == tanh:
            kfun_name = 'tanh'
        elif fun_name == asin:
            kfun_name = 'arcsin'
        elif fun_name == acos:
            kfun_name = 'arccos'
        elif fun_name == atan:
            kfun_name = 'arctan'
        elif fun_name == atanh:
            kfun_name = 'arctanh'
        elif fun_name == sign:
            kfun_name = 'sgn'
        elif fun_name == Pow:
            alpha = connection.power_exponent
            if alpha == Rational(1,2):
                kfun_name = 'x^0.5'
            elif alpha == - Rational(1,2):
                kfun_name = '1/x^0.5'
            elif alpha == Rational(3,2):
                kfun_name = 'x^1.5'
            else:
                alpha = int(connection.power_exponent)
                if alpha > 0:
                    if alpha == 1:
                        kfun_name = 'x'
                    else:
                        kfun_name = f'x^{alpha}'
                else:
                    if alpha == -1:
                        kfun_name = '1/x'
                    else:
                        kfun_name = f'1/x^{-alpha}'
        else:
            # previously fell through to an undefined kfun_name (NameError);
            # fail with an explicit message instead
            raise ValueError(f'symbolic function {fun_name} not supported')

        model.fix_symbolic(kc_depth, kc_i, kc_j, kfun_name, fit_params_bool=False)
        model.symbolic_fun[kc_depth].affine.data.reshape(model.width_out[kc_depth+1], model.width_in[kc_depth], 4)[kc_j][kc_i] = torch.tensor(connection.affine)

    return model


sf2kan = kanpiler = expr2kan
import torch
import numpy as np  # previously only available transitively via the star import below
from .MultKAN import *


def runner1(width, dataset, grids=None, steps=20, lamb=0.001, prune_round=3, refine_round=3, edge_th=1e-2, node_th=1e-2, metrics=None, seed=1):
    '''
    Train/prune/refine a KAN repeatedly and collect the pareto data.

    Args:
    -----
        width : list
            KAN width specification passed to KAN(...)
        dataset : dict
            dataset in the format expected by model.fit / model.evaluate
        grids : list of int
            grid sizes used per refine round (default [5, 10, 20])
        steps : int
            training steps per fit call
        lamb : float
            regularization strength for the initial fit
        prune_round : int
            number of train+prune iterations
        refine_round : int
            number of grid-refinement iterations per prune round
        edge_th, node_th : float
            pruning thresholds for edges and nodes
        metrics : list of callables or None
            extra metrics; each is called as metric(model, dataset)
        seed : int
            random seed for the initial KAN

    Returns:
    --------
        result : dict of numpy arrays
            keys: 'test_loss', 'c' (edge count), 'G' (grid size), 'id',
            plus one key per metric name
    '''
    # avoid the shared-mutable-default pitfall; behavior unchanged for callers
    grids = [5, 10, 20] if grids is None else grids

    result = {}
    result['test_loss'] = []
    result['c'] = []
    result['G'] = []
    result['id'] = []
    if metrics is not None:
        for metric in metrics:
            result[metric.__name__] = []

    def collect(evaluation):
        # snapshot the current model's evaluation into `result`
        result['test_loss'].append(evaluation['test_loss'])
        result['c'].append(evaluation['n_edge'])
        result['G'].append(evaluation['n_grid'])
        result['id'].append(f'{model.round}.{model.state_id}')
        if metrics is not None:
            for metric in metrics:
                result[metric.__name__].append(metric(model, dataset).item())

    for i in range(prune_round):
        # train and prune
        if i == 0:
            model = KAN(width=width, grid=grids[0], seed=seed)
        else:
            # NOTE(review): rewind id format assumed from MultKAN's checkpoint scheme — confirm
            model = model.rewind(f'{i-1}.{2*i}')

        model.fit(dataset, steps=steps, lamb=lamb)
        model = model.prune(edge_th=edge_th, node_th=node_th)
        evaluation = model.evaluate(dataset)
        collect(evaluation)

        for j in range(refine_round):
            model = model.refine(grids[j])
            model.fit(dataset, steps=steps)
            evaluation = model.evaluate(dataset)
            collect(evaluation)

    for key in list(result.keys()):
        result[key] = np.array(result[key])

    return result


def pareto_frontier(x, y):
    '''
    Return the pareto-optimal points of (x, y), minimizing both coordinates.

    A point is kept iff the only point that weakly dominates it
    (x_i <= x_j and y_i <= y_j) is itself.

    Args:
    -----
        x, y : 1D numpy arrays of equal length

    Returns:
    --------
        x_pf, y_pf : pareto-optimal coordinates
        pf_id : indices of the pareto-optimal points
    '''
    dominated_count = np.sum((x[:, None] <= x[None, :]) * (y[:, None] <= y[None, :]), axis=0)
    pf_id = np.where(dominated_count == 1)[0]
    return x[pf_id], y[pf_id], pf_id
if name == 'test': + symbol = x, y = symbols('x, y') + expr = (x+y) * sin(exp(2*y)) + f = lambda x: (x[:,[0]] + x[:,[1]])*torch.sin(torch.exp(2*x[:,[1]])) + ranges = [-1,1] + + if name == 'I.6.20a' or name == 1: + symbol = theta = symbols('theta') + symbol = [symbol] + expr = exp(-theta**2/2)/sqrt(2*pi) + f = lambda x: torch.exp(-x[:,[0]]**2/2)/torch.sqrt(2*tpi) + ranges = [[-3,3]] + + if name == 'I.6.20' or name == 2: + symbol = theta, sigma = symbols('theta sigma') + expr = exp(-theta**2/(2*sigma**2))/sqrt(2*pi*sigma**2) + f = lambda x: torch.exp(-x[:,[0]]**2/(2*x[:,[1]]**2))/torch.sqrt(2*tpi*x[:,[1]]**2) + ranges = [[-1,1],[0.5,2]] + + if name == 'I.6.20b' or name == 3: + symbol = theta, theta1, sigma = symbols('theta theta1 sigma') + expr = exp(-(theta-theta1)**2/(2*sigma**2))/sqrt(2*pi*sigma**2) + f = lambda x: torch.exp(-(x[:,[0]]-x[:,[1]])**2/(2*x[:,[2]]**2))/torch.sqrt(2*tpi*x[:,[2]]**2) + ranges = [[-1.5,1.5],[-1.5,1.5],[0.5,2]] + + if name == 'I.8.4' or name == 4: + symbol = x1, x2, y1, y2 = symbols('x1 x2 y1 y2') + expr = sqrt((x2-x1)**2+(y2-y1)**2) + f = lambda x: torch.sqrt((x[:,[1]]-x[:,[0]])**2+(x[:,[3]]-x[:,[2]])**2) + ranges = [[-1,1],[-1,1],[-1,1],[-1,1]] + + if name == 'I.9.18' or name == 5: + symbol = G, m1, m2, x1, x2, y1, y2, z1, z2 = symbols('G m1 m2 x1 x2 y1 y2 z1 z2') + expr = G*m1*m2/((x2-x1)**2+(y2-y1)**2+(z2-z1)**2) + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]/((x[:,[3]]-x[:,[4]])**2+(x[:,[5]]-x[:,[6]])**2+(x[:,[7]]-x[:,[8]])**2) + ranges = [[-1,1],[-1,1],[-1,1],[-1,-0.5],[0.5,1],[-1,-0.5],[0.5,1],[-1,-0.5],[0.5,1]] + + if name == 'I.10.7' or name == 6: + symbol = m0, v, c = symbols('m0 v c') + expr = m0/sqrt(1-v**2/c**2) + f = lambda x: x[:,[0]]/torch.sqrt(1-x[:,[1]]**2/x[:,[2]]**2) + ranges = [[0,1],[0,1],[1,2]] + + if name == 'I.11.19' or name == 7: + symbol = x1, y1, x2, y2, x3, y3 = symbols('x1 y1 x2 y2 x3 y3') + expr = x1*y1 + x2*y2 + x3*y3 + f = lambda x: x[:,[0]]*x[:,[1]] + x[:,[2]]*x[:,[3]] + x[:,[4]]*x[:,[5]] + ranges = [-1,1] + 
+ if name == 'I.12.1' or name == 8: + symbol = mu, Nn = symbols('mu N_n') + expr = mu * Nn + f = lambda x: x[:,[0]]*x[:,[1]] + ranges = [-1,1] + + if name == 'I.12.2' or name == 9: + symbol = q1, q2, eps, r = symbols('q1 q2 epsilon r') + expr = q1*q2/(4*pi*eps*r**2) + f = lambda x: x[:,[0]]*x[:,[1]]/(4*tpi*x[:,[2]]*x[:,[3]]**2) + ranges = [[-1,1],[-1,1],[0.5,2],[0.5,2]] + + if name == 'I.12.4' or name == 10: + symbol = q1, eps, r = symbols('q1 epsilon r') + expr = q1/(4*pi*eps*r**2) + f = lambda x: x[:,[0]]/(4*tpi*x[:,[1]]*x[:,[2]]**2) + ranges = [[-1,1],[0.5,2],[0.5,2]] + + if name == 'I.12.5' or name == 11: + symbol = q2, Ef = symbols('q2, E_f') + expr = q2*Ef + f = lambda x: x[:,[0]]*x[:,[1]] + ranges = [-1,1] + + if name == 'I.12.11' or name == 12: + symbol = q, Ef, B, v, theta = symbols('q E_f B v theta') + expr = q*(Ef + B*v*sin(theta)) + f = lambda x: x[:,[0]]*(x[:,[1]]+x[:,[2]]*x[:,[3]]*torch.sin(x[:,[4]])) + ranges = [[-1,1],[-1,1],[-1,1],[-1,1],[0,2*tpi]] + + if name == 'I.13.4' or name == 13: + symbol = m, v, u, w = symbols('m u v w') + expr = 1/2*m*(v**2+u**2+w**2) + f = lambda x: 1/2*x[:,[0]]*(x[:,[1]]**2+x[:,[2]]**2+x[:,[3]]**2) + ranges = [[-1,1],[-1,1],[-1,1],[-1,1]] + + if name == 'I.13.12' or name == 14: + symbol = G, m1, m2, r1, r2 = symbols('G m1 m2 r1 r2') + expr = G*m1*m2*(1/r2-1/r1) + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]*(1/x[:,[4]]-1/x[:,[3]]) + ranges = [[0,1],[0,1],[0,1],[0.5,2],[0.5,2]] + + if name == 'I.14.3' or name == 15: + symbol = m, g, z = symbols('m g z') + expr = m*g*z + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]] + ranges = [[0,1],[0,1],[-1,1]] + + if name == 'I.14.4' or name == 16: + symbol = ks, x = symbols('k_s x') + expr = 1/2*ks*x**2 + f = lambda x: 1/2*x[:,[0]]*x[:,[1]]**2 + ranges = [[0,1],[-1,1]] + + if name == 'I.15.3x' or name == 17: + symbol = x, u, t, c = symbols('x u t c') + expr = (x-u*t)/sqrt(1-u**2/c**2) + f = lambda x: (x[:,[0]] - x[:,[1]]*x[:,[2]])/torch.sqrt(1-x[:,[1]]**2/x[:,[3]]**2) + ranges = 
[[-1,1],[-1,1],[-1,1],[1,2]] + + if name == 'I.15.3t' or name == 18: + symbol = t, u, x, c = symbols('t u x c') + expr = (t-u*x/c**2)/sqrt(1-u**2/c**2) + f = lambda x: (x[:,[0]] - x[:,[1]]*x[:,[2]]/x[:,[3]]**2)/torch.sqrt(1-x[:,[1]]**2/x[:,[3]]**2) + ranges = [[-1,1],[-1,1],[-1,1],[1,2]] + + if name == 'I.15.10' or name == 19: + symbol = m0, v, c = symbols('m0 v c') + expr = m0*v/sqrt(1-v**2/c**2) + f = lambda x: x[:,[0]]*x[:,[1]]/torch.sqrt(1-x[:,[1]]**2/x[:,[2]]**2) + ranges = [[-1,1],[-0.9,0.9],[1.1,2]] + + if name == 'I.16.6' or name == 20: + symbol = u, v, c = symbols('u v c') + expr = (u+v)/(1+u*v/c**2) + f = lambda x: x[:,[0]]*x[:,[1]]/(1+x[:,[0]]*x[:,[1]]/x[:,[2]]**2) + ranges = [[-0.8,0.8],[-0.8,0.8],[1,2]] + + if name == 'I.18.4' or name == 21: + symbol = m1, r1, m2, r2 = symbols('m1 r1 m2 r2') + expr = (m1*r1+m2*r2)/(m1+m2) + f = lambda x: (x[:,[0]]*x[:,[1]]+x[:,[2]]*x[:,[3]])/(x[:,[0]]+x[:,[2]]) + ranges = [[0.5,1],[-1,1],[0.5,1],[-1,1]] + + if name == 'I.18.4' or name == 22: + symbol = r, F, theta = symbols('r F theta') + expr = r*F*sin(theta) + f = lambda x: x[:,[0]]*x[:,[1]]*torch.sin(x[:,[2]]) + ranges = [[-1,1],[-1,1],[0,2*tpi]] + + if name == 'I.18.16' or name == 23: + symbol = m, r, v, theta = symbols('m r v theta') + expr = m*r*v*sin(theta) + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]*torch.sin(x[:,[3]]) + ranges = [[-1,1],[-1,1],[-1,1],[0,2*tpi]] + + if name == 'I.24.6' or name == 24: + symbol = m, omega, omega0, x = symbols('m omega omega_0 x') + expr = 1/4*m*(omega**2+omega0**2)*x**2 + f = lambda x: 1/4*x[:,[0]]*(x[:,[1]]**2+x[:,[2]]**2)*x[:,[3]]**2 + ranges = [[0,1],[-1,1],[-1,1],[-1,1]] + + if name == 'I.25.13' or name == 25: + symbol = q, C = symbols('q C') + expr = q/C + f = lambda x: x[:,[0]]/x[:,[1]] + ranges = [[-1,1],[0.5,2]] + + if name == 'I.26.2' or name == 26: + symbol = n, theta2 = symbols('n theta2') + expr = asin(n*sin(theta2)) + f = lambda x: torch.arcsin(x[:,[0]]*torch.sin(x[:,[1]])) + ranges = [[0,0.99],[0,2*tpi]] + + if name 
== 'I.27.6' or name == 27: + symbol = d1, d2, n = symbols('d1 d2 n') + expr = 1/(1/d1+n/d2) + f = lambda x: 1/(1/x[:,[0]]+x[:,[2]]/x[:,[1]]) + ranges = [[0.5,2],[1,2],[0.5,2]] + + if name == 'I.29.4' or name == 28: + symbol = omega, c = symbols('omega c') + expr = omega/c + f = lambda x: x[:,[0]]/x[:,[1]] + ranges = [[0,1],[0.5,2]] + + if name == 'I.29.16' or name == 29: + symbol = x1, x2, theta1, theta2 = symbols('x1 x2 theta1 theta2') + expr = sqrt(x1**2+x2**2-2*x1*x2*cos(theta1-theta2)) + f = lambda x: torch.sqrt(x[:,[0]]**2+x[:,[1]]**2-2*x[:,[0]]*x[:,[1]]*torch.cos(x[:,[2]]-x[:,[3]])) + ranges = [[-1,1],[-1,1],[0,2*tpi],[0,2*tpi]] + + if name == 'I.30.3' or name == 30: + symbol = I0, n, theta = symbols('I_0 n theta') + expr = I0 * sin(n*theta/2)**2 / sin(theta/2) ** 2 + f = lambda x: x[:,[0]] * torch.sin(x[:,[1]]*x[:,[2]]/2)**2 / torch.sin(x[:,[2]]/2)**2 + ranges = [[0,1],[0,4],[0.4*tpi,1.6*tpi]] + + if name == 'I.30.5' or name == 31: + symbol = lamb, n, d = symbols('lambda n d') + expr = asin(lamb/(n*d)) + f = lambda x: torch.arcsin(x[:,[0]]/(x[:,[1]]*x[:,[2]])) + ranges = [[-1,1],[1,1.5],[1,1.5]] + + if name == 'I.32.5' or name == 32: + symbol = q, a, eps, c = symbols('q a epsilon c') + expr = q**2*a**2/(eps*c**3) + f = lambda x: x[:,[0]]**2*x[:,[1]]**2/(x[:,[2]]*x[:,[3]]**3) + ranges = [[-1,1],[-1,1],[0.5,2],[0.5,2]] + + if name == 'I.32.17' or name == 33: + symbol = eps, c, Ef, r, omega, omega0 = symbols('epsilon c E_f r omega omega_0') + expr = nsimplify((1/2*eps*c*Ef**2)*(8*pi*r**2/3)*(omega**4/(omega**2-omega0**2)**2)) + f = lambda x: (1/2*x[:,[0]]*x[:,[1]]*x[:,[2]]**2)*(8*tpi*x[:,[3]]**2/3)*(x[:,[4]]**4/(x[:,[4]]**2-x[:,[5]]**2)**2) + ranges = [[0,1],[0,1],[-1,1],[0,1],[0,1],[1,2]] + + if name == 'I.34.8' or name == 34: + symbol = q, V, B, p = symbols('q V B p') + expr = q*V*B/p + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]/x[:,[3]] + ranges = [[-1,1],[-1,1],[-1,1],[0.5,2]] + + if name == 'I.34.10' or name == 35: + symbol = omega0, v, c = symbols('omega_0 
v c') + expr = omega0/(1-v/c) + f = lambda x: x[:,[0]]/(1-x[:,[1]]/x[:,[2]]) + ranges = [[0,1],[0,0.9],[1.1,2]] + + if name == 'I.34.14' or name == 36: + symbol = omega0, v, c = symbols('omega_0 v c') + expr = omega0 * (1+v/c)/sqrt(1-v**2/c**2) + f = lambda x: x[:,[0]]*(1+x[:,[1]]/x[:,[2]])/torch.sqrt(1-x[:,[1]]**2/x[:,[2]]**2) + ranges = [[0,1],[-0.9,0.9],[1.1,2]] + + if name == 'I.34.27' or name == 37: + symbol = hbar, omega = symbols('hbar omega') + expr = hbar * omega + f = lambda x: x[:,[0]]*x[:,[1]] + ranges = [[-1,1],[-1,1]] + + if name == 'I.37.4' or name == 38: + symbol = I1, I2, delta = symbols('I_1 I_2 delta') + expr = I1 + I2 + 2*sqrt(I1*I2)*cos(delta) + f = lambda x: x[:,[0]] + x[:,[1]] + 2*torch.sqrt(x[:,[0]]*x[:,[1]])*torch.cos(x[:,[2]]) + ranges = [[0.1,1],[0.1,1],[0,2*tpi]] + + if name == 'I.38.12' or name == 39: + symbol = eps, hbar, m, q = symbols('epsilon hbar m q') + expr = 4*pi*eps*hbar**2/(m*q**2) + f = lambda x: 4*tpi*x[:,[0]]*x[:,[1]]**2/(x[:,[2]]*x[:,[3]]**2) + ranges = [[0,1],[0,1],[0.5,2],[0.5,2]] + + if name == 'I.39.10' or name == 40: + symbol = pF, V = symbols('p_F V') + expr = 3/2 * pF * V + f = lambda x: 3/2 * x[:,[0]] * x[:,[1]] + ranges = [[0,1],[0,1]] + + if name == 'I.39.11' or name == 41: + symbol = gamma, pF, V = symbols('gamma p_F V') + expr = pF * V/(gamma - 1) + f = lambda x: 1/(x[:,[0]]-1) * x[:,[1]] * x[:,[2]] + ranges = [[1.5,3],[0,1],[0,1]] + + if name == 'I.39.22' or name == 42: + symbol = n, kb, T, V = symbols('n k_b T V') + expr = n*kb*T/V + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]/x[:,[3]] + ranges = [[0,1],[0,1],[0,1],[0.5,2]] + + if name == 'I.40.1' or name == 43: + symbol = n0, m, g, x, kb, T = symbols('n_0 m g x k_b T') + expr = n0 * exp(-m*g*x/(kb*T)) + f = lambda x: x[:,[0]] * torch.exp(-x[:,[1]]*x[:,[2]]*x[:,[3]]/(x[:,[4]]*x[:,[5]])) + ranges = [[0,1],[-1,1],[-1,1],[-1,1],[1,2],[1,2]] + + if name == 'I.41.16' or name == 44: + symbol = hbar, omega, c, kb, T = symbols('hbar omega c k_b T') + expr = hbar * 
omega**3/(pi**2*c**2*(exp(hbar*omega/(kb*T))-1)) + f = lambda x: x[:,[0]]*x[:,[1]]**3/(tpi**2*x[:,[2]]**2*(torch.exp(x[:,[0]]*x[:,[1]]/(x[:,[3]]*x[:,[4]]))-1)) + ranges = [[0.5,1],[0.5,1],[0.5,2],[0.5,2],[0.5,2]] + + if name == 'I.43.16' or name == 45: + symbol = mu, q, Ve, d = symbols('mu q V_e d') + expr = mu*q*Ve/d + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]/x[:,[3]] + ranges = [[0,1],[0,1],[0,1],[0.5,2]] + + if name == 'I.43.31' or name == 46: + symbol = mu, kb, T = symbols('mu k_b T') + expr = mu*kb*T + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]] + ranges = [[0,1],[0,1],[0,1]] + + if name == 'I.43.43' or name == 47: + symbol = gamma, kb, v, A = symbols('gamma k_b v A') + expr = kb*v/A/(gamma-1) + f = lambda x: 1/(x[:,[0]]-1)*x[:,[1]]*x[:,[2]]/x[:,[3]] + ranges = [[1.5,3],[0,1],[0,1],[0.5,2]] + + if name == 'I.44.4' or name == 48: + symbol = n, kb, T, V1, V2 = symbols('n k_b T V_1 V_2') + expr = n*kb*T*log(V2/V1) + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]*torch.log(x[:,[4]]/x[:,[3]]) + ranges = [[0,1],[0,1],[0,1],[0.5,2],[0.5,2]] + + if name == 'I.47.23' or name == 49: + symbol = gamma, p, rho = symbols('gamma p rho') + expr = sqrt(gamma*p/rho) + f = lambda x: torch.sqrt(x[:,[0]]*x[:,[1]]/x[:,[2]]) + ranges = [[0.1,1],[0.1,1],[0.5,2]] + + if name == 'I.48.20' or name == 50: + symbol = m, v, c = symbols('m v c') + expr = m*c**2/sqrt(1-v**2/c**2) + f = lambda x: x[:,[0]]*x[:,[2]]**2/torch.sqrt(1-x[:,[1]]**2/x[:,[2]]**2) + ranges = [[0,1],[-0.9,0.9],[1.1,2]] + + if name == 'I.50.26' or name == 51: + symbol = x1, alpha, omega, t = symbols('x_1 alpha omega t') + expr = x1*(cos(omega*t)+alpha*cos(omega*t)**2) + f = lambda x: x[:,[0]]*(torch.cos(x[:,[2]]*x[:,[3]])+x[:,[1]]*torch.cos(x[:,[2]]*x[:,[3]])**2) + ranges = [[0,1],[0,1],[0,2*tpi],[0,1]] + + if name == 'II.2.42' or name == 52: + symbol = kappa, T1, T2, A, d = symbols('kappa T_1 T_2 A d') + expr = kappa*(T2-T1)*A/d + f = lambda x: x[:,[0]]*(x[:,[2]]-x[:,[1]])*x[:,[3]]/x[:,[4]] + ranges = 
[[0,1],[0,1],[0,1],[0,1],[0.5,2]] + + if name == 'II.3.24' or name == 53: + symbol = P, r = symbols('P r') + expr = P/(4*pi*r**2) + f = lambda x: x[:,[0]]/(4*tpi*x[:,[1]]**2) + ranges = [[0,1],[0.5,2]] + + if name == 'II.4.23' or name == 54: + symbol = q, eps, r = symbols('q epsilon r') + expr = q/(4*pi*eps*r) + f = lambda x: x[:,[0]]/(4*tpi*x[:,[1]]*x[:,[2]]) + ranges = [[0,1],[0.5,2],[0.5,2]] + + if name == 'II.6.11' or name == 55: + symbol = eps, pd, theta, r = symbols('epsilon p_d theta r') + expr = 1/(4*pi*eps)*pd*cos(theta)/r**2 + f = lambda x: 1/(4*tpi*x[:,[0]])*x[:,[1]]*torch.cos(x[:,[2]])/x[:,[3]]**2 + ranges = [[0.5,2],[0,1],[0,2*tpi],[0.5,2]] + + if name == 'II.6.15a' or name == 56: + symbol = eps, pd, z, x, y, r = symbols('epsilon p_d z x y r') + expr = 3/(4*pi*eps)*pd*z/r**5*sqrt(x**2+y**2) + f = lambda x: 3/(4*tpi*x[:,[0]])*x[:,[1]]*x[:,[2]]/x[:,[5]]**5*torch.sqrt(x[:,[3]]**2+x[:,[4]]**2) + ranges = [[0.5,2],[0,1],[0,1],[0,1],[0,1],[0.5,2]] + + if name == 'II.6.15b' or name == 57: + symbol = eps, pd, r, theta = symbols('epsilon p_d r theta') + expr = 3/(4*pi*eps)*pd/r**3*cos(theta)*sin(theta) + f = lambda x: 3/(4*tpi*x[:,[0]])*x[:,[1]]/x[:,[2]]**3*torch.cos(x[:,[3]])*torch.sin(x[:,[3]]) + ranges = [[0.5,2],[0,1],[0.5,2],[0,2*tpi]] + + if name == 'II.8.7' or name == 58: + symbol = q, eps, d = symbols('q epsilon d') + expr = 3/5*q**2/(4*pi*eps*d) + f = lambda x: 3/5*x[:,[0]]**2/(4*tpi*x[:,[1]]*x[:,[2]]) + ranges = [[0,1],[0.5,2],[0.5,2]] + + if name == 'II.8.31' or name == 59: + symbol = eps, Ef = symbols('epsilon E_f') + expr = 1/2*eps*Ef**2 + f = lambda x: 1/2*x[:,[0]]*x[:,[1]]**2 + ranges = [[0,1],[0,1]] + + if name == 'I.10.9' or name == 60: + symbol = sigma, eps, chi = symbols('sigma epsilon chi') + expr = sigma/eps/(1+chi) + f = lambda x: x[:,[0]]/x[:,[1]]/(1+x[:,[2]]) + ranges = [[0,1],[0.5,2],[0,1]] + + if name == 'II.11.3' or name == 61: + symbol = q, Ef, m, omega0, omega = symbols('q E_f m omega_o omega') + expr = q*Ef/(m*(omega0**2-omega**2)) 
+ f = lambda x: x[:,[0]]*x[:,[1]]/(x[:,[2]]*(x[:,[3]]**2-x[:,[4]]**2)) + ranges = [[0,1],[0,1],[0.5,2],[1.5,3],[0,1]] + + if name == 'II.11.17' or name == 62: + symbol = n0, pd, Ef, theta, kb, T = symbols('n_0 p_d E_f theta k_b T') + expr = n0*(1+pd*Ef*cos(theta)/(kb*T)) + f = lambda x: x[:,[0]]*(1+x[:,[1]]*x[:,[2]]*torch.cos(x[:,[3]])/(x[:,[4]]*x[:,[5]])) + ranges = [[0,1],[-1,1],[-1,1],[0,2*tpi],[0.5,2],[0.5,2]] + + + if name == 'II.11.20' or name == 63: + symbol = n, pd, Ef, kb, T = symbols('n p_d E_f k_b T') + expr = n*pd**2*Ef/(3*kb*T) + f = lambda x: x[:,[0]]*x[:,[1]]**2*x[:,[2]]/(3*x[:,[3]]*x[:,[4]]) + ranges = [[0,1],[0,1],[0,1],[0.5,2],[0.5,2]] + + if name == 'II.11.27' or name == 64: + symbol = n, alpha, eps, Ef = symbols('n alpha epsilon E_f') + expr = n*alpha/(1-n*alpha/3)*eps*Ef + f = lambda x: x[:,[0]]*x[:,[1]]/(1-x[:,[0]]*x[:,[1]]/3)*x[:,[2]]*x[:,[3]] + ranges = [[0,1],[0,2],[0,1],[0,1]] + + if name == 'II.11.28' or name == 65: + symbol = n, alpha = symbols('n alpha') + expr = 1 + n*alpha/(1-n*alpha/3) + f = lambda x: 1 + x[:,[0]]*x[:,[1]]/(1-x[:,[0]]*x[:,[1]]/3) + ranges = [[0,1],[0,2]] + + if name == 'II.13.17' or name == 66: + symbol = eps, c, l, r = symbols('epsilon c l r') + expr = 1/(4*pi*eps*c**2)*(2*l/r) + f = lambda x: 1/(4*tpi*x[:,[0]]*x[:,[1]]**2)*(2*x[:,[2]]/x[:,[3]]) + ranges = [[0.5,2],[0.5,2],[0,1],[0.5,2]] + + if name == 'II.13.23' or name == 67: + symbol = rho, v, c = symbols('rho v c') + expr = rho/sqrt(1-v**2/c**2) + f = lambda x: x[:,[0]]/torch.sqrt(1-x[:,[1]]**2/x[:,[2]]**2) + ranges = [[0,1],[0,1],[1,2]] + + if name == 'II.13.34' or name == 68: + symbol = rho, v, c = symbols('rho v c') + expr = rho*v/sqrt(1-v**2/c**2) + f = lambda x: x[:,[0]]*x[:,[1]]/torch.sqrt(1-x[:,[1]]**2/x[:,[2]]**2) + ranges = [[0,1],[0,1],[1,2]] + + if name == 'II.15.4' or name == 69: + symbol = muM, B, theta = symbols('mu_M B theta') + expr = - muM * B * cos(theta) + f = lambda x: - x[:,[0]]*x[:,[1]]*torch.cos(x[:,[2]]) + ranges = [[0,1],[0,1],[0,2*tpi]] 
+ + if name == 'II.15.5' or name == 70: + symbol = pd, Ef, theta = symbols('p_d E_f theta') + expr = - pd * Ef * cos(theta) + f = lambda x: - x[:,[0]]*x[:,[1]]*torch.cos(x[:,[2]]) + ranges = [[0,1],[0,1],[0,2*tpi]] + + if name == 'II.21.32' or name == 71: + symbol = q, eps, r, v, c = symbols('q epsilon r v c') + expr = q/(4*pi*eps*r*(1-v/c)) + f = lambda x: x[:,[0]]/(4*tpi*x[:,[1]]*x[:,[2]]*(1-x[:,[3]]/x[:,[4]])) + ranges = [[0,1],[0.5,2],[0.5,2],[0,1],[1,2]] + + if name == 'II.24.17' or name == 72: + symbol = omega, c, d = symbols('omega c d') + expr = sqrt(omega**2/c**2-pi**2/d**2) + f = lambda x: torch.sqrt(x[:,[0]]**2/x[:,[1]]**2-tpi**2/x[:,[2]]**2) + ranges = [[1,1.5],[0.75,1],[1*tpi,1.5*tpi]] + + if name == 'II.27.16' or name == 73: + symbol = eps, c, Ef = symbols('epsilon c E_f') + expr = eps * c * Ef**2 + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]**2 + ranges = [[0,1],[0,1],[-1,1]] + + if name == 'II.27.18' or name == 74: + symbol = eps, Ef = symbols('epsilon E_f') + expr = eps * Ef**2 + f = lambda x: x[:,[0]]*x[:,[1]]**2 + ranges = [[0,1],[-1,1]] + + if name == 'II.34.2a' or name == 75: + symbol = q, v, r = symbols('q v r') + expr = q*v/(2*pi*r) + f = lambda x: x[:,[0]]*x[:,[1]]/(2*tpi*x[:,[2]]) + ranges = [[0,1],[0,1],[0.5,2]] + + if name == 'II.34.2' or name == 76: + symbol = q, v, r = symbols('q v r') + expr = q*v*r/2 + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]/2 + ranges = [[0,1],[0,1],[0,1]] + + if name == 'II.34.11' or name == 77: + symbol = g, q, B, m = symbols('g q B m') + expr = g*q*B/(2*m) + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]/(2*x[:,[3]]) + ranges = [[0,1],[0,1],[0,1],[0.5,2]] + + if name == 'II.34.29a' or name == 78: + symbol = q, h, m = symbols('q h m') + expr = q*h/(4*pi*m) + f = lambda x: x[:,[0]]*x[:,[1]]/(4*tpi*x[:,[2]]) + ranges = [[0,1],[0,1],[0.5,2]] + + if name == 'II.34.29b' or name == 79: + symbol = g, mu, B, J, hbar = symbols('g mu B J hbar') + expr = g*mu*B*J/hbar + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]*x[:,[3]]/x[:,[4]] + 
ranges = [[0,1],[0,1],[0,1],[0,1],[0.5,2]] + + if name == 'II.35.18' or name == 80: + symbol = n0, mu, B, kb, T = symbols('n0 mu B k_b T') + expr = n0/(exp(mu*B/(kb*T))+exp(-mu*B/(kb*T))) + f = lambda x: x[:,[0]]/(torch.exp(x[:,[1]]*x[:,[2]]/(x[:,[3]]*x[:,[4]]))+torch.exp(-x[:,[1]]*x[:,[2]]/(x[:,[3]]*x[:,[4]]))) + ranges = [[0,1],[0,1],[0,1],[0.5,2],[0.5,2]] + + if name == 'II.35.21' or name == 81: + symbol = n, mu, B, kb, T = symbols('n mu B k_b T') + expr = n*mu*tanh(mu*B/(kb*T)) + f = lambda x: x[:,[0]]*x[:,[1]]*torch.tanh(x[:,[1]]*x[:,[2]]/(x[:,[3]]*x[:,[4]])) + ranges = [[0,1],[0,1],[0,1],[0.5,2],[0.5,2]] + + if name == 'II.36.38' or name == 82: + symbol = mu, B, kb, T, alpha, M, eps, c = symbols('mu B k_b T alpha M epsilon c') + expr = mu*B/(kb*T) + mu*alpha*M/(eps*c**2*kb*T) + f = lambda x: x[:,[0]]*x[:,[1]]/(x[:,[2]]*x[:,[3]]) + x[:,[0]]*x[:,[4]]*x[:,[5]]/(x[:,[6]]*x[:,[7]]**2*x[:,[2]]*x[:,[3]]) + ranges = [[0,1],[0,1],[0.5,2],[0.5,2],[0,1],[0,1],[0.5,2],[0.5,2]] + + if name == 'II.37.1' or name == 83: + symbol = mu, chi, B = symbols('mu chi B') + expr = mu*(1+chi)*B + f = lambda x: x[:,[0]]*(1+x[:,[1]])*x[:,[2]] + ranges = [[0,1],[0,1],[0,1]] + + if name == 'II.38.3' or name == 84: + symbol = Y, A, x, d = symbols('Y A x d') + expr = Y*A*x/d + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]/x[:,[3]] + ranges = [[0,1],[0,1],[0,1],[0.5,2]] + + if name == 'II.38.14' or name == 85: + symbol = Y, sigma = symbols('Y sigma') + expr = Y/(2*(1+sigma)) + f = lambda x: x[:,[0]]/(2*(1+x[:,[1]])) + ranges = [[0,1],[0,1]] + + if name == 'III.4.32' or name == 86: + symbol = hbar, omega, kb, T = symbols('hbar omega k_b T') + expr = 1/(exp(hbar*omega/(kb*T))-1) + f = lambda x: 1/(torch.exp(x[:,[0]]*x[:,[1]]/(x[:,[2]]*x[:,[3]]))-1) + ranges = [[0.5,1],[0.5,1],[0.5,2],[0.5,2]] + + if name == 'III.4.33' or name == 87: + symbol = hbar, omega, kb, T = symbols('hbar omega k_b T') + expr = hbar*omega/(exp(hbar*omega/(kb*T))-1) + f = lambda x: 
x[:,[0]]*x[:,[1]]/(torch.exp(x[:,[0]]*x[:,[1]]/(x[:,[2]]*x[:,[3]]))-1) + ranges = [[0,1],[0,1],[0.5,2],[0.5,2]] + + if name == 'III.7.38' or name == 88: + symbol = mu, B, hbar = symbols('mu B hbar') + expr = 2*mu*B/hbar + f = lambda x: 2*x[:,[0]]*x[:,[1]]/x[:,[2]] + ranges = [[0,1],[0,1],[0.5,2]] + + if name == 'III.8.54' or name == 89: + symbol = E, t, hbar = symbols('E t hbar') + expr = sin(E*t/hbar)**2 + f = lambda x: torch.sin(x[:,[0]]*x[:,[1]]/x[:,[2]])**2 + ranges = [[0,2*tpi],[0,1],[0.5,2]] + + if name == 'III.9.52' or name == 90: + symbol = pd, Ef, t, hbar, omega, omega0 = symbols('p_d E_f t hbar omega omega_0') + expr = pd*Ef*t/hbar*sin((omega-omega0)*t/2)**2/((omega-omega0)*t/2)**2 + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]/x[:,[3]]*torch.sin((x[:,[4]]-x[:,[5]])*x[:,[2]]/2)**2/((x[:,[4]]-x[:,[5]])*x[:,[2]]/2)**2 + ranges = [[0,1],[0,1],[0,1],[0.5,2],[0,tpi],[0,tpi]] + + if name == 'III.10.19' or name == 91: + symbol = mu, Bx, By, Bz = symbols('mu B_x B_y B_z') + expr = mu*sqrt(Bx**2+By**2+Bz**2) + f = lambda x: x[:,[0]]*torch.sqrt(x[:,[1]]**2+x[:,[2]]**2+x[:,[3]]**2) + ranges = [[0,1],[0,1],[0,1],[0,1]] + + if name == 'III.12.43' or name == 92: + symbol = n, hbar = symbols('n hbar') + expr = n * hbar + f = lambda x: x[:,[0]]*x[:,[1]] + ranges = [[0,1],[0,1]] + + if name == 'III.13.18' or name == 93: + symbol = E, d, k, hbar = symbols('E d k hbar') + expr = 2*E*d**2*k/hbar + f = lambda x: 2*x[:,[0]]*x[:,[1]]**2*x[:,[2]]/x[:,[3]] + ranges = [[0,1],[0,1],[0,1],[0.5,2]] + + if name == 'III.14.14' or name == 94: + symbol = I0, q, Ve, kb, T = symbols('I_0 q V_e k_b T') + expr = I0 * (exp(q*Ve/(kb*T))-1) + f = lambda x: x[:,[0]]*(torch.exp(x[:,[1]]*x[:,[2]]/(x[:,[3]]*x[:,[4]]))-1) + ranges = [[0,1],[0,1],[0,1],[0.5,2],[0.5,2]] + + if name == 'III.15.12' or name == 95: + symbol = U, k, d = symbols('U k d') + expr = 2*U*(1-cos(k*d)) + f = lambda x: 2*x[:,[0]]*(1-torch.cos(x[:,[1]]*x[:,[2]])) + ranges = [[0,1],[0,2*tpi],[0,1]] + + if name == 'III.15.14' or name == 
96: + symbol = hbar, E, d = symbols('hbar E d') + expr = hbar**2/(2*E*d**2) + f = lambda x: x[:,[0]]**2/(2*x[:,[1]]*x[:,[2]]**2) + ranges = [[0,1],[0.5,2],[0.5,2]] + + if name == 'III.15.27' or name == 97: + symbol = alpha, n, d = symbols('alpha n d') + expr = 2*pi*alpha/(n*d) + f = lambda x: 2*tpi*x[:,[0]]/(x[:,[1]]*x[:,[2]]) + ranges = [[0,1],[0.5,2],[0.5,2]] + + if name == 'III.17.37' or name == 98: + symbol = beta, alpha, theta = symbols('beta alpha theta') + expr = beta * (1+alpha*cos(theta)) + f = lambda x: x[:,[0]]*(1+x[:,[1]]*torch.cos(x[:,[2]])) + ranges = [[0,1],[0,1],[0,2*tpi]] + + if name == 'III.19.51' or name == 99: + symbol = m, q, eps, hbar, n = symbols('m q epsilon hbar n') + expr = - m * q**4/(2*(4*pi*eps)**2*hbar**2)*1/n**2 + f = lambda x: - x[:,[0]]*x[:,[1]]**4/(2*(4*tpi*x[:,[2]])**2*x[:,[3]]**2)*1/x[:,[4]]**2 + ranges = [[0,1],[0,1],[0.5,2],[0.5,2],[0.5,2]] + + if name == 'III.21.20' or name == 100: + symbol = rho, q, A, m = symbols('rho q A m') + expr = - rho*q*A/m + f = lambda x: - x[:,[0]]*x[:,[1]]*x[:,[2]]/x[:,[3]] + ranges = [[0,1],[0,1],[0,1],[0.5,2]] + + if name == 'Rutherforld scattering' or name == 101: + symbol = Z1, Z2, alpha, hbar, c, E, theta = symbols('Z_1 Z_2 alpha hbar c E theta') + expr = (Z1*Z2*alpha*hbar*c/(4*E*sin(theta/2)**2))**2 + f = lambda x: (x[:,[0]]*x[:,[1]]*x[:,[2]]*x[:,[3]]*x[:,[4]]/(4*x[:,[5]]*torch.sin(x[:,[6]]/2)**2))**2 + ranges = [[0,1],[0,1],[0,1],[0,1],[0,1],[0.5,2],[0.1*tpi,0.9*tpi]] + + if name == 'Friedman equation' or name == 102: + symbol = G, rho, kf, c, af = symbols('G rho k_f c a_f') + expr = sqrt(8*pi*G/3*rho-kf*c**2/af**2) + f = lambda x: torch.sqrt(8*tpi*x[:,[0]]/3*x[:,[1]] - x[:,[2]]*x[:,[3]]**2/x[:,[4]]**2) + ranges = [[1,2],[1,2],[0,1],[0,1],[1,2]] + + if name == 'Compton scattering' or name == 103: + symbol = E, m, c, theta = symbols('E m c theta') + expr = E/(1+E/(m*c**2)*(1-cos(theta))) + f = lambda x: x[:,[0]]/(1+x[:,[0]]/(x[:,[1]]*x[:,[2]]**2)*(1-torch.cos(x[:,[3]]))) + ranges = 
[[0,1],[0.5,2],[0.5,2],[0,2*tpi]] + + if name == 'Radiated gravitational wave power' or name == 104: + symbol = G, c, m1, m2, r = symbols('G c m_1 m_2 r') + expr = -32/5*G**4/c**5*(m1*m2)**2*(m1+m2)/r**5 + f = lambda x: -32/5*x[:,[0]]**4/x[:,[1]]**5*(x[:,[2]]*x[:,[3]])**2*(x[:,[2]]+x[:,[3]])/x[:,[4]]**5 + ranges = [[0,1],[0.5,2],[0,1],[0,1],[0.5,2]] + + if name == 'Relativistic aberration' or name == 105: + symbol = theta2, v, c = symbols('theta_2 v c') + expr = acos((cos(theta2)-v/c)/(1-v/c*cos(theta2))) + f = lambda x: torch.arccos((torch.cos(x[:,[0]])-x[:,[1]]/x[:,[2]])/(1-x[:,[1]]/x[:,[2]]*torch.cos(x[:,[0]]))) + ranges = [[0,tpi],[0,1],[1,2]] + + if name == 'N-slit diffraction' or name == 106: + symbol = I0, alpha, delta, N = symbols('I_0 alpha delta N') + expr = I0 * (sin(alpha/2)/(alpha/2)*sin(N*delta/2)/sin(delta/2))**2 + f = lambda x: x[:,[0]] * (torch.sin(x[:,[1]]/2)/(x[:,[1]]/2)*torch.sin(x[:,[3]]*x[:,[2]]/2)/torch.sin(x[:,[2]]/2))**2 + ranges = [[0,1],[0.1*tpi,0.9*tpi],[0.1*tpi,0.9*tpi],[0.5,1]] + + if name == 'Goldstein 3.16' or name == 107: + symbol = m, E, U, L, r = symbols('m E U L r') + expr = sqrt(2/m*(E-U-L**2/(2*m*r**2))) + f = lambda x: torch.sqrt(2/x[:,[0]]*(x[:,[1]]-x[:,[2]]-x[:,[3]]**2/(2*x[:,[0]]*x[:,[4]]**2))) + ranges = [[1,2],[2,3],[0,1],[0,1],[1,2]] + + if name == 'Goldstein 3.55' or name == 108: + symbol = m, kG, L, E, theta1, theta2 = symbols('m k_G L E theta_1 theta_2') + expr = m*kG/L**2*(1+sqrt(1+2*E*L**2/(m*kG**2))*cos(theta1-theta2)) + f = lambda x: x[:,[0]]*x[:,[1]]/x[:,[2]]**2*(1+torch.sqrt(1+2*x[:,[3]]*x[:,[2]]**2/(x[:,[0]]*x[:,[1]]**2))*torch.cos(x[:,[4]]-x[:,[5]])) + ranges = [[0.5,2],[0.5,2],[0.5,2],[0,1],[0,2*tpi],[0,2*tpi]] + + if name == 'Goldstein 3.64 (ellipse)' or name == 109: + symbol = d, alpha, theta1, theta2 = symbols('d alpha theta_1 theta_2') + expr = d*(1-alpha**2)/(1+alpha*cos(theta2-theta1)) + f = lambda x: x[:,[0]]*(1-x[:,[1]]**2)/(1+x[:,[1]]*torch.cos(x[:,[2]]-x[:,[3]])) + ranges = 
[[0,1],[0,0.9],[0,2*tpi],[0,2*tpi]] + + if name == 'Goldstein 3.74 (Kepler)' or name == 110: + symbol = d, G, m1, m2 = symbols('d G m_1 m_2') + expr = 2*pi*d**(3/2)/sqrt(G*(m1+m2)) + f = lambda x: 2*tpi*x[:,[0]]**(3/2)/torch.sqrt(x[:,[1]]*(x[:,[2]]+x[:,[3]])) + ranges = [[0,1],[0.5,2],[0.5,2],[0.5,2]] + + if name == 'Goldstein 3.99' or name == 111: + symbol = eps, E, L, m, Z1, Z2, q = symbols('epsilon E L m Z_1 Z_2 q') + expr = sqrt(1+2*eps**2*E*L**2/(m*(Z1*Z2*q**2)**2)) + f = lambda x: torch.sqrt(1+2*x[:,[0]]**2*x[:,[1]]*x[:,[2]]**2/(x[:,[3]]*(x[:,[4]]*x[:,[5]]*x[:,[6]]**2)**2)) + ranges = [[0,1],[0,1],[0,1],[0.5,2],[0.5,2],[0.5,2],[0.5,2]] + + if name == 'Goldstein 8.56' or name == 112: + symbol = p, q, A, c, m, Ve = symbols('p q A c m V_e') + expr = sqrt((p-q*A)**2*c**2+m**2*c**4) + q*Ve + f = lambda x: torch.sqrt((x[:,[0]]-x[:,[1]]*x[:,[2]])**2*x[:,[3]]**2+x[:,[4]]**2*x[:,[3]]**4) + x[:,[1]]*x[:,[5]] + ranges = [0,1] + + if name == 'Goldstein 12.80' or name == 113: + symbol = m, p, omega, x, alpha, y = symbols('m p omega x alpha y') + expr = 1/(2*m)*(p**2+m**2*omega**2*x**2*(1+alpha*y/x)) + f = lambda x: 1/(2*x[:,[0]]) * (x[:,[1]]**2+x[:,[0]]**2*x[:,[2]]**2*x[:,[3]]**2*(1+x[:,[4]]*x[:,[3]]/x[:,[5]])) + ranges = [[0.5,2],[0,1],[0,1],[0,1],[0,1],[0.5,2]] + + if name == 'Jackson 2.11' or name == 114: + symbol = q, eps, y, Ve, d = symbols('q epsilon y V_e d') + expr = q/(4*pi*eps*y**2)*(4*pi*eps*Ve*d-q*d*y**3/(y**2-d**2)**2) + f = lambda x: x[:,[0]]/(4*tpi*x[:,[1]]*x[:,x[:,[2]]]**2)*(4*tpi*x[:,[1]]*x[:,[3]]*x[:,[4]]-x[:,[0]]*x[:,[4]]*x[:,[2]]**3/(x[:,[2]]**2-x[:,[4]]**2)**2) + ranges = [[0,1],[0.5,2],[1,2],[0,1],[0,1]] + + if name == 'Jackson 3.45' or name == 115: + symbol = q, r, d, alpha = symbols('q r d alpha') + expr = q/sqrt(r**2+d**2-2*d*r*cos(alpha)) + f = lambda x: x[:,[0]]/torch.sqrt(x[:,[1]]**2+x[:,[2]]**2-2*x[:,[1]]*x[:,[2]]*torch.cos(x[:,[3]])) + ranges = [[0,1],[0,1],[0,1],[0,2*tpi]] + + if name == 'Jackson 4.60' or name == 116: + symbol = Ef, theta, 
alpha, d, r = symbols('E_f theta alpha d r') + expr = Ef * cos(theta) * ((alpha-1)/(alpha+2) * d**3/r**2 - r) + f = lambda x: x[:,[0]] * torch.cos(x[:,[1]]) * ((x[:,[2]]-1)/(x[:,[2]]+2) * x[:,[3]]**3/x[:,[4]]**2 - x[:,[4]]) + ranges = [[0,1],[0,2*tpi],[0,2],[0,1],[0.5,2]] + + if name == 'Jackson 11.38 (Doppler)' or name == 117: + symbol = omega, v, c, theta = symbols('omega v c theta') + expr = sqrt(1-v**2/c**2)/(1+v/c*cos(theta))*omega + f = lambda x: torch.sqrt(1-x[:,[1]]**2/x[:,[2]]**2)/(1+x[:,[1]]/x[:,[2]]*torch.cos(x[:,[3]]))*x[:,[0]] + ranges = [[0,1],[0,1],[1,2],[0,2*tpi]] + + if name == 'Weinberg 15.2.1' or name == 118: + symbol = G, c, kf, af, H = symbols('G c k_f a_f H') + expr = 3/(8*pi*G)*(c**2*kf/af**2+H**2) + f = lambda x: 3/(8*tpi*x[:,[0]])*(x[:,[1]]**2*x[:,[2]]/x[:,[3]]**2+x[:,[4]]**2) + ranges = [[0.5,2],[0,1],[0,1],[0.5,2],[0,1]] + + if name == 'Weinberg 15.2.2' or name == 119: + symbol = G, c, kf, af, H, alpha = symbols('G c k_f a_f H alpha') + expr = -1/(8*pi*G)*(c**4*kf/af**2+c**2*H**2*(1-2*alpha)) + f = lambda x: -1/(8*tpi*x[:,[0]])*(x[:,[1]]**4*x[:,[2]]/x[:,[3]]**2 + x[:,[1]]**2*x[:,[4]]**2*(1-2*x[:,[5]])) + ranges = [[0.5,2],[0,1],[0,1],[0.5,2],[0,1],[0,1]] + + if name == 'Schwarz 13.132 (Klein-Nishina)' or name == 120: + symbol = alpha, hbar, m, c, omega0, omega, theta = symbols('alpha hbar m c omega_0 omega theta') + expr = pi*alpha**2*hbar**2/m**2/c**2*(omega0/omega)**2*(omega0/omega+omega/omega0-sin(theta)**2) + f = lambda x: tpi*x[:,[0]]**2*x[:,[1]]**2/x[:,[2]]**2/x[:,[3]]**2*(x[:,[4]]/x[:,[5]])**2*(x[:,[4]]/x[:,[5]]+x[:,[5]]/x[:,[4]]-torch.sin(x[:,[6]])**2) + ranges = [[0,1],[0,1],[0.5,2],[0.5,2],[0.5,2],[0.5,2],[0,2*tpi]] + + return symbol, expr, f, ranges \ No newline at end of file diff --git a/dl/kan/kan/.ipynb_checkpoints/hypothesis-checkpoint.py b/dl/kan/kan/.ipynb_checkpoints/hypothesis-checkpoint.py new file mode 100644 index 000000000..4850f5098 --- /dev/null +++ b/dl/kan/kan/.ipynb_checkpoints/hypothesis-checkpoint.py @@ -0,0 
+1,695 @@ +import numpy as np +import torch +from sklearn.linear_model import LinearRegression +from sympy.utilities.lambdify import lambdify +from sklearn.cluster import AgglomerativeClustering +from .utils import batch_jacobian, batch_hessian +from functools import reduce +from kan.utils import batch_jacobian, batch_hessian +import copy +import matplotlib.pyplot as plt +import sympy +from sympy.printing import latex + + +def detect_separability(model, x, mode='add', score_th=1e-2, res_th=1e-2, n_clusters=None, bias=0., verbose=False): + ''' + detect function separability + + Args: + ----- + model : MultKAN, MLP or python function + x : 2D torch.float + inputs + mode : str + mode = 'add' or mode = 'mul' + score_th : float + threshold of score + res_th : float + threshold of residue + n_clusters : None or int + the number of clusters + bias : float + bias (for multiplicative separability) + verbose : bool + + Returns: + -------- + results (dictionary) + + Example1 + -------- + >>> from kan.hypothesis import * + >>> model = lambda x: x[:,[0]] ** 2 + torch.exp(x[:,[1]]+x[:,[2]]) + >>> x = torch.normal(0,1,size=(100,3)) + >>> detect_separability(model, x, mode='add') + + Example2 + -------- + >>> from kan.hypothesis import * + >>> model = lambda x: x[:,[0]] ** 2 * (x[:,[1]]+x[:,[2]]) + >>> x = torch.normal(0,1,size=(100,3)) + >>> detect_separability(model, x, mode='mul') + ''' + results = {} + + if mode == 'add': + hessian = batch_hessian(model, x) + elif mode == 'mul': + compose = lambda *F: reduce(lambda f, g: lambda x: f(g(x)), F) + hessian = batch_hessian(compose(torch.log, torch.abs, lambda x: x+bias, model), x) + + std = torch.std(x, dim=0) + hessian_normalized = hessian * std[None,:] * std[:,None] + score_mat = torch.median(torch.abs(hessian_normalized), dim=0)[0] + results['hessian'] = score_mat + + dist_hard = (score_mat < score_th).float() + + if isinstance(n_clusters, int): + n_cluster_try = [n_clusters, n_clusters] + elif isinstance(n_clusters, list): + 
n_cluster_try = n_clusters + else: + n_cluster_try = [1,x.shape[1]] + + n_cluster_try = list(range(n_cluster_try[0], n_cluster_try[1]+1)) + + for n_cluster in n_cluster_try: + + clustering = AgglomerativeClustering( + metric='precomputed', + n_clusters=n_cluster, + linkage='complete', + ).fit(dist_hard) + + labels = clustering.labels_ + + groups = [list(np.where(labels == i)[0]) for i in range(n_cluster)] + blocks = [torch.sum(score_mat[groups[i]][:,groups[i]]) for i in range(n_cluster)] + block_sum = torch.sum(torch.stack(blocks)) + total_sum = torch.sum(score_mat) + residual_sum = total_sum - block_sum + residual_ratio = residual_sum / total_sum + + if verbose == True: + print(f'n_group={n_cluster}, residual_ratio={residual_ratio}') + + if residual_ratio < res_th: + results['n_groups'] = n_cluster + results['labels'] = list(labels) + results['groups'] = groups + + if results['n_groups'] > 1: + print(f'{mode} separability detected') + else: + print(f'{mode} separability not detected') + + return results + + +def batch_grad_normgrad(model, x, group, create_graph=False): + # x in shape (Batch, Length) + group_A = group + group_B = list(set(range(x.shape[1])) - set(group)) + + def jac(x): + input_grad = batch_jacobian(model, x, create_graph=True) + input_grad_A = input_grad[:,group_A] + norm = torch.norm(input_grad_A, dim=1, keepdim=True) + 1e-6 + input_grad_A_normalized = input_grad_A/norm + return input_grad_A_normalized + + def _jac_sum(x): + return jac(x).sum(dim=0) + + return torch.autograd.functional.jacobian(_jac_sum, x, create_graph=create_graph).permute(1,0,2)[:,:,group_B] + + +def get_dependence(model, x, group): + group_A = group + group_B = list(set(range(x.shape[1])) - set(group)) + grad_normgrad = batch_grad_normgrad(model, x, group=group) + std = torch.std(x, dim=0) + dependence = grad_normgrad * std[None,group_A,None] * std[None,None,group_B] + dependence = torch.median(torch.abs(dependence), dim=0)[0] + return dependence + +def test_symmetry(model, 
x, group, dependence_th=1e-3): + ''' + detect function separability + + Args: + ----- + model : MultKAN, MLP or python function + x : 2D torch.float + inputs + group : a list of indices + dependence_th : float + threshold of dependence + + Returns: + -------- + bool + + Example + ------- + >>> from kan.hypothesis import * + >>> model = lambda x: x[:,[0]] ** 2 * (x[:,[1]]+x[:,[2]]) + >>> x = torch.normal(0,1,size=(100,3)) + >>> print(test_symmetry(model, x, [1,2])) # True + >>> print(test_symmetry(model, x, [0,2])) # False + ''' + if len(group) == x.shape[1] or len(group) == 0: + return True + + dependence = get_dependence(model, x, group) + max_dependence = torch.max(dependence) + return max_dependence < dependence_th + + +def test_separability(model, x, groups, mode='add', threshold=1e-2, bias=0): + ''' + test function separability + + Args: + ----- + model : MultKAN, MLP or python function + x : 2D torch.float + inputs + mode : str + mode = 'add' or mode = 'mul' + score_th : float + threshold of score + res_th : float + threshold of residue + bias : float + bias (for multiplicative separability) + verbose : bool + + Returns: + -------- + bool + + Example + ------- + >>> from kan.hypothesis import * + >>> model = lambda x: x[:,[0]] ** 2 * (x[:,[1]]+x[:,[2]]) + >>> x = torch.normal(0,1,size=(100,3)) + >>> print(test_separability(model, x, [[0],[1,2]], mode='mul')) # True + >>> print(test_separability(model, x, [[0],[1,2]], mode='add')) # False + ''' + if mode == 'add': + hessian = batch_hessian(model, x) + elif mode == 'mul': + compose = lambda *F: reduce(lambda f, g: lambda x: f(g(x)), F) + hessian = batch_hessian(compose(torch.log, torch.abs, lambda x: x+bias, model), x) + + std = torch.std(x, dim=0) + hessian_normalized = hessian * std[None,:] * std[:,None] + score_mat = torch.median(torch.abs(hessian_normalized), dim=0)[0] + + sep_bool = True + + # internal test + n_groups = len(groups) + for i in range(n_groups): + for j in range(i+1, n_groups): + sep_bool *= 
torch.max(score_mat[groups[i]][:,groups[j]]) < threshold + + # external test + group_id = [x for xs in groups for x in xs] + nongroup_id = list(set(range(x.shape[1])) - set(group_id)) + if len(nongroup_id) > 0 and len(group_id) > 0: + sep_bool *= torch.max(score_mat[group_id][:,nongroup_id]) < threshold + + return sep_bool + +def test_general_separability(model, x, groups, threshold=1e-2): + ''' + test function separability + + Args: + ----- + model : MultKAN, MLP or python function + x : 2D torch.float + inputs + mode : str + mode = 'add' or mode = 'mul' + score_th : float + threshold of score + res_th : float + threshold of residue + bias : float + bias (for multiplicative separability) + verbose : bool + + Returns: + -------- + bool + + Example + ------- + >>> from kan.hypothesis import * + >>> model = lambda x: x[:,[0]] ** 2 * (x[:,[1]]**2+x[:,[2]]**2)**2 + >>> x = torch.normal(0,1,size=(100,3)) + >>> print(test_general_separability(model, x, [[1],[0,2]])) # False + >>> print(test_general_separability(model, x, [[0],[1,2]])) # True + ''' + grad = batch_jacobian(model, x) + + gensep_bool = True + + n_groups = len(groups) + for i in range(n_groups): + for j in range(i+1,n_groups): + group_A = groups[i] + group_B = groups[j] + for member_A in group_A: + for member_B in group_B: + def func(x): + grad = batch_jacobian(model, x, create_graph=True) + return grad[:,[member_B]]/grad[:,[member_A]] + # test if func is multiplicative separable + gensep_bool *= test_separability(func, x, groups, mode='mul', threshold=threshold) + return gensep_bool + + +def get_molecule(model, x, sym_th=1e-3, verbose=True): + ''' + how variables are combined hierarchically + + Args: + ----- + model : MultKAN, MLP or python function + x : 2D torch.float + inputs + sym_th : float + threshold of symmetry + verbose : bool + + Returns: + -------- + list + + Example + ------- + >>> from kan.hypothesis import * + >>> model = lambda x: ((x[:,[0]] ** 2 + x[:,[1]] ** 2) ** 2 + (x[:,[2]] ** 2 + 
x[:,[3]] ** 2) ** 2) ** 2 + ((x[:,[4]] ** 2 + x[:,[5]] ** 2) ** 2 + (x[:,[6]] ** 2 + x[:,[7]] ** 2) ** 2) ** 2 + >>> x = torch.normal(0,1,size=(100,8)) + >>> get_molecule(model, x, verbose=False) + [[[0], [1], [2], [3], [4], [5], [6], [7]], + [[0, 1], [2, 3], [4, 5], [6, 7]], + [[0, 1, 2, 3], [4, 5, 6, 7]], + [[0, 1, 2, 3, 4, 5, 6, 7]]] + ''' + n = x.shape[1] + atoms = [[i] for i in range(n)] + molecules = [] + moleculess = [copy.deepcopy(atoms)] + already_full = False + n_layer = 0 + last_n_molecule = n + + while True: + + + pointer = 0 + current_molecule = [] + remove_atoms = [] + n_atom = 0 + + while len(atoms) > 0: + + # assemble molecule + atom = atoms[pointer] + if verbose: + print(current_molecule) + print(atom) + + if len(current_molecule) == 0: + full = False + current_molecule += atom + remove_atoms.append(atom) + n_atom += 1 + else: + # try assemble the atom to the molecule + if len(current_molecule+atom) == x.shape[1] and already_full == False and n_atom > 1 and n_layer > 0: + full = True + already_full = True + else: + full = False + if test_symmetry(model, x, current_molecule+atom, dependence_th=sym_th): + current_molecule += atom + remove_atoms.append(atom) + n_atom += 1 + + pointer += 1 + + if pointer == len(atoms) or full: + molecules.append(current_molecule) + if full: + molecules.append(atom) + remove_atoms.append(atom) + # remove molecules from atoms + for atom in remove_atoms: + atoms.remove(atom) + current_molecule = [] + remove_atoms = [] + pointer = 0 + + # if not making progress, terminate + if len(molecules) == last_n_molecule: + def flatten(xss): + return [x for xs in xss for x in xs] + moleculess.append([flatten(molecules)]) + break + else: + moleculess.append(copy.deepcopy(molecules)) + + last_n_molecule = len(molecules) + + if len(molecules) == 1: + break + + atoms = molecules + molecules = [] + + n_layer += 1 + + #print(n_layer, atoms) + + + # sort + depth = len(moleculess) - 1 + + for l in list(range(depth,0,-1)): + + 
molecules_sorted = [] + molecules_l = moleculess[l] + molecules_lm1 = moleculess[l-1] + + + for molecule_l in molecules_l: + start = 0 + for i in range(1,len(molecule_l)+1): + if molecule_l[start:i] in molecules_lm1: + + molecules_sorted.append(molecule_l[start:i]) + start = i + + moleculess[l-1] = molecules_sorted + + return moleculess + + +def get_tree_node(model, x, moleculess, sep_th=1e-2, skip_test=True): + ''' + get tree nodes + + Args: + ----- + model : MultKAN, MLP or python function + x : 2D torch.float + inputs + sep_th : float + threshold of separability + skip_test : bool + if True, don't test the property of each module (to save time) + + Returns: + -------- + arities : list of numbers + properties : list of strings + + Example + ------- + >>> from kan.hypothesis import * + >>> model = lambda x: ((x[:,[0]] ** 2 + x[:,[1]] ** 2) ** 2 + (x[:,[2]] ** 2 + x[:,[3]] ** 2) ** 2) ** 2 + ((x[:,[4]] ** 2 + x[:,[5]] ** 2) ** 2 + (x[:,[6]] ** 2 + x[:,[7]] ** 2) ** 2) ** 2 + >>> x = torch.normal(0,1,size=(100,8)) + >>> moleculess = get_molecule(model, x, verbose=False) + >>> get_tree_node(model, x, moleculess, skip_test=False) + ''' + arities = [] + properties = [] + + depth = len(moleculess) - 1 + + for l in range(depth): + molecules_l = copy.deepcopy(moleculess[l]) + molecules_lp1 = copy.deepcopy(moleculess[l+1]) + arity_l = [] + property_l = [] + + for molecule in molecules_lp1: + start = 0 + arity = 0 + groups = [] + for i in range(1,len(molecule)+1): + if molecule[start:i] in molecules_l: + groups.append(molecule[start:i]) + start = i + arity += 1 + arity_l.append(arity) + + if arity == 1: + property = 'Id' + else: + property = '' + # test property + if skip_test: + gensep_bool = False + else: + gensep_bool = test_general_separability(model, x, groups, threshold=sep_th) + + if gensep_bool: + property = 'GS' + if l == depth - 1: + if skip_test: + add_bool = False + mul_bool = False + else: + add_bool = test_separability(model, x, groups, mode='add', 
threshold=sep_th) + mul_bool = test_separability(model, x, groups, mode='mul', threshold=sep_th) + if add_bool: + property = 'Add' + if mul_bool: + property = 'Mul' + + + property_l.append(property) + + + arities.append(arity_l) + properties.append(property_l) + + return arities, properties + + +def plot_tree(model, x, in_var=None, style='tree', sym_th=1e-3, sep_th=1e-1, skip_sep_test=False, verbose=False): + ''' + get tree graph + + Args: + ----- + model : MultKAN, MLP or python function + x : 2D torch.float + inputs + in_var : list of symbols + input variables + style : str + 'tree' or 'box' + sym_th : float + threshold of symmetry + sep_th : float + threshold of separability + skip_sep_test : bool + if True, don't test the property of each module (to save time) + verbose : bool + + Returns: + -------- + a tree graph + + Example + ------- + >>> from kan.hypothesis import * + >>> model = lambda x: ((x[:,[0]] ** 2 + x[:,[1]] ** 2) ** 2 + (x[:,[2]] ** 2 + x[:,[3]] ** 2) ** 2) ** 2 + ((x[:,[4]] ** 2 + x[:,[5]] ** 2) ** 2 + (x[:,[6]] ** 2 + x[:,[7]] ** 2) ** 2) ** 2 + >>> x = torch.normal(0,1,size=(100,8)) + >>> plot_tree(model, x) + ''' + moleculess = get_molecule(model, x, sym_th=sym_th, verbose=verbose) + arities, properties = get_tree_node(model, x, moleculess, sep_th=sep_th, skip_test=skip_sep_test) + + n = x.shape[1] + var = None + + in_vars = [] + + if in_var == None: + for ii in range(1, n + 1): + exec(f"x{ii} = sympy.Symbol('x_{ii}')") + exec(f"in_vars.append(x{ii})") + elif type(var[0]) == Symbol: + in_vars = var + else: + in_vars = [sympy.symbols(var_) for var_ in var] + + + def flatten(xss): + return [x for xs in xss for x in xs] + + def myrectangle(center_x, center_y, width_x, width_y): + plt.plot([center_x - width_x/2, center_x + width_x/2], [center_y + width_y/2, center_y + width_y/2], color='k') # up + plt.plot([center_x - width_x/2, center_x + width_x/2], [center_y - width_y/2, center_y - width_y/2], color='k') # down + plt.plot([center_x - width_x/2, 
center_x - width_x/2], [center_y - width_y/2, center_y + width_y/2], color='k') # left + plt.plot([center_x + width_x/2, center_x + width_x/2], [center_y - width_y/2, center_y + width_y/2], color='k') # left + + depth = len(moleculess) + + delta = 1/n + a = 0.3 + b = 0.15 + y0 = 0.5 + + + # draw rectangles + for l in range(depth-1): + molecules = moleculess[l+1] + n_molecule = len(molecules) + + centers = [] + + acc_arity = 0 + + for i in range(n_molecule): + start_id = len(flatten(molecules[:i])) + end_id = len(flatten(molecules[:i+1])) + + center_x = (start_id + (end_id - 1 - start_id)/2) * delta + delta/2 + center_y = (l+1/2)*y0 + width_x = (end_id - start_id - 1 + 2*a)*delta + width_y = 2*b + + # add text (numbers) on rectangles + if style == 'box': + myrectangle(center_x, center_y, width_x, width_y) + plt.text(center_x, center_y, properties[l][i], fontsize=15, horizontalalignment='center', + verticalalignment='center') + elif style == 'tree': + # if 'GS', no rectangle, n=arity tilted lines + # if 'Id', no rectangle, n=arity vertical lines + # if 'Add' or 'Mul'. 
def test_symmetry_var(model, x, input_vars, symmetry_var):
    '''
    test symmetry

    Args:
    -----
        model : MultKAN, MLP or python function
        x : 2D torch.float
            inputs
        input_vars : list of sympy symbols
        symmetry_var : sympy expression

    Returns:
    --------
        cosine similarity

    Example
    -------
    >>> from kan.hypothesis import *
    >>> from sympy import *
    >>> model = lambda x: x[:,[0]] * (x[:,[1]] + x[:,[2]])
    >>> x = torch.normal(0,1,size=(100,8))
    >>> input_vars = a, b, c = symbols('a b c')
    >>> symmetry_var = b + c
    >>> test_symmetry_var(model, x, input_vars, symmetry_var);
    >>> symmetry_var = b * c
    >>> test_symmetry_var(model, x, input_vars, symmetry_var);
    '''
    n_vars = len(input_vars)

    # jacobian of the model with respect to its inputs, per sample
    model_grad = batch_jacobian(model, x)

    # jacobian of the candidate symmetry variable with respect to the inputs
    sym_fun = lambdify(input_vars, symmetry_var, 'numpy')  # numpy-ready function
    wrapped = lambda z: sym_fun(*[z[:, [i]] for i in range(n_vars)])
    candidate_grad = batch_jacobian(wrapped, x)

    # indices of the input columns whose symbols appear in symmetry_var
    idx = [j for s in symmetry_var.free_symbols
           for j in range(n_vars) if s == input_vars[j]]

    g_model = model_grad[:, idx]
    g_sym = candidate_grad[:, idx]

    # |cos| similarity between the two restricted gradients, per sample
    cossim = torch.abs(torch.sum(g_model * g_sym, dim=1) /
                       (torch.norm(g_model, dim=1) * torch.norm(g_sym, dim=1)))

    ratio = torch.sum(cossim > 0.9) / len(cossim)

    print(f'{100*ratio}% data have more than 0.9 cosine similarity')
    if ratio > 0.9:
        print('suggesting symmetry')
    else:
        print('not suggesting symmetry')

    return cossim
def curve2coef(x_eval, y_eval, grid, k, lamb=1e-8):
    '''
    converting B-spline curves to B-spline coefficients using least squares.

    Args:
    -----
        x_eval : 2D torch.tensor
            shape (batch, in_dim)
        y_eval : 3D torch.tensor
            shape (batch, in_dim, out_dim)
        grid : 2D torch.tensor
            shape (in_dim, grid+2*k)
        k : int
            spline order
        lamb : float
            regularized least square lambda, used only by the ridge
            fallback when the direct solver fails. Default: 1e-8.

    Returns:
    --------
        coef : 3D torch.tensor
            shape (in_dim, out_dim, G+k)
    '''
    batch = x_eval.shape[0]
    in_dim = x_eval.shape[1]
    out_dim = y_eval.shape[2]
    n_coef = grid.shape[1] - k - 1

    # B-spline design matrix, broadcast across output dims:
    # shape (in_dim, out_dim, batch, n_coef)
    mat = B_batch(x_eval, grid, k)
    mat = mat.permute(1, 0, 2)[:, None, :, :].expand(in_dim, out_dim, batch, n_coef)
    y_eval = y_eval.permute(1, 2, 0).unsqueeze(dim=3)
    device = mat.device

    try:
        coef = torch.linalg.lstsq(mat, y_eval).solution[:, :, :, 0]
    except RuntimeError:
        # torch.linalg.lstsq can fail (e.g. on rank-deficient systems on
        # some backends). Fall back to a ridge-regularized pseudo-inverse
        # solve instead of leaving `coef` unbound: the previous bare
        # `except` printed a message and then crashed with NameError.
        matT = mat.permute(0, 1, 3, 2)
        XtX = torch.einsum('ijmn,ijnp->ijmp', matT, mat)
        Xty = torch.einsum('ijmn,ijnp->ijmp', matT, y_eval)
        n1, n2, n = XtX.shape[0], XtX.shape[1], XtX.shape[2]
        identity = torch.eye(n, n)[None, None, :, :].expand(n1, n2, n, n).to(device)
        coef = ((XtX + lamb * identity).pinverse() @ Xty)[:, :, :, 0]

    return coef
LinearRegression +import sympy +import yaml +from sympy.utilities.lambdify import lambdify +import re + +# sigmoid = sympy.Function('sigmoid') +# name: (torch implementation, sympy implementation) + +# singularity protection functions +f_inv = lambda x, y_th: ((x_th := 1/y_th), y_th/x_th*x * (torch.abs(x) < x_th) + torch.nan_to_num(1/x) * (torch.abs(x) >= x_th)) +f_inv2 = lambda x, y_th: ((x_th := 1/y_th**(1/2)), y_th * (torch.abs(x) < x_th) + torch.nan_to_num(1/x**2) * (torch.abs(x) >= x_th)) +f_inv3 = lambda x, y_th: ((x_th := 1/y_th**(1/3)), y_th/x_th*x * (torch.abs(x) < x_th) + torch.nan_to_num(1/x**3) * (torch.abs(x) >= x_th)) +f_inv4 = lambda x, y_th: ((x_th := 1/y_th**(1/4)), y_th * (torch.abs(x) < x_th) + torch.nan_to_num(1/x**4) * (torch.abs(x) >= x_th)) +f_inv5 = lambda x, y_th: ((x_th := 1/y_th**(1/5)), y_th/x_th*x * (torch.abs(x) < x_th) + torch.nan_to_num(1/x**5) * (torch.abs(x) >= x_th)) +f_sqrt = lambda x, y_th: ((x_th := 1/y_th**2), x_th/y_th*x * (torch.abs(x) < x_th) + torch.nan_to_num(torch.sqrt(torch.abs(x))*torch.sign(x)) * (torch.abs(x) >= x_th)) +f_power1d5 = lambda x, y_th: torch.abs(x)**1.5 +f_invsqrt = lambda x, y_th: ((x_th := 1/y_th**2), y_th * (torch.abs(x) < x_th) + torch.nan_to_num(1/torch.sqrt(torch.abs(x))) * (torch.abs(x) >= x_th)) +f_log = lambda x, y_th: ((x_th := torch.e**(-y_th)), - y_th * (torch.abs(x) < x_th) + torch.nan_to_num(torch.log(torch.abs(x))) * (torch.abs(x) >= x_th)) +f_tan = lambda x, y_th: ((clip := x % torch.pi), (delta := torch.pi/2-torch.arctan(y_th)), - y_th/delta * (clip - torch.pi/2) * (torch.abs(clip - torch.pi/2) < delta) + torch.nan_to_num(torch.tan(clip)) * (torch.abs(clip - torch.pi/2) >= delta)) +f_arctanh = lambda x, y_th: ((delta := 1-torch.tanh(y_th) + 1e-4), y_th * torch.sign(x) * (torch.abs(x) > 1 - delta) + torch.nan_to_num(torch.arctanh(x)) * (torch.abs(x) <= 1 - delta)) +f_arcsin = lambda x, y_th: ((), torch.pi/2 * torch.sign(x) * (torch.abs(x) > 1) + torch.nan_to_num(torch.arcsin(x)) * 
(torch.abs(x) <= 1)) +f_arccos = lambda x, y_th: ((), torch.pi/2 * (1-torch.sign(x)) * (torch.abs(x) > 1) + torch.nan_to_num(torch.arccos(x)) * (torch.abs(x) <= 1)) +f_exp = lambda x, y_th: ((x_th := torch.log(y_th)), y_th * (x > x_th) + torch.exp(x) * (x <= x_th)) + +SYMBOLIC_LIB = {'x': (lambda x: x, lambda x: x, 1, lambda x, y_th: ((), x)), + 'x^2': (lambda x: x**2, lambda x: x**2, 2, lambda x, y_th: ((), x**2)), + 'x^3': (lambda x: x**3, lambda x: x**3, 3, lambda x, y_th: ((), x**3)), + 'x^4': (lambda x: x**4, lambda x: x**4, 3, lambda x, y_th: ((), x**4)), + 'x^5': (lambda x: x**5, lambda x: x**5, 3, lambda x, y_th: ((), x**5)), + '1/x': (lambda x: 1/x, lambda x: 1/x, 2, f_inv), + '1/x^2': (lambda x: 1/x**2, lambda x: 1/x**2, 2, f_inv2), + '1/x^3': (lambda x: 1/x**3, lambda x: 1/x**3, 3, f_inv3), + '1/x^4': (lambda x: 1/x**4, lambda x: 1/x**4, 4, f_inv4), + '1/x^5': (lambda x: 1/x**5, lambda x: 1/x**5, 5, f_inv5), + 'sqrt': (lambda x: torch.sqrt(x), lambda x: sympy.sqrt(x), 2, f_sqrt), + 'x^0.5': (lambda x: torch.sqrt(x), lambda x: sympy.sqrt(x), 2, f_sqrt), + 'x^1.5': (lambda x: torch.sqrt(x)**3, lambda x: sympy.sqrt(x)**3, 4, f_power1d5), + '1/sqrt(x)': (lambda x: 1/torch.sqrt(x), lambda x: 1/sympy.sqrt(x), 2, f_invsqrt), + '1/x^0.5': (lambda x: 1/torch.sqrt(x), lambda x: 1/sympy.sqrt(x), 2, f_invsqrt), + 'exp': (lambda x: torch.exp(x), lambda x: sympy.exp(x), 2, f_exp), + 'log': (lambda x: torch.log(x), lambda x: sympy.log(x), 2, f_log), + 'abs': (lambda x: torch.abs(x), lambda x: sympy.Abs(x), 3, lambda x, y_th: ((), torch.abs(x))), + 'sin': (lambda x: torch.sin(x), lambda x: sympy.sin(x), 2, lambda x, y_th: ((), torch.sin(x))), + 'cos': (lambda x: torch.cos(x), lambda x: sympy.cos(x), 2, lambda x, y_th: ((), torch.cos(x))), + 'tan': (lambda x: torch.tan(x), lambda x: sympy.tan(x), 3, f_tan), + 'tanh': (lambda x: torch.tanh(x), lambda x: sympy.tanh(x), 3, lambda x, y_th: ((), torch.tanh(x))), + 'sgn': (lambda x: torch.sign(x), lambda x: sympy.sign(x), 3, 
def create_dataset(f, 
                   n_var=2,
                   f_mode='col',
                   ranges=[-1, 1],
                   train_num=1000,
                   test_num=1000,
                   normalize_input=False,
                   normalize_label=False,
                   device='cpu',
                   seed=0):
    '''
    create dataset

    Args:
    -----
        f : function
            the symbolic formula used to create the synthetic dataset
        n_var : int
            the number of input variables. Default: 2.
        f_mode : str
            'col' if f takes inputs of shape (batch, n_var);
            'row' if f takes inputs of shape (n_var, batch). Default: 'col'.
        ranges : list or np.array; shape (2,) or (n_var, 2)
            the range of input variables. Default: [-1,1].
        train_num : int
            the number of training samples. Default: 1000.
        test_num : int
            the number of test samples. Default: 1000.
        normalize_input : bool
            If True, apply normalization to inputs. Default: False.
        normalize_label : bool
            If True, apply normalization to labels. Default: False.
        device : str
            device. Default: 'cpu'.
        seed : int
            random seed. Default: 0.

    Returns:
    --------
        dataset : dic
            Train/test inputs/labels are dataset['train_input'], dataset['train_label'],
            dataset['test_input'], dataset['test_label']

    Raises:
    -------
        ValueError
            if f_mode is neither 'col' nor 'row'.

    Example
    -------
    >>> f = lambda x: torch.exp(torch.sin(torch.pi*x[:,[0]]) + x[:,[1]]**2)
    >>> dataset = create_dataset(f, n_var=2, train_num=100)
    >>> dataset['train_input'].shape
    torch.Size([100, 2])
    '''
    np.random.seed(seed)
    torch.manual_seed(seed)

    # broadcast a single (2,) range to every input variable
    if len(np.array(ranges).shape) == 1:
        ranges = np.array(ranges * n_var).reshape(n_var, 2)
    else:
        ranges = np.array(ranges)

    train_input = torch.zeros(train_num, n_var)
    test_input = torch.zeros(test_num, n_var)
    for i in range(n_var):
        train_input[:, i] = torch.rand(train_num,) * (ranges[i, 1] - ranges[i, 0]) + ranges[i, 0]
        test_input[:, i] = torch.rand(test_num,) * (ranges[i, 1] - ranges[i, 0]) + ranges[i, 0]

    if f_mode == 'col':
        train_label = f(train_input)
        test_label = f(test_input)
    elif f_mode == 'row':
        train_label = f(train_input.T)
        test_label = f(test_input.T)
    else:
        # previously only printed a message and fell through, which then
        # crashed with NameError on the undefined train_label
        raise ValueError(f'f_mode {f_mode} not recognized')

    # ensure labels are 2D: (num, 1) for scalar-valued f
    if len(train_label.shape) == 1:
        train_label = train_label.unsqueeze(dim=1)
        test_label = test_label.unsqueeze(dim=1)

    def normalize(data, mean, std):
        return (data - mean) / std

    if normalize_input == True:
        # normalize test with *train* statistics, as in standard practice
        mean_input = torch.mean(train_input, dim=0, keepdim=True)
        std_input = torch.std(train_input, dim=0, keepdim=True)
        train_input = normalize(train_input, mean_input, std_input)
        test_input = normalize(test_input, mean_input, std_input)

    if normalize_label == True:
        mean_label = torch.mean(train_label, dim=0, keepdim=True)
        std_label = torch.std(train_label, dim=0, keepdim=True)
        train_label = normalize(train_label, mean_label, std_label)
        test_label = normalize(test_label, mean_label, std_label)

    dataset = {}
    dataset['train_input'] = train_input.to(device)
    dataset['test_input'] = test_input.to(device)
    dataset['train_label'] = train_label.to(device)
    dataset['test_label'] = test_label.to(device)

    return dataset
def sparse_mask(in_dim, out_dim):
    '''
    Build a 0/1 mask of shape (in_dim, out_dim) with sparse connectivity.

    Neurons are placed at evenly spaced coordinates on [0, 1]; each input
    is connected to its nearest output and each output to its nearest
    input, so every neuron keeps at least one connection.
    '''
    in_coord = torch.arange(in_dim) * 1/in_dim + 1/(2*in_dim)
    out_coord = torch.arange(out_dim) * 1/out_dim + 1/(2*out_dim)

    # pairwise distances: rows index outputs, columns index inputs
    dist = torch.abs(out_coord[:, None] - in_coord[None, :])

    nearest_out = torch.argmin(dist, dim=0)  # best output for each input
    nearest_in = torch.argmin(dist, dim=1)   # best input for each output

    mask = torch.zeros(in_dim, out_dim)
    mask[torch.arange(in_dim), nearest_out] = 1.
    mask[nearest_in, torch.arange(out_dim)] = 1.

    return mask
def batch_jacobian(func, x, create_graph=False, mode='scalar'):
    '''
    jacobian

    Args:
    -----
        func : function or model
            maps (batch, in_dim) -> (batch, out_dim)
        x : inputs
            shape (batch, in_dim)
        create_graph : bool
            keep the autograd graph (needed for higher-order derivatives)
        mode : str
            'scalar' returns shape (batch, in_dim) (first output component);
            'vector' returns shape (batch, out_dim, in_dim)

    Returns:
    --------
        jacobian

    Example
    -------
    >>> from kan.utils import batch_jacobian
    >>> x = torch.normal(0,1,size=(100,2))
    >>> model = lambda x: x[:,[0]] + x[:,[1]]
    >>> batch_jacobian(model, x)
    '''
    # Summing over the batch decouples samples, so a single jacobian call
    # produces every per-sample gradient at once.
    summed = lambda inp: func(inp).sum(dim=0)
    jac = torch.autograd.functional.jacobian(summed, x, create_graph=create_graph)
    if mode == 'scalar':
        return jac[0]
    elif mode == 'vector':
        return jac.permute(1, 0, 2)
def create_dataset_from_data(inputs, labels, train_ratio=0.8, device='cpu'):
    '''
    create dataset from data

    Args:
    -----
        inputs : 2D torch.float
        labels : 2D torch.float
        train_ratio : float
            the ratio of training fraction
        device : str

    Returns:
    --------
        dataset (dictionary)

    Example
    -------
    >>> from kan.utils import create_dataset_from_data
    >>> x = torch.normal(0,1,size=(100,2))
    >>> y = torch.normal(0,1,size=(100,1))
    >>> dataset = create_dataset_from_data(x, y)
    >>> dataset['train_input'].shape
    '''
    num = inputs.shape[0]
    n_train = int(num * train_ratio)

    # random non-overlapping split of the row indices
    train_id = np.random.choice(num, n_train, replace=False)
    test_id = list(set(np.arange(num)) - set(train_id))

    return {
        'train_input': inputs[train_id].detach().to(device),
        'test_input': inputs[test_id].detach().to(device),
        'train_label': labels[train_id].detach().to(device),
        'test_label': labels[test_id].detach().to(device),
    }
def model2param(model):
    '''
    turn model parameters into a flattened vector

    The result lives on `model.device` (assumes the attribute exists and
    that all parameters can be concatenated with a tensor on that device).
    '''
    pieces = [params.reshape(-1,) for params in model.parameters()]
    flat = torch.tensor([]).to(model.device)
    return torch.cat([flat] + pieces, dim=0)
+ out_dim : int + output dimension. Default: 3. + num : int + the number of grid intervals = G. Default: 5. + k : int + the order of piecewise polynomial. Default: 3. + noise_scale : float + the scale of noise injected at initialization. Default: 0.1. + scale_base_mu : float + the scale of the residual function b(x) is intialized to be N(scale_base_mu, scale_base_sigma^2). + scale_base_sigma : float + the scale of the residual function b(x) is intialized to be N(scale_base_mu, scale_base_sigma^2). + scale_sp : float + the scale of the base function spline(x). + base_fun : function + residual function b(x). Default: torch.nn.SiLU() + grid_eps : float + When grid_eps = 1, the grid is uniform; when grid_eps = 0, the grid is partitioned using percentiles of samples. 0 < grid_eps < 1 interpolates between the two extremes. + grid_range : list/np.array of shape (2,) + setting the range of grids. Default: [-1,1]. + sp_trainable : bool + If true, scale_sp is trainable + sb_trainable : bool + If true, scale_base is trainable + device : str + device + sparse_init : bool + if sparse_init = True, sparse initialization is applied. 
+ + Returns: + -------- + self + + Example + ------- + >>> from kan.KANLayer import * + >>> model = KANLayer(in_dim=3, out_dim=5) + >>> (model.in_dim, model.out_dim) + ''' + super(KANLayer, self).__init__() + # size + self.out_dim = out_dim + self.in_dim = in_dim + self.num = num + self.k = k + + grid = torch.linspace(grid_range[0], grid_range[1], steps=num + 1)[None,:].expand(self.in_dim, num+1) + grid = extend_grid(grid, k_extend=k) + self.grid = torch.nn.Parameter(grid).requires_grad_(False) + noises = (torch.rand(self.num+1, self.in_dim, self.out_dim) - 1/2) * noise_scale / num + + self.coef = torch.nn.Parameter(curve2coef(self.grid[:,k:-k].permute(1,0), noises, self.grid, k)) + + if sparse_init: + self.mask = torch.nn.Parameter(sparse_mask(in_dim, out_dim)).requires_grad_(False) + else: + self.mask = torch.nn.Parameter(torch.ones(in_dim, out_dim)).requires_grad_(False) + + self.scale_base = torch.nn.Parameter(scale_base_mu * 1 / np.sqrt(in_dim) + \ + scale_base_sigma * (torch.rand(in_dim, out_dim)*2-1) * 1/np.sqrt(in_dim)).requires_grad_(sb_trainable) + self.scale_sp = torch.nn.Parameter(torch.ones(in_dim, out_dim) * scale_sp * 1 / np.sqrt(in_dim) * self.mask).requires_grad_(sp_trainable) # make scale trainable + self.base_fun = base_fun + + + self.grid_eps = grid_eps + + self.to(device) + + def to(self, device): + super(KANLayer, self).to(device) + self.device = device + return self + + def forward(self, x): + ''' + KANLayer forward given input x + + Args: + ----- + x : 2D torch.float + inputs, shape (number of samples, input dimension) + + Returns: + -------- + y : 2D torch.float + outputs, shape (number of samples, output dimension) + preacts : 3D torch.float + fan out x into activations, shape (number of sampels, output dimension, input dimension) + postacts : 3D torch.float + the outputs of activation functions with preacts as inputs + postspline : 3D torch.float + the outputs of spline functions with preacts as inputs + + Example + ------- + >>> from 
kan.KANLayer import * + >>> model = KANLayer(in_dim=3, out_dim=5) + >>> x = torch.normal(0,1,size=(100,3)) + >>> y, preacts, postacts, postspline = model(x) + >>> y.shape, preacts.shape, postacts.shape, postspline.shape + ''' + batch = x.shape[0] + preacts = x[:,None,:].clone().expand(batch, self.out_dim, self.in_dim) + + base = self.base_fun(x) # (batch, in_dim) + y = coef2curve(x_eval=x, grid=self.grid, coef=self.coef, k=self.k) + + postspline = y.clone().permute(0,2,1) + + y = self.scale_base[None,:,:] * base[:,:,None] + self.scale_sp[None,:,:] * y + y = self.mask[None,:,:] * y + + postacts = y.clone().permute(0,2,1) + + y = torch.sum(y, dim=1) + return y, preacts, postacts, postspline + + def update_grid_from_samples(self, x, mode='sample'): + ''' + update grid from samples + + Args: + ----- + x : 2D torch.float + inputs, shape (number of samples, input dimension) + + Returns: + -------- + None + + Example + ------- + >>> model = KANLayer(in_dim=1, out_dim=1, num=5, k=3) + >>> print(model.grid.data) + >>> x = torch.linspace(-3,3,steps=100)[:,None] + >>> model.update_grid_from_samples(x) + >>> print(model.grid.data) + ''' + + batch = x.shape[0] + #x = torch.einsum('ij,k->ikj', x, torch.ones(self.out_dim, ).to(self.device)).reshape(batch, self.size).permute(1, 0) + x_pos = torch.sort(x, dim=0)[0] + y_eval = coef2curve(x_pos, self.grid, self.coef, self.k) + num_interval = self.grid.shape[1] - 1 - 2*self.k + + def get_grid(num_interval): + ids = [int(batch / num_interval * i) for i in range(num_interval)] + [-1] + grid_adaptive = x_pos[ids, :].permute(1,0) + margin = 0.00 + h = (grid_adaptive[:,[-1]] - grid_adaptive[:,[0]] + 2 * margin)/num_interval + grid_uniform = grid_adaptive[:,[0]] - margin + h * torch.arange(num_interval+1,)[None, :].to(x.device) + grid = self.grid_eps * grid_uniform + (1 - self.grid_eps) * grid_adaptive + return grid + + + grid = get_grid(num_interval) + + if mode == 'grid': + sample_grid = get_grid(2*num_interval) + x_pos = 
sample_grid.permute(1,0) + y_eval = coef2curve(x_pos, self.grid, self.coef, self.k) + + self.grid.data = extend_grid(grid, k_extend=self.k) + #print('x_pos 2', x_pos.shape) + #print('y_eval 2', y_eval.shape) + self.coef.data = curve2coef(x_pos, y_eval, self.grid, self.k) + + def initialize_grid_from_parent(self, parent, x, mode='sample'): + ''' + update grid from a parent KANLayer & samples + + Args: + ----- + parent : KANLayer + a parent KANLayer (whose grid is usually coarser than the current model) + x : 2D torch.float + inputs, shape (number of samples, input dimension) + + Returns: + -------- + None + + Example + ------- + >>> batch = 100 + >>> parent_model = KANLayer(in_dim=1, out_dim=1, num=5, k=3) + >>> print(parent_model.grid.data) + >>> model = KANLayer(in_dim=1, out_dim=1, num=10, k=3) + >>> x = torch.normal(0,1,size=(batch, 1)) + >>> model.initialize_grid_from_parent(parent_model, x) + >>> print(model.grid.data) + ''' + + batch = x.shape[0] + + # shrink grid + x_pos = torch.sort(x, dim=0)[0] + y_eval = coef2curve(x_pos, parent.grid, parent.coef, parent.k) + num_interval = self.grid.shape[1] - 1 - 2*self.k + + + ''' + # based on samples + def get_grid(num_interval): + ids = [int(batch / num_interval * i) for i in range(num_interval)] + [-1] + grid_adaptive = x_pos[ids, :].permute(1,0) + h = (grid_adaptive[:,[-1]] - grid_adaptive[:,[0]])/num_interval + grid_uniform = grid_adaptive[:,[0]] + h * torch.arange(num_interval+1,)[None, :].to(x.device) + grid = self.grid_eps * grid_uniform + (1 - self.grid_eps) * grid_adaptive + return grid''' + + #print('p', parent.grid) + # based on interpolating parent grid + def get_grid(num_interval): + x_pos = parent.grid[:,parent.k:-parent.k] + #print('x_pos', x_pos) + sp2 = KANLayer(in_dim=1, out_dim=self.in_dim,k=1,num=x_pos.shape[1]-1,scale_base_mu=0.0, scale_base_sigma=0.0).to(x.device) + + #print('sp2_grid', sp2.grid[:,sp2.k:-sp2.k].permute(1,0).expand(-1,self.in_dim)) + #print('sp2_coef_shape', sp2.coef.shape) + 
            # Fit sp2 so it maps quantile positions -> parent knot locations; then
            # evaluating sp2 at uniform percentiles resamples the parent grid.
            sp2_coef = curve2coef(sp2.grid[:,sp2.k:-sp2.k].permute(1,0).expand(-1,self.in_dim), x_pos.permute(1,0).unsqueeze(dim=2), sp2.grid[:,:], k=1).permute(1,0,2)
            shp = sp2_coef.shape
            #sp2_coef = torch.cat([torch.zeros(shp[0], shp[1], 1), sp2_coef, torch.zeros(shp[0], shp[1], 1)], dim=2)
            #print('sp2_coef',sp2_coef)
            #print(sp2.coef.shape)
            sp2.coef.data = sp2_coef
            percentile = torch.linspace(-1,1,self.num+1).to(self.device)
            grid = sp2(percentile.unsqueeze(dim=1))[0].permute(1,0)
            #print('c', grid)
            return grid

        grid = get_grid(num_interval)

        if mode == 'grid':
            # Refit on a 2x finer sampling of the parent spline instead of the raw inputs.
            sample_grid = get_grid(2*num_interval)
            x_pos = sample_grid.permute(1,0)
            y_eval = coef2curve(x_pos, parent.grid, parent.coef, parent.k)

        grid = extend_grid(grid, k_extend=self.k)
        self.grid.data = grid
        self.coef.data = curve2coef(x_pos, y_eval, self.grid, self.k)

    def get_subset(self, in_id, out_id):
        '''
        get a smaller KANLayer from a larger KANLayer (used for pruning)

        Args:
        -----
            in_id : list
                id of selected input neurons
            out_id : list
                id of selected output neurons

        Returns:
        --------
            spb : KANLayer

        Example
        -------
        >>> kanlayer_large = KANLayer(in_dim=10, out_dim=10, num=5, k=3)
        >>> kanlayer_small = kanlayer_large.get_subset([0,9],[1,2,3])
        >>> kanlayer_small.in_dim, kanlayer_small.out_dim
        (2, 3)
        '''
        spb = KANLayer(len(in_id), len(out_id), self.num, self.k, base_fun=self.base_fun)
        # Copy over the selected rows/columns of every per-edge parameter.
        # grid is per-input-neuron; the others are (in_dim, out_dim)-indexed.
        spb.grid.data = self.grid[in_id]
        spb.coef.data = self.coef[in_id][:,out_id]
        spb.scale_base.data = self.scale_base[in_id][:,out_id]
        spb.scale_sp.data = self.scale_sp[in_id][:,out_id]
        spb.mask.data = self.mask[in_id][:,out_id]

        spb.in_dim = len(in_id)
        spb.out_dim = len(out_id)
        return spb


    def swap(self, i1, i2, mode='in'):
        '''
        swap the i1 neuron with the i2 neuron in input (if mode == 'in') or output (if mode == 'out')

        Args:
        -----
            i1 : int
            i2 : int
            mode : str
                mode = 'in' or 'out'

        Returns:
        --------
            None

        Example
        -------
        >>> from kan.KANLayer import *
        >>> model = KANLayer(in_dim=2, out_dim=2, num=5, k=3)
        >>> print(model.coef)
        >>> model.swap(0,1,mode='in')
        >>> print(model.coef)
        '''
        with torch.no_grad():
            # Swap along dim 0 for 'in' and dim 1 for 'out'; .clone() avoids aliasing
            # during the simultaneous assignment.
            def swap_(data, i1, i2, mode='in'):
                if mode == 'in':
                    data[i1], data[i2] = data[i2].clone(), data[i1].clone()
                elif mode == 'out':
                    data[:,i1], data[:,i2] = data[:,i2].clone(), data[:,i1].clone()

            # grid is indexed by input neuron only, so it is swapped only in 'in' mode.
            if mode == 'in':
                swap_(self.grid.data, i1, i2, mode='in')
            swap_(self.coef.data, i1, i2, mode=mode)
            swap_(self.scale_base.data, i1, i2, mode=mode)
            swap_(self.scale_sp.data, i1, i2, mode=mode)
            swap_(self.mask.data, i1, i2, mode=mode)

diff --git a/dl/kan/kan/LBFGS.py b/dl/kan/kan/LBFGS.py
new file mode 100644
index 000000000..212477f23
--- /dev/null
+++ b/dl/kan/kan/LBFGS.py
@@ -0,0 +1,493 @@
import torch
from functools import reduce
from torch.optim import Optimizer

__all__ = ['LBFGS']

def _cubic_interpolate(x1, f1, g1, x2, f2, g2, bounds=None):
    # Minimizer of the cubic through (x1,f1) and (x2,f2) with slopes g1, g2,
    # clamped to `bounds`. Falls back to the bisection point when the cubic
    # has no real minimizer (negative discriminant).
    # ported from https://github.com/torch/optim/blob/master/polyinterp.lua
    # Compute bounds of interpolation area
    if bounds is not None:
        xmin_bound, xmax_bound = bounds
    else:
        xmin_bound, xmax_bound = (x1, x2) if x1 <= x2 else (x2, x1)

    # Code for most common case: cubic interpolation of 2 points
    #   w/ function and derivative values for both
    # Solution in this case (where x2 is the farthest point):
    #   d1 = g1 + g2 - 3*(f1-f2)/(x1-x2);
    #   d2 = sqrt(d1^2 - g1*g2);
    #   min_pos = x2 - (x2 - x1)*((g2 + d2 - d1)/(g2 - g1 + 2*d2));
    #   t_new = min(max(min_pos,xmin_bound),xmax_bound);
    d1 = g1 + g2 - 3 * (f1 - f2) / (x1 - x2)
    d2_square = d1**2 - g1 * g2
    if d2_square >= 0:
        # NOTE(review): .sqrt() assumes tensor inputs — a plain float here would fail; confirm callers.
        d2 = d2_square.sqrt()
        if x1 <= x2:
            min_pos = x2 - (x2 - x1) * ((g2 + d2 - d1) / (g2 - g1 + 2 * d2))
        else:
            min_pos = x1 - (x1 - x2) * ((g1 + d2 - d1) / (g1 - g2 + 2 * d2))
        return min(max(min_pos, xmin_bound), xmax_bound)
    else:
        return (xmin_bound + xmax_bound) / 2.
def _strong_wolfe(obj_func,
                  x,
                  t,
                  d,
                  f,
                  g,
                  gtd,
                  c1=1e-4,
                  c2=0.9,
                  tolerance_change=1e-9,
                  max_ls=25):
    # Strong-Wolfe line search along direction `d` from point `x` with initial
    # step `t`. `obj_func(x, t, d)` returns (loss, flat_grad) at x + t*d;
    # `f`, `g`, `gtd` are the loss, gradient, and directional derivative at t=0.
    # Returns (f_new, g_new, t, ls_func_evals).
    # ported from https://github.com/torch/optim/blob/master/lswolfe.lua
    d_norm = d.abs().max()
    g = g.clone(memory_format=torch.contiguous_format)
    # evaluate objective and gradient using initial step
    f_new, g_new = obj_func(x, t, d)
    ls_func_evals = 1
    gtd_new = g_new.dot(d)

    # bracket an interval containing a point satisfying the Wolfe criteria
    t_prev, f_prev, g_prev, gtd_prev = 0, f, g, gtd
    done = False
    ls_iter = 0
    while ls_iter < max_ls:
        # check conditions: Armijo violated, or not decreasing -> bracket found
        #print(f_prev, f_new, g_new)
        if f_new > (f + c1 * t * gtd) or (ls_iter > 1 and f_new >= f_prev):
            bracket = [t_prev, t]
            bracket_f = [f_prev, f_new]
            bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)]
            bracket_gtd = [gtd_prev, gtd_new]
            break

        # curvature condition satisfied -> current t is acceptable
        if abs(gtd_new) <= -c2 * gtd:
            bracket = [t]
            bracket_f = [f_new]
            bracket_g = [g_new]
            done = True
            break

        # slope turned non-negative -> minimum is bracketed
        if gtd_new >= 0:
            bracket = [t_prev, t]
            bracket_f = [f_prev, f_new]
            bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)]
            bracket_gtd = [gtd_prev, gtd_new]
            break

        # interpolate to choose the next trial step
        min_step = t + 0.01 * (t - t_prev)
        max_step = t * 10
        tmp = t
        t = _cubic_interpolate(
            t_prev,
            f_prev,
            gtd_prev,
            t,
            f_new,
            gtd_new,
            bounds=(min_step, max_step))

        # next step
        t_prev = tmp
        f_prev = f_new
        g_prev = g_new.clone(memory_format=torch.contiguous_format)
        gtd_prev = gtd_new
        f_new, g_new = obj_func(x, t, d)
        ls_func_evals += 1
        gtd_new = g_new.dot(d)
        ls_iter += 1


    # reached max number of iterations?
    if ls_iter == max_ls:
        bracket = [0, t]
        bracket_f = [f, f_new]
        bracket_g = [g, g_new]

    # zoom phase: we now have a point satisfying the criteria, or
    # a bracket around it. We refine the bracket until we find the
    # exact point satisfying the criteria
    insuf_progress = False
    # find high and low points in bracket
    low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[-1] else (1, 0)
    while not done and ls_iter < max_ls:
        # line-search bracket is so small
        if abs(bracket[1] - bracket[0]) * d_norm < tolerance_change:
            break

        # compute new trial value
        t = _cubic_interpolate(bracket[0], bracket_f[0], bracket_gtd[0],
                               bracket[1], bracket_f[1], bracket_gtd[1])

        # test that we are making sufficient progress:
        # in case `t` is so close to boundary, we mark that we are making
        # insufficient progress, and if
        #   + we have made insufficient progress in the last step, or
        #   + `t` is at one of the boundary,
        # we will move `t` to a position which is `0.1 * len(bracket)`
        # away from the nearest boundary point.
        eps = 0.1 * (max(bracket) - min(bracket))
        if min(max(bracket) - t, t - min(bracket)) < eps:
            # interpolation close to boundary
            if insuf_progress or t >= max(bracket) or t <= min(bracket):
                # evaluate at 0.1 away from boundary
                if abs(t - max(bracket)) < abs(t - min(bracket)):
                    t = max(bracket) - eps
                else:
                    t = min(bracket) + eps
                insuf_progress = False
            else:
                insuf_progress = True
        else:
            insuf_progress = False

        # Evaluate new point
        f_new, g_new = obj_func(x, t, d)
        ls_func_evals += 1
        gtd_new = g_new.dot(d)
        ls_iter += 1

        if f_new > (f + c1 * t * gtd) or f_new >= bracket_f[low_pos]:
            # Armijo condition not satisfied or not lower than lowest point
            bracket[high_pos] = t
            bracket_f[high_pos] = f_new
            bracket_g[high_pos] = g_new.clone(memory_format=torch.contiguous_format)
            bracket_gtd[high_pos] = gtd_new
            low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[1] else (1, 0)
        else:
            if abs(gtd_new) <= -c2 * gtd:
                # Wolfe conditions satisfied
                done = True
            elif gtd_new * (bracket[high_pos] - bracket[low_pos]) >= 0:
                # old low becomes new high
                bracket[high_pos] = bracket[low_pos]
                bracket_f[high_pos] = bracket_f[low_pos]
                bracket_g[high_pos] = bracket_g[low_pos]
                bracket_gtd[high_pos] = bracket_gtd[low_pos]

            # new point becomes new low
            bracket[low_pos] = t
            bracket_f[low_pos] = f_new
            bracket_g[low_pos] = g_new.clone(memory_format=torch.contiguous_format)
            bracket_gtd[low_pos] = gtd_new

    #print(bracket)
    # return stuff: single-element bracket means the curvature condition was met exactly
    if len(bracket) == 1:
        t = bracket[0]
        f_new = bracket_f[0]
        g_new = bracket_g[0]
    else:
        t = bracket[low_pos]
        f_new = bracket_f[low_pos]
        g_new = bracket_g[low_pos]
    return f_new, g_new, t, ls_func_evals



class LBFGS(Optimizer):
    """Implements L-BFGS algorithm.

    Heavily inspired by `minFunc
    `_.

    .. warning::
        This optimizer doesn't support per-parameter options and parameter
        groups (there can be only one).

    .. warning::
        Right now all parameters have to be on a single device. This will be
        improved in the future.

    .. note::
        This is a very memory intensive optimizer (it requires additional
        ``param_bytes * (history_size + 1)`` bytes). If it doesn't fit in memory
        try reducing the history size, or use a different algorithm.

    Args:
        lr (float): learning rate (default: 1)
        max_iter (int): maximal number of iterations per optimization step
            (default: 20)
        max_eval (int): maximal number of function evaluations per optimization
            step (default: max_iter * 1.25).
        tolerance_grad (float): termination tolerance on first order optimality
            (default: 1e-7).
        tolerance_change (float): termination tolerance on function
            value/parameter changes (default: 1e-9).
        history_size (int): update history size (default: 100).
        line_search_fn (str): either 'strong_wolfe' or None (default: None).
+ """ + + def __init__(self, + params, + lr=1, + max_iter=20, + max_eval=None, + tolerance_grad=1e-7, + tolerance_change=1e-9, + tolerance_ys=1e-32, + history_size=100, + line_search_fn=None): + if max_eval is None: + max_eval = max_iter * 5 // 4 + defaults = dict( + lr=lr, + max_iter=max_iter, + max_eval=max_eval, + tolerance_grad=tolerance_grad, + tolerance_change=tolerance_change, + tolerance_ys=tolerance_ys, + history_size=history_size, + line_search_fn=line_search_fn) + super().__init__(params, defaults) + + if len(self.param_groups) != 1: + raise ValueError("LBFGS doesn't support per-parameter options " + "(parameter groups)") + + self._params = self.param_groups[0]['params'] + self._numel_cache = None + + def _numel(self): + if self._numel_cache is None: + self._numel_cache = reduce(lambda total, p: total + p.numel(), self._params, 0) + return self._numel_cache + + def _gather_flat_grad(self): + views = [] + for p in self._params: + if p.grad is None: + view = p.new(p.numel()).zero_() + elif p.grad.is_sparse: + view = p.grad.to_dense().view(-1) + else: + view = p.grad.view(-1) + views.append(view) + device = views[0].device + return torch.cat(views, dim=0) + + def _add_grad(self, step_size, update): + offset = 0 + for p in self._params: + numel = p.numel() + # view as to avoid deprecated pointwise semantics + p.add_(update[offset:offset + numel].view_as(p), alpha=step_size) + offset += numel + assert offset == self._numel() + + def _clone_param(self): + return [p.clone(memory_format=torch.contiguous_format) for p in self._params] + + def _set_param(self, params_data): + for p, pdata in zip(self._params, params_data): + p.copy_(pdata) + + def _directional_evaluate(self, closure, x, t, d): + self._add_grad(t, d) + loss = float(closure()) + flat_grad = self._gather_flat_grad() + self._set_param(x) + return loss, flat_grad + + + @torch.no_grad() + def step(self, closure): + """Perform a single optimization step. 
+ + Args: + closure (Callable): A closure that reevaluates the model + and returns the loss. + """ + + torch.manual_seed(0) + + assert len(self.param_groups) == 1 + + # Make sure the closure is always called with grad enabled + closure = torch.enable_grad()(closure) + + group = self.param_groups[0] + lr = group['lr'] + max_iter = group['max_iter'] + max_eval = group['max_eval'] + tolerance_grad = group['tolerance_grad'] + tolerance_change = group['tolerance_change'] + tolerance_ys = group['tolerance_ys'] + line_search_fn = group['line_search_fn'] + history_size = group['history_size'] + + # NOTE: LBFGS has only global state, but we register it as state for + # the first param, because this helps with casting in load_state_dict + state = self.state[self._params[0]] + state.setdefault('func_evals', 0) + state.setdefault('n_iter', 0) + + # evaluate initial f(x) and df/dx + orig_loss = closure() + loss = float(orig_loss) + current_evals = 1 + state['func_evals'] += 1 + + flat_grad = self._gather_flat_grad() + opt_cond = flat_grad.abs().max() <= tolerance_grad + + # optimal condition + if opt_cond: + return orig_loss + + # tensors cached in state (for tracing) + d = state.get('d') + t = state.get('t') + old_dirs = state.get('old_dirs') + old_stps = state.get('old_stps') + ro = state.get('ro') + H_diag = state.get('H_diag') + prev_flat_grad = state.get('prev_flat_grad') + prev_loss = state.get('prev_loss') + + n_iter = 0 + # optimize for a max of max_iter iterations + while n_iter < max_iter: + # keep track of nb of iterations + n_iter += 1 + state['n_iter'] += 1 + + ############################################################ + # compute gradient descent direction + ############################################################ + if state['n_iter'] == 1: + d = flat_grad.neg() + old_dirs = [] + old_stps = [] + ro = [] + H_diag = 1 + else: + # do lbfgs update (update memory) + y = flat_grad.sub(prev_flat_grad) + s = d.mul(t) + ys = y.dot(s) # y*s + if ys > tolerance_ys: + # 
updating memory + if len(old_dirs) == history_size: + # shift history by one (limited-memory) + old_dirs.pop(0) + old_stps.pop(0) + ro.pop(0) + + # store new direction/step + old_dirs.append(y) + old_stps.append(s) + ro.append(1. / ys) + + # update scale of initial Hessian approximation + H_diag = ys / y.dot(y) # (y*y) + + # compute the approximate (L-BFGS) inverse Hessian + # multiplied by the gradient + num_old = len(old_dirs) + + if 'al' not in state: + state['al'] = [None] * history_size + al = state['al'] + + # iteration in L-BFGS loop collapsed to use just one buffer + q = flat_grad.neg() + for i in range(num_old - 1, -1, -1): + al[i] = old_stps[i].dot(q) * ro[i] + q.add_(old_dirs[i], alpha=-al[i]) + + # multiply by initial Hessian + # r/d is the final direction + d = r = torch.mul(q, H_diag) + for i in range(num_old): + be_i = old_dirs[i].dot(r) * ro[i] + r.add_(old_stps[i], alpha=al[i] - be_i) + + if prev_flat_grad is None: + prev_flat_grad = flat_grad.clone(memory_format=torch.contiguous_format) + else: + prev_flat_grad.copy_(flat_grad) + prev_loss = loss + + ############################################################ + # compute step length + ############################################################ + # reset initial guess for step size + if state['n_iter'] == 1: + t = min(1., 1. 
/ flat_grad.abs().sum()) * lr + else: + t = lr + + # directional derivative + gtd = flat_grad.dot(d) # g * d + + # directional derivative is below tolerance + if gtd > -tolerance_change: + break + + # optional line search: user function + ls_func_evals = 0 + if line_search_fn is not None: + # perform line search, using user function + if line_search_fn != "strong_wolfe": + raise RuntimeError("only 'strong_wolfe' is supported") + else: + x_init = self._clone_param() + + def obj_func(x, t, d): + return self._directional_evaluate(closure, x, t, d) + loss, flat_grad, t, ls_func_evals = _strong_wolfe( + obj_func, x_init, t, d, loss, flat_grad, gtd) + self._add_grad(t, d) + opt_cond = flat_grad.abs().max() <= tolerance_grad + else: + # no line search, simply move with fixed-step + self._add_grad(t, d) + if n_iter != max_iter: + # re-evaluate function only if not in last iteration + # the reason we do this: in a stochastic setting, + # no use to re-evaluate that function here + with torch.enable_grad(): + loss = float(closure()) + flat_grad = self._gather_flat_grad() + opt_cond = flat_grad.abs().max() <= tolerance_grad + ls_func_evals = 1 + + # update func eval + current_evals += ls_func_evals + state['func_evals'] += ls_func_evals + + ############################################################ + # check conditions + ############################################################ + if n_iter == max_iter: + break + + if current_evals >= max_eval: + break + + # optimal condition + if opt_cond: + break + + # lack of progress + if d.mul(t).abs().max() <= tolerance_change: + break + + if abs(loss - prev_loss) < tolerance_change: + break + + state['d'] = d + state['t'] = t + state['old_dirs'] = old_dirs + state['old_stps'] = old_stps + state['ro'] = ro + state['H_diag'] = H_diag + state['prev_flat_grad'] = prev_flat_grad + state['prev_loss'] = prev_loss + + return orig_loss diff --git a/dl/kan/kan/MLP.py b/dl/kan/kan/MLP.py new file mode 100644 index 000000000..1066c3b3d --- 
/dev/null +++ b/dl/kan/kan/MLP.py @@ -0,0 +1,361 @@ +import torch +import torch.nn as nn +import matplotlib.pyplot as plt +import numpy as np +from tqdm import tqdm +from .LBFGS import LBFGS + +seed = 0 +torch.manual_seed(seed) + +class MLP(nn.Module): + + def __init__(self, width, act='silu', save_act=True, seed=0, device='cpu'): + super(MLP, self).__init__() + + torch.manual_seed(seed) + + linears = [] + self.width = width + self.depth = depth = len(width) - 1 + for i in range(depth): + linears.append(nn.Linear(width[i], width[i+1])) + self.linears = nn.ModuleList(linears) + + #if activation == 'silu': + self.act_fun = torch.nn.SiLU() + self.save_act = save_act + self.acts = None + + self.cache_data = None + + self.device = device + self.to(device) + + + def to(self, device): + super(MLP, self).to(device) + self.device = device + + return self + + + def get_act(self, x=None): + if isinstance(x, dict): + x = x['train_input'] + if x == None: + if self.cache_data != None: + x = self.cache_data + else: + raise Exception("missing input data x") + save_act = self.save_act + self.save_act = True + self.forward(x) + self.save_act = save_act + + @property + def w(self): + return [self.linears[l].weight for l in range(self.depth)] + + def forward(self, x): + + # cache data + self.cache_data = x + + self.acts = [] + self.acts_scale = [] + self.wa_forward = [] + self.a_forward = [] + + for i in range(self.depth): + + if self.save_act: + act = x.clone() + act_scale = torch.std(x, dim=0) + wa_forward = act_scale[None, :] * self.linears[i].weight + self.acts.append(act) + if i > 0: + self.acts_scale.append(act_scale) + self.wa_forward.append(wa_forward) + + x = self.linears[i](x) + if i < self.depth - 1: + x = self.act_fun(x) + else: + if self.save_act: + act_scale = torch.std(x, dim=0) + self.acts_scale.append(act_scale) + + return x + + def attribute(self): + if self.acts == None: + self.get_act() + + node_scores = [] + edge_scores = [] + + # back propagate from the last 
layer + node_score = torch.ones(self.width[-1]).requires_grad_(True).to(self.device) + node_scores.append(node_score) + + for l in range(self.depth,0,-1): + + edge_score = torch.einsum('ij,i->ij', torch.abs(self.wa_forward[l-1]), node_score/(self.acts_scale[l-1]+1e-4)) + edge_scores.append(edge_score) + + # this might be improper for MLPs (although reasonable for KANs) + node_score = torch.sum(edge_score, dim=0)/torch.sqrt(torch.tensor(self.width[l-1], device=self.device)) + #print(self.width[l]) + node_scores.append(node_score) + + self.node_scores = list(reversed(node_scores)) + self.edge_scores = list(reversed(edge_scores)) + self.wa_backward = self.edge_scores + + def plot(self, beta=3, scale=1., metric='w'): + # metric = 'w', 'act' or 'fa' + + if metric == 'fa': + self.attribute() + + depth = self.depth + y0 = 0.5 + fig, ax = plt.subplots(figsize=(3*scale,3*y0*depth*scale)) + shp = self.width + + min_spacing = 1/max(self.width) + for j in range(len(shp)): + N = shp[j] + for i in range(N): + plt.scatter(1 / (2 * N) + i / N, j * y0, s=min_spacing ** 2 * 5000 * scale ** 2, color='black') + + plt.ylim(-0.1*y0,y0*depth+0.1*y0) + plt.xlim(-0.02,1.02) + + linears = self.linears + + for ii in range(len(linears)): + linear = linears[ii] + p = linear.weight + p_shp = p.shape + + if metric == 'w': + pass + elif metric == 'act': + p = self.wa_forward[ii] + elif metric == 'fa': + p = self.wa_backward[ii] + else: + raise Exception('metric = \'{}\' not recognized. 
Choices are \'w\', \'act\', \'fa\'.'.format(metric)) + for i in range(p_shp[0]): + for j in range(p_shp[1]): + plt.plot([1/(2*p_shp[0])+i/p_shp[0], 1/(2*p_shp[1])+j/p_shp[1]], [y0*(ii+1),y0*ii], lw=0.5*scale, alpha=np.tanh(beta*np.abs(p[i,j].cpu().detach().numpy())), color="blue" if p[i,j]>0 else "red") + + ax.axis('off') + + def reg(self, reg_metric, lamb_l1, lamb_entropy): + + if reg_metric == 'w': + acts_scale = self.w + if reg_metric == 'act': + acts_scale = self.wa_forward + if reg_metric == 'fa': + acts_scale = self.wa_backward + if reg_metric == 'a': + acts_scale = self.acts_scale + + if len(acts_scale[0].shape) == 2: + reg_ = 0. + + for i in range(len(acts_scale)): + vec = acts_scale[i] + vec = torch.abs(vec) + + l1 = torch.sum(vec) + p_row = vec / (torch.sum(vec, dim=1, keepdim=True) + 1) + p_col = vec / (torch.sum(vec, dim=0, keepdim=True) + 1) + entropy_row = - torch.mean(torch.sum(p_row * torch.log2(p_row + 1e-4), dim=1)) + entropy_col = - torch.mean(torch.sum(p_col * torch.log2(p_col + 1e-4), dim=0)) + reg_ += lamb_l1 * l1 + lamb_entropy * (entropy_row + entropy_col) + + elif len(acts_scale[0].shape) == 1: + + reg_ = 0. + + for i in range(len(acts_scale)): + vec = acts_scale[i] + vec = torch.abs(vec) + + l1 = torch.sum(vec) + p = vec / (torch.sum(vec) + 1) + entropy = - torch.sum(p * torch.log2(p + 1e-4)) + reg_ += lamb_l1 * l1 + lamb_entropy * entropy + + return reg_ + + def get_reg(self, reg_metric, lamb_l1, lamb_entropy): + return self.reg(reg_metric, lamb_l1, lamb_entropy) + + def fit(self, dataset, opt="LBFGS", steps=100, log=1, lamb=0., lamb_l1=1., lamb_entropy=2., loss_fn=None, lr=1., batch=-1, + metrics=None, in_vars=None, out_vars=None, beta=3, device='cpu', reg_metric='w', display_metrics=None): + + if lamb > 0. and not self.save_act: + print('setting lamb=0. 
If you want to set lamb > 0, set =True') + + old_save_act = self.save_act + if lamb == 0.: + self.save_act = False + + pbar = tqdm(range(steps), desc='description', ncols=100) + + if loss_fn == None: + loss_fn = loss_fn_eval = lambda x, y: torch.mean((x - y) ** 2) + else: + loss_fn = loss_fn_eval = loss_fn + + if opt == "Adam": + optimizer = torch.optim.Adam(self.parameters(), lr=lr) + elif opt == "LBFGS": + optimizer = LBFGS(self.parameters(), lr=lr, history_size=10, line_search_fn="strong_wolfe", tolerance_grad=1e-32, tolerance_change=1e-32, tolerance_ys=1e-32) + + results = {} + results['train_loss'] = [] + results['test_loss'] = [] + results['reg'] = [] + if metrics != None: + for i in range(len(metrics)): + results[metrics[i].__name__] = [] + + if batch == -1 or batch > dataset['train_input'].shape[0]: + batch_size = dataset['train_input'].shape[0] + batch_size_test = dataset['test_input'].shape[0] + else: + batch_size = batch + batch_size_test = batch + + global train_loss, reg_ + + def closure(): + global train_loss, reg_ + optimizer.zero_grad() + pred = self.forward(dataset['train_input'][train_id].to(self.device)) + train_loss = loss_fn(pred, dataset['train_label'][train_id].to(self.device)) + if self.save_act: + if reg_metric == 'fa': + self.attribute() + reg_ = self.get_reg(reg_metric, lamb_l1, lamb_entropy) + else: + reg_ = torch.tensor(0.) 
+ objective = train_loss + lamb * reg_ + objective.backward() + return objective + + for _ in pbar: + + if _ == steps-1 and old_save_act: + self.save_act = True + + train_id = np.random.choice(dataset['train_input'].shape[0], batch_size, replace=False) + test_id = np.random.choice(dataset['test_input'].shape[0], batch_size_test, replace=False) + + if opt == "LBFGS": + optimizer.step(closure) + + if opt == "Adam": + pred = self.forward(dataset['train_input'][train_id].to(self.device)) + train_loss = loss_fn(pred, dataset['train_label'][train_id].to(self.device)) + if self.save_act: + reg_ = self.get_reg(reg_metric, lamb_l1, lamb_entropy) + else: + reg_ = torch.tensor(0.) + loss = train_loss + lamb * reg_ + optimizer.zero_grad() + loss.backward() + optimizer.step() + + test_loss = loss_fn_eval(self.forward(dataset['test_input'][test_id].to(self.device)), dataset['test_label'][test_id].to(self.device)) + + + if metrics != None: + for i in range(len(metrics)): + results[metrics[i].__name__].append(metrics[i]().item()) + + results['train_loss'].append(torch.sqrt(train_loss).cpu().detach().numpy()) + results['test_loss'].append(torch.sqrt(test_loss).cpu().detach().numpy()) + results['reg'].append(reg_.cpu().detach().numpy()) + + if _ % log == 0: + if display_metrics == None: + pbar.set_description("| train_loss: %.2e | test_loss: %.2e | reg: %.2e | " % (torch.sqrt(train_loss).cpu().detach().numpy(), torch.sqrt(test_loss).cpu().detach().numpy(), reg_.cpu().detach().numpy())) + else: + string = '' + data = () + for metric in display_metrics: + string += f' {metric}: %.2e |' + try: + results[metric] + except: + raise Exception(f'{metric} not recognized') + data += (results[metric][-1],) + pbar.set_description(string % data) + + return results + + @property + def connection_cost(self): + + with torch.no_grad(): + cc = 0. 
+ for linear in self.linears: + t = torch.abs(linear.weight) + def get_coordinate(n): + return torch.linspace(0,1,steps=n+1, device=self.device)[:n] + 1/(2*n) + + in_dim = t.shape[0] + x_in = get_coordinate(in_dim) + + out_dim = t.shape[1] + x_out = get_coordinate(out_dim) + + dist = torch.abs(x_in[:,None] - x_out[None,:]) + cc += torch.sum(dist * t) + + return cc + + def swap(self, l, i1, i2): + + def swap_row(data, i1, i2): + data[i1], data[i2] = data[i2].clone(), data[i1].clone() + + def swap_col(data, i1, i2): + data[:,i1], data[:,i2] = data[:,i2].clone(), data[:,i1].clone() + + swap_row(self.linears[l-1].weight.data, i1, i2) + swap_row(self.linears[l-1].bias.data, i1, i2) + swap_col(self.linears[l].weight.data, i1, i2) + + def auto_swap_l(self, l): + + num = self.width[l] + for i in range(num): + ccs = [] + for j in range(num): + self.swap(l,i,j) + self.get_act() + self.attribute() + cc = self.connection_cost.detach().clone() + ccs.append(cc) + self.swap(l,i,j) + j = torch.argmin(torch.tensor(ccs)) + self.swap(l,i,j) + + def auto_swap(self): + depth = self.depth + for l in range(1, depth): + self.auto_swap_l(l) + + def tree(self, x=None, in_var=None, style='tree', sym_th=1e-3, sep_th=1e-1, skip_sep_test=False, verbose=False): + if x == None: + x = self.cache_data + plot_tree(self, x, in_var=in_var, style=style, sym_th=sym_th, sep_th=sep_th, skip_sep_test=skip_sep_test, verbose=verbose) \ No newline at end of file diff --git a/dl/kan/kan/MultKAN.py b/dl/kan/kan/MultKAN.py new file mode 100644 index 000000000..37f3e5820 --- /dev/null +++ b/dl/kan/kan/MultKAN.py @@ -0,0 +1,2805 @@ +import torch +import torch.nn as nn +import numpy as np +from .KANLayer import KANLayer +#from .Symbolic_MultKANLayer import * +from .Symbolic_KANLayer import Symbolic_KANLayer +from .LBFGS import * +import os +import glob +import matplotlib.pyplot as plt +from tqdm import tqdm +import random +import copy +#from .MultKANLayer import MultKANLayer +import pandas as pd +from 
sympy.printing import latex +from sympy import * +import sympy +import yaml +from .spline import curve2coef +from .utils import SYMBOLIC_LIB +from .hypothesis import plot_tree + +class MultKAN(nn.Module): + ''' + KAN class + + Attributes: + ----------- + grid : int + the number of grid intervals + k : int + spline order + act_fun : a list of KANLayers + symbolic_fun: a list of Symbolic_KANLayer + depth : int + depth of KAN + width : list + number of neurons in each layer. + Without multiplication nodes, [2,5,5,3] means 2D inputs, 3D outputs, with 2 layers of 5 hidden neurons. + With multiplication nodes, [2,[5,3],[5,1],3] means besides the [2,5,53] KAN, there are 3 (1) mul nodes in layer 1 (2). + mult_arity : int, or list of int lists + multiplication arity for each multiplication node (the number of numbers to be multiplied) + grid : int + the number of grid intervals + k : int + the order of piecewise polynomial + base_fun : fun + residual function b(x). an activation function phi(x) = sb_scale * b(x) + sp_scale * spline(x) + symbolic_fun : a list of Symbolic_KANLayer + Symbolic_KANLayers + symbolic_enabled : bool + If False, the symbolic front is not computed (to save time). Default: True. 
+ width_in : list + The number of input neurons for each layer + width_out : list + The number of output neurons for each layer + base_fun_name : str + The base function b(x) + grip_eps : float + The parameter that interpolates between uniform grid and adaptive grid (based on sample quantile) + node_bias : a list of 1D torch.float + node_scale : a list of 1D torch.float + subnode_bias : a list of 1D torch.float + subnode_scale : a list of 1D torch.float + symbolic_enabled : bool + when symbolic_enabled = False, the symbolic branch (symbolic_fun) will be ignored in computation (set to zero) + affine_trainable : bool + indicate whether affine parameters are trainable (node_bias, node_scale, subnode_bias, subnode_scale) + sp_trainable : bool + indicate whether the overall magnitude of splines is trainable + sb_trainable : bool + indicate whether the overall magnitude of base function is trainable + save_act : bool + indicate whether intermediate activations are saved in forward pass + node_scores : None or list of 1D torch.float + node attribution score + edge_scores : None or list of 2D torch.float + edge attribution score + subnode_scores : None or list of 1D torch.float + subnode attribution score + cache_data : None or 2D torch.float + cached input data + acts : None or a list of 2D torch.float + activations on nodes + auto_save : bool + indicate whether to automatically save a checkpoint once the model is modified + state_id : int + the state of the model (used to save checkpoint) + ckpt_path : str + the folder to store checkpoints + round : int + the number of times rewind() has been called + device : str + ''' + def __init__(self, width=None, grid=3, k=3, mult_arity = 2, noise_scale=0.3, scale_base_mu=0.0, scale_base_sigma=1.0, base_fun='silu', symbolic_enabled=True, affine_trainable=False, grid_eps=0.02, grid_range=[-1, 1], sp_trainable=True, sb_trainable=True, seed=1, save_act=True, sparse_init=False, auto_save=True, first_init=True, ckpt_path='./model', 
state_id=0, round=0, device='cpu'): + ''' + initalize a KAN model + + Args: + ----- + width : list of int + Without multiplication nodes: :math:`[n_0, n_1, .., n_{L-1}]` specify the number of neurons in each layer (including inputs/outputs) + With multiplication nodes: :math:`[[n_0,m_0=0], [n_1,m_1], .., [n_{L-1},m_{L-1}]]` specify the number of addition/multiplication nodes in each layer (including inputs/outputs) + grid : int + number of grid intervals. Default: 3. + k : int + order of piecewise polynomial. Default: 3. + mult_arity : int, or list of int lists + multiplication arity for each multiplication node (the number of numbers to be multiplied) + noise_scale : float + initial injected noise to spline. + base_fun : str + the residual function b(x). Default: 'silu' + symbolic_enabled : bool + compute (True) or skip (False) symbolic computations (for efficiency). By default: True. + affine_trainable : bool + affine parameters are updated or not. Affine parameters include node_scale, node_bias, subnode_scale, subnode_bias + grid_eps : float + When grid_eps = 1, the grid is uniform; when grid_eps = 0, the grid is partitioned using percentiles of samples. 0 < grid_eps < 1 interpolates between the two extremes. + grid_range : list/np.array of shape (2,)) + setting the range of grids. Default: [-1,1]. This argument is not important if fit(update_grid=True) (by default updata_grid=True) + sp_trainable : bool + If true, scale_sp is trainable. Default: True. + sb_trainable : bool + If true, scale_base is trainable. Default: True. + device : str + device + seed : int + random seed + save_act : bool + indicate whether intermediate activations are saved in forward pass + sparse_init : bool + sparse initialization (True) or normal dense initialization. Default: False. 
+ auto_save : bool + indicate whether to automatically save a checkpoint once the model is modified + state_id : int + the state of the model (used to save checkpoint) + ckpt_path : str + the folder to store checkpoints. Default: './model' + round : int + the number of times rewind() has been called + device : str + + Returns: + -------- + self + + Example + ------- + >>> from kan import * + >>> model = KAN(width=[2,5,1], grid=5, k=3, seed=0) + checkpoint directory created: ./model + saving model version 0.0 + ''' + super(MultKAN, self).__init__() + + torch.manual_seed(seed) + np.random.seed(seed) + random.seed(seed) + + ### initializeing the numerical front ### + + self.act_fun = [] + self.depth = len(width) - 1 + + #print('haha1', width) + for i in range(len(width)): + #print(type(width[i]), type(width[i]) == int) + if type(width[i]) == int or type(width[i]) == np.int64: + width[i] = [width[i],0] + + #print('haha2', width) + + self.width = width + + # if mult_arity is just a scalar, we extend it to a list of lists + # e.g, mult_arity = [[2,3],[4]] means that in the first hidden layer, 2 mult ops have arity 2 and 3, respectively; + # in the second hidden layer, 1 mult op has arity 4. + if isinstance(mult_arity, int): + self.mult_homo = True # when homo is True, parallelization is possible + else: + self.mult_homo = False # when home if False, for loop is required. + self.mult_arity = mult_arity + + width_in = self.width_in + width_out = self.width_out + + self.base_fun_name = base_fun + if base_fun == 'silu': + base_fun = torch.nn.SiLU() + elif base_fun == 'identity': + base_fun = torch.nn.Identity() + elif base_fun == 'zero': + base_fun = lambda x: x*0. 
+ + self.grid_eps = grid_eps + self.grid_range = grid_range + + + for l in range(self.depth): + # splines + if isinstance(grid, list): + grid_l = grid[l] + else: + grid_l = grid + + if isinstance(k, list): + k_l = k[l] + else: + k_l = k + + + sp_batch = KANLayer(in_dim=width_in[l], out_dim=width_out[l+1], num=grid_l, k=k_l, noise_scale=noise_scale, scale_base_mu=scale_base_mu, scale_base_sigma=scale_base_sigma, scale_sp=1., base_fun=base_fun, grid_eps=grid_eps, grid_range=grid_range, sp_trainable=sp_trainable, sb_trainable=sb_trainable, sparse_init=sparse_init) + self.act_fun.append(sp_batch) + + self.node_bias = [] + self.node_scale = [] + self.subnode_bias = [] + self.subnode_scale = [] + + globals()['self.node_bias_0'] = torch.nn.Parameter(torch.zeros(3,1)).requires_grad_(False) + exec('self.node_bias_0' + " = torch.nn.Parameter(torch.zeros(3,1)).requires_grad_(False)") + + for l in range(self.depth): + exec(f'self.node_bias_{l} = torch.nn.Parameter(torch.zeros(width_in[l+1])).requires_grad_(affine_trainable)') + exec(f'self.node_scale_{l} = torch.nn.Parameter(torch.ones(width_in[l+1])).requires_grad_(affine_trainable)') + exec(f'self.subnode_bias_{l} = torch.nn.Parameter(torch.zeros(width_out[l+1])).requires_grad_(affine_trainable)') + exec(f'self.subnode_scale_{l} = torch.nn.Parameter(torch.ones(width_out[l+1])).requires_grad_(affine_trainable)') + exec(f'self.node_bias.append(self.node_bias_{l})') + exec(f'self.node_scale.append(self.node_scale_{l})') + exec(f'self.subnode_bias.append(self.subnode_bias_{l})') + exec(f'self.subnode_scale.append(self.subnode_scale_{l})') + + + self.act_fun = nn.ModuleList(self.act_fun) + + self.grid = grid + self.k = k + self.base_fun = base_fun + + ### initializing the symbolic front ### + self.symbolic_fun = [] + for l in range(self.depth): + sb_batch = Symbolic_KANLayer(in_dim=width_in[l], out_dim=width_out[l+1]) + self.symbolic_fun.append(sb_batch) + + self.symbolic_fun = nn.ModuleList(self.symbolic_fun) + 
self.symbolic_enabled = symbolic_enabled + self.affine_trainable = affine_trainable + self.sp_trainable = sp_trainable + self.sb_trainable = sb_trainable + + self.save_act = save_act + + self.node_scores = None + self.edge_scores = None + self.subnode_scores = None + + self.cache_data = None + self.acts = None + + self.auto_save = auto_save + self.state_id = 0 + self.ckpt_path = ckpt_path + self.round = round + + self.device = device + self.to(device) + + if auto_save: + if first_init: + if not os.path.exists(ckpt_path): + # Create the directory + os.makedirs(ckpt_path) + print(f"checkpoint directory created: {ckpt_path}") + print('saving model version 0.0') + + history_path = self.ckpt_path+'/history.txt' + with open(history_path, 'w') as file: + file.write(f'### Round {self.round} ###' + '\n') + file.write('init => 0.0' + '\n') + self.saveckpt(path=self.ckpt_path+'/'+'0.0') + else: + self.state_id = state_id + + self.input_id = torch.arange(self.width_in[0],) + + def to(self, device): + ''' + move the model to device + + Args: + ----- + device : str or device + + Returns: + -------- + self + + Example + ------- + >>> from kan import * + >>> device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + >>> model = KAN(width=[2,5,1], grid=5, k=3, seed=0) + >>> model.to(device) + ''' + super(MultKAN, self).to(device) + self.device = device + + for kanlayer in self.act_fun: + kanlayer.to(device) + + for symbolic_kanlayer in self.symbolic_fun: + symbolic_kanlayer.to(device) + + return self + + @property + def width_in(self): + ''' + The number of input nodes for each layer + ''' + width = self.width + width_in = [width[l][0]+width[l][1] for l in range(len(width))] + return width_in + + @property + def width_out(self): + ''' + The number of output subnodes for each layer + ''' + width = self.width + if self.mult_homo == True: + width_out = [width[l][0]+self.mult_arity*width[l][1] for l in range(len(width))] + else: + width_out = 
[width[l][0]+int(np.sum(self.mult_arity[l])) for l in range(len(width))] + return width_out + + @property + def n_sum(self): + ''' + The number of addition nodes for each layer + ''' + width = self.width + n_sum = [width[l][0] for l in range(1,len(width)-1)] + return n_sum + + @property + def n_mult(self): + ''' + The number of multiplication nodes for each layer + ''' + width = self.width + n_mult = [width[l][1] for l in range(1,len(width)-1)] + return n_mult + + @property + def feature_score(self): + ''' + attribution scores for inputs + ''' + self.attribute() + if self.node_scores == None: + return None + else: + return self.node_scores[0] + + def initialize_from_another_model(self, another_model, x): + ''' + initialize from another model of the same width, but their 'grid' parameter can be different. + Note this is equivalent to refine() when we don't want to keep another_model + + Args: + ----- + another_model : MultKAN + x : 2D torch.float + + Returns: + -------- + self + + Example + ------- + >>> from kan import * + >>> model1 = KAN(width=[2,5,1], grid=3) + >>> model2 = KAN(width=[2,5,1], grid=10) + >>> x = torch.rand(100,2) + >>> model2.initialize_from_another_model(model1, x) + ''' + another_model(x) # get activations + batch = x.shape[0] + + self.initialize_grid_from_another_model(another_model, x) + + for l in range(self.depth): + spb = self.act_fun[l] + #spb_parent = another_model.act_fun[l] + + # spb = spb_parent + preacts = another_model.spline_preacts[l] + postsplines = another_model.spline_postsplines[l] + self.act_fun[l].coef.data = curve2coef(preacts[:,0,:], postsplines.permute(0,2,1), spb.grid, k=spb.k) + self.act_fun[l].scale_base.data = another_model.act_fun[l].scale_base.data + self.act_fun[l].scale_sp.data = another_model.act_fun[l].scale_sp.data + self.act_fun[l].mask.data = another_model.act_fun[l].mask.data + + for l in range(self.depth): + self.node_bias[l].data = another_model.node_bias[l].data + self.node_scale[l].data = 
another_model.node_scale[l].data + + self.subnode_bias[l].data = another_model.subnode_bias[l].data + self.subnode_scale[l].data = another_model.subnode_scale[l].data + + for l in range(self.depth): + self.symbolic_fun[l] = another_model.symbolic_fun[l] + + return self.to(self.device) + + def log_history(self, method_name): + + if self.auto_save: + + # save to log file + #print(func.__name__) + with open(self.ckpt_path+'/history.txt', 'a') as file: + file.write(str(self.round)+'.'+str(self.state_id)+' => '+ method_name + ' => ' + str(self.round)+'.'+str(self.state_id+1) + '\n') + + # update state_id + self.state_id += 1 + + # save to ckpt + self.saveckpt(path=self.ckpt_path+'/'+str(self.round)+'.'+str(self.state_id)) + print('saving model version '+str(self.round)+'.'+str(self.state_id)) + + + def refine(self, new_grid): + ''' + grid refinement + + Args: + ----- + new_grid : init + the number of grid intervals after refinement + + Returns: + -------- + a refined model : MultKAN + + Example + ------- + >>> from kan import * + >>> device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + >>> model = KAN(width=[2,5,1], grid=5, k=3, seed=0) + >>> print(model.grid) + >>> x = torch.rand(100,2) + >>> model.get_act(x) + >>> model = model.refine(10) + >>> print(model.grid) + checkpoint directory created: ./model + saving model version 0.0 + 5 + saving model version 0.1 + 10 + ''' + + model_new = MultKAN(width=self.width, + grid=new_grid, + k=self.k, + mult_arity=self.mult_arity, + base_fun=self.base_fun_name, + symbolic_enabled=self.symbolic_enabled, + affine_trainable=self.affine_trainable, + grid_eps=self.grid_eps, + grid_range=self.grid_range, + sp_trainable=self.sp_trainable, + sb_trainable=self.sb_trainable, + ckpt_path=self.ckpt_path, + auto_save=True, + first_init=False, + state_id=self.state_id, + round=self.round, + device=self.device) + + model_new.initialize_from_another_model(self, self.cache_data) + model_new.cache_data = self.cache_data + 
model_new.grid = new_grid + + self.log_history('refine') + model_new.state_id += 1 + + return model_new.to(self.device) + + + def saveckpt(self, path='model'): + ''' + save the current model to files (configuration file and state file) + + Args: + ----- + path : str + the path where checkpoints are saved + + Returns: + -------- + None + + Example + ------- + >>> from kan import * + >>> device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + >>> model = KAN(width=[2,5,1], grid=5, k=3, seed=0) + >>> model.saveckpt('./mark') + # There will be three files appearing in the current folder: mark_cache_data, mark_config.yml, mark_state + ''' + + model = self + + dic = dict( + width = model.width, + grid = model.grid, + k = model.k, + mult_arity = model.mult_arity, + base_fun_name = model.base_fun_name, + symbolic_enabled = model.symbolic_enabled, + affine_trainable = model.affine_trainable, + grid_eps = model.grid_eps, + grid_range = model.grid_range, + sp_trainable = model.sp_trainable, + sb_trainable = model.sb_trainable, + state_id = model.state_id, + auto_save = model.auto_save, + ckpt_path = model.ckpt_path, + round = model.round, + device = str(model.device) + ) + + for i in range (model.depth): + dic[f'symbolic.funs_name.{i}'] = model.symbolic_fun[i].funs_name + + with open(f'{path}_config.yml', 'w') as outfile: + yaml.dump(dic, outfile, default_flow_style=False) + + torch.save(model.state_dict(), f'{path}_state') + torch.save(model.cache_data, f'{path}_cache_data') + + @staticmethod + def loadckpt(path='model'): + ''' + load checkpoint from path + + Args: + ----- + path : str + the path where checkpoints are saved + + Returns: + -------- + MultKAN + + Example + ------- + >>> from kan import * + >>> model = KAN(width=[2,5,1], grid=5, k=3, seed=0) + >>> model.saveckpt('./mark') + >>> KAN.loadckpt('./mark') + ''' + with open(f'{path}_config.yml', 'r') as stream: + config = yaml.safe_load(stream) + + state = torch.load(f'{path}_state') + + model_load = 
MultKAN(width=config['width'], + grid=config['grid'], + k=config['k'], + mult_arity = config['mult_arity'], + base_fun=config['base_fun_name'], + symbolic_enabled=config['symbolic_enabled'], + affine_trainable=config['affine_trainable'], + grid_eps=config['grid_eps'], + grid_range=config['grid_range'], + sp_trainable=config['sp_trainable'], + sb_trainable=config['sb_trainable'], + state_id=config['state_id'], + auto_save=config['auto_save'], + first_init=False, + ckpt_path=config['ckpt_path'], + round = config['round']+1, + device = config['device']) + + model_load.load_state_dict(state) + model_load.cache_data = torch.load(f'{path}_cache_data') + + depth = len(model_load.width) - 1 + for l in range(depth): + out_dim = model_load.symbolic_fun[l].out_dim + in_dim = model_load.symbolic_fun[l].in_dim + funs_name = config[f'symbolic.funs_name.{l}'] + for j in range(out_dim): + for i in range(in_dim): + fun_name = funs_name[j][i] + model_load.symbolic_fun[l].funs_name[j][i] = fun_name + model_load.symbolic_fun[l].funs[j][i] = SYMBOLIC_LIB[fun_name][0] + model_load.symbolic_fun[l].funs_sympy[j][i] = SYMBOLIC_LIB[fun_name][1] + model_load.symbolic_fun[l].funs_avoid_singularity[j][i] = SYMBOLIC_LIB[fun_name][3] + return model_load + + def copy(self): + ''' + deepcopy + + Args: + ----- + path : str + the path where checkpoints are saved + + Returns: + -------- + MultKAN + + Example + ------- + >>> from kan import * + >>> model = KAN(width=[1,1], grid=5, k=3, seed=0) + >>> model2 = model.copy() + >>> model2.act_fun[0].coef.data *= 2 + >>> print(model2.act_fun[0].coef.data) + >>> print(model.act_fun[0].coef.data) + ''' + path='copy_temp' + self.saveckpt(path) + return KAN.loadckpt(path) + + def rewind(self, model_id): + ''' + rewind to an old version + + Args: + ----- + model_id : str + in format '{a}.{b}' where a is the round number, b is the version number in that round + + Returns: + -------- + MultKAN + + Example + ------- + Please refer to tutorials. 
API 12: Checkpoint, save & load model + ''' + self.round += 1 + self.state_id = model_id.split('.')[-1] + + history_path = self.ckpt_path+'/history.txt' + with open(history_path, 'a') as file: + file.write(f'### Round {self.round} ###' + '\n') + + self.saveckpt(path=self.ckpt_path+'/'+f'{self.round}.{self.state_id}') + + print('rewind to model version '+f'{self.round-1}.{self.state_id}'+', renamed as '+f'{self.round}.{self.state_id}') + + return MultKAN.loadckpt(path=self.ckpt_path+'/'+str(model_id)) + + + def checkout(self, model_id): + ''' + check out an old version + + Args: + ----- + model_id : str + in format '{a}.{b}' where a is the round number, b is the version number in that round + + Returns: + -------- + MultKAN + + Example + ------- + Same use as rewind, although checkout doesn't change states + ''' + return MultKAN.loadckpt(path=self.ckpt_path+'/'+str(model_id)) + + def update_grid_from_samples(self, x): + ''' + update grid from samples + + Args: + ----- + x : 2D torch.tensor + inputs + + Returns: + -------- + None + + Example + ------- + >>> from kan import * + >>> model = KAN(width=[1,1], grid=5, k=3, seed=0) + >>> print(model.act_fun[0].grid) + >>> x = torch.linspace(-10,10,steps=101)[:,None] + >>> model.update_grid_from_samples(x) + >>> print(model.act_fun[0].grid) + ''' + for l in range(self.depth): + self.get_act(x) + self.act_fun[l].update_grid_from_samples(self.acts[l]) + + def update_grid(self, x): + ''' + call update_grid_from_samples. 
This seems unnecessary but we retain it for the sake of classes that might inherit from MultKAN + ''' + self.update_grid_from_samples(x) + + def initialize_grid_from_another_model(self, model, x): + ''' + initialize grid from another model + + Args: + ----- + model : MultKAN + parent model + x : 2D torch.tensor + inputs + + Returns: + -------- + None + + Example + ------- + >>> from kan import * + >>> model = KAN(width=[1,1], grid=5, k=3, seed=0) + >>> print(model.act_fun[0].grid) + >>> x = torch.linspace(-10,10,steps=101)[:,None] + >>> model2 = KAN(width=[1,1], grid=10, k=3, seed=0) + >>> model2.initialize_grid_from_another_model(model, x) + >>> print(model2.act_fun[0].grid) + ''' + model(x) + for l in range(self.depth): + self.act_fun[l].initialize_grid_from_parent(model.act_fun[l], model.acts[l]) + + def forward(self, x, singularity_avoiding=False, y_th=10.): + ''' + forward pass + + Args: + ----- + x : 2D torch.tensor + inputs + singularity_avoiding : bool + whether to avoid singularity for the symbolic branch + y_th : float + the threshold for singularity + + Returns: + -------- + None + + Example1 + -------- + >>> from kan import * + >>> model = KAN(width=[2,5,1], grid=5, k=3, seed=0) + >>> x = torch.rand(100,2) + >>> model(x).shape + + Example2 + -------- + >>> from kan import * + >>> model = KAN(width=[1,1], grid=5, k=3, seed=0) + >>> x = torch.tensor([[1],[-0.01]]) + >>> model.fix_symbolic(0,0,0,'log',fit_params_bool=False) + >>> print(model(x)) + >>> print(model(x, singularity_avoiding=True)) + >>> print(model(x, singularity_avoiding=True, y_th=1.)) + ''' + x = x[:,self.input_id.long()] + assert x.shape[1] == self.width_in[0] + + # cache data + self.cache_data = x + + self.acts = [] # shape ([batch, n0], [batch, n1], ..., [batch, n_L]) + self.acts_premult = [] + self.spline_preacts = [] + self.spline_postsplines = [] + self.spline_postacts = [] + self.acts_scale = [] + self.acts_scale_spline = [] + self.subnode_actscale = [] + self.edge_actscale = [] + # 
self.neurons_scale = [] + + self.acts.append(x) # acts shape: (batch, width[l]) + + for l in range(self.depth): + + x_numerical, preacts, postacts_numerical, postspline = self.act_fun[l](x) + #print(preacts, postacts_numerical, postspline) + + if self.symbolic_enabled == True: + x_symbolic, postacts_symbolic = self.symbolic_fun[l](x, singularity_avoiding=singularity_avoiding, y_th=y_th) + else: + x_symbolic = 0. + postacts_symbolic = 0. + + x = x_numerical + x_symbolic + + if self.save_act: + # save subnode_scale + self.subnode_actscale.append(torch.std(x, dim=0).detach()) + + # subnode affine transform + x = self.subnode_scale[l][None,:] * x + self.subnode_bias[l][None,:] + + if self.save_act: + postacts = postacts_numerical + postacts_symbolic + + # self.neurons_scale.append(torch.mean(torch.abs(x), dim=0)) + #grid_reshape = self.act_fun[l].grid.reshape(self.width_out[l + 1], self.width_in[l], -1) + input_range = torch.std(preacts, dim=0) + 0.1 + output_range_spline = torch.std(postacts_numerical, dim=0) # for training, only penalize the spline part + output_range = torch.std(postacts, dim=0) # for visualization, include the contribution from both spline + symbolic + # save edge_scale + self.edge_actscale.append(output_range) + + self.acts_scale.append((output_range / input_range).detach()) + self.acts_scale_spline.append(output_range_spline / input_range) + self.spline_preacts.append(preacts.detach()) + self.spline_postacts.append(postacts.detach()) + self.spline_postsplines.append(postspline.detach()) + + self.acts_premult.append(x.detach()) + + # multiplication + dim_sum = self.width[l+1][0] + dim_mult = self.width[l+1][1] + + if self.mult_homo == True: + for i in range(self.mult_arity-1): + if i == 0: + x_mult = x[:,dim_sum::self.mult_arity] * x[:,dim_sum+1::self.mult_arity] + else: + x_mult = x_mult * x[:,dim_sum+i+1::self.mult_arity] + + else: + for j in range(dim_mult): + acml_id = dim_sum + np.sum(self.mult_arity[l+1][:j]) + for i in 
range(self.mult_arity[l+1][j]-1): + if i == 0: + x_mult_j = x[:,[acml_id]] * x[:,[acml_id+1]] + else: + x_mult_j = x_mult_j * x[:,[acml_id+i+1]] + + if j == 0: + x_mult = x_mult_j + else: + x_mult = torch.cat([x_mult, x_mult_j], dim=1) + + if self.width[l+1][1] > 0: + x = torch.cat([x[:,:dim_sum], x_mult], dim=1) + + # x = x + self.biases[l].weight + # node affine transform + x = self.node_scale[l][None,:] * x + self.node_bias[l][None,:] + + self.acts.append(x.detach()) + + + return x + + def set_mode(self, l, i, j, mode, mask_n=None): + if mode == "s": + mask_n = 0.; + mask_s = 1. + elif mode == "n": + mask_n = 1.; + mask_s = 0. + elif mode == "sn" or mode == "ns": + if mask_n == None: + mask_n = 1. + else: + mask_n = mask_n + mask_s = 1. + else: + mask_n = 0.; + mask_s = 0. + + self.act_fun[l].mask.data[i][j] = mask_n + self.symbolic_fun[l].mask.data[j,i] = mask_s + + def fix_symbolic(self, l, i, j, fun_name, fit_params_bool=True, a_range=(-10, 10), b_range=(-10, 10), verbose=True, random=False, log_history=True): + ''' + set (l,i,j) activation to be symbolic (specified by fun_name) + + Args: + ----- + l : int + layer index + i : int + input neuron index + j : int + output neuron index + fun_name : str + function name + fit_params_bool : bool + obtaining affine parameters through fitting (True) or setting default values (False) + a_range : tuple + sweeping range of a + b_range : tuple + sweeping range of b + verbose : bool + If True, more information is printed. 
+ random : bool + initialize affine parameteres randomly or as [1,0,1,0] + log_history : bool + indicate whether to log history when the function is called + + Returns: + -------- + None or r2 (coefficient of determination) + + Example 1 + --------- + >>> # when fit_params_bool = False + >>> model = KAN(width=[2,5,1], grid=5, k=3) + >>> model.fix_symbolic(0,1,3,'sin',fit_params_bool=False) + >>> print(model.act_fun[0].mask.reshape(2,5)) + >>> print(model.symbolic_fun[0].mask.reshape(2,5)) + + Example 2 + --------- + >>> # when fit_params_bool = True + >>> model = KAN(width=[2,5,1], grid=5, k=3, noise_scale=1.) + >>> x = torch.normal(0,1,size=(100,2)) + >>> model(x) # obtain activations (otherwise model does not have attributes acts) + >>> model.fix_symbolic(0,1,3,'sin',fit_params_bool=True) + >>> print(model.act_fun[0].mask.reshape(2,5)) + >>> print(model.symbolic_fun[0].mask.reshape(2,5)) + ''' + if not fit_params_bool: + self.symbolic_fun[l].fix_symbolic(i, j, fun_name, verbose=verbose, random=random) + r2 = None + else: + x = self.acts[l][:, i] + mask = self.act_fun[l].mask + y = self.spline_postacts[l][:, j, i] + #y = self.postacts[l][:, j, i] + r2 = self.symbolic_fun[l].fix_symbolic(i, j, fun_name, x, y, a_range=a_range, b_range=b_range, verbose=verbose) + if mask[i,j] == 0: + r2 = - 1e8 + self.set_mode(l, i, j, mode="s") + + if log_history: + self.log_history('fix_symbolic') + return r2 + + def unfix_symbolic(self, l, i, j, log_history=True): + ''' + unfix the (l,i,j) activation function. + ''' + self.set_mode(l, i, j, mode="n") + self.symbolic_fun[l].funs_name[j][i] = "0" + if log_history: + self.log_history('unfix_symbolic') + + def unfix_symbolic_all(self, log_history=True): + ''' + unfix all activation functions. 
+ ''' + for l in range(len(self.width) - 1): + for i in range(self.width_in[l]): + for j in range(self.width_out[l + 1]): + self.unfix_symbolic(l, i, j, log_history) + + def get_range(self, l, i, j, verbose=True): + ''' + Get the input range and output range of the (l,i,j) activation + + Args: + ----- + l : int + layer index + i : int + input neuron index + j : int + output neuron index + + Returns: + -------- + x_min : float + minimum of input + x_max : float + maximum of input + y_min : float + minimum of output + y_max : float + maximum of output + + Example + ------- + >>> model = KAN(width=[2,3,1], grid=5, k=3, noise_scale=1.) + >>> x = torch.normal(0,1,size=(100,2)) + >>> model(x) # do a forward pass to obtain model.acts + >>> model.get_range(0,0,0) + ''' + x = self.spline_preacts[l][:, j, i] + y = self.spline_postacts[l][:, j, i] + x_min = torch.min(x).cpu().detach().numpy() + x_max = torch.max(x).cpu().detach().numpy() + y_min = torch.min(y).cpu().detach().numpy() + y_max = torch.max(y).cpu().detach().numpy() + if verbose: + print('x range: [' + '%.2f' % x_min, ',', '%.2f' % x_max, ']') + print('y range: [' + '%.2f' % y_min, ',', '%.2f' % y_max, ']') + return x_min, x_max, y_min, y_max + + def plot(self, folder="./figures", beta=3, metric='backward', scale=0.5, tick=False, sample=False, in_vars=None, out_vars=None, title=None, varscale=1.0): + ''' + plot KAN + + Args: + ----- + folder : str + the folder to store pngs + beta : float + positive number. control the transparency of each activation. transparency = tanh(beta*l1). + mask : bool + If True, plot with mask (need to run prune() first to obtain mask). If False (by default), plot all activation functions. + mode : bool + "supervised" or "unsupervised". If "supervised", l1 is measured by absolution value (not subtracting mean); if "unsupervised", l1 is measured by standard deviation (subtracting mean). 
+ scale : float + control the size of the diagram + in_vars: None or list of str + the name(s) of input variables + out_vars: None or list of str + the name(s) of output variables + title: None or str + title + varscale : float + the size of input variables + + Returns: + -------- + Figure + + Example + ------- + >>> # see more interactive examples in demos + >>> model = KAN(width=[2,3,1], grid=3, k=3, noise_scale=1.0) + >>> x = torch.normal(0,1,size=(100,2)) + >>> model(x) # do a forward pass to obtain model.acts + >>> model.plot() + ''' + global Symbol + + if not self.save_act: + print('cannot plot since data are not saved. Set save_act=True first.') + + # forward to obtain activations + if self.acts == None: + if self.cache_data == None: + raise Exception('model hasn\'t seen any data yet.') + self.forward(self.cache_data) + + if metric == 'backward': + self.attribute() + + + if not os.path.exists(folder): + os.makedirs(folder) + # matplotlib.use('Agg') + depth = len(self.width) - 1 + for l in range(depth): + w_large = 2.0 + for i in range(self.width_in[l]): + for j in range(self.width_out[l+1]): + rank = torch.argsort(self.acts[l][:, i]) + fig, ax = plt.subplots(figsize=(w_large, w_large)) + + num = rank.shape[0] + + #print(self.width_in[l]) + #print(self.width_out[l+1]) + symbolic_mask = self.symbolic_fun[l].mask[j][i] + numeric_mask = self.act_fun[l].mask[i][j] + if symbolic_mask > 0. and numeric_mask > 0.: + color = 'purple' + alpha_mask = 1 + if symbolic_mask > 0. and numeric_mask == 0.: + color = "red" + alpha_mask = 1 + if symbolic_mask == 0. and numeric_mask > 0.: + color = "black" + alpha_mask = 1 + if symbolic_mask == 0. 
and numeric_mask == 0.: + color = "white" + alpha_mask = 0 + + + if tick == True: + ax.tick_params(axis="y", direction="in", pad=-22, labelsize=50) + ax.tick_params(axis="x", direction="in", pad=-15, labelsize=50) + x_min, x_max, y_min, y_max = self.get_range(l, i, j, verbose=False) + plt.xticks([x_min, x_max], ['%2.f' % x_min, '%2.f' % x_max]) + plt.yticks([y_min, y_max], ['%2.f' % y_min, '%2.f' % y_max]) + else: + plt.xticks([]) + plt.yticks([]) + if alpha_mask == 1: + plt.gca().patch.set_edgecolor('black') + else: + plt.gca().patch.set_edgecolor('white') + plt.gca().patch.set_linewidth(1.5) + # plt.axis('off') + + plt.plot(self.acts[l][:, i][rank].cpu().detach().numpy(), self.spline_postacts[l][:, j, i][rank].cpu().detach().numpy(), color=color, lw=5) + if sample == True: + plt.scatter(self.acts[l][:, i][rank].cpu().detach().numpy(), self.spline_postacts[l][:, j, i][rank].cpu().detach().numpy(), color=color, s=400 * scale ** 2) + plt.gca().spines[:].set_color(color) + + plt.savefig(f'{folder}/sp_{l}_{i}_{j}.png', bbox_inches="tight", dpi=400) + plt.close() + + def score2alpha(score): + return np.tanh(beta * score) + + + if metric == 'forward_n': + scores = self.acts_scale + elif metric == 'forward_u': + scores = self.edge_actscale + elif metric == 'backward': + scores = self.edge_scores + else: + raise Exception(f'metric = \'{metric}\' not recognized') + + alpha = [score2alpha(score.cpu().detach().numpy()) for score in scores] + + # draw skeleton + width = np.array(self.width) + width_in = np.array(self.width_in) + width_out = np.array(self.width_out) + A = 1 + y0 = 0.3 # height: from input to pre-mult + z0 = 0.1 # height: from pre-mult to post-mult (input of next layer) + + neuron_depth = len(width) + min_spacing = A / np.maximum(np.max(width_out), 5) + + max_neuron = np.max(width_out) + max_num_weights = np.max(width_in[:-1] * width_out[1:]) + y1 = 0.4 / np.maximum(max_num_weights, 5) # size (height/width) of 1D function diagrams + y2 = 0.15 / 
np.maximum(max_neuron, 5) # size (height/width) of operations (sum and mult) + + fig, ax = plt.subplots(figsize=(10 * scale, 10 * scale * (neuron_depth - 1) * (y0+z0))) + # fig, ax = plt.subplots(figsize=(5,5*(neuron_depth-1)*y0)) + + # -- Transformation functions + DC_to_FC = ax.transData.transform + FC_to_NFC = fig.transFigure.inverted().transform + # -- Take data coordinates and transform them to normalized figure coordinates + DC_to_NFC = lambda x: FC_to_NFC(DC_to_FC(x)) + + # plot scatters and lines + for l in range(neuron_depth): + + n = width_in[l] + + # scatters + for i in range(n): + plt.scatter(1 / (2 * n) + i / n, l * (y0+z0), s=min_spacing ** 2 * 10000 * scale ** 2, color='black') + + # plot connections (input to pre-mult) + for i in range(n): + if l < neuron_depth - 1: + n_next = width_out[l+1] + N = n * n_next + for j in range(n_next): + id_ = i * n_next + j + + symbol_mask = self.symbolic_fun[l].mask[j][i] + numerical_mask = self.act_fun[l].mask[i][j] + if symbol_mask == 1. and numerical_mask > 0.: + color = 'purple' + alpha_mask = 1. + if symbol_mask == 1. and numerical_mask == 0.: + color = "red" + alpha_mask = 1. + if symbol_mask == 0. and numerical_mask == 1.: + color = "black" + alpha_mask = 1. + if symbol_mask == 0. and numerical_mask == 0.: + color = "white" + alpha_mask = 0. 
+ + plt.plot([1 / (2 * n) + i / n, 1 / (2 * N) + id_ / N], [l * (y0+z0), l * (y0+z0) + y0/2 - y1], color=color, lw=2 * scale, alpha=alpha[l][j][i] * alpha_mask) + plt.plot([1 / (2 * N) + id_ / N, 1 / (2 * n_next) + j / n_next], [l * (y0+z0) + y0/2 + y1, l * (y0+z0)+y0], color=color, lw=2 * scale, alpha=alpha[l][j][i] * alpha_mask) + + + # plot connections (pre-mult to post-mult, post-mult = next-layer input) + if l < neuron_depth - 1: + n_in = width_out[l+1] + n_out = width_in[l+1] + mult_id = 0 + for i in range(n_in): + if i < width[l+1][0]: + j = i + else: + if i == width[l+1][0]: + if isinstance(self.mult_arity,int): + ma = self.mult_arity + else: + ma = self.mult_arity[l+1][mult_id] + current_mult_arity = ma + if current_mult_arity == 0: + mult_id += 1 + if isinstance(self.mult_arity,int): + ma = self.mult_arity + else: + ma = self.mult_arity[l+1][mult_id] + current_mult_arity = ma + j = width[l+1][0] + mult_id + current_mult_arity -= 1 + #j = (i-width[l+1][0])//self.mult_arity + width[l+1][0] + plt.plot([1 / (2 * n_in) + i / n_in, 1 / (2 * n_out) + j / n_out], [l * (y0+z0) + y0, (l+1) * (y0+z0)], color='black', lw=2 * scale) + + + + plt.xlim(0, 1) + plt.ylim(-0.1 * (y0+z0), (neuron_depth - 1 + 0.1) * (y0+z0)) + + + plt.axis('off') + + for l in range(neuron_depth - 1): + # plot splines + n = width_in[l] + for i in range(n): + n_next = width_out[l + 1] + N = n * n_next + for j in range(n_next): + id_ = i * n_next + j + im = plt.imread(f'{folder}/sp_{l}_{i}_{j}.png') + left = DC_to_NFC([1 / (2 * N) + id_ / N - y1, 0])[0] + right = DC_to_NFC([1 / (2 * N) + id_ / N + y1, 0])[0] + bottom = DC_to_NFC([0, l * (y0+z0) + y0/2 - y1])[1] + up = DC_to_NFC([0, l * (y0+z0) + y0/2 + y1])[1] + newax = fig.add_axes([left, bottom, right - left, up - bottom]) + # newax = fig.add_axes([1/(2*N)+id_/N-y1, (l+1/2)*y0-y1, y1, y1], anchor='NE') + newax.imshow(im, alpha=alpha[l][j][i]) + newax.axis('off') + + + # plot sum symbols + N = n = width_out[l+1] + for j in range(n): + id_ = j + 
path = os.path.dirname(os.path.abspath(__file__)) + "/assets/img/sum_symbol.png" + im = plt.imread(path) + left = DC_to_NFC([1 / (2 * N) + id_ / N - y2, 0])[0] + right = DC_to_NFC([1 / (2 * N) + id_ / N + y2, 0])[0] + bottom = DC_to_NFC([0, l * (y0+z0) + y0 - y2])[1] + up = DC_to_NFC([0, l * (y0+z0) + y0 + y2])[1] + newax = fig.add_axes([left, bottom, right - left, up - bottom]) + newax.imshow(im) + newax.axis('off') + + # plot mult symbols + N = n = width_in[l+1] + n_sum = width[l+1][0] + n_mult = width[l+1][1] + for j in range(n_mult): + id_ = j + n_sum + path = os.path.dirname(os.path.abspath(__file__)) + "/assets/img/mult_symbol.png" + im = plt.imread(path) + left = DC_to_NFC([1 / (2 * N) + id_ / N - y2, 0])[0] + right = DC_to_NFC([1 / (2 * N) + id_ / N + y2, 0])[0] + bottom = DC_to_NFC([0, (l+1) * (y0+z0) - y2])[1] + up = DC_to_NFC([0, (l+1) * (y0+z0) + y2])[1] + newax = fig.add_axes([left, bottom, right - left, up - bottom]) + newax.imshow(im) + newax.axis('off') + + if in_vars != None: + n = self.width_in[0] + for i in range(n): + if isinstance(in_vars[i], sympy.Expr): + plt.gcf().get_axes()[0].text(1 / (2 * (n)) + i / (n), -0.1, f'${latex(in_vars[i])}$', fontsize=40 * scale * varscale, horizontalalignment='center', verticalalignment='center') + else: + plt.gcf().get_axes()[0].text(1 / (2 * (n)) + i / (n), -0.1, in_vars[i], fontsize=40 * scale * varscale, horizontalalignment='center', verticalalignment='center') + + + + if out_vars != None: + n = self.width_in[-1] + for i in range(n): + if isinstance(out_vars[i], sympy.Expr): + plt.gcf().get_axes()[0].text(1 / (2 * (n)) + i / (n), (y0+z0) * (len(self.width) - 1) + 0.15, f'${latex(out_vars[i])}$', fontsize=40 * scale * varscale, horizontalalignment='center', verticalalignment='center') + else: + plt.gcf().get_axes()[0].text(1 / (2 * (n)) + i / (n), (y0+z0) * (len(self.width) - 1) + 0.15, out_vars[i], fontsize=40 * scale * varscale, horizontalalignment='center', verticalalignment='center') + + if title != 
def reg(self, reg_metric, lamb_l1, lamb_entropy, lamb_coef, lamb_coefdiff):
    '''
    Compute the regularization term.

    Args:
    -----
    reg_metric : str
        which activation-scale statistic to regularize; one of
        'edge_forward_spline_n', 'edge_forward_spline_u', 'edge_forward_sum',
        'edge_backward', 'node_backward'
    lamb_l1 : float
        l1 penalty strength
    lamb_entropy : float
        entropy penalty strength
    lamb_coef : float
        spline-coefficient magnitude penalty strength
    lamb_coefdiff : float
        spline-coefficient smoothness penalty strength

    Returns:
    --------
    reg_ : torch.float

    Example
    -------
    >>> model = KAN(width=[2,3,1], grid=5, k=3, noise_scale=1.)
    >>> x = torch.rand(100,2)
    >>> model.get_act(x)
    >>> model.reg('edge_forward_spline_n', 1.0, 2.0, 1.0, 1.0)
    '''
    # map each metric name to the attribute holding its per-layer scales;
    # getattr is deferred so only the selected attribute is ever touched
    metric_to_attr = {
        'edge_forward_spline_n': 'acts_scale_spline',
        'edge_forward_sum': 'acts_scale',
        'edge_forward_spline_u': 'edge_actscale',
        'edge_backward': 'edge_scores',
        'node_backward': 'node_attribute_scores',
    }
    if reg_metric not in metric_to_attr:
        raise Exception(f'reg_metric = {reg_metric} not recognized!')
    acts_scale = getattr(self, metric_to_attr[reg_metric])

    reg_ = 0.
    for vec in acts_scale:
        # l1 term plus row/column entropy of the normalized scale matrix
        # (+1 in the denominator and +1e-4 inside log2 guard against zeros)
        l1 = torch.sum(vec)
        p_row = vec / (torch.sum(vec, dim=1, keepdim=True) + 1)
        p_col = vec / (torch.sum(vec, dim=0, keepdim=True) + 1)
        entropy_row = -torch.mean(torch.sum(p_row * torch.log2(p_row + 1e-4), dim=1))
        entropy_col = -torch.mean(torch.sum(p_col * torch.log2(p_col + 1e-4), dim=0))
        reg_ = reg_ + lamb_l1 * l1 + lamb_entropy * (entropy_row + entropy_col)

    # encourage spline coefficients to be small and smooth
    for layer in self.act_fun:
        coeff_l1 = torch.sum(torch.mean(torch.abs(layer.coef), dim=1))
        coeff_diff_l1 = torch.sum(torch.mean(torch.abs(torch.diff(layer.coef)), dim=1))
        reg_ = reg_ + lamb_coef * coeff_l1 + lamb_coefdiff * coeff_diff_l1

    return reg_


def get_reg(self, reg_metric, lamb_l1, lamb_entropy, lamb_coef, lamb_coefdiff):
    '''
    Get regularization. Thin wrapper around reg() so subclasses can override
    get_reg without touching reg itself.
    '''
    return self.reg(reg_metric, lamb_l1, lamb_entropy, lamb_coef, lamb_coefdiff)
+ ''' + return self.reg(reg_metric, lamb_l1, lamb_entropy, lamb_coef, lamb_coefdiff) + + def disable_symbolic_in_fit(self, lamb): + ''' + during fitting, disable symbolic if either is true (lamb = 0, none of symbolic functions is active) + ''' + old_save_act = self.save_act + if lamb == 0.: + self.save_act = False + + # skip symbolic if no symbolic is turned on + depth = len(self.symbolic_fun) + no_symbolic = True + for l in range(depth): + no_symbolic *= torch.sum(torch.abs(self.symbolic_fun[l].mask)) == 0 + + old_symbolic_enabled = self.symbolic_enabled + + if no_symbolic: + self.symbolic_enabled = False + + return old_save_act, old_symbolic_enabled + + def get_params(self): + ''' + Get parameters + ''' + return self.parameters() + + + def fit(self, dataset, opt="LBFGS", steps=100, log=1, lamb=0., lamb_l1=1., lamb_entropy=2., lamb_coef=0., lamb_coefdiff=0., update_grid=True, grid_update_num=10, loss_fn=None, lr=1.,start_grid_update_step=-1, stop_grid_update_step=50, batch=-1, + metrics=None, save_fig=False, in_vars=None, out_vars=None, beta=3, save_fig_freq=1, img_folder='./video', singularity_avoiding=False, y_th=1000., reg_metric='edge_forward_spline_n', display_metrics=None): + ''' + training + + Args: + ----- + dataset : dic + contains dataset['train_input'], dataset['train_label'], dataset['test_input'], dataset['test_label'] + opt : str + "LBFGS" or "Adam" + steps : int + training steps + log : int + logging frequency + lamb : float + overall penalty strength + lamb_l1 : float + l1 penalty strength + lamb_entropy : float + entropy penalty strength + lamb_coef : float + coefficient magnitude penalty strength + lamb_coefdiff : float + difference of nearby coefficits (smoothness) penalty strength + update_grid : bool + If True, update grid regularly before stop_grid_update_step + grid_update_num : int + the number of grid updates before stop_grid_update_step + start_grid_update_step : int + no grid updates before this training step + stop_grid_update_step : 
def fit(self, dataset, opt="LBFGS", steps=100, log=1, lamb=0., lamb_l1=1., lamb_entropy=2., lamb_coef=0., lamb_coefdiff=0., update_grid=True, grid_update_num=10, loss_fn=None, lr=1.,start_grid_update_step=-1, stop_grid_update_step=50, batch=-1,
          metrics=None, save_fig=False, in_vars=None, out_vars=None, beta=3, save_fig_freq=1, img_folder='./video', singularity_avoiding=False, y_th=1000., reg_metric='edge_forward_spline_n', display_metrics=None):
    '''
    training

    Args:
    -----
    dataset : dic
        contains dataset['train_input'], dataset['train_label'], dataset['test_input'], dataset['test_label']
    opt : str
        "LBFGS" or "Adam"
    steps : int
        training steps
    log : int
        logging frequency
    lamb : float
        overall penalty strength
    lamb_l1 : float
        l1 penalty strength
    lamb_entropy : float
        entropy penalty strength
    lamb_coef : float
        coefficient magnitude penalty strength
    lamb_coefdiff : float
        difference of nearby coefficients (smoothness) penalty strength
    update_grid : bool
        If True, update grid regularly before stop_grid_update_step
    grid_update_num : int
        the number of grid updates before stop_grid_update_step
    start_grid_update_step : int
        no grid updates before this training step
    stop_grid_update_step : int
        no grid updates after this training step
    loss_fn : function
        loss function
    lr : float
        learning rate
    batch : int
        batch size, if -1 then full.
    save_fig_freq : int
        save figure every (save_fig_freq) steps
    singularity_avoiding : bool
        indicate whether to avoid singularity for the symbolic part
    y_th : float
        singularity threshold (anything above the threshold is considered singular and is softened in some ways)
    reg_metric : str
        regularization metric. Choose from {'edge_forward_spline_n', 'edge_forward_spline_u', 'edge_forward_sum', 'edge_backward', 'node_backward'}
    metrics : a list of metrics (as functions)
        the metrics to be computed in training
    display_metrics : a list of functions
        the metric to be displayed in tqdm progress bar

    Returns:
    --------
    results : dic
        results['train_loss'], 1D array of training losses (RMSE)
        results['test_loss'], 1D array of test losses (RMSE)
        results['reg'], 1D array of regularization
        other metrics specified in metrics

    Example
    -------
    >>> from kan import *
    >>> model = KAN(width=[2,5,1], grid=5, k=3, noise_scale=0.3, seed=2)
    >>> f = lambda x: torch.exp(torch.sin(torch.pi*x[:,[0]]) + x[:,[1]]**2)
    >>> dataset = create_dataset(f, n_var=2)
    >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001);
    >>> model.plot()
    # Most examples in tutorials involve the fit() method. Please check them for usage.
    '''

    # NOTE(review): message claims lamb is set to 0, but lamb is left
    # unchanged; with save_act == False the closure forces reg_ = 0 anyway,
    # so the net effect is the same.
    if lamb > 0. and not self.save_act:
        print('setting lamb=0. If you want to set lamb > 0, set self.save_act=True')

    # remember flags so they can be restored at the end of training
    old_save_act, old_symbolic_enabled = self.disable_symbolic_in_fit(lamb)

    pbar = tqdm(range(steps), desc='description', ncols=100)

    # default loss: MSE; the same function is used for train and eval
    if loss_fn == None:
        loss_fn = loss_fn_eval = lambda x, y: torch.mean((x - y) ** 2)
    else:
        loss_fn = loss_fn_eval = loss_fn

    grid_update_freq = int(stop_grid_update_step / grid_update_num)

    if opt == "Adam":
        optimizer = torch.optim.Adam(self.get_params(), lr=lr)
    elif opt == "LBFGS":
        optimizer = LBFGS(self.get_params(), lr=lr, history_size=10, line_search_fn="strong_wolfe", tolerance_grad=1e-32, tolerance_change=1e-32, tolerance_ys=1e-32)

    results = {}
    results['train_loss'] = []
    results['test_loss'] = []
    results['reg'] = []
    if metrics != None:
        for i in range(len(metrics)):
            results[metrics[i].__name__] = []

    # batch == -1 (or larger than the dataset) means full-batch training
    if batch == -1 or batch > dataset['train_input'].shape[0]:
        batch_size = dataset['train_input'].shape[0]
        batch_size_test = dataset['test_input'].shape[0]
    else:
        batch_size = batch
        batch_size_test = batch

    # NOTE(review): module-level globals are used to smuggle the losses out
    # of the LBFGS closure; this is not thread/reentrancy safe.
    global train_loss, reg_

    def closure():
        # LBFGS re-evaluates the model several times per step via this closure
        global train_loss, reg_
        optimizer.zero_grad()
        pred = self.forward(dataset['train_input'][train_id], singularity_avoiding=singularity_avoiding, y_th=y_th)
        train_loss = loss_fn(pred, dataset['train_label'][train_id])
        if self.save_act:
            # backward-type metrics need attribution scores recomputed first
            if reg_metric == 'edge_backward':
                self.attribute()
            if reg_metric == 'node_backward':
                self.node_attribute()
            reg_ = self.get_reg(reg_metric, lamb_l1, lamb_entropy, lamb_coef, lamb_coefdiff)
        else:
            reg_ = torch.tensor(0.)
        objective = train_loss + lamb * reg_
        objective.backward()
        return objective

    if save_fig:
        if not os.path.exists(img_folder):
            os.makedirs(img_folder)

    for _ in pbar:

        # re-enable activation caching on the last step so the final state
        # has activations available (e.g. for plotting/pruning)
        if _ == steps-1 and old_save_act:
            self.save_act = True

        if save_fig and _ % save_fig_freq == 0:
            # plotting needs activations; temporarily force caching on
            save_act = self.save_act
            self.save_act = True

        train_id = np.random.choice(dataset['train_input'].shape[0], batch_size, replace=False)
        test_id = np.random.choice(dataset['test_input'].shape[0], batch_size_test, replace=False)

        # periodic grid refinement inside the allowed step window
        if _ % grid_update_freq == 0 and _ < stop_grid_update_step and update_grid and _ >= start_grid_update_step:
            self.update_grid(dataset['train_input'][train_id])

        if opt == "LBFGS":
            optimizer.step(closure)

        if opt == "Adam":
            pred = self.forward(dataset['train_input'][train_id], singularity_avoiding=singularity_avoiding, y_th=y_th)
            train_loss = loss_fn(pred, dataset['train_label'][train_id])
            if self.save_act:
                if reg_metric == 'edge_backward':
                    self.attribute()
                if reg_metric == 'node_backward':
                    self.node_attribute()
                reg_ = self.get_reg(reg_metric, lamb_l1, lamb_entropy, lamb_coef, lamb_coefdiff)
            else:
                reg_ = torch.tensor(0.)
            loss = train_loss + lamb * reg_
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        test_loss = loss_fn_eval(self.forward(dataset['test_input'][test_id]), dataset['test_label'][test_id])

        if metrics != None:
            for i in range(len(metrics)):
                results[metrics[i].__name__].append(metrics[i]().item())

        # losses are reported as RMSE
        results['train_loss'].append(torch.sqrt(train_loss).cpu().detach().numpy())
        results['test_loss'].append(torch.sqrt(test_loss).cpu().detach().numpy())
        results['reg'].append(reg_.cpu().detach().numpy())

        if _ % log == 0:
            if display_metrics == None:
                pbar.set_description("| train_loss: %.2e | test_loss: %.2e | reg: %.2e | " % (torch.sqrt(train_loss).cpu().detach().numpy(), torch.sqrt(test_loss).cpu().detach().numpy(), reg_.cpu().detach().numpy()))
            else:
                string = ''
                data = ()
                for metric in display_metrics:
                    string += f' {metric}: %.2e |'
                    try:
                        results[metric]
                    except:
                        raise Exception(f'{metric} not recognized')
                    data += (results[metric][-1],)
                pbar.set_description(string % data)

        if save_fig and _ % save_fig_freq == 0:
            self.plot(folder=img_folder, in_vars=in_vars, out_vars=out_vars, title="Step {}".format(_), beta=beta)
            plt.savefig(img_folder + '/' + str(_) + '.jpg', bbox_inches='tight', dpi=200)
            plt.close()
            self.save_act = save_act

    self.log_history('fit')
    # revert back to original state
    self.symbolic_enabled = old_symbolic_enabled
    return results
def prune_node(self, threshold=1e-2, mode="auto", active_neurons_id=None, log_history=True):
    '''
    pruning nodes

    Args:
    -----
    threshold : float
        if the attribution score of a neuron is below the threshold, it is considered dead and will be removed
    mode : str
        'auto' or 'manual'. with 'auto', nodes are automatically pruned using threshold. with 'manual', active_neurons_id should be passed in.
    active_neurons_id : None or list
        per-layer lists of neuron indices to keep (manual mode only)
    log_history : bool
        whether to record this operation in the model history

    Returns:
    --------
    pruned network : MultKAN

    Example
    -------
    >>> from kan import *
    >>> model = KAN(width=[2,5,1], grid=5, k=3, noise_scale=0.3, seed=2)
    >>> f = lambda x: torch.exp(torch.sin(torch.pi*x[:,[0]]) + x[:,[1]]**2)
    >>> dataset = create_dataset(f, n_var=2)
    >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001);
    >>> model = model.prune_node()
    >>> model.plot()
    '''
    # activations are needed for attribution scores
    if self.acts == None:
        self.get_act()

    # masks/index lists at node ("up") and subnode ("down") granularity;
    # the input layer is always kept in full
    mask_up = [torch.ones(self.width_in[0], device=self.device)]
    mask_down = []
    active_neurons_up = [list(range(self.width_in[0]))]
    active_neurons_down = []
    num_sums = []
    num_mults = []
    mult_arities = [[]]

    if active_neurons_id != None:
        mode = "manual"

    for i in range(len(self.acts_scale) - 1):

        mult_arity = []

        if mode == "auto":
            # keep nodes whose attribution score exceeds the threshold
            self.attribute()
            overall_important_up = self.node_scores[i+1] > threshold

        elif mode == "manual":
            overall_important_up = torch.zeros(self.width_in[i + 1], dtype=torch.bool, device=self.device)
            overall_important_up[active_neurons_id[i]] = True

        # first width[i+1][0] nodes are sum nodes, the rest are mult nodes
        num_sum = torch.sum(overall_important_up[:self.width[i+1][0]])
        num_mult = torch.sum(overall_important_up[self.width[i+1][0]:])
        if self.mult_homo == True:
            # homogeneous arity: each kept mult node expands to mult_arity subnodes
            overall_important_down = torch.cat([overall_important_up[:self.width[i+1][0]], (overall_important_up[self.width[i+1][0]:][None,:].expand(self.mult_arity,-1)).T.reshape(-1,)], dim=0)
        else:
            # heterogeneous arity: expand each mult node by its own arity
            overall_important_down = overall_important_up[:self.width[i+1][0]]
            for j in range(overall_important_up[self.width[i+1][0]:].shape[0]):
                active_bool = overall_important_up[self.width[i+1][0]+j]
                arity = self.mult_arity[i+1][j]
                overall_important_down = torch.cat([overall_important_down, torch.tensor([active_bool]*arity).to(self.device)])
                if active_bool:
                    mult_arity.append(arity)

        num_sums.append(num_sum.item())
        num_mults.append(num_mult.item())

        mask_up.append(overall_important_up.float())
        mask_down.append(overall_important_down.float())

        active_neurons_up.append(torch.where(overall_important_up == True)[0])
        active_neurons_down.append(torch.where(overall_important_down == True)[0])

        mult_arities.append(mult_arity)

    # output layer is always kept in full
    active_neurons_down.append(list(range(self.width_out[-1])))
    mask_down.append(torch.ones(self.width_out[-1], device=self.device))

    if self.mult_homo == False:
        mult_arities.append(self.mult_arity[-1])

    self.mask_up = mask_up
    self.mask_down = mask_down

    # zero out the masks of pruned nodes in-place on this model
    for l in range(len(self.acts_scale) - 1):
        for i in range(self.width_in[l + 1]):
            if i not in active_neurons_up[l + 1]:
                self.remove_node(l + 1, i, mode='up',log_history=False)

        for i in range(self.width_out[l + 1]):
            if i not in active_neurons_down[l]:
                self.remove_node(l + 1, i, mode='down',log_history=False)

    # build a fresh model and shrink its layers to the surviving neurons
    model2 = MultKAN(copy.deepcopy(self.width), grid=self.grid, k=self.k, base_fun=self.base_fun_name, mult_arity=self.mult_arity, ckpt_path=self.ckpt_path, auto_save=True, first_init=False, state_id=self.state_id, round=self.round).to(self.device)
    model2.load_state_dict(self.state_dict())

    width_new = [self.width[0]]

    for i in range(len(self.acts_scale)):

        if i < len(self.acts_scale) - 1:
            num_sum = num_sums[i]
            num_mult = num_mults[i]
            # keep only the affine parameters of surviving nodes/subnodes
            model2.node_bias[i].data = model2.node_bias[i].data[active_neurons_up[i+1]]
            model2.node_scale[i].data = model2.node_scale[i].data[active_neurons_up[i+1]]
            model2.subnode_bias[i].data = model2.subnode_bias[i].data[active_neurons_down[i]]
            model2.subnode_scale[i].data = model2.subnode_scale[i].data[active_neurons_down[i]]
            model2.width[i+1] = [num_sum, num_mult]

            model2.act_fun[i].out_dim_sum = num_sum
            model2.act_fun[i].out_dim_mult = num_mult

            model2.symbolic_fun[i].out_dim_sum = num_sum
            model2.symbolic_fun[i].out_dim_mult = num_mult

            width_new.append([num_sum, num_mult])

        model2.act_fun[i] = model2.act_fun[i].get_subset(active_neurons_up[i], active_neurons_down[i])
        model2.symbolic_fun[i] = self.symbolic_fun[i].get_subset(active_neurons_up[i], active_neurons_down[i])

    model2.cache_data = self.cache_data
    model2.acts = None

    width_new.append(self.width[-1])
    model2.width = width_new

    if self.mult_homo == False:
        model2.mult_arity = mult_arities

    if log_history:
        self.log_history('prune_node')
    model2.state_id += 1

    return model2
def prune_edge(self, threshold=3e-2, log_history=True):
    '''
    pruning edges

    Args:
    -----
    threshold : float
        if the attribution score of an edge is below the threshold, it is considered dead and will be set to zero.
    log_history : bool
        whether to record this operation in the model history

    Returns:
    --------
    pruned network : MultKAN

    Example
    -------
    >>> from kan import *
    >>> model = KAN(width=[2,5,1], grid=5, k=3, noise_scale=0.3, seed=2)
    >>> f = lambda x: torch.exp(torch.sin(torch.pi*x[:,[0]]) + x[:,[1]]**2)
    >>> dataset = create_dataset(f, n_var=2)
    >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001);
    >>> model = model.prune_edge()
    >>> model.plot()
    '''
    # edge scores require cached activations
    if self.acts == None:
        self.get_act()

    for i in range(len(self.width)-1):
        # keep an edge only if its score passes the threshold AND it was
        # already active (preserve previously pruned edges)
        old_mask = self.act_fun[i].mask.data
        self.act_fun[i].mask.data = ((self.edge_scores[i] > threshold).permute(1,0)*old_mask).float()

    if log_history:
        # fix: was logged as 'fix_symbolic' (copy-paste error)
        self.log_history('prune_edge')

def prune(self, node_th=1e-2, edge_th=3e-2):
    '''
    prune (both nodes and edges)

    Args:
    -----
    node_th : float
        if the attribution score of a node is below node_th, it is considered dead and will be set to zero.
    edge_th : float
        if the attribution score of an edge is below edge_th, it is considered dead and will be set to zero.

    Returns:
    --------
    pruned network : MultKAN

    Example
    -------
    >>> from kan import *
    >>> model = KAN(width=[2,5,1], grid=5, k=3, noise_scale=0.3, seed=2)
    >>> f = lambda x: torch.exp(torch.sin(torch.pi*x[:,[0]]) + x[:,[1]]**2)
    >>> dataset = create_dataset(f, n_var=2)
    >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001);
    >>> model = model.prune()
    >>> model.plot()
    '''
    if self.acts == None:
        self.get_act()

    # prune_node returns a NEW model; rebind self to it, then refresh
    # activations/attributions on the new model before pruning edges
    self = self.prune_node(node_th, log_history=False)
    self.forward(self.cache_data)
    self.attribute()
    self.prune_edge(edge_th, log_history=False)
    self.log_history('prune')
    return self
+ + Returns: + -------- + pruned network : MultKAN + + Example1 + -------- + >>> # automatic + >>> from kan import * + >>> model = KAN(width=[3,5,1], grid=5, k=3, noise_scale=0.3, seed=2) + >>> f = lambda x: 1 * x[:,[0]]**2 + 0.3 * x[:,[1]]**2 + 0.0 * x[:,[2]]**2 + >>> dataset = create_dataset(f, n_var=3) + >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001); + >>> model.plot() + >>> model = model.prune_input() + >>> model.plot() + + Example2 + -------- + >>> # automatic + >>> from kan import * + >>> model = KAN(width=[3,5,1], grid=5, k=3, noise_scale=0.3, seed=2) + >>> f = lambda x: 1 * x[:,[0]]**2 + 0.3 * x[:,[1]]**2 + 0.0 * x[:,[2]]**2 + >>> dataset = create_dataset(f, n_var=3) + >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001); + >>> model.plot() + >>> model = model.prune_input(active_inputs=[0,1]) + >>> model.plot() + ''' + if active_inputs == None: + self.attribute() + input_score = self.node_scores[0] + input_mask = input_score > threshold + print('keep:', input_mask.tolist()) + input_id = torch.where(input_mask==True)[0] + + else: + input_id = torch.tensor(active_inputs, dtype=torch.long).to(self.device) + + model2 = MultKAN(copy.deepcopy(self.width), grid=self.grid, k=self.k, base_fun=self.base_fun, mult_arity=self.mult_arity, ckpt_path=self.ckpt_path, auto_save=True, first_init=False, state_id=self.state_id, round=self.round).to(self.device) + model2.load_state_dict(self.state_dict()) + + model2.act_fun[0] = model2.act_fun[0].get_subset(input_id, torch.arange(self.width_out[1])) + model2.symbolic_fun[0] = self.symbolic_fun[0].get_subset(input_id, torch.arange(self.width_out[1])) + + model2.cache_data = self.cache_data + model2.acts = None + + model2.width[0] = [len(input_id), 0] + model2.input_id = input_id + + if log_history: + self.log_history('prune_input') + model2.state_id += 1 + + return model2 + + def remove_edge(self, l, i, j, log_history=True): + ''' + remove activtion phi(l,i,j) (set its mask to zero) + ''' + 
self.act_fun[l].mask[i][j] = 0. + if log_history: + self.log_history('remove_edge') + + def remove_node(self, l ,i, mode='all', log_history=True): + ''' + remove neuron (l,i) (set the masks of all incoming and outgoing activation functions to zero) + ''' + if mode == 'down': + self.act_fun[l - 1].mask[:, i] = 0. + self.symbolic_fun[l - 1].mask[i, :] *= 0. + + elif mode == 'up': + self.act_fun[l].mask[i, :] = 0. + self.symbolic_fun[l].mask[:, i] *= 0. + + else: + self.remove_node(l, i, mode='up') + self.remove_node(l, i, mode='down') + + if log_history: + self.log_history('remove_node') + + + def attribute(self, l=None, i=None, out_score=None, plot=True): + ''' + get attribution scores + + Args: + ----- + l : None or int + layer index + i : None or int + neuron index + out_score : None or 1D torch.float + specify output scores + plot : bool + when plot = True, display the bar show + + Returns: + -------- + attribution scores + + Example + ------- + >>> from kan import * + >>> model = KAN(width=[3,5,1], grid=5, k=3, noise_scale=0.3, seed=2) + >>> f = lambda x: 1 * x[:,[0]]**2 + 0.3 * x[:,[1]]**2 + 0.0 * x[:,[2]]**2 + >>> dataset = create_dataset(f, n_var=3) + >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001); + >>> model.attribute() + >>> model.feature_score + ''' + # output (out_dim, in_dim) + + if l != None: + self.attribute() + out_score = self.node_scores[l] + + if self.acts == None: + self.get_act() + + def score_node2subnode(node_score, width, mult_arity, out_dim): + + assert np.sum(width) == node_score.shape[1] + if isinstance(mult_arity, int): + n_subnode = width[0] + mult_arity * width[1] + else: + n_subnode = width[0] + int(np.sum(mult_arity)) + + #subnode_score_leaf = torch.zeros(out_dim, n_subnode).requires_grad_(True) + #subnode_score = subnode_score_leaf.clone() + #subnode_score[:,:width[0]] = node_score[:,:width[0]] + subnode_score = node_score[:,:width[0]] + if isinstance(mult_arity, int): + #subnode_score[:,width[0]:] = 
node_score[:,width[0]:][:,:,None].expand(out_dim, node_score[width[0]:].shape[0], mult_arity).reshape(out_dim,-1) + subnode_score = torch.cat([subnode_score, node_score[:,width[0]:][:,:,None].expand(out_dim, node_score[:,width[0]:].shape[1], mult_arity).reshape(out_dim,-1)], dim=1) + else: + acml = width[0] + for i in range(len(mult_arity)): + #subnode_score[:, acml:acml+mult_arity[i]] = node_score[:, width[0]+i] + subnode_score = torch.cat([subnode_score, node_score[:, width[0]+i].expand(out_dim, mult_arity[i])], dim=1) + acml += mult_arity[i] + return subnode_score + + + node_scores = [] + subnode_scores = [] + edge_scores = [] + + l_query = l + if l == None: + l_end = self.depth + else: + l_end = l + + # back propagate from the queried layer + out_dim = self.width_in[l_end] + if out_score == None: + node_score = torch.eye(out_dim).requires_grad_(True) + else: + node_score = torch.diag(out_score).requires_grad_(True) + node_scores.append(node_score) + + device = self.act_fun[0].grid.device + + for l in range(l_end,0,-1): + + # node to subnode + if isinstance(self.mult_arity, int): + subnode_score = score_node2subnode(node_score, self.width[l], self.mult_arity, out_dim=out_dim) + else: + mult_arity = self.mult_arity[l] + #subnode_score = score_node2subnode(node_score, self.width[l], mult_arity) + subnode_score = score_node2subnode(node_score, self.width[l], mult_arity, out_dim=out_dim) + + subnode_scores.append(subnode_score) + # subnode to edge + #print(self.edge_actscale[l-1].device, subnode_score.device, self.subnode_actscale[l-1].device) + edge_score = torch.einsum('ij,ki,i->kij', self.edge_actscale[l-1], subnode_score.to(device), 1/(self.subnode_actscale[l-1]+1e-4)) + edge_scores.append(edge_score) + + # edge to node + node_score = torch.sum(edge_score, dim=1) + node_scores.append(node_score) + + self.node_scores_all = list(reversed(node_scores)) + self.edge_scores_all = list(reversed(edge_scores)) + self.subnode_scores_all = list(reversed(subnode_scores)) + 
+ self.node_scores = [torch.mean(l, dim=0) for l in self.node_scores_all] + self.edge_scores = [torch.mean(l, dim=0) for l in self.edge_scores_all] + self.subnode_scores = [torch.mean(l, dim=0) for l in self.subnode_scores_all] + + # return + if l_query != None: + if i == None: + return self.node_scores_all[0] + else: + + # plot + if plot: + in_dim = self.width_in[0] + plt.figure(figsize=(1*in_dim, 3)) + plt.bar(range(in_dim),self.node_scores_all[0][i].cpu().detach().numpy()) + plt.xticks(range(in_dim)); + + return self.node_scores_all[0][i] + + def node_attribute(self): + self.node_attribute_scores = [] + for l in range(1, self.depth+1): + node_attr = self.attribute(l) + self.node_attribute_scores.append(node_attr) + + def feature_interaction(self, l, neuron_th = 1e-2, feature_th = 1e-2): + ''' + get feature interaction + + Args: + ----- + l : int + layer index + neuron_th : float + threshold to determine whether a neuron is active + feature_th : float + threshold to determine whether a feature is active + + Returns: + -------- + dictionary + + Example + ------- + >>> from kan import * + >>> model = KAN(width=[3,5,1], grid=5, k=3, noise_scale=0.3, seed=2) + >>> f = lambda x: 1 * x[:,[0]]**2 + 0.3 * x[:,[1]]**2 + 0.0 * x[:,[2]]**2 + >>> dataset = create_dataset(f, n_var=3) + >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001); + >>> model.attribute() + >>> model.feature_interaction(1) + ''' + dic = {} + width = self.width_in[l] + + for i in range(width): + score = self.attribute(l,i,plot=False) + + if torch.max(score) > neuron_th: + features = tuple(torch.where(score > torch.max(score) * feature_th)[0].detach().numpy()) + if features in dic.keys(): + dic[features] += 1 + else: + dic[features] = 1 + + return dic + + def suggest_symbolic(self, l, i, j, a_range=(-10, 10), b_range=(-10, 10), lib=None, topk=5, verbose=True, r2_loss_fun=lambda x: np.log2(1+1e-5-x), c_loss_fun=lambda x: x, weight_simple = 0.8): + ''' + suggest symbolic function + + Args: + ----- + l 
def suggest_symbolic(self, l, i, j, a_range=(-10, 10), b_range=(-10, 10), lib=None, topk=5, verbose=True, r2_loss_fun=lambda x: np.log2(1+1e-5-x), c_loss_fun=lambda x: x, weight_simple=0.8):
    '''
    suggest symbolic function

    Args:
    -----
    l : int
        layer index
    i : int
        neuron index in layer l
    j : int
        neuron index in layer l+1
    a_range : tuple
        search range of a
    b_range : tuple
        search range of b
    lib : list of str
        library of candidate symbolic functions
    topk : int
        the number of top functions displayed
    verbose : bool
        if verbose = True, print more information
    r2_loss_fun : function
        function : r2 -> "bits"
    c_loss_fun : function
        function : c -> 'bits'
    weight_simple : float
        the simplicity weight: the higher, more prefer simplicity over performance

    Returns:
    --------
    best_name (str), best_fun (function), best_r2 (float), best_c (float)

    Example
    -------
    >>> from kan import *
    >>> model = KAN(width=[2,1,1], grid=5, k=3, noise_scale=0.0, seed=0)
    >>> f = lambda x: torch.exp(torch.sin(torch.pi*x[:,[0]])+x[:,[1]]**2)
    >>> dataset = create_dataset(f, n_var=3)
    >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001);
    >>> model.suggest_symbolic(0,1,0)
    '''
    r2s = []
    cs = []

    # candidate library: full SYMBOLIC_LIB, or a user-selected subset
    if lib is None:
        symbolic_lib = SYMBOLIC_LIB
    else:
        symbolic_lib = {item: SYMBOLIC_LIB[item] for item in lib}

    # getting r2 and complexities: temporarily fix each candidate on the
    # edge, record its fit, then restore the edge
    for (name, content) in symbolic_lib.items():
        r2 = self.fix_symbolic(l, i, j, name, a_range=a_range, b_range=b_range, verbose=False, log_history=False)
        if r2 == -1e8:  # zero function
            r2s.append(-1e8)
        else:
            r2s.append(r2.item())
        self.unfix_symbolic(l, i, j, log_history=False)
        c = content[2]
        cs.append(c)

    r2s = np.array(r2s)
    cs = np.array(cs)
    r2_loss = r2_loss_fun(r2s).astype('float')
    cs_loss = c_loss_fun(cs)

    # combined objective trading off simplicity against fit quality
    loss = weight_simple * cs_loss + (1 - weight_simple) * r2_loss

    # fix: clamp topk BEFORE it is used for slicing (the original clamped it
    # after; numpy slicing already clamps, so behavior is unchanged, but the
    # intent is now explicit and the redundant double-slicing is gone)
    topk = np.minimum(topk, len(symbolic_lib))

    sorted_ids = np.argsort(loss)[:topk]
    r2s = r2s[sorted_ids]
    cs = cs[sorted_ids]
    r2_loss = r2_loss[sorted_ids]
    cs_loss = cs_loss[sorted_ids]
    loss = loss[sorted_ids]

    if verbose == True:
        # print results in a dataframe
        results = {}
        results['function'] = [list(symbolic_lib.items())[sorted_ids[i]][0] for i in range(topk)]
        results['fitting r2'] = r2s
        results['r2 loss'] = r2_loss
        results['complexity'] = cs
        results['complexity loss'] = cs_loss
        results['total loss'] = loss

        df = pd.DataFrame(results)
        print(df)

    best_name = list(symbolic_lib.items())[sorted_ids[0]][0]
    best_fun = list(symbolic_lib.items())[sorted_ids[0]][1]
    best_r2 = r2s[0]
    best_c = cs[0]

    return best_name, best_fun, best_r2, best_c
and self.act_fun[l].mask[i][j] == 0.: + self.fix_symbolic(l, i, j, '0', verbose=verbose > 1, log_history=False) + print(f'fixing ({l},{i},{j}) with 0') + else: + name, fun, r2, c = self.suggest_symbolic(l, i, j, a_range=a_range, b_range=b_range, lib=lib, verbose=False, weight_simple=weight_simple) + if r2 >= r2_threshold: + self.fix_symbolic(l, i, j, name, verbose=verbose > 1, log_history=False) + if verbose >= 1: + print(f'fixing ({l},{i},{j}) with {name}, r2={r2}, c={c}') + else: + print(f'For ({l},{i},{j}) the best fit was {name}, but r^2 = {r2} and this is lower than {r2_threshold}. This edge was omitted, keep training or try a different threshold.') + + self.log_history('auto_symbolic') + + def symbolic_formula(self, var=None, normalizer=None, output_normalizer = None): + ''' + get symbolic formula + + Args: + ----- + var : None or a list of sympy expression + input variables + normalizer : [mean, std] + output_normalizer : [mean, std] + + Returns: + -------- + None + + Example + ------- + >>> from kan import * + >>> model = KAN(width=[2,1,1], grid=5, k=3, noise_scale=0.0, seed=0) + >>> f = lambda x: torch.exp(torch.sin(torch.pi*x[:,[0]])+x[:,[1]]**2) + >>> dataset = create_dataset(f, n_var=3) + >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001); + >>> model.auto_symbolic() + >>> model.symbolic_formula()[0][0] + ''' + + symbolic_acts = [] + symbolic_acts_premult = [] + x = [] + + def ex_round(ex1, n_digit): + ex2 = ex1 + for a in sympy.preorder_traversal(ex1): + if isinstance(a, sympy.Float): + ex2 = ex2.subs(a, round(a, n_digit)) + return ex2 + + # define variables + if var == None: + for ii in range(1, self.width[0][0] + 1): + exec(f"x{ii} = sympy.Symbol('x_{ii}')") + exec(f"x.append(x{ii})") + elif isinstance(var[0], sympy.Expr): + x = var + else: + x = [sympy.symbols(var_) for var_ in var] + + x0 = x + + if normalizer != None: + mean = normalizer[0] + std = normalizer[1] + x = [(x[i] - mean[i]) / std[i] for i in range(len(x))] + + 
def symbolic_formula(self, var=None, normalizer=None, output_normalizer=None, simplify=False):
    '''
    get symbolic formula

    Args:
    -----
    var : None or a list of sympy expression
        input variables
    normalizer : [mean, std]
        normalization applied to the inputs
    output_normalizer : [mean, std]
        de-normalization applied to the outputs
    simplify : bool
        if True, sympy.simplify each subnode expression (new, backward-
        compatible parameter; default False matches the old behavior —
        previously `simplify` was a free variable that resolved to sympy's
        simplify *function*, so `simplify == True` was always False)

    Returns:
    --------
    (list of output sympy expressions, list of input symbols)

    Example
    -------
    >>> from kan import *
    >>> model = KAN(width=[2,1,1], grid=5, k=3, noise_scale=0.0, seed=0)
    >>> f = lambda x: torch.exp(torch.sin(torch.pi*x[:,[0]])+x[:,[1]]**2)
    >>> dataset = create_dataset(f, n_var=3)
    >>> model.fit(dataset, opt='LBFGS', steps=20, lamb=0.001);
    >>> model.auto_symbolic()
    >>> model.symbolic_formula()[0][0]
    '''

    symbolic_acts = []
    symbolic_acts_premult = []

    # define input variables (replaces the original exec-based construction,
    # which built the same list via dynamically-named locals)
    if var is None:
        x = [sympy.Symbol(f'x_{ii}') for ii in range(1, self.width[0][0] + 1)]
    elif isinstance(var[0], sympy.Expr):
        x = var
    else:
        x = [sympy.symbols(var_) for var_ in var]

    x0 = x

    if normalizer is not None:
        mean = normalizer[0]
        std = normalizer[1]
        x = [(x[i] - mean[i]) / std[i] for i in range(len(x))]

    symbolic_acts.append(x)

    for l in range(len(self.width_in) - 1):
        num_sum = self.width[l + 1][0]
        num_mult = self.width[l + 1][1]
        y = []
        for j in range(self.width_out[l + 1]):
            yj = 0.
            for i in range(self.width_in[l]):
                # each symbolic edge is d + c * f(a*x + b)
                a, b, c, d = self.symbolic_fun[l].affine[j, i]
                sympy_fun = self.symbolic_fun[l].funs_sympy[j][i]
                try:
                    yj += c * sympy_fun(a * x[i] + b) + d
                except:
                    print('make sure all activations need to be converted to symbolic formulas first!')
                    return
            yj = self.subnode_scale[l][j] * yj + self.subnode_bias[l][j]
            if simplify:
                y.append(sympy.simplify(yj))
            else:
                y.append(yj)

        symbolic_acts_premult.append(y)

        # combine subnodes belonging to each mult node by multiplying them.
        # fix: track the subnode offset by the actual arity of each mult
        # node; the original indexed with a hard-coded stride of 2
        # (y[num_sum+2*k]) which mis-indexes whenever an arity != 2
        mult = []
        offset = num_sum
        for k in range(num_mult):
            if isinstance(self.mult_arity, int):
                mult_arity = self.mult_arity
            else:
                mult_arity = self.mult_arity[l+1][k]
            mult_k = y[offset]
            for i in range(1, mult_arity):
                mult_k = mult_k * y[offset + i]
            offset += mult_arity
            mult.append(mult_k)

        y = y[:num_sum] + mult

        # node-level affine transform
        for j in range(self.width_in[l+1]):
            y[j] = self.node_scale[l][j] * y[j] + self.node_bias[l][j]

        x = y
        symbolic_acts.append(x)

    if output_normalizer is not None:
        output_layer = symbolic_acts[-1]
        means = output_normalizer[0]
        stds = output_normalizer[1]

        assert len(output_layer) == len(means), 'output_normalizer does not match the output layer'
        assert len(output_layer) == len(stds), 'output_normalizer does not match the output layer'

        output_layer = [(output_layer[i] * stds[i] + means[i]) for i in range(len(output_layer))]
        symbolic_acts[-1] = output_layer

    self.symbolic_acts = [[symbolic_acts[l][i] for i in range(len(symbolic_acts[l]))] for l in range(len(symbolic_acts))]
    self.symbolic_acts_premult = [[symbolic_acts_premult[l][i] for i in range(len(symbolic_acts_premult[l]))] for l in range(len(symbolic_acts_premult))]

    # (the original had two identical return branches gated on the broken
    # `simplify` comparison; collapsed into one)
    return [symbolic_acts[-1][i] for i in range(len(symbolic_acts[-1]))], x0


def expand_depth(self):
    '''
    expand network depth: append an identity layer to the end. For usage,
    please refer to tutorials interp_3_KAN_compiler.ipynb.

    The new KANLayer is fully masked out; the new Symbolic_KANLayer is fixed
    to 'x' on the diagonal and '0' off-diagonal, so the network's function
    is unchanged.

    Returns:
    --------
    None
    '''
    self.depth += 1

    # add kanlayer, set mask to zero
    dim_out = self.width_in[-1]
    layer = KANLayer(dim_out, dim_out, num=self.grid, k=self.k)
    layer.mask *= 0.
    self.act_fun.append(layer)

    self.width.append([dim_out, 0])
    self.mult_arity.append([])

    # add symbolic_kanlayer, set mask to one. fun = identity on diagonal and zero for off-diagonal
    layer = Symbolic_KANLayer(dim_out, dim_out)
    layer.mask += 1.

    for j in range(dim_out):
        for i in range(dim_out):
            if i == j:
                layer.fix_symbolic(i, j, 'x')
            else:
                layer.fix_symbolic(i, j, '0')

    self.symbolic_fun.append(layer)

    # identity affine parameters for the new layer
    self.node_bias.append(torch.nn.Parameter(torch.zeros(dim_out,device=self.device)).requires_grad_(self.affine_trainable))
    self.node_scale.append(torch.nn.Parameter(torch.ones(dim_out,device=self.device)).requires_grad_(self.affine_trainable))
    self.subnode_bias.append(torch.nn.Parameter(torch.zeros(dim_out,device=self.device)).requires_grad_(self.affine_trainable))
    self.subnode_scale.append(torch.nn.Parameter(torch.ones(dim_out,device=self.device)).requires_grad_(self.affine_trainable))
+ + Args: + ----- + layer_id : int + layer index + n_added_nodes : init + the number of added nodes + sum_bool : bool + if sum_bool == True, added nodes are addition nodes; otherwise multiplication nodes + mult_arity : init + multiplication arity (the number of numbers to be multiplied) + + Returns: + -------- + None + ''' + def _expand(layer_id, n_added_nodes, sum_bool=True, mult_arity=2, added_dim='out'): + l = layer_id + in_dim = self.symbolic_fun[l].in_dim + out_dim = self.symbolic_fun[l].out_dim + if sum_bool: + + if added_dim == 'out': + new = Symbolic_KANLayer(in_dim, out_dim + n_added_nodes) + old = self.symbolic_fun[l] + in_id = np.arange(in_dim) + out_id = np.arange(out_dim + n_added_nodes) + + for j in out_id: + for i in in_id: + new.fix_symbolic(i,j,'0') + new.mask += 1. + + for j in out_id: + for i in in_id: + if j > n_added_nodes-1: + new.funs[j][i] = old.funs[j-n_added_nodes][i] + new.funs_avoid_singularity[j][i] = old.funs_avoid_singularity[j-n_added_nodes][i] + new.funs_sympy[j][i] = old.funs_sympy[j-n_added_nodes][i] + new.funs_name[j][i] = old.funs_name[j-n_added_nodes][i] + new.affine.data[j][i] = old.affine.data[j-n_added_nodes][i] + + self.symbolic_fun[l] = new + self.act_fun[l] = KANLayer(in_dim, out_dim + n_added_nodes, num=self.grid, k=self.k) + self.act_fun[l].mask *= 0. 
+ + self.node_scale[l].data = torch.cat([torch.ones(n_added_nodes, device=self.device), self.node_scale[l].data]) + self.node_bias[l].data = torch.cat([torch.zeros(n_added_nodes, device=self.device), self.node_bias[l].data]) + self.subnode_scale[l].data = torch.cat([torch.ones(n_added_nodes, device=self.device), self.subnode_scale[l].data]) + self.subnode_bias[l].data = torch.cat([torch.zeros(n_added_nodes, device=self.device), self.subnode_bias[l].data]) + + + + if added_dim == 'in': + new = Symbolic_KANLayer(in_dim + n_added_nodes, out_dim) + old = self.symbolic_fun[l] + in_id = np.arange(in_dim + n_added_nodes) + out_id = np.arange(out_dim) + + for j in out_id: + for i in in_id: + new.fix_symbolic(i,j,'0') + new.mask += 1. + + for j in out_id: + for i in in_id: + if i > n_added_nodes-1: + new.funs[j][i] = old.funs[j][i-n_added_nodes] + new.funs_avoid_singularity[j][i] = old.funs_avoid_singularity[j][i-n_added_nodes] + new.funs_sympy[j][i] = old.funs_sympy[j][i-n_added_nodes] + new.funs_name[j][i] = old.funs_name[j][i-n_added_nodes] + new.affine.data[j][i] = old.affine.data[j][i-n_added_nodes] + + self.symbolic_fun[l] = new + self.act_fun[l] = KANLayer(in_dim + n_added_nodes, out_dim, num=self.grid, k=self.k) + self.act_fun[l].mask *= 0. + + + else: + + if isinstance(mult_arity, int): + mult_arity = [mult_arity] * n_added_nodes + + if added_dim == 'out': + n_added_subnodes = np.sum(mult_arity) + new = Symbolic_KANLayer(in_dim, out_dim + n_added_subnodes) + old = self.symbolic_fun[l] + in_id = np.arange(in_dim) + out_id = np.arange(out_dim + n_added_nodes) + + for j in out_id: + for i in in_id: + new.fix_symbolic(i,j,'0') + new.mask += 1. 
+ + for j in out_id: + for i in in_id: + if j < out_dim: + new.funs[j][i] = old.funs[j][i] + new.funs_avoid_singularity[j][i] = old.funs_avoid_singularity[j][i] + new.funs_sympy[j][i] = old.funs_sympy[j][i] + new.funs_name[j][i] = old.funs_name[j][i] + new.affine.data[j][i] = old.affine.data[j][i] + + self.symbolic_fun[l] = new + self.act_fun[l] = KANLayer(in_dim, out_dim + n_added_subnodes, num=self.grid, k=self.k) + self.act_fun[l].mask *= 0. + + self.node_scale[l].data = torch.cat([self.node_scale[l].data, torch.ones(n_added_nodes, device=self.device)]) + self.node_bias[l].data = torch.cat([self.node_bias[l].data, torch.zeros(n_added_nodes, device=self.device)]) + self.subnode_scale[l].data = torch.cat([self.subnode_scale[l].data, torch.ones(n_added_subnodes, device=self.device)]) + self.subnode_bias[l].data = torch.cat([self.subnode_bias[l].data, torch.zeros(n_added_subnodes, device=self.device)]) + + if added_dim == 'in': + new = Symbolic_KANLayer(in_dim + n_added_nodes, out_dim) + old = self.symbolic_fun[l] + in_id = np.arange(in_dim + n_added_nodes) + out_id = np.arange(out_dim) + + for j in out_id: + for i in in_id: + new.fix_symbolic(i,j,'0') + new.mask += 1. + + for j in out_id: + for i in in_id: + if i < in_dim: + new.funs[j][i] = old.funs[j][i] + new.funs_avoid_singularity[j][i] = old.funs_avoid_singularity[j][i] + new.funs_sympy[j][i] = old.funs_sympy[j][i] + new.funs_name[j][i] = old.funs_name[j][i] + new.affine.data[j][i] = old.affine.data[j][i] + + self.symbolic_fun[l] = new + self.act_fun[l] = KANLayer(in_dim + n_added_nodes, out_dim, num=self.grid, k=self.k) + self.act_fun[l].mask *= 0. 
+ + _expand(layer_id-1, n_added_nodes, sum_bool, mult_arity, added_dim='out') + _expand(layer_id, n_added_nodes, sum_bool, mult_arity, added_dim='in') + if sum_bool: + self.width[layer_id][0] += n_added_nodes + else: + if isinstance(mult_arity, int): + mult_arity = [mult_arity] * n_added_nodes + + self.width[layer_id][1] += n_added_nodes + self.mult_arity[layer_id] += mult_arity + + def perturb(self, mag=1.0, mode='non-intrusive'): + ''' + preturb a network. For usage, please refer to tutorials interp_3_KAN_compiler.ipynb. + + Args: + ----- + mag : float + perturbation magnitude + mode : str + pertubatation mode, choices = {'non-intrusive', 'all', 'minimal'} + + Returns: + -------- + None + ''' + perturb_bool = {} + + if mode == 'all': + perturb_bool['aa_a'] = True + perturb_bool['aa_i'] = True + perturb_bool['ai'] = True + perturb_bool['ia'] = True + perturb_bool['ii'] = True + elif mode == 'non-intrusive': + perturb_bool['aa_a'] = False + perturb_bool['aa_i'] = False + perturb_bool['ai'] = True + perturb_bool['ia'] = False + perturb_bool['ii'] = True + elif mode == 'minimal': + perturb_bool['aa_a'] = True + perturb_bool['aa_i'] = False + perturb_bool['ai'] = False + perturb_bool['ia'] = False + perturb_bool['ii'] = False + else: + raise Exception('mode not recognized, valid modes are \'all\', \'non-intrusive\', \'minimal\'.') + + for l in range(self.depth): + funs_name = self.symbolic_fun[l].funs_name + for j in range(self.width_out[l+1]): + for i in range(self.width_in[l]): + out_array = list(np.array(self.symbolic_fun[l].funs_name)[j]) + in_array = list(np.array(self.symbolic_fun[l].funs_name)[:,i]) + out_active = len([i for i, x in enumerate(out_array) if x != "0"]) > 0 + in_active = len([i for i, x in enumerate(in_array) if x != "0"]) > 0 + dic = {True: 'a', False: 'i'} + edge_type = dic[in_active] + dic[out_active] + + if l < self.depth - 1 or mode != 'non-intrusive': + + if edge_type == 'aa': + if self.symbolic_fun[l].funs_name[j][i] == '0': + edge_type += 
'_i' + else: + edge_type += '_a' + + if perturb_bool[edge_type]: + self.act_fun[l].mask.data[i][j] = mag + + if l == self.depth - 1 and mode == 'non-intrusive': + + self.act_fun[l].mask.data[i][j] = torch.tensor(1.) + self.act_fun[l].scale_base.data[i][j] = torch.tensor(0.) + self.act_fun[l].scale_sp.data[i][j] = torch.tensor(0.) + + self.get_act(self.cache_data) + + self.log_history('perturb') + + + def module(self, start_layer, chain): + ''' + specify network modules + + Args: + ----- + start_layer : int + the earliest layer of the module + chain : str + specify neurons in the module + + Returns: + -------- + None + ''' + #chain = '[-1]->[-1,-2]->[-1]->[-1]' + groups = chain.split('->') + n_total_layers = len(groups)//2 + #start_layer = 0 + + for l in range(n_total_layers): + current_layer = cl = start_layer + l + id_in = [int(i) for i in groups[2*l][1:-1].split(',')] + id_out = [int(i) for i in groups[2*l+1][1:-1].split(',')] + + in_dim = self.width_in[cl] + out_dim = self.width_out[cl+1] + id_in_other = list(set(range(in_dim)) - set(id_in)) + id_out_other = list(set(range(out_dim)) - set(id_out)) + self.act_fun[cl].mask.data[np.ix_(id_in_other,id_out)] = 0. + self.act_fun[cl].mask.data[np.ix_(id_in,id_out_other)] = 0. + self.symbolic_fun[cl].mask.data[np.ix_(id_out,id_in_other)] = 0. + self.symbolic_fun[cl].mask.data[np.ix_(id_out_other,id_in)] = 0. 
+ + self.log_history('module') + + def tree(self, x=None, in_var=None, style='tree', sym_th=1e-3, sep_th=1e-1, skip_sep_test=False, verbose=False): + ''' + turn KAN into a tree + ''' + if x == None: + x = self.cache_data + plot_tree(self, x, in_var=in_var, style=style, sym_th=sym_th, sep_th=sep_th, skip_sep_test=skip_sep_test, verbose=verbose) + + + def speed(self, compile=False): + ''' + turn on KAN's speed mode + ''' + self.symbolic_enabled=False + self.save_act=False + self.auto_save=False + if compile == True: + return torch.compile(self) + else: + return self + + def get_act(self, x=None): + ''' + collect intermidate activations + ''' + if isinstance(x, dict): + x = x['train_input'] + if x == None: + if self.cache_data != None: + x = self.cache_data + else: + raise Exception("missing input data x") + save_act = self.save_act + self.save_act = True + self.forward(x) + self.save_act = save_act + + def get_fun(self, l, i, j): + ''' + get function (l,i,j) + ''' + inputs = self.spline_preacts[l][:,j,i].cpu().detach().numpy() + outputs = self.spline_postacts[l][:,j,i].cpu().detach().numpy() + # they are not ordered yet + rank = np.argsort(inputs) + inputs = inputs[rank] + outputs = outputs[rank] + plt.figure(figsize=(3,3)) + plt.plot(inputs, outputs, marker="o") + return inputs, outputs + + + def history(self, k='all'): + ''' + get history + ''' + with open(self.ckpt_path+'/history.txt', 'r') as f: + data = f.readlines() + n_line = len(data) + if k == 'all': + k = n_line + + data = data[-k:] + for line in data: + print(line[:-1]) + @property + def n_edge(self): + ''' + the number of active edges + ''' + depth = len(self.act_fun) + complexity = 0 + for l in range(depth): + complexity += torch.sum(self.act_fun[l].mask > 0.) 
+ return complexity.item() + + def evaluate(self, dataset): + evaluation = {} + evaluation['test_loss'] = torch.sqrt(torch.mean((self.forward(dataset['test_input']) - dataset['test_label'])**2)).item() + evaluation['n_edge'] = self.n_edge + evaluation['n_grid'] = self.grid + # add other metrics (maybe accuracy) + return evaluation + + def swap(self, l, i1, i2, log_history=True): + + self.act_fun[l-1].swap(i1,i2,mode='out') + self.symbolic_fun[l-1].swap(i1,i2,mode='out') + self.act_fun[l].swap(i1,i2,mode='in') + self.symbolic_fun[l].swap(i1,i2,mode='in') + + def swap_(data, i1, i2): + data[i1], data[i2] = data[i2], data[i1] + + swap_(self.node_scale[l-1].data, i1, i2) + swap_(self.node_bias[l-1].data, i1, i2) + swap_(self.subnode_scale[l-1].data, i1, i2) + swap_(self.subnode_bias[l-1].data, i1, i2) + + if log_history: + self.log_history('swap') + + @property + def connection_cost(self): + + cc = 0. + for t in self.edge_scores: + + def get_coordinate(n): + return torch.linspace(0,1,steps=n+1, device=self.device)[:n] + 1/(2*n) + + in_dim = t.shape[0] + x_in = get_coordinate(in_dim) + + out_dim = t.shape[1] + x_out = get_coordinate(out_dim) + + dist = torch.abs(x_in[:,None] - x_out[None,:]) + cc += torch.sum(dist * t) + + return cc + + def auto_swap_l(self, l): + + num = self.width_in[1] + for i in range(num): + ccs = [] + for j in range(num): + self.swap(l,i,j,log_history=False) + self.get_act() + self.attribute() + cc = self.connection_cost.detach().clone() + ccs.append(cc) + self.swap(l,i,j,log_history=False) + j = torch.argmin(torch.tensor(ccs)) + self.swap(l,i,j,log_history=False) + + def auto_swap(self): + ''' + automatically swap neurons such as connection costs are minimized + ''' + depth = self.depth + for l in range(1, depth): + self.auto_swap_l(l) + + self.log_history('auto_swap') + +KAN = MultKAN diff --git a/dl/kan/kan/Symbolic_KANLayer.py b/dl/kan/kan/Symbolic_KANLayer.py new file mode 100644 index 000000000..3b199293c --- /dev/null +++ 
b/dl/kan/kan/Symbolic_KANLayer.py @@ -0,0 +1,270 @@ +import torch +import torch.nn as nn +import numpy as np +import sympy +from .utils import * + + + +class Symbolic_KANLayer(nn.Module): + ''' + KANLayer class + + Attributes: + ----------- + in_dim : int + input dimension + out_dim : int + output dimension + funs : 2D array of torch functions (or lambda functions) + symbolic functions (torch) + funs_avoid_singularity : 2D array of torch functions (or lambda functions) with singularity avoiding + funs_name : 2D arry of str + names of symbolic functions + funs_sympy : 2D array of sympy functions (or lambda functions) + symbolic functions (sympy) + affine : 3D array of floats + affine transformations of inputs and outputs + ''' + def __init__(self, in_dim=3, out_dim=2, device='cpu'): + ''' + initialize a Symbolic_KANLayer (activation functions are initialized to be identity functions) + + Args: + ----- + in_dim : int + input dimension + out_dim : int + output dimension + device : str + device + + Returns: + -------- + self + + Example + ------- + >>> sb = Symbolic_KANLayer(in_dim=3, out_dim=3) + >>> len(sb.funs), len(sb.funs[0]) + ''' + super(Symbolic_KANLayer, self).__init__() + self.out_dim = out_dim + self.in_dim = in_dim + self.mask = torch.nn.Parameter(torch.zeros(out_dim, in_dim, device=device)).requires_grad_(False) + # torch + self.funs = [[lambda x: x*0. for i in range(self.in_dim)] for j in range(self.out_dim)] + self.funs_avoid_singularity = [[lambda x, y_th: ((), x*0.) for i in range(self.in_dim)] for j in range(self.out_dim)] + # name + self.funs_name = [['0' for i in range(self.in_dim)] for j in range(self.out_dim)] + # sympy + self.funs_sympy = [[lambda x: x*0. for i in range(self.in_dim)] for j in range(self.out_dim)] + ### make funs_name the only parameter, and make others as the properties of funs_name? 
+ + self.affine = torch.nn.Parameter(torch.zeros(out_dim, in_dim, 4, device=device)) + # c*f(a*x+b)+d + + self.device = device + self.to(device) + + def to(self, device): + ''' + move to device + ''' + super(Symbolic_KANLayer, self).to(device) + self.device = device + return self + + def forward(self, x, singularity_avoiding=False, y_th=10.): + ''' + forward + + Args: + ----- + x : 2D array + inputs, shape (batch, input dimension) + singularity_avoiding : bool + if True, funs_avoid_singularity is used; if False, funs is used. + y_th : float + the singularity threshold + + Returns: + -------- + y : 2D array + outputs, shape (batch, output dimension) + postacts : 3D array + activations after activation functions but before being summed on nodes + + Example + ------- + >>> sb = Symbolic_KANLayer(in_dim=3, out_dim=5) + >>> x = torch.normal(0,1,size=(100,3)) + >>> y, postacts = sb(x) + >>> y.shape, postacts.shape + (torch.Size([100, 5]), torch.Size([100, 5, 3])) + ''' + + batch = x.shape[0] + postacts = [] + + for i in range(self.in_dim): + postacts_ = [] + for j in range(self.out_dim): + if singularity_avoiding: + xij = self.affine[j,i,2]*self.funs_avoid_singularity[j][i](self.affine[j,i,0]*x[:,[i]]+self.affine[j,i,1], torch.tensor(y_th))[1]+self.affine[j,i,3] + else: + xij = self.affine[j,i,2]*self.funs[j][i](self.affine[j,i,0]*x[:,[i]]+self.affine[j,i,1])+self.affine[j,i,3] + postacts_.append(self.mask[j][i]*xij) + postacts.append(torch.stack(postacts_)) + + postacts = torch.stack(postacts) + postacts = postacts.permute(2,1,0,3)[:,:,:,0] + y = torch.sum(postacts, dim=2) + + return y, postacts + + + def get_subset(self, in_id, out_id): + ''' + get a smaller Symbolic_KANLayer from a larger Symbolic_KANLayer (used for pruning) + + Args: + ----- + in_id : list + id of selected input neurons + out_id : list + id of selected output neurons + + Returns: + -------- + spb : Symbolic_KANLayer + + Example + ------- + >>> sb_large = Symbolic_KANLayer(in_dim=10, out_dim=10) + >>> 
sb_small = sb_large.get_subset([0,9],[1,2,3]) + >>> sb_small.in_dim, sb_small.out_dim + ''' + sbb = Symbolic_KANLayer(self.in_dim, self.out_dim, device=self.device) + sbb.in_dim = len(in_id) + sbb.out_dim = len(out_id) + sbb.mask.data = self.mask.data[out_id][:,in_id] + sbb.funs = [[self.funs[j][i] for i in in_id] for j in out_id] + sbb.funs_avoid_singularity = [[self.funs_avoid_singularity[j][i] for i in in_id] for j in out_id] + sbb.funs_sympy = [[self.funs_sympy[j][i] for i in in_id] for j in out_id] + sbb.funs_name = [[self.funs_name[j][i] for i in in_id] for j in out_id] + sbb.affine.data = self.affine.data[out_id][:,in_id] + return sbb + + + def fix_symbolic(self, i, j, fun_name, x=None, y=None, random=False, a_range=(-10,10), b_range=(-10,10), verbose=True): + ''' + fix an activation function to be symbolic + + Args: + ----- + i : int + the id of input neuron + j : int + the id of output neuron + fun_name : str + the name of the symbolic functions + x : 1D array + preactivations + y : 1D array + postactivations + a_range : tuple + sweeping range of a + b_range : tuple + sweeping range of a + verbose : bool + print more information if True + + Returns: + -------- + r2 (coefficient of determination) + + Example 1 + --------- + >>> # when x & y are not provided. 
Affine parameters are set to a = 1, b = 0, c = 1, d = 0 + >>> sb = Symbolic_KANLayer(in_dim=3, out_dim=2) + >>> sb.fix_symbolic(2,1,'sin') + >>> print(sb.funs_name) + >>> print(sb.affine) + + Example 2 + --------- + >>> # when x & y are provided, fit_params() is called to find the best fit coefficients + >>> sb = Symbolic_KANLayer(in_dim=3, out_dim=2) + >>> batch = 100 + >>> x = torch.linspace(-1,1,steps=batch) + >>> noises = torch.normal(0,1,(batch,)) * 0.02 + >>> y = 5.0*torch.sin(3.0*x + 2.0) + 0.7 + noises + >>> sb.fix_symbolic(2,1,'sin',x,y) + >>> print(sb.funs_name) + >>> print(sb.affine[1,2,:].data) + ''' + if isinstance(fun_name,str): + fun = SYMBOLIC_LIB[fun_name][0] + fun_sympy = SYMBOLIC_LIB[fun_name][1] + fun_avoid_singularity = SYMBOLIC_LIB[fun_name][3] + self.funs_sympy[j][i] = fun_sympy + self.funs_name[j][i] = fun_name + + if x == None or y == None: + #initialzie from just fun + self.funs[j][i] = fun + self.funs_avoid_singularity[j][i] = fun_avoid_singularity + if random == False: + self.affine.data[j][i] = torch.tensor([1.,0.,1.,0.], device=self.device) + else: + self.affine.data[j][i] = torch.rand(4, device=self.device) * 2 - 1 + return None + else: + #initialize from x & y and fun + params, r2 = fit_params(x,y,fun, a_range=a_range, b_range=b_range, verbose=verbose, device=self.device) + self.funs[j][i] = fun + self.funs_avoid_singularity[j][i] = fun_avoid_singularity + self.affine.data[j][i] = params + return r2 + else: + # if fun_name itself is a function + fun = fun_name + fun_sympy = fun_name + self.funs_sympy[j][i] = fun_sympy + self.funs_name[j][i] = "anonymous" + + self.funs[j][i] = fun + self.funs_avoid_singularity[j][i] = fun + if random == False: + self.affine.data[j][i] = torch.tensor([1.,0.,1.,0.], device=self.device) + else: + self.affine.data[j][i] = torch.rand(4, device=self.device) * 2 - 1 + return None + + def swap(self, i1, i2, mode='in'): + ''' + swap the i1 neuron with the i2 neuron in input (if mode == 'in') or output (if mode 
== 'out') + ''' + with torch.no_grad(): + def swap_list_(data, i1, i2, mode='in'): + + if mode == 'in': + for j in range(self.out_dim): + data[j][i1], data[j][i2] = data[j][i2], data[j][i1] + + elif mode == 'out': + data[i1], data[i2] = data[i2], data[i1] + + def swap_(data, i1, i2, mode='in'): + if mode == 'in': + data[:,i1], data[:,i2] = data[:,i2].clone(), data[:,i1].clone() + + elif mode == 'out': + data[i1], data[i2] = data[i2].clone(), data[i1].clone() + + swap_list_(self.funs_name,i1,i2,mode) + swap_list_(self.funs_sympy,i1,i2,mode) + swap_list_(self.funs_avoid_singularity,i1,i2,mode) + swap_(self.affine.data,i1,i2,mode) + swap_(self.mask.data,i1,i2,mode) diff --git a/dl/kan/kan/__init__.py b/dl/kan/kan/__init__.py new file mode 100644 index 000000000..1ce0e47b2 --- /dev/null +++ b/dl/kan/kan/__init__.py @@ -0,0 +1,3 @@ +from .MultKAN import * +from .utils import * +#torch.use_deterministic_algorithms(True) \ No newline at end of file diff --git a/dl/kan/kan/assets/img/mult_symbol.png b/dl/kan/kan/assets/img/mult_symbol.png new file mode 100644 index 0000000000000000000000000000000000000000..16d9960fbd6eb0f0e9ebf3628b3a6e2b13e56c95 GIT binary patch literal 6392 zcmcI}cQ{<#_wN{t8b)t}F?zJ>Fb3SLSwf9+Puk~36qpPiUhn$rh007)kN2B!d{m{RS z6ofz5n$*1l00^EwQ&!e>QC3s-aP{yu@O)}(k9y|g>H0KKR}ugaNVB!J)>ju67<&5D z+InbQNRZsyUq3D`QQtarsGBP4Go=kBIc9i(IGKRp0g{*U9xFr8r@?O0GO-^KV{*LG z&FamT-SakYj)Rky;)NGUwi3!|u#x^1f`!%8g zw!k`G!psWe%rK{&zY!y4W4{t7eD&&N++>KF%{ z{&uK$tnD~Ab$LjA|B1(ne{FN|&iUVvpEoNW>2YMhh%hLWl=T032POk*!iKq{q*w)p zj87O({BS>pL$y~>EAXv{uUqOy_Ut9}w$Ar{FJ!Kko`Ei;Pk!K(agn%9oJ}$q91aJ1 z&q+bA&jeSM+~hA*PaKDuK2%CwbPbUViDjH?A^Ld+kw~ImfZHps$*Gs}!~SWv{D?XF z2P|{Kp}Y^(Zn|)*83|;-JlI#oCjYch3RF>DM<~p4C9*vMV5T1Xoc@lDBf^jckY9LVc#$_M{GS-g|c)iVlS@wTd zNrkNW7!g1*{g6;DF^Wcu9kPS4QWHB;;mV)~e8oKeTKVtCNhrg4?=d6*!maeExN-K} z-g6f~18cUTEc;j&X``c$xZ5o=;^N@5Z+6S=e(F9xKGO>exl+>7yqYWQF0fyoFI3<5ad$M+U3KY#ysUs5<@dv9mGVEnikh-5#Fv{G%qH>w1a{hK$?Av*YCD+ik= 
z>8PI}@+*0W0pS=W=p3Do*$V5PbYf|}mCLYkU;I#Q*qbZjN=nRZ7bfN$vxu>eLsE=i z>v-=FS7i9@|M_Qge>j7`31QHF8&1#w;7e{)Jv1E}kU#=rsbYsMn6%j8p2+3S;7`6v zCc;mxfvynSWY(7S1O%!idfQtxIf+g7?di^>*kHzVC{JiDqomyoDoc|WLW zf7yT4$ah$EZ@8EdU`;$$#aIWC&I+d03|||HoniNGhwJ|S^{Zkl`d2|Tpn)(ZIVxgt zWJ=@>+Vfy(uhhKV^Ikk%a1*@XstZ0vD$I@{lFmNmiGlTedtAv+K+SQ#YVgY!AAW-x z3Zm@p*)+7S3u4n9ntsksVAXcCyE|3hc=F-+b69P;%O_o*MtagUm@NP5BVW37x!_CR zb)I+yfHh?e`>d9zr#T7=pI^tlHE$PGw`ld`dhY%rgR=p&oefhw>>+3j1b2^X=M1|k z8spv1FgQ4PCH2b=(v#)yO2Q&R$Lj7Wg?MMSguAV=M%76M1f)o#TwdWTd33CLi7gQGL@|tzwK#7L-XFipVwAryCZL@$$~*fSJXM%2 zE^E*7*=<~^`{T#+koN_hdYTZ!()ciCCUsV?U30wp<|!YE@6#NY1AV2cxg}bWouRiO z1|nm7#i|gw30)N%*nbq4sJ_rvrAS^-KqvB%PCa;2;udCL_R-zw7p?~pB^%g>g2=6W zMfK9ePGFb{5%g&bad-=rc&Zqj z-|R@a_Aq$9k#e}EBWjV~lG!M^Yeatd32pvAki1tZQxTG{8;UYt-_dnm?F{yr1MzvZ zf@3Fo9;da^|5v5F0%tj~f0>N=wCnFtZI}5vBmbW((o=njouUJZZ2@h3!%7gj$7X3I z&#OvbO*Y`6ox&@Zi5K6R938&2ws<^2{uSjN3q&mBY1N#iKRm4N{3_PYa0V^`u%}Y zpJtljp`p0(t$*}xP`{a2O|(Z9%wAQ(eJukMDoFnogR=3;W;c%?u`F|4a7mGNnQPD{ zW#pnNafgnz5(MEo+8Sb6RFIM3T28#x0~Wb&92yx3@Bj}1!ffPmlHKKp-~KCsvCYEniO;^Y z(PAby*0$qV%jcdo*6fqG6T2D9?|eySm%NsEyTp;8MB}`I$U(A3G2?|P*XBu6yl}kc zc9_nx#ho85&-`I&7N->)EPR^~Zz{>jMwCqKqD+tYQLv5=DcC&hzB-is@m#T{WmPwn(vIFBbBc*}0UP7qKUky3j1oOc$wg)lx5bVk zTzXipb{QOZJ{v14@tDSm&%ae%oXnN~(ZfF8dzu)+BWoGCCNAFB*Z1t7N6TV7z0ve8 z`@o8WxJ5}02BJCzrs=zOoN5r}rr@iyOx=8citqrgey8Nm-$*^fdN@5rnXkB2M%49# z|E$M4II*6c<%QLMK8cU;X&m(~JVZb;B*F<3&AZ#IT~lFOJk_1oXJ&N5PlAIP$(TM0 zf7ueGqNj&g<4&nr>7u0dG7E{Yr;`sSVfP(V<9IBRP8ehBcw4*IXj&cJZl5YBACk4q z-vq4d_#ch^EM@SWlesu(nxutw_Y!3NrL!hX3^Q68!~qLlVlM)PQYR)R_TiGDBCsdS z8?b@!s$6qVSZcJM*XeNaGuDZH@tkYw4)S_;dmk5vj`+ozw{>Jb4~dpeS8lXvSSg4e zwx0ajn{iI`4i-yF>9(CSa{%9k&>5R^@;OUH0o(B~| zALPMXNvKSpWM4sR>Ay)9?k;}+j_4~)7Ce2RK6M2-Wk~NK^Q4A+crQ#m-J6t10P@$9 z{*tA~^B|s3#m2krwL`c;Q0{Bw+-sAY&lZb|i`&6nak8U4v{nP*?UIO|?UHTNY~Yi6 zCY!}Nt4R*$_A0~cQ7CQkKsZbsvCfp{(0CZS8=6t{R>R7?1M%QN-TC-uXWL~jey@Pr z)44UG%@Iq_uOsixBOA1T*PAxqt*)>CsB!mQk9eJWkd@D_X$0L`Olc$Bq$3e;pGil_ 
zhx&qof|=(c6;_X6WGayMuaxc$@T#0x8O`v9+3&Octj0wK+x50!b*Bxa9?R0PDYWL@ zA}d?1&xkqH1UUpF(}Q#u_9PYC6eiiXfvPu&T738PCmNInqXg1*2f~4ptvwso(s`fh z&=y60+Sm*G8#$h|C}Ar4^qxB|5*Q=|jZTPiPxHHHCSw3?DLdNn%+hlOdPOIUag)-= zAhTUk$+V5>=xUx8svy`-*LosxwI5eB@bAUN(Wm_tX(N|vKkHlZ)~8y0G;zFUAK;yr z`$D66?>Up1l^HRlQScLd%sj~3a3bTvC|5$>qnVeEU)=GwVj8GmXJeDxvou)+50v(h z8}a;S-d;)tTYQWOd;UjD)yb2i^IQqagsf%whpfGHZKLMo!xNK}jlb+H|6uMfRlK8x zf5?OaWt*iUl=Dea^t@7uc@<_YAEON&**c;SfR0KJB@t&oIdy9%;AsoFelh+)C5U+@ z-&Q7st46(>%(L&S38cnSH5=Gw+yTv)|8+Xfw=DUStqCp_(3uc5Oy&Z!7X+U6Zz#4_iMs=e3z0VvGggwgA+zeH@*vZ5=`1{s>$jBq*)Z8`l* zEVpT(zZaLdMo!oj#6fAyjA!hc+H(S9-+_sS-i{>wn$dZ%?(4~m0%`!LhWf#0iRjYS+ccGxVF@waX+Ai+Gi$}|!{9hD~OIV~}`3ab` zp!Tr~s0da`agvV_vVX}p;RuSTxjAEL1R^h9&Nt)Xc|jb5S$?58-s#ty(8nrSbGl}; z@H^Q}mth7Wx2?RyxpVE++$2#W%o)vbIJMuunm@h|SyC#KtEH%8~0C=tl$L`DHCljhYvs$MQQ(*lU%M{vG0jNa_b) z3(;6ce9EE;VI}26+1#b-PtKP>5QwTceiGysh2p~~H#c`aUG*XimHrVf1y+fV;-;ES z4_qOH0CUPdgp9RStF&-OV!|ViU>)>EEXGgtI&r13;rjcoofZzVC z;5c#l+-r0YLoA4g0-pmCN;g%>!wpzprb!I=7=8<@HEC43Lq+8*%G@l9+V}*Qf{Lox z;HO3|Z94}sCveoT=QLrmYL3gs5ht{vj=P7A+KPmUxqH zrFD4T0(<7}DdSeh02&x=YT7VKx-*uaS7Vf|n+Oiukl~EW-B5s2;DEV2nME5CUN8-&VAy z>+nxmGUaK_yzGspXgvL{9s(xo1RoD{OIy2nU9{K9I#W}{VUgX#rB?Y?A{On2MGu1W z&3ssLXbJH2DZK4<&P}TO9gDF)yTO_zl@!r^iE1evT2Gw!9O3~D&G$3kxkTqDyhZ_X zq;4_hk&erFMz5Qcl2R~dbtx7GXdrr%>>OpP7Rn;OwL822tVL|tfYgSCsa}fstwWJ)vmW;#=ZEef<_x99BiT7kF2&_pc*|9{;DoF#v;6E%$ z*Zgho$_%#=zPqVqt3riFAs7B!L}3i~d`w7Y>6;mR!tfke{L#^Sq%%GvH_oXPM!i{B z`C`xAJ;HTUWlzzTt4!n>wA6-n()L^HCh)_1?m~cD5%oO+neL&|@|aqj4GZ+El8@Z# zQW?U?z#u~|VjaCA$Dd%+l^gmXTB>4krGD?H>Wl=!Ay`OI=nB}UT!hkoSmb+D16JX7UyKdF;Zm; zN-mOD$U~owi@I(x`9HkC(f!7g_k*rZA4{OOlKYrdc2OiY=#Lf_J>dfet1wS=cIv!p zF*yi%_qg5|Uvj`~pt!Yvas9X4kW|RU4~~~H$8F>8TIW~tJ@Z_iD)8v6)_!%LWp?Yh z)rLu&`?=b@Hnx|nIhBGs!FZr%YxCZ*{fTL`&m}%p?O5!1`BBbmBdR^{)M0`RoghXf z&Lz_yXX4Wf!n4(g=G@1Dty9~!Cy^?Kj(BQ2Txanq9iM&L;TZs#TQ81vu-5D&ZTqtH z-r=FvM3H=sc_(GvwsUC7>+6`mPwO=OJ~!kNmzDNjk9BcdyQM4f z)oev5lFe%MR_kba{MOgZSxE=3lV4CkC&!ZWbs&^%*)ww%i#3Au=sbF4_;;#YZFph9 
z#CvC=$j;YSiiLwidzWl)cOyZOzKgGGOA|>O%K}_@(m!)?vIr6z^$x2(h%4}#`o6O8 zrl)h5C-4brq0y^uu*squ|1^=!4c9R&+U|OWLQ2gTcN%Pochv!ynl=Actd-D7QEF#D zNljRu2?pHVLw?YGo^hpwTHOH1bXN(JJQpx(ya0Dkl7}iDu_-3RlbE2c3`S3BkSm(8 z-I}wt1wn!6gsY$s);1@qo%+}4Sk>Nf_jGiQP&iCdRe+L}a!l!-FR@=Dy}jjf~p_$a&D_ z$#SP)mvSvLAps33q11LNh;?3LMX576Na12~nCs18^>akCpOLm$2Ig)kvR?75vHA?u yJU>*QL7hgOzDb&y`~RgP{C}%5>q3CHp#IIXk`7u^{LcyiKwU)}Ri|Vf@xK73=%I=L literal 0 HcmV?d00001 diff --git a/dl/kan/kan/assets/img/sum_symbol.png b/dl/kan/kan/assets/img/sum_symbol.png new file mode 100644 index 0000000000000000000000000000000000000000..724084c5e6fd2554874fdecb77254cdb3b7cca13 GIT binary patch literal 6210 zcmb_gXHZk!x=uoX&=Zi}LKl$|k=_EK1O${`1PK8|dPgL*&_R$Uy-648QWTV?0V2gh zk=_(UkS5ZUcH?*M+%xCT{dLyN+I#K2-aYT^HE(&IC*hW%4jnZIH2?sh(?x3+mU0RTXI7ZmE28%hV|5tyA~msy z3|obo;zVu=LQWu-2Iy4j)KD^_Ir=>soY#9yM*OU+!FW>#i`pUwdS(xQi7X)K#Q+>Y z^+IHi%grB0+*>=194sB)Phuq3!CbDm67)^z$NFV9cD6iesfagRrGNb3@>kjW-opub zJJM29+ZUwa8J#m3MiHBgEVNOAVL4c6X(Q)oX+;8A1X+Y4V{-60qT)1+)!>$Gh0`U-n?630!5)H zc$|M!d3T+i*m`rMzvB8m<9y#VMK$D%_Ke0Kv+mLyO*U{jlI>%x%iirj^;s>C_8H2j zOf+>Hou_ZiAUpE+?aKKzdamHFY=o+;zbMZ}cjjDnj~Qv?R*i=M9l)L?Mdx}m>KxgT zMCY}XGu2)>7#}pJN7MDEi1F#fNVdC=9Ru&OSO^OYq(HSK>8c`HPu-{TGirvRc=^?) z86=Xp3TF8few0dK@T}aEfd#{#js?BUdlB7|d%ZfL{s-$+M4HWSL5K^ZP0_s0vwWnB z%LQE%NIET==~(6Wk=$sDV4I6tFFmamqc(=?cQbw;RJ~rVdpjHh$--Po2Y`-J8FbKN zS;V$8a+5}bg7Vd9H%6`PZZxZoc;uvwUvi#ez_Tl2VkSaDLeE-SOq02l%$b>)otO^~ z4-T9THm94PRaJ@9*xK4|d@Mn25E7V-d|fEnn`8Hb9w&`93DJ>9ib>|9QJudt-#MC^ znPujw;M$%&eR^wkb(MNCnX~G?zwe)Ymx)iGN@iwf&2Hb$clGcv8yOjy-tlKdm3fG? z+8QKc6FLRkkzuG$y+40GlPWc}vZ~(L*f2FPNPbpdZ=NA+FY?Xs5wBW(US6K5gF~6? 
zy?eLB#e-f>R=Zbz`kY=Ap!kf|9E!JUVZQd}u;~bM(bw0vFn@n>%VYX^pHkpv4yz2_ zppj1Yjk%6Hl#HMur~&qV9+W;F%$UR>GnlUy-t{8jhxyKUwTV$p0&ELk*T~-XmU5^T z`cSi|C&R9ZkxN4LrmNBt54XgFAD`=feV+;^2qq0t9|P=`GMobA4bmCb-*tZ1L*yRv zR8PP^E`E_f@;RJ-3=@azjON%`q7 zU3?)iAF^`z&Es?9$w1v@h~4wnOw9W{88NdML`$R5Wef%{iH)5~uoO%!=?aI-@E|*^ z);#83eo_iQKmAysJAcs&xY|RIawABfu3+&132q)?8Sd*|eTmF>oSmIFsQHTDVnDL7 zxdO4$x>~f1#9Ma#4;}vOuP-Pf_6^k6p#rUU?x>jj5eZSejl9hG25 zfrEE;mjR6+>BJaOEq#FphKyA|MqloILc4BqgO$0CKi~GM?t8eU_V)Ob4zE*jCjUtt zP0@jMn|tnoPvG`kHBZ>_^5)wN3W^h@10&Ro$CHNf0KD6pw&dz)RFt9DzPMf2+_>HB z>od$%i%8C7jP6fFNxV<_c7_cs%J}V_ZE@pa%X^yUb=DP!+w(P*2Kjf(yRY8|#bT%` zDZmKr+OPhO4m}>75=B+tgwGY7ij{(YuV&kx`EXLj9{CSpAs>W8YUxGwLA}l%MZwvBxC++dEYzJWO*N6v+x=hyFx~`8_%!KMvwo^qQ zEPr%Cw9DX!MEotGXXKIDg>7k%CM%@v-+)_jN1g#|h4(iCg|=t}0iQASha8bp0+gvV zCAy|Gj%2Xhx*O!cQqJ2Oq+^=c(wm_a1bo9ZSP2q=D;oD0`#`-)D>K=e^&K0}HtWy7 z<$KZ7dCpK$;J7^bSU4hWkcQFxMq<#{FO#WQS{#RS5E~gcRgGTE$MQa4q{;wbBRCV| z6Jp^ab6w+$Uhd=6V3S|>MpQMj*_k_!%eFgy)Q7RI3b=!QeH$6-2RGSM`-0k+ZAwky zFKesLh6iXQUq9J?wWGQQcj}n~LzxKLs8r6hm-{u#WUoYp;}kWX1)QCh#PRqBdkGLR zF6|8IDOA{UnDYw6w=Y-IiQ##Xa#8Kq0|UAF@`aFJ289ykb!4D;7`E@yzbs0SyZ+l> z6?yDJ4<=k^+dQU4l>+{xwH;Dntj!422nT9ka)qy&rMKW0x7hgN%Y*T*_?*hEK$`UQ>XoC5Z*66b}t{q(v_&b zl`XK^B(0ng(JUF-+dl@Fm7_QX-2Q?J0nsbb~_fHooZq(m;`!YJiw&FekJ1TeyC`S8ISXYS#40Ryn@rki}?8*9A!R6EP zGA2u5vX-v`>pJ!Z#dIvi>_lP!*d$A7aJ$o~&P9hCrs zhI5>WrYm^}A2ayoV5Oa{*|mU_rl5gJ0RmT(Gel`JmUGAqKc_Oc6ch5ocXd!hHp7Dv z9l?$Ztf!0nsD;OeU3u`nw<5Sqis&=L?7B-B7&}E9bjoH z8yr=O9THRanMhGlgz&iPJ2EvJD?wfUXy~{Im&9Y}JCYj6$B6j`P+E)a*eFGw%lm)S zxajsS`TS!ehF)M+D+eG=d5#$rnk6^Zi+#VuhTCeLd^R|!XK3P*0SEm}WMnT>3Q@(Q z$ripjUFPPN%&qN;N~eD#)H6&Txc$2`*&RPV-ndx3cOo)G1@9S-vAhI1VXN-wPz07R zv+6@J-#|)7r?Slw-G7vb&8H03yvO&y?M8`-+$=J+ut~#F>Zq|*tDf$@RTkECILNjO ztA50#@P)u;!_#?oI9r(1$Gx(E+YvwJnXI^x{oEw8NWo8-=zUS1>_t1;bI^9&_n4*} z@BFLl>m#K-1I8lTi#fj!cuhzen=7J{$0^;XRRblId^)tZ1P?(d^Z(0_sLSgb8UV zRkZ1}`h+)X1Y|$IQ7rFMdPU;y2ilb*gDfUZfC7FFa=DgkKgGTosdq}o++kyGZvMUU zx$PiC$~P)ADy&wPCfcmCqN4V0Sh1r*37D-NwhppCv+>eRXuC;ez~J*RjdR9id1@}| 
zl8?p}CKJD>6=ie+a+V6;tJvE?qE|)_-|ZA%6^RE3$hBnXQVXh6J@&UqH;3z{WFKdP zK(a^hP_D4h(Av8NCFKUgB&R5)+;Su~OVCBuf;zhNVo0*PT!E5s#PyG76=F44*Q1vt z(Bw=Ac+Njhdf7XuuI7{9^kNhLY6RFId4i)+>+J32Z}58pk9vI+JO zlhLdYH;a$c)72#OKv_bJgaUvoU|)eJjlLh%Vn0s^}sRsTVfl6zh{_gOY^xEd~+U&!{L%NL@GN-5l9tCPk-## zj8vqcp!zfY`7=vPf_t$yXt?0}$3AQ`@-V}PRe|D~y?1Q7EQ0Uj%nZl4@d}UctsC{@ zu7$6DDp-d`_?vH!gmnfkc7;-p8GOqRzT_uFYUHPr+?uL4Wu?4RvV21Bm^028{3_I}%*m!&hW}aKFD0BeD%V6I_%Ww=yJI|2| z_N1Eo7k;-o4Y7Foxv)zAYdi2!^7X7C(6o+rBmEWo(^xvr@fez8kDxR`CS(%KEf)SF z)j_+@3E#6DTz{Z;eq0uOG=IN+q=J<3@cwui)W$yz!xm2TJ}glEG1*|jD)~hVktWE9 zbcZ>_!mE;a@AT47l1C;aFAVf>O?QWP2d{C9amjx}bG`*bZxeF(h@xWB^rXgie9l4E zgUKMlP;=pzJmqNx?2S& zAhRb@9O7F(u@^cFfoBb$@DTY{(q%|3e(7WQ_n=mfS=od2D)Z^@IU*i3f)Q-|S^A#_ zOEmgvn~{O(c)WRr0(Dp z95(BEZDgZu;f-4#%B^1vOx9YJk{@F!9>*Ld-wPD>A1aa7AaR} zN!VVIg9gAAT_18r#xrC4FRf7f_9tkTAeX*Nms`up-a9uh?A9~H48gB(3J?-SiLjMK z=>$Fe((*-X#~rWv4zteVFAqEB(YPC6C?^4pN)@PO)wJwhOkEtexxg&47u@(EOQ>_k(UVAbwh@#pl{hv`r>su>KWAw(l$ zK1A>lsSd5qkRk;io-z}Ce6cbEu<(4pwf7{}z*^JLb?sVsQ@5KHJsN;OUWGEzsT;!O z3T}|FEWkUbLM8cKuC(*`n;06WLHL^J!37z1gNFcU`UqVC`SM>%AaNbAVZU`i!YRqE z>-p*Lf_1TgIKxn!K0EbcJU$jq*&?}cFKOpA`HhB$$_-8lAr-TGQE6?c2v;|^i6Rx8 z_KaqioIaqo*QQw>idzBr@?B5Z-~l7gb^kO{T?=~5@hUjEO4}eOEm5_Htu{BM;e?&< zA09AB4wB-{=)y!ox0k-K`q(|_+c=RC)=&1}#nAL)>O|ueu#(0Ayip5Xk?UA{Y?#%o zS&jJvf3v>z&UC>=O5hDCnM(o$4g@O|@<~qHS>(7W^!2l6R~pL7-dd#zwjz6=SLymF zNc=`Xz(|e20f>2!l9kGVL)W#pTa6SdS!~U@oE7E+c^QwW0!dV#0^FpPH2tfRy>Z#X zmQaDj1j)IGuLDm9qWAarp9Y9B}BQ zg`L`s{xC={Pz~*vX>rNOlk+Ni=Sg|;ra`E+wUzFC@tcO0R?J^om&vy~MckOEnZT3v zpBr-Xp?2U~jaY9zr9!PRUSXMlO{J+1iOgcJ-@i9>UK93Vhv@h8OZgGJg{$MqLF^k$ z4X`}=JPT{2kSP9ecC195D-Y12Q~`U&8 z8Nvp8pX+X~?Q56SX}n2RJq8tDZAQ6&37U93Y?w8N+Qtmh&H_JlX>?$EK`9H(hu`Bw zJXRZjhS`g`$8O~{^s>BRYeMR4@@oRJ@wd@5R8P`85+6S7)OyJBV4v*qLX*Qll1Qs( zTq9LmulCeNJRLT|l3PHJogkA=fc~(g%IvvhRbbf5n(Lh1+|3zA-!fBr(Q` ze&N-VUyzA~g`!TA#hIDBP+9857{MG2a3?kHFt1j(d+Zi`DMoG4E4|c`Ux-Oq|CULm zzLArYQ=(^ob$U9ZOyH)anW^bxGq%~u$!xRN&UokIj#sbDEG#Snzc`P)IMi=O8rAaA 
zIYp9>y)SQ4E4cb)n%81r$%UO#e^pbNk2%JHqafb~Zg2xMyT?S^YrG?$rU*^o!7pa$ zh6}W2D(mjBZuZ;~Tn4+o>-Iut6n(5bB@>fk(uDzSYI~?cq^qkn^{mw}WZEw6ji~xW zS{)25v_0;+3)TAr%Bww^nh+c9C6+&SDx$)ftHKsGv%SuwpyAhmivZ^^r%a+@X>#>c zc2DPbVO_DoCYk-L38KJLturwfN|zE{)`P7jnvnEPa&;&|=SgCR_07cYj%;+<9SGTB zulW@1>~a*E9^|XVPgww{*}Xgj8dV%sOMeefPG36PkwkN%FXBvj$oU{K@t4qik0ZTI z^b9lvw{?!S;#H(glFh%eq(O SnD-}uG<3BLHS0Ank^ciTup_?! literal 0 HcmV?d00001 diff --git a/dl/kan/kan/compiler.py b/dl/kan/kan/compiler.py new file mode 100644 index 000000000..c8014829e --- /dev/null +++ b/dl/kan/kan/compiler.py @@ -0,0 +1,498 @@ +from sympy import * +import sympy +import numpy as np +from kan.MultKAN import MultKAN +import torch + +def next_nontrivial_operation(expr, scale=1, bias=0): + ''' + remove the affine part of an expression + + Args: + ----- + expr : sympy expression + scale : float + bias : float + + Returns: + -------- + expr : sympy expression + scale : float + bias : float + + Example + ------- + >>> from kan.compiler import * + >>> from sympy import * + >>> input_vars = a, b = symbols('a b') + >>> expression = 3.14534242 * exp(sin(pi*a) + b**2) - 2.32345402 + >>> next_nontrivial_operation(expression) + ''' + if expr.func == Add or expr.func == Mul: + n_arg = len(expr.args) + n_num = 0 + n_var_id = [] + n_num_id = [] + var_args = [] + for i in range(n_arg): + is_number = expr.args[i].is_number + n_num += is_number + if not is_number: + n_var_id.append(i) + var_args.append(expr.args[i]) + else: + n_num_id.append(i) + if n_num > 0: + # trivial + if expr.func == Add: + for i in range(n_num): + if i == 0: + bias = expr.args[n_num_id[i]] + else: + bias += expr.args[n_num_id[i]] + if expr.func == Mul: + for i in range(n_num): + if i == 0: + scale = expr.args[n_num_id[i]] + else: + scale *= expr.args[n_num_id[i]] + + return next_nontrivial_operation(expr.func(*var_args), scale, bias) + else: + return expr, scale, bias + else: + return expr, scale, bias + + +def expr2kan(input_variables, expr, grid=5, k=3, 
def expr2kan(input_variables, expr, grid=5, k=3, auto_save=False):
    '''
    compile a symbolic formula to a MultKAN

    Args:
    -----
        input_variables : a list of sympy symbols
        expr : sympy expression
        grid : int
            the number of grid intervals
        k : int
            spline order
        auto_save : bool
            if auto_save = True, models are automatically saved

    Returns:
    --------
        MultKAN

    Example
    -------
    >>> from kan.compiler import *
    >>> from sympy import *
    >>> input_vars = a, b = symbols('a b')
    >>> expression = exp(sin(pi*a) + b**2)
    >>> model = kanpiler(input_vars, expression)
    >>> x = torch.rand(100,2) * 2 - 1
    >>> model(x)
    >>> model.plot()
    '''
    # The three classes below close over the mutable lists/dicts defined
    # further down (Nodes, SubNodes, Connections, Start_Nodes); constructing
    # an instance registers it into those containers as a side effect.

    class Node:
        # A node in the expression tree: either a sum node or a mult node.
        def __init__(self, expr, mult_bool, depth, scale, bias, parent=None, mult_arity=None):
            self.expr = expr
            self.mult_bool = mult_bool
            if self.mult_bool:
                self.mult_arity = mult_arity
            self.depth = depth

            # register this node at its depth, growing the per-depth list lazily
            if len(Nodes) <= depth:
                Nodes.append([])
                index = 0
            else:
                index = len(Nodes[depth])

            Nodes[depth].append(self)

            self.index = index
            if parent == None:
                self.parent_index = None
            else:
                self.parent_index = parent.index
            self.child_index = []

            # update parent's child_index
            if parent != None:
                parent.child_index.append(self.index)


            self.scale = scale
            self.bias = bias


    class SubNode:
        # A subnode sits between a node and its children (KAN subnode layer).
        def __init__(self, expr, depth, scale, bias, parent=None):
            self.expr = expr
            self.depth = depth

            if len(SubNodes) <= depth:
                SubNodes.append([])
                index = 0
            else:
                index = len(SubNodes[depth])

            SubNodes[depth].append(self)

            self.index = index
            self.parent_index = None # shape: (2,)
            self.child_index = [] # shape: (n, 2)

            # update parent's child_index
            # NOTE(review): unlike Node, parent is dereferenced unconditionally,
            # so SubNode is always constructed with a parent node.
            parent.child_index.append(self.index)

            self.scale = scale
            self.bias = bias


    class Connection:
        def __init__(self, affine, fun, fun_name, parent=None, child=None, power_exponent=None):
            # connection = activation function that connects a subnode to a node in the next layer node
            self.affine = affine #[1,0,1,0] # (a,b,c,d)
            self.fun = fun # y = c*fun(a*x+b)+d
            self.fun_name = fun_name
            self.parent_index = parent.index
            self.depth = parent.depth
            self.child_index = child.index
            self.power_exponent = power_exponent # if fun == Pow
            # registering keyed on (depth, parent, child) — one connection per edge
            Connections[(self.depth,self.parent_index,self.child_index)] = self

    def create_node(expr, parent=None, n_layer=None):
        # Recursively build the Node/SubNode/Connection graph for expr.
        # When n_layer is given, Symbol leaves are padded with identity
        # chains down to depth n_layer so all leaves end at the same depth.
        expr, scale, bias = next_nontrivial_operation(expr)
        if parent == None:
            depth = 0
        else:
            depth = parent.depth


        if expr.func == Mul:
            mult_arity = len(expr.args)
            node = Node(expr, True, depth, scale, bias, parent=parent, mult_arity=mult_arity)
            # create mult_arity SubNodes, + 1
            for i in range(mult_arity):
                # create SubNode
                expr_i, scale, bias = next_nontrivial_operation(expr.args[i])
                subnode = SubNode(expr_i, node.depth+1, scale, bias, parent=node)
                if expr_i.func == Add:
                    for j in range(len(expr_i.args)):
                        expr_ij, scale, bias = next_nontrivial_operation(expr_i.args[j])
                        # expr_ij is impossible to be Add, should be Mul or 1D
                        if expr_ij.func == Mul:
                            # create a node with expr_ij
                            new_node = create_node(expr_ij, parent=subnode, n_layer=n_layer)
                            # create a connection which is a linear function
                            c = Connection([1,0,float(scale),float(bias)], lambda x: x, 'x', parent=subnode, child=new_node)

                        elif expr_ij.func == Symbol:
                            new_node = create_node(expr_ij, parent=subnode, n_layer=n_layer)
                            c = Connection([1,0,float(scale),float(bias)], lambda x: x, fun_name = 'x', parent=subnode, child=new_node)

                        else:
                            # 1D function case
                            # create a node with expr_ij.args[0]
                            new_node = create_node(expr_ij.args[0], parent=subnode, n_layer=n_layer)
                            # create 1D function expr_ij.func
                            if expr_ij.func == Pow:
                                power_exponent = expr_ij.args[1]
                            else:
                                power_exponent = None
                            Connection([1,0,float(scale),float(bias)], expr_ij.func, fun_name = expr_ij.func, parent=subnode, child=new_node, power_exponent=power_exponent)


                elif expr_i.func == Mul:
                    # create a node with expr_i
                    new_node = create_node(expr_i, parent=subnode, n_layer=n_layer)
                    # create 1D function, linear
                    Connection([1,0,1,0], lambda x: x, fun_name = 'x', parent=subnode, child=new_node)

                elif expr_i.func == Symbol:
                    new_node = create_node(expr_i, parent=subnode, n_layer=n_layer)
                    Connection([1,0,1,0], lambda x: x, fun_name = 'x', parent=subnode, child=new_node)

                else:
                    # 1D functions
                    # create a node with expr_i.args[0]
                    new_node = create_node(expr_i.args[0], parent=subnode, n_layer=n_layer)
                    # create 1D function expr_i.func
                    if expr_i.func == Pow:
                        power_exponent = expr_i.args[1]
                    else:
                        power_exponent = None
                    Connection([1,0,1,0], expr_i.func, fun_name = expr_i.func, parent=subnode, child=new_node, power_exponent=power_exponent)

        elif expr.func == Add:

            node = Node(expr, False, depth, scale, bias, parent=parent)
            subnode = SubNode(expr, node.depth+1, 1, 0, parent=node)

            for i in range(len(expr.args)):
                expr_i, scale, bias = next_nontrivial_operation(expr.args[i])
                if expr_i.func == Mul:
                    # create a node with expr_i
                    new_node = create_node(expr_i, parent=subnode, n_layer=n_layer)
                    # create a connection which is a linear function
                    Connection([1,0,float(scale),float(bias)], lambda x: x, fun_name = 'x', parent=subnode, child=new_node)

                elif expr_i.func == Symbol:
                    new_node = create_node(expr_i, parent=subnode, n_layer=n_layer)
                    Connection([1,0,float(scale),float(bias)], lambda x: x, fun_name = 'x', parent=subnode, child=new_node)

                else:
                    # 1D function case
                    # create a node with expr_i.args[0]
                    new_node = create_node(expr_i.args[0], parent=subnode, n_layer=n_layer)
                    # create 1D function expr_i.func
                    if expr_i.func == Pow:
                        power_exponent = expr_i.args[1]
                    else:
                        power_exponent = None
                    Connection([1,0,float(scale),float(bias)], expr_i.func, fun_name = expr_i.func, parent=subnode, child=new_node, power_exponent=power_exponent)

        elif expr.func == Symbol:
            # expr.func is a symbol (one of input variables)
            if n_layer == None:
                node = Node(expr, False, depth, scale, bias, parent=parent)
            else:
                node = Node(expr, False, depth, scale, bias, parent=parent)
                return_node = node
                # pad with identity connections so this leaf reaches depth n_layer
                for i in range(n_layer - depth):
                    subnode = SubNode(expr, node.depth+1, 1, 0, parent=node)
                    node = Node(expr, False, subnode.depth, 1, 0, parent=subnode)
                    Connection([1,0,1,0], lambda x: x, fun_name = 'x', parent=subnode, child=node)
                node = return_node

            Start_Nodes.append(node)

        else:
            # expr.func is 1D function
            node = Node(expr, False, depth, scale, bias, parent=parent)
            expr_i, scale, bias = next_nontrivial_operation(expr.args[0])
            subnode = SubNode(expr_i, node.depth+1, 1, 0, parent=node)
            # create a node with expr_i.args[0]
            new_node = create_node(expr.args[0], parent=subnode, n_layer=n_layer)
            # create 1D function expr_i.func
            if expr.func == Pow:
                power_exponent = expr.args[1]
            else:
                power_exponent = None
            Connection([1,0,1,0], expr.func, fun_name = expr.func, parent=subnode, child=new_node, power_exponent=power_exponent)

        return node

    # First pass: build the tree only to measure its depth (n_layer).
    Nodes = [[]]
    SubNodes = [[]]
    Connections = {}
    Start_Nodes = []

    create_node(expr, n_layer=None)

    n_layer = len(Nodes) - 1

    # Second pass: rebuild with leaf padding so every input reaches depth n_layer.
    Nodes = [[]]
    SubNodes = [[]]
    Connections = {}
    Start_Nodes = []

    create_node(expr, n_layer=n_layer)

    # move affine parameters in leaf nodes to connections
    for node in Start_Nodes:
        c = Connections[(node.depth,node.parent_index,node.index)]
        c.affine[0] = float(node.scale)
        c.affine[1] = float(node.bias)
        node.scale = 1.
        node.bias = 0.

    # map each leaf node to the index of the input variable it represents
    node2var = []
    for node in Start_Nodes:
        for i in range(len(input_variables)):
            if node.expr == input_variables[i]:
                node2var.append(i)

    # Nodes: count sum nodes and mult nodes per layer
    n_mult = []
    n_sum = []
    for layer in Nodes:
        n_mult.append(0)
        n_sum.append(0)
        for node in layer:
            if node.mult_bool == True:
                n_mult[-1] += 1
            else:
                n_sum[-1] += 1

    # depth
    n_layer = len(Nodes) - 1

    # converter
    # input tree node id, output kan node id (distinguish sum and mult node)
    # input tree subnode id, output tree subnode id
    # node id
    # Tree depth counts from the root (0), KAN depth from the inputs, hence
    # the (n_layer - depth) flips below.
    subnode_index_convert = {}
    node_index_convert = {}
    connection_index_convert = {}
    mult_arities = []
    for layer_id in range(n_layer+1):
        mult_arity = []
        i_sum = 0
        i_mult = 0
        for i in range(len(Nodes[layer_id])):
            node = Nodes[layer_id][i]
            if node.mult_bool == True:
                # mult nodes are placed after all sum nodes in a KAN layer
                kan_node_id = n_sum[layer_id] + i_mult
                arity = len(node.child_index)
                # NOTE(review): the inner loop variable shadows the outer `i`;
                # harmless because the outer range iterator is unaffected.
                for i in range(arity):
                    subnode = SubNodes[node.depth+1][node.child_index[i]]
                    kan_subnode_id = n_sum[layer_id] + np.sum(mult_arity) + i
                    subnode_index_convert[(subnode.depth,subnode.index)] = (int(n_layer-subnode.depth),int(kan_subnode_id))
                i_mult += 1
                mult_arity.append(arity)
            else:
                kan_node_id = i_sum
                if len(node.child_index) > 0:
                    subnode = SubNodes[node.depth+1][node.child_index[0]]
                    kan_subnode_id = i_sum
                    subnode_index_convert[(subnode.depth,subnode.index)] = (int(n_layer-subnode.depth),int(kan_subnode_id))
                i_sum += 1

            if layer_id == n_layer:
                # input layer
                node_index_convert[(node.depth,node.index)] = (int(n_layer-node.depth),int(node2var[kan_node_id]))
            else:
                node_index_convert[(node.depth,node.index)] = (int(n_layer-node.depth),int(kan_node_id))

        # node: depth (node.depth -> n_layer - node.depth)
        #       width (node.index -> kan_node_id)
        # subnode: depth (subnode.depth -> n_layer - subnode.depth)
        #          width (subnote.index -> kan_subnode_id)
        mult_arities.append(mult_arity)

    for index in list(Connections.keys()):
        depth, subnode_id, node_id = index
        # to int(n_layer-depth),
        _, kan_subnode_id = subnode_index_convert[(depth, subnode_id)]
        _, kan_node_id = node_index_convert[(depth, node_id)]
        connection_index_convert[(depth, subnode_id, node_id)] = (n_layer-depth, kan_subnode_id, kan_node_id)


    # tree layers are root-first; KAN layers are input-first
    n_sum.reverse()
    n_mult.reverse()
    mult_arities.reverse()

    width = [[n_sum[i], n_mult[i]] for i in range(len(n_sum))]
    width[0][0] = len(input_variables)

    # allow pass in other parameters (probably as a dictionary) in sf2kan, including grid k etc.
    model = MultKAN(width=width, mult_arity=mult_arities, grid=grid, k=k, auto_save=auto_save)

    # clean the graph: zero out every edge, then re-enable only the compiled ones
    for l in range(model.depth):
        for i in range(model.width_in[l]):
            for j in range(model.width_out[l+1]):
                model.fix_symbolic(l,i,j,'0',fit_params_bool=False)

    # Nodes
    Nodes_flat = [x for xs in Nodes for x in xs]

    self = model

    for node in Nodes_flat:
        node_depth = node.depth
        node_index = node.index
        kan_node_depth, kan_node_index = node_index_convert[(node_depth,node_index)]
        if kan_node_depth > 0:
            self.node_scale[kan_node_depth-1].data[kan_node_index] = float(node.scale)
            self.node_bias[kan_node_depth-1].data[kan_node_index] = float(node.bias)


    # SubNodes
    SubNodes_flat = [x for xs in SubNodes for x in xs]

    for subnode in SubNodes_flat:
        subnode_depth = subnode.depth
        subnode_index = subnode.index
        kan_subnode_depth, kan_subnode_index = subnode_index_convert[(subnode_depth,subnode_index)]
        self.subnode_scale[kan_subnode_depth].data[kan_subnode_index] = float(subnode.scale)
        self.subnode_bias[kan_subnode_depth].data[kan_subnode_index] = float(subnode.bias)

    # Connections
    Connections_flat = list(Connections.values())

    for connection in Connections_flat:
        c_depth = connection.depth
        c_j = connection.parent_index
        c_i = connection.child_index
        kc_depth, kc_j, kc_i = connection_index_convert[(c_depth, c_j, c_i)]

        # get symbolic fun_name: map sympy function heads to MultKAN's
        # symbolic-library names.
        # NOTE(review): an unrecognized fun_name would leave kfun_name unbound
        # and raise NameError at fix_symbolic below — confirm the supported set.
        fun_name = connection.fun_name

        if fun_name == 'x':
            kfun_name = 'x'
        elif fun_name == exp:
            kfun_name = 'exp'
        elif fun_name == sin:
            kfun_name = 'sin'
        elif fun_name == cos:
            kfun_name = 'cos'
        elif fun_name == tan:
            kfun_name = 'tan'
        elif fun_name == sqrt:
            kfun_name = 'sqrt'
        elif fun_name == log:
            kfun_name = 'log'
        elif fun_name == tanh:
            kfun_name = 'tanh'
        elif fun_name == asin:
            kfun_name = 'arcsin'
        elif fun_name == acos:
            kfun_name = 'arccos'
        elif fun_name == atan:
            kfun_name = 'arctan'
        elif fun_name == atanh:
            kfun_name = 'arctanh'
        elif fun_name == sign:
            kfun_name = 'sgn'
        elif fun_name == Pow:
            alpha = connection.power_exponent
            # half-integer powers get dedicated symbolic entries
            if alpha == Rational(1,2):
                kfun_name = 'x^0.5'
            elif alpha == - Rational(1,2):
                kfun_name = '1/x^0.5'
            elif alpha == Rational(3,2):
                kfun_name = 'x^1.5'
            else:
                alpha = int(connection.power_exponent)
                if alpha > 0:
                    if alpha == 1:
                        kfun_name = 'x'
                    else:
                        kfun_name = f'x^{alpha}'
                else:
                    if alpha == -1:
                        kfun_name = '1/x'
                    else:
                        kfun_name = f'1/x^{-alpha}'

        model.fix_symbolic(kc_depth, kc_i, kc_j, kfun_name, fit_params_bool=False)
        model.symbolic_fun[kc_depth].affine.data.reshape(self.width_out[kc_depth+1], self.width_in[kc_depth], 4)[kc_j][kc_i] = torch.tensor(connection.affine)

    return model


# public aliases: sf2kan / kanpiler are the documented entry points
sf2kan = kanpiler = expr2kan
def runner1(width, dataset, grids=None, steps=20, lamb=0.001, prune_round=3, refine_round=3, edge_th=1e-2, node_th=1e-2, metrics=None, seed=1):
    '''
    Train, prune and refine a KAN repeatedly, recording accuracy/complexity stats.

    Args:
    -----
        width : list
            KAN layer widths, passed to KAN(width=...)
        dataset : dict
            dataset dict (train/test inputs and labels) as consumed by model.fit
        grids : list of int or None
            grid sizes; grids[0] initializes the model, grids[j] is used for the
            j-th refinement round. Defaults to [5, 10, 20].
            (Was a mutable default argument; now created per call.)
        steps : int
            optimization steps per fit call
        lamb : float
            regularization strength for the sparsifying fits
        prune_round : int
            number of train+prune cycles
        refine_round : int
            number of grid-refinement rounds inside each prune cycle
        edge_th, node_th : float
            pruning thresholds
        metrics : list of callables or None
            extra metrics f(model, dataset) -> scalar tensor, recorded by __name__
        seed : int
            random seed for the initial model

    Returns:
    --------
        dict of numpy arrays: 'test_loss', 'c' (edge count), 'G' (grid size),
        'id' (checkpoint ids '<round>.<state>'), plus one entry per extra metric.
    '''
    # avoid the shared-mutable-default pitfall; [5, 10, 20] is the documented default
    if grids is None:
        grids = [5, 10, 20]

    result = {'test_loss': [], 'c': [], 'G': [], 'id': []}
    if metrics is not None:
        for metric in metrics:
            result[metric.__name__] = []

    def collect(evaluation):
        # record one (complexity, accuracy) point for the current model state
        result['test_loss'].append(evaluation['test_loss'])
        result['c'].append(evaluation['n_edge'])
        result['G'].append(evaluation['n_grid'])
        result['id'].append(f'{model.round}.{model.state_id}')
        if metrics is not None:
            for metric in metrics:
                result[metric.__name__].append(metric(model, dataset).item())

    for i in range(prune_round):
        # train and prune
        if i == 0:
            model = KAN(width=width, grid=grids[0], seed=seed)
        else:
            # rewind to the checkpoint saved before the previous refinement rounds
            model = model.rewind(f'{i-1}.{2*i}')

        model.fit(dataset, steps=steps, lamb=lamb)
        model = model.prune(edge_th=edge_th, node_th=node_th)
        collect(model.evaluate(dataset))

        for j in range(refine_round):
            # NOTE(review): refinement starts from grids[0], the same grid the
            # model was initialized with — confirm whether grids[j+1] was intended.
            model = model.refine(grids[j])
            model.fit(dataset, steps=steps)
            collect(model.evaluate(dataset))

    for key in list(result.keys()):
        result[key] = np.array(result[key])

    return result


def pareto_frontier(x, y):
    '''
    Return the pareto-optimal subset of points, minimizing both x and y.

    A point is kept iff the only point that is <= it in both coordinates is
    itself (so exact duplicates exclude each other).

    Args:
    -----
        x, y : 1D numpy arrays of equal length

    Returns:
    --------
        (x_pf, y_pf, pf_id) : the pareto-optimal coordinates and their indices
    '''
    # count, for each point i, how many points j satisfy x_j <= x_i AND y_j <= y_i
    # (every point dominates itself, so the minimum possible count is 1)
    dominated_by = np.sum((x[:, None] <= x[None, :]) * (y[:, None] <= y[None, :]), axis=0)
    pf_id = np.where(dominated_by == 1)[0]
    x_pf = x[pf_id]
    y_pf = y[pf_id]

    return x_pf, y_pf, pf_id
The $xy$ function is defined as follows:\n", + "$$ xy(x) = x_0 * x_1 $$\n", + "where $x = [x_0, x_1]$" + ], + "id": "970c8945fd5c9242" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-09T06:27:13.250344Z", + "start_time": "2024-05-09T06:27:09.779193Z" + } + }, + "cell_type": "code", + "source": [ + "import matplotlib.pyplot as plt\n", + "import torch.cuda\n", + "\n", + "from kan import KAN, create_dataset\n", + "\n", + "# Let's set the device to be used for the dataset generation\n", + "device = 'cuda' if torch.cuda.is_available() else 'cpu'\n", + "# device = 'cpu'\n", + "\n", + "def xy(x):\n", + " return x[:, [0]] * x[:, [1]]" + ], + "id": "8c68074396791092", + "outputs": [], + "execution_count": 1 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "Let's create an extensive dataset for the $xy$ function.", + "id": "10afe2e28e22096c" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-09T06:27:13.424018Z", + "start_time": "2024-05-09T06:27:13.251332Z" + } + }, + "cell_type": "code", + "source": [ + "dataset = create_dataset(\n", + " f=xy,\n", + " n_var=2,\n", + " train_num=10000,\n", + " test_num=1000,\n", + " device=device,\n", + " ranges=[(-10, 10), (-10, 10)]\n", + ")" + ], + "id": "933e311db73997be", + "outputs": [], + "execution_count": 2 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "Let's take a look at the dataset through a scatter plot.", + "id": "b6b4bca80383ec8c" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-09T06:27:13.886290Z", + "start_time": "2024-05-09T06:27:13.425002Z" + } + }, + "cell_type": "code", + "source": [ + "plt.scatter(dataset['train_input'][:, [0]].to('cpu'), dataset['train_input'][:, [1]].to('cpu'),\n", + " c=dataset['train_label'].flatten().to('cpu'), cmap='viridis')\n", + "plt.title('Train Dataset')\n", + "plt.xlabel('x0')\n", + "plt.ylabel('x1')\n", + "plt.colorbar()\n", + "plt.show()" + ], + "id": "e3810f6b64419170", + "outputs": [ + { + 
"data": { + "text/plain": [ + "
" + ], + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjoAAAHHCAYAAAC2rPKaAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOy9edwlRXkv/n2q+5x3mY19hgEEZN9ERQRU1qiIxsgVUTT+xCUa7zV6DdcsJCYu0RA1RrOYGL0qivGqKMQlboBgVMAIgoDs+zoDDLO+2znd9fz+qKWrqquXM+/AOzPp73zOvOd0Vz31VHV31befeuopYmZGhw4dOnTo0KHDdgix0Ap06NChQ4cOHTo8WeiITocOHTp06NBhu0VHdDp06NChQ4cO2y06otOhQ4cOHTp02G7REZ0OHTp06NChw3aLjuh06NChQ4cOHbZbdESnQ4cOHTp06LDdoiM6HTp06NChQ4ftFh3R6dChQ4cOHTpst+iITocO/03wxje+Efvss89Cq9GhQ4cOTyk6otOhwwKDiFp9rrjiioVW1cMVV1zh6Tc2Nobly5fjpJNOwl//9V/jscce22zZN998M97//vfj3nvv3XIKzwNf+cpX8MlPfnKh1ejQocNmgLq9rjp0WFh8+ctf9n5/6UtfwiWXXIILLrjAO/6iF70Iy5cv3+xyhsMhpJQYGxvbbBkurrjiCpx88sl417vehaOPPhp5nuOxxx7DlVdeie985ztYtmwZvv71r+OUU04ZWfY3vvENnHnmmbj88stx0kknbRF954Pf/u3fxk033bTVEK8OHTq0R7rQCnTo8N8dr3/9673fV199NS655JLS8RDT09OYnJxsXU6v19ss/Zpw/PHH41WvepV37Ne//jVe/OIX44wzzsDNN9+M3Xff/Ukpu0OHDh2a0E1ddeiwDeCkk07C4YcfjmuvvRYnnHACJicn8Wd/9mcAgG9961t42ctehpUrV2JsbAz77bcf/uqv/gp5nnsyQh+de++9F0SEv/3bv8VnPvMZ7LfffhgbG8PRRx+NX/7yl/PS98gjj8QnP/lJrFu3Dv/0T/9kj9933334X//rf+Gggw7CxMQEdt55Z5x55pmepeT888/HmWeeCQA4+eSTS1N3bet7xx134IwzzsCKFSswPj6OPffcE2eddRbWr1/vpfvyl7+Mo446ChMTE9hpp51w1lln4YEHHrDnTzrpJPzHf/wH7rvvPqtL5+vUocO2g86i06HDNoI1a9bgtNNOw1lnnYXXv/71dhrr/PPPx+LFi3HOOedg8eLF+PGPf4y//Mu/xIYNG/Cxj32sUe5XvvIVbNy4Eb//+78PIsJHP/pRvPKVr8Tdd989LyvQq171KrzlLW/Bj370I3z4wx8GAPzyl7/ElVdeibPOOgt77rkn7r33XvzLv/wLTjrpJNx8882YnJzECSecgHe96134h3/4B/zZn/0ZDjnkEACwf9vUdzAY4NRTT8Xc3Bze+c53YsWKFXjooYfw3e9+F+vWrcOyZcsAAB/+8IfxF3/xF3j1q1+N3/u938Njjz2Gf/zHf8QJJ5yA6667DjvssAP+/M//HOvXr8eDDz6IT3ziEwCAxYsXb3a7dOjQ4SkGd+jQYavCO97xDg4fzRNPPJEB8Kc//elS+unp6dKx3//93+fJyUmenZ21x84++2zee++97e977rmHAfDOO+/MTzzxhD3+rW99iwHwd77znVo9L7/8cgbAF154YWWaI488knfcccdaXa+66ioGwF/60pfssQsvvJAB8OWXX15K36a+1113XaNu9957LydJwh/+8Ie94zfeeCOnaeodf9nLXua1XYcOHbYddFNXHTpsIxgbG8Ob3vSm0vGJiQn7fePGjXj88cdx/PHHY3p6Grfeemuj3Ne85jXYcccd7e/jjz8eAHD33XfPW+fFixdj48aNUV2HwyHWrFmD/fffHzvssAN+9atftZLZp
r7GYvPDH/4Q09PTUTkXXXQRpJR49atfjccff9x+VqxYgQMOOACXX375yPXt0KHD1odu6qpDh20Ee+yxB/r9fun4b37zG7z3ve/Fj3/8Y2zYsME7F/qjxPC0pz3N+21Iz9q1a+ehrcKmTZuwZMkS+3tmZgbnnXcevvCFL+Chhx4CO4s+2+gKtKvvvvvui3POOQd/93d/h3/7t3/D8ccfj9/5nd/B61//ekuC7rjjDjAzDjjggGg5T5bzdocOHZ5adESnQ4dtBK4lw2DdunU48cQTsXTpUnzwgx/Efvvth/HxcfzqV7/Cn/zJn0BK2Sg3SZLocZ5n5InhcIjbb78dhx9+uD32zne+E1/4whfw7ne/G8cddxyWLVsGIsJZZ53VStdR6vvxj38cb3zjG/Gtb30LP/rRj/Cud70L5513Hq6++mrsueeekFKCiPD9738/2gadH06HDtsHOqLTocM2jCuuuAJr1qzBRRddhBNOOMEev+eeexZQK4VvfOMbmJmZwamnnuodO/vss/Hxj3/cHpudncW6deu8vEQUlTlqfY844ggcccQReO9734srr7wSz3/+8/HpT38aH/rQh7DffvuBmbHvvvviwAMPrK1LlT4dOnTY+tH56HTosA3DWCJc68tgMMA///M/L5RKAFQcnXe/+93Ycccd8Y53vMMeT5KkZCn6x3/8x9LS8EWLFgFAiQC1re+GDRuQZZl37IgjjoAQAnNzcwCAV77ylUiSBB/4wAdKOjEz1qxZ4+nTdmqtQ4cOWxc6i06HDtswnve852HHHXfE2WefjXe9610gIlxwwQXznnYaBT/96U8xOzuLPM+xZs0a/PznP8e3v/1tLFu2DBdffDFWrFhh0/72b/82LrjgAixbtgyHHnoorrrqKlx66aXYeeedPZnPfOYzkSQJPvKRj2D9+vUYGxvDKaec0rq+P/7xj/EHf/AHOPPMM3HggQciyzJccMEFSJIEZ5xxBgBgv/32w4c+9CGce+65uPfee3H66adjyZIluOeee3DxxRfjbW97G97znvcAAI466ih87WtfwznnnIOjjz4aixcvxstf/vInuWU7dOiwRbBAq706dOhQgarl5Ycddlg0/c9//nM+9thjeWJigleuXMl//Md/zD/84Q9Ly7Orlpd/7GMfK8kEwO973/tq9TTLy82n1+vxrrvuyieccAJ/+MMf5kcffbSUZ+3atfymN72Jd9llF168eDGfeuqpfOutt/Lee+/NZ599tpf2s5/9LD/96U/nJEm8urSp7913381vfvObeb/99uPx8XHeaaed+OSTT+ZLL720pNM3v/lNfsELXsCLFi3iRYsW8cEHH8zveMc7+LbbbrNpNm3axK973et4hx12YADdUvMOHbYhdHtddejQoUOHDh22W3Q+Oh06dOjQoUOH7RYd0enQoUOHDh06bLfoiE6HDh06dOjQYbtFR3Q6dOjQoUOHDtstOqLToUOHDh06dNhu0RGdDh06dOjQocN2iy5g4BaAlBIPP/wwlixZ0oWK79ChQ4cOlWBmbNy4EStXroQQT56tYXZ2FoPBYIvI6vf7GB8f3yKyFgId0dkCePjhh7HXXnsttBodOnTo0GEbwQMPPIA999zzSZE9OzuLffdejFWP5s2JW2DFihW45557tlmy0xGdLYAlS5YAUDfu0qVLF1ibDh06dOiwtWLDhg3Ya6+97LjxZGAwGGDVoznuu3YfLF0yP6vRho0Sex91LwaDQUd0/jvDTFctXbq0IzodOnTo0KERT4Wbw+IlhMVL5leOxLbvjtERnQ4dOnTo0GE7RM4S+Tw3ecpZbhllFhAd0enQoUOHDh22Q0gwJObHdOabf2tAt7y8Q4cOHTp06LDdorPodOjQoUOHDtshJCTmO/E0fwkLj47odOjQoUOHDtshcmbkPL+pp/nm3xrQTV116NChQ4cOHbZbdBadDh06dOjQYTtE54ys0BGdDh06dOjQYTuEBCPviE43ddWhQ4cOH
Tp02H7RWXS2Adx5/b343mcvxf23PoRFyyZx4quOwwvOOAb9sV40fZ7lmJ2aw8SS8cZN427/9X244G+/h9t/fT+mN85ibKKPY154OF7zBy/E0w7cfWRdpzbO4tLvXI9Lv3MdHn1kPbIsx5Jlkzjy6H0hegL33PkoRCJw1LH74bTTn42ddvHDoN9/3+P43revw113rMb4RB/PP/5AnPTCwzA+Hq9rDGue2IQrfnYb7rhrNXZYNokXHHsADj14JYR4aiN85lLi3keewCXX3I47HnocaSKwyw6L8ODj67F63SbssnQSLz/2MJzy7P3RS5JaOf958z347jW34PFN09hjp6X4H889HM/Zbw/c+9ha3PLwY+inCY5++p5YNrl1h2jPpcTl992Db99xK9bOzuBpS3fAaw49HM/YbQUAteHhDY+twsObNmCniUk8Z/keSJ7EjQ9juHX9Knz7gV9jzewUlk8swSue9kzst2TXaNon5jbhslU34fHZDThw6UqcsuIwTGVz2JjNYMf+Iown/dqy1g834YePXIVLV/8SawfrwZBYlEzgOTsdiufsdDBuWH8Lrl17EyTnOGjpfnjpipNw2LIDnoxqj4x1gwexbvAg+mISKyYOgyD/Hp7N1iLjWczma/DQpssxkBuxuLcn9l3yMvSSJVg9dTk2DO5EIsaxYvIULOnvWypjkK3FE9Pfw7qZyzDMH0UqlmHnRS/HzotegURMgFkCIBtlmDnH9Mz3sXHqAgyzuyDEMiyePAOLF70Oidihtj7Mc5Az34ec+xFYzkD0DkIy+VpQuveWarKnHN3UlQIxbzsu1f/5n/+Jj33sY7j22mvxyCOP4OKLL8bpp59uzzMz3ve+9+Gzn/0s1q1bh+c///n4l3/5FxxwQH3H8KlPfQof+9jHsGrVKhx55JH4x3/8Rzz3uc9trdeGDRuwbNkyrF+/fotuAcHM+Nyf/T987aPfQpIK5JkECQJLxl4Hr8Q5n3k7fv6da3Hrf92J3lgPBx61Lx57aC1+/t3rkA0yTCwex0vOPgGvfvdp2HG3ZZ7sbJjj4+d8GVdcfG1l+S8+61i8629egyStHoRd3H/3o/iT3/sC1j6xSVcAQBDmnAUBgiAEoddL8f6/OwvPPmY/bNgwg4u/9l/48vk/hUgFZM4gIjAzdt1tKT72D7+LPfbcCcyMn/7sNlx08bW4/Y5VSFOB4449AK864zlYsXwHfPLTl+Kyy28uP5oEHHDgcjx9712xYreleNGJh2KvPXaqrU+W5Xh83RT6aYKddlgEQA3UM3NDTIz1vAF4enaAH1x9K358ze3YMD2HjCXuW7UWc8PMNgUnAByyJYggmXHo3svxL+96JZZESMqm2Tn8z89cjOvvfcSmTwQhk4ydlk3giakZW9dEEF58+AH467NORT/d+t5h/uuhB/Cm/7gYU8OhvjcAgvr6+sOPxEv2PwDv+/mluGvdEzbPikWL8WfHnITf2f+QeZf/4NQ6fOmOX+KSh27D2sE0+iLFETvtjjce8Fy8YPnTkbHEX173bXzrgV8jIQHJEqTb/Nhd9sUnn3smfrnmXvz00dsxmw9x/9SjuH3jQ84tzkgE298pJThwyQocvGxP7LtoN7xwxTOxQ3+R1ee+qUfwxzf8IzYOp3QrIAi2z+gJRiLU8l4BAQmJV+/5Mhy902F4ZHY1JpJxHLb0YDAz7th0O3619lqsy9ZhWW8Znrfz83HA4gNH3mpgkM/gurU/xHVrf4ANw8cxkSzGQUufj+fv+hpMJotxw9pv4+rHz8es3Kh1ZkyKZTh21zfj8B1fjoemrsavn/g8Hpu9qWgXMFJIEDEEJMZFDrW5QAqGBCCxYvIUPGu3v0YqJjHI1+DuNX+NJ6b/HQlJMJuuRN0xqdgJk8kOGGR3AEiwdOIU7LrkLZja9BnMzP4QQALAbGhJSMRyrNjtYvTSfcA8g9mpf8Ps9AWQ2YMgsQRj4y+GmPs5hHwEaqJDahkSydjJEGIRQBOgs
d8Cxk4G0eY/X0/WeBEr4/ZblmPJPPe62rhR4sBDVj+p+j7Z2KaIzve//338/Oc/x1FHHYVXvvKVJaLzkY98BOeddx6++MUvYt9998Vf/MVf4MYbb8TNN99cuRnZ1772NbzhDW/Apz/9aRxzzDH45Cc/iQsvvBC33XYbdtttt1Z6PVk37vc/92P83dv+tThAVBAHZgAMMT6G0qieJLZzE4nAzit2wCcu/XPssnJHm+SzH7wYF33m8kYdjjzhIBz9W4djauMsdlmxDHkucfOv7gMzcNhR++CUVzwLixaPY9OGGZx92scxtXG20NG8ZQGqf7LH1XcigHop9j5gOe66Y7VTT3j5hSAsX7EMn/+3t+MfPnUJvvsf10MIgpRsz0vJyFMRjhRF+YDqtzTJkpLx8hcfiT98+wuRJn5HMDM7xBe/9QtcdMn12Dg1BwB4+l47Y7ddluK62x/CzNwQY70UpxxzAF587MFYPNnHuZ/+Dzy6dhPYLZ+KutvjYTto/U8+cj987G0vL+l+zvnfxWU33gnpPKas68KODLfei8Z6+Nc3/w88a589yo3hYNPcAL+6/yEMc4lDdt8VK5dV37uDPMfFN92Mr1x3A+5buw5LxsbwisMOxu8+60hLWHZbvBgiGFTvXvsEPnfdtfiPO27DuuFcSVcPxIDgIo2T7uMnnYYzDjy8tj4AMJdn+P79t+L6xx9GSgLHr9wXx+/+dPzgwVvwh1dfDAmGaUpX1d/e6zAsn5zABXdfHX1/JV1HEjl6Quiw+CplKiQSYgitu98EitElxEiFwNv2OxWv2+ck5Jzjzf/1V3h8bp0e6KuahdFPcoTGyJ7IkJDqA3pEVgbAEGCkRGCSOHLZs3DWXmdhzeAx9MUY9l60L5KaQXo624AL7v1TPD53v5LG7u0qsHNvB0znD5tWAUHa8wKMfScPxaOz18EQEgLrW179HccA42IYaSclYdeJ4/DsXf8av171Kgyy+5BS1e7bSl4fuZajiM0ESfQpFvclQS89ALvv+m2sX/Ma5NkNVo6pCQBMIEVCwh5LjHcHCRAEgBxI9gPt9AVQsqKyHevQEZ2nHtsU0XFBRB7RYWasXLkS/+f//B+85z3vAQCsX78ey5cvx/nnn4+zzjorKueYY47B0UcfjX/6p38CAEgpsddee+Gd73wn/vRP/7SVLk/GjcvMeOPB78bDd65SpEAIMBdWDhfUS8tvbYbsJAkoTZQlCACIsGznxdiwbkZ1+HVve71U5RcEIQTyXHfIQukjmTExOYY//buz8KmPfA+rH1pbJjkElHppc04QuM5a5JICAC982ZH44Y9viibNCUBa/UBbUpAW8gjAa04/Gv/rTSfZY6sf34BzPnoR7nlwDWJPhhTaKhUMxOac9XrTJIeFroMZMZh1Pl8GAfjOh96MlTsXlrcH16zHaR/+fLkuAgV5il0+BtJE4A0nPhtzWYYl42M49fADceCKXQAAwzzHX37nUnz7xluQy6KSx++/Dz78Oy/CbksWe+JmhxnecuHF+MUDD1oLjK+PqtOeS5fiLc85Ckeu3B0X/Pp6/Oz++/Do1JRqG2I1Frko6c6q/Uibe8C2jZaNjeO/Xv8/MZZUD9K/fPQB/P5Pvom1czNI9WCVscTTFu+AhwdPAOCYkdGqkiYMiGJgLaczxFo6t7QiGxO9vDYPAAhS1p4/OviV2HV8En918+dAUUuOn58gMZa6rS4hAPRFBgFDsMjLAwAp5egJCUFF3iXpUrxkxek4adcXRS09Fz3wEdyy/me6pfz6JMjR18SD4NzHYAA5JsUQKRX1MaWSJl9EjCViRlt2KiuM/Za8FGumvokespIOIXrI7LUwr1VLqEwMDZaNHY9s+PPoOQIgQJiA6k/71LN9ro8ESJ4O2uXbIGpn7XbxVBKdW7cQ0Tl4Gyc6W599ezNxzz33YNWqVXjhC19ojy1btgzHHHMMrrrqqijRGQwGuPbaa3HuuefaY0IIvPCFL8RVV11VWdbc3
Bzm5ubs7w0bNmyhWgCDuSGm1k1hav20R3KAYrfbYj5adyVZDvR78EZmIYB+ryBGtocnrH98k997CFHuTTTJARGYYUkOALBkSJZAIjA9O8Rf/sGXCzLjyGECkMQfMjWV0/4BZACXfO/XwHjiHSMoIzOSevM8wRjBildUBnDhd67FxKI+liwZxzW/eQA/veZOSCPYJRLs/CV/YDEkihhgCUt2fOtOuX2ckQAM4JrbH8TvHFcQnX/90dWVleG66hKQSYnPX34Nkr4Ag/Evl/8CLzx0P/zNq16C13zuq7jjsTWlbD+9816c+dn/h39/++ux4+SEPf4PP78Kv3zwIU/logqsLoAAHtywAR/48eWa3MI2EYN9jmfkuO1sFIfbtsVwuX5uFt+581a86qC4Vee+jWtx9mVfw1yuBsfM2YjwgU3rwASk/WpCwQCGueLBcZcgtlYJMGkiwNbIKhkB+SnXwTye//euH+HE5fvp6bG8luQYa5HJK4hhuHPGKQCGyBk9kTu3lrK09EXZGrIx24ALH/wSNmXr8fKVZxYlMeOhmTtxy/qfQVorTJEvgcSYyIMppAJCT0K5Z4prTeoWYW4kOYQED236PsapPp2RPLQ2FzU9JsAYsMB4xKpDEJUkR0lQ/ihDSEygX0FyACAH8juAwU+BsZOalFxQ5Ftg1dV8828N2G6IzqpVqwAAy5cv944vX77cngvx+OOPI8/zaJ5bb721sqzzzjsPH/jAB+apcYE8y/G9/3sZvvF338Ejd68GS7bkBpqoxB4495ibhsbGlAXITefmD2VJWSY7muTEoAYpx/5ve7bCcsGA6vld27crow3JccYLBsB94cmzUlvypZg1Is8lPve1KyETOHo7GYLMljAJx0okCsGmCaSR47V7WQE7tSWAv/rapfibb14OIQiLJ8aweuNGf8x00rcBA8iG0pKOH99yN17z2f+HO9Y8EU0LAKs2bcIbL/gGjtxzd+y4aAKz2RAXXH+DN3Xmwpj82akYsW6jYMSLNKdH9srawKv/e39+CQ7eeVccvov/vALAF279JQYyizpOMhiUVN6KgVbldKRJDlBcX3NGWWkImUyRikxf8rAQMloAzHhisAnff/ga9BJlmQmvpxHf0ySnmDpij/SYoxLAQKboi8ym7YkqC5PCD1Z9G8/f5WTs1N8FVz3+A/z40Yuwafg4xkX8nSGlrKb9CBIJcofslGuvCE8TeZHIMcsAIUWfBvWJA+QgSAhkiE939YIXlGodGHOcISFCj9PSdKxCCp79MWhrJzqMLbB7+ZbRZSGx3RCdpxLnnnsuzjnnHPt7w4YN2GuvvTZL1mMPrsE5J/4lVt3zqHecpQRINDoSeiTIIRmK2wR562S5vVjNCiDAeVOLWSnqCFVMSBOYwUTghMpTYG3KcUVVHJcuUfIsDGE5xWmPEJl8DgSjsAyhOp0rZ5hLDHM1VG+cGYA1P2XHOmKJVosqu2SCAeTEuOOxJ0qJ7BSYznDL6sdxy6OPK/0I4IYFb2wKcEipsfKwmY6q08+RBGL/GjCsFW1WDnH6dy4ASDlyH7/HPnjr4Ufj2BVPw3fvvaU6VL2AJSqjwyc5xV+lfS5JOyAzBNXdipqca1lzMgWQKYuNsXjBbw+X5EBbK1w9XNkMRsYCPVK+Q2mDRYRA+Oljl+GeTb/Ew7P3av0STMkES5OZgOjJJoMpAEbGCVLKKstjhJavKknANPewiAcNaY1UwJJUAJl1JvYxymCXIUfGwBwyjKOHfsmviQGei+btsPVhuyE6K1Yox7DVq1dj992LZdGrV6/GM5/5zGieXXbZBUmSYPXq1d7x1atXW3kxjI2NYWxsbN4653mOc1/yoRLJsRjVfcq1dLQlHLGyRiAhJdnNr806XXM5DED2hJr+onZ56mSVLCzmuGunbyiHAVAekCP3MpEjQmqSYuRGYAd7lyRE5EA401VRK0i1fAmtR4S/GutUqL8pJpyOa0LpjnXZTJW+piCXcIUJtP52S
oqBKx68G5c9cBc+dNyLMJ0PK4VT1Ek4nlYyaSdfo3JdXtJ5gNQjJdXyGWagJ+RMIBZIKA9IMZWccJsvNSFngZSlt2ahDletuRSS15WkD1kghfFDYodMNOvQhAGnGEPW+A4kAGRI0OM8mtaQ/ZiYLHKztq1BLN0sD0Eg9Dx/HAnqHdpS6sJBIkb5RpexrWO7CRi47777YsWKFbjsssvssQ0bNuAXv/gFjjvuuGiefr+Po446yssjpcRll11WmWdL4pofXI/7bn6wJgWDmUvOxy5aLx1tmw5oRVbYlWn0C/PV6S3ry2Dowdl9pasiCy1kASjd7QxjzQgtX5WiFHEQNWm4SFf2QanOY9/mjUOzzue80BcYpecxo0bVk15JLopDIkftSEEgZfkxZSX6U0ee3XYyP7RTc1S/BG4GAMVmg++96hKMIQVk5JZz6s1cd0v6ZZu0bUgDs1lb1AQ3DSNnAQZhKKkijXu0zVBd5G16R2IwZvKN0XMzcgwb83FM58rvj1vIU6VX35is/884qZVnatCjHAMWMB5MbnrzfZaTCusWQUrjuVPcuhKo7Uvd8kMMeBik6gMTp9fK2hogQcjn+ZGb+3a5FWGbIjqbNm3C9ddfj+uvvx6AckC+/vrrcf/994OI8O53vxsf+tCH8O1vfxs33ngj3vCGN2DlypXeEvTf+q3fsiusAOCcc87BZz/7WXzxi1/ELbfcgv/5P/8npqam8KY3velJr89V37kWoslXRbYY1Yh8wuM7EiiMYh0y+SvyWJJDRSdkU+p8dqCvksF6bidy3hzhNPAbqqqCKadKFkHZLl2fJv1XxmbpuPjjfgBAEsC9ilfJIH9rbmkIkSjIl/sxaYyhgaCtPE2X1EyHJKjW17W4VJwuzal4quuAZBHyp6xRFM8blptyZW/ESfPtu3ZmAGYBSAr4VWFOcvm4+93oai6EbGpXD6rSkqnFI+bSIYddUnntVTviVJZvLDB5gz4M5RhcDcKQE2zKxwE94JW6FCjykLPyj5nmPqZlDzLiKU9g9JFDEGOWe8ghavVTflGEOaSY4wRSly8ZGEJgBj299iyONehho0xVF6N1HUhGzsCQGVlF4aKi3XOYF071MNEOHwWJZdUV6LBVYZuaurrmmmtw8skn29/GT+bss8/G+eefjz/+4z/G1NQU3va2t2HdunV4wQtegB/84AdeDJ277roLjz/+uP39mte8Bo899hj+8i//EqtWrcIzn/lM/OAHPyg5KD8ZGA6GaBytmuYNhAAmx1WPneWA0H49DCXbkJam6LJpotL0+mqqqIlg6WXhDAA9AdOjKKuELl+y8mwMLD1mXGf3uPbFMZB94a/Y4upmIACUMTh1WAHIWl5I+GVzQsWyb7Dqrc24w4jHvGHlBCpTtLJ46SyNICjyZHSzjs1uVYzLgbmccNK4aUPBNdaaUWDJjlOucq3Vgg2RKhnG9AFzK4XXLzRkcPkca0tLm2khC0lgYfxefD8O80bvvgsUfwmsfW04aOvq8gvZmRToJVXPTVEuoJdbA8ilcqClBN6UWc7FSwSRDq1X62ek1jaZpeQZJ0gqHYgZCSQS0XSDEnIkGHKifJCQe/egdKuvLSmz3MMc97BEzCJ1Av09Y8f/D0vSRXhs5lpIDLHj2MGYGd6HVdOXgSLOw+bdhYiQIUFWmndtnlKbhcAs99HjHCkYKdi7lxMwJgWQguyaiiqio7IJ0NjJoEVvA/Wf2dB2WwekJofzlbGtY5uNo7M1YXPjIlz09/+BT5/zxUZzKno9RPu4XgoxoUhcIUNZd8g8uaHDcKzHFgT0eurj2uojVpLFyyZVED2djlO9TCNGqMwAYoo2hwja78bvqliQ/sASKQ8OGfGmGcxfgnJcTilixdBv3r1yO7ARSJp0uHmDlW2y3441GAdfmTqVD9Pov7IHPU3XJBS2DZRlKThu4LYNKfm1etZNxWnxRewe1n8LXTgyrVSWwdHpJ3VOAn24nKH4SwASbpRNgNN+m
hwJBoOR9FyWpkVH5SmzWS8tHHkJjCSpej4L8mIITCokeokZ4N0KsV6hpZZB+zqoC5iK3CNKCUmMJcWSbqEnEWLEBVAro3rC1V0tMfeIG6s6jdEQqWjnV5Qix5LenP6unKeZbO2jeQQYS2kGkoAMY1gxfjievuQFOHDpCzGZ7ggAeGjTpbjm0f9jnaxZW4ZyCPSQY0lS5+zL6FOOscqAgr7+EyYoY0TdxQSME0EQFeQ8BE1g6fKbIcT8bQNPZRydX/xmBRbPM47Opo0Sxxy2apuOo7NNTV1tb3jRG05Er2EPJ0pTiDQF9VI1+PdSYNEExNLFluQAQBFjBzBDN3o9IE3919cQaQJMTAD9vk9yjDCz7Fx//vdHzsT7/+UN6PVTCLMSyuRxlsSrvOpjSBcnpIL6pcXSdaWu7ugMaapaamFfI/1DRlCSCDz7OfuAe6SsOFoPWz4hao2xnZs55eT1E7YjOUYWk6NvxOxPKAhE0xSSJY1wnIuNbAo+Yb4WcmtPm+YAIJggpF4mXEGuYiAQKDKlYVdllaIfklPZZj1LpJahjKGSwI7vTr3PDZWIBKMwbnp+IiaHJ4+QyQRzmfDeggVJuzKrTHIK5TOZIHf8dXIWmMsTW5Z0bRjsN4ogiVT4DwdDYE6mmMsTDFlgIAXmZIKMEwy41/J2JmeKSFlX5iD09FSVAJVnE/cx5B6YJR6ZuQFXPvbP+OJdZ+JXa76CnAf4zdoL7RQbMzBAggwpGAIDpBhyfHrLxGPqIW9lNR2vITkAY4oZVHOPMQM5D/HEI0/H2tXHYWbjp8ByU3PBHbYadERnAbFkx8X40y+9EyKpWEYuhCIqDmisX8TYCeDJ6PfVVFCSKBIzNqb+pmnx6feA8fHmAdwhOh9617/h0m9fh3+++J04/fXPgwijEVfK0h1jbM6ACHIs8QlOhf+OyqcGMdM3cUqg8RSHPWsvXHvzAyaJm9xqUNU5W64hUF2u0blF58ruKqmK2QwVZRkgX8E4NFFj7eir+WytLvZUjT8POfrFfDDiP0rjbHuYfLYCCMi1/a/4mzt5Q3G1I53KL3Nrfhh5ISNAkEzIc7LTKUoGV5AmBjMh5wQDh6QU2tQ9boxh7j9PORNm8wRzGSGXhIwFBpIAMtsrSCSkVkjFrT2KdOScIOdE276U/81Q1vvJGJ3sN+etot1UonC7Dn00x9WPfxZfvvMVeHDmRmzIJyBBGHAS+BURNsoJDDgpdRcShIzJEr+6OiRoXvbPIGw0L1vs1lf55eSQmOVZDJFhmN2H6Y0fwfrHXwEp1zU1woJjvo7I5rOtoyM6C4zjzzgWn/zZh/D804/2ekLq9UD9vkdeTOA/j9BoMkS9HshYcNJUbdMQs+S4PU+vVxxrCQbw00tvxkf/7Bs47rcOxcq9dmolQw2o7L0eMxFkTyh/HGdqzZumC/RnKAdi2SfkPYIcF5A9AU4I19/8ILhmQpkBUFP0q/hoUdShNiSxghQAp1DELYFdGi71OZno89UxGSv1j1psIofcFVACUM7LAeGx6QBFJgJLBoyMoExJ7B+PyC4hkK3K1qOzlzesoGZ4mZ+5NKvqZSufYG3ZaQJHnXiLlTy5FMgl1Q6eDCODMMxTzGYJ8lzUt48tR+h7uGCSCUn0EhMvS12UnIWKkuwsa28a9AHfGXoqH0PW6ESt6q7SioaVa0HOCp8iZmBKzoBZkbB1+SSmeAKzPI4Z7mMgE/0+QdjEE1gnJ7FJjmGWU0zLHma5jznuo9hJqxqiudEBABkYmzjHEMUqVwYwB4kZ/SZgDIw5Z8izOzG1/oPtGmIBIZm2yGdbR0d0tgIccswBOOvPzoCYmASNj0OMj4PSyP5VIRLlQOwZ/LUVx76ZuNNK5mOOh9GQG8pjQE07JQluv3UV3vOWz+Ohe9fUW1+cvEywfjgSgBxLVITk0EJF5L3wm97V+MhwSnp7CcfMn8vGDrjV49o0WmT1hcgEelUWeYYJz9Af84lpsJBIwF86YMSb3
rcIL1PE3AnKEDotM4q4Oto3iLUTtCU/LskxclhFjWWjhzHSQVulJBRharIyefUnp4JVIKVgVuhfbohIScTwWDM3WSJU2voVVAxRG4xPE6tgd9dRVlFJ7QBDOmcvEj455wRZUB/JTd05BQM/YSoftyuaQrBjtss4wbQcx5ATyFaEJyzLOaOtdlKnoSBfhgSz3HMsOAJz3FPH7IYPqnxp5cURvjPlTNEVaQR1606zxHrOsUFmmIIiPq4c+7jxEIOZiyHl2rpG6LCVoCM6WwGuu+JmvO81fw8SAiJN43tPuRBCbbYJgCJrZv1BVRR+OuaTpsWKJs8hobr3YkD7zzi3DJF6+2QoshPRmQHIVKipqX4KTgRYCOWr04IgmU00pSDIMVFYXCJ7a80XNsJvDQShmAYJdSU4K7/KIDiWlfBchS+PPeQGHnQJlNEnsNSU3IvMcbes4DxMOYbkBAnYTReppj0UWnfI5Gc/aKGpi1G2liSp0v0psxg75KIe+jdp5Y2lpcrvAyC7A3nAsZ1ZVkbSuNzdn1IRWs+mN2NTm4FMi6apST+UiadH/VSOkl6WR5jOejrGjJOalTwZkCl1udQUWuWjx3oFWMPDZC9RSZAihgN2mX1x95mXuJwJU3IMc0E7uHkyKH+pWU6wET1M6c9G9DDLiSVzJQ0auhU1mzpEPqzeKmhrQDd1pbBNLS/fHvHd//tj/NM5F5RPuCufAEVsxsfsOd9Sz356PS1ELmEKn1zhjDg1TzU7abgfcZx2psdcK5Idf/uJ2spAa1ws9iWHIHF0+bsZbyXB3wLCHRxDZZve7iqnGwKZEadl2xaGIwbnOInnC0GybHEhQFkr3OXljGJ1S83KKFOclACn1cW7ZCc6LlDwN1IOA7XtbGUzynuG1crXOXP4q94c2P20IiRO6VRYcFwHYQsJ9VwQFzo6yYikui2Z9LQKezt/MwO9NNpyFfWR9tsBS1Zir8kdcdWaG+ymmVU5GITcbD8HFdcnfssrp2C1M7iSoLZhKJaBa81BgN1qwgejJySGMgURQ+hnWDo0izizS98lCIIZkgRyLvbeMt2AuUfGqT76sdJWtY/kgmq4dckhMJRkDbcCuVaJIJkxA7VL6xT3AQwwRr5zMpGSN622EoV/4xAGEMjQw1Iegpzup+3bvwqP0W+ZemGQQyCfpz2jaV3btoCO6CwgHrnnUXzq/3y5dJwccqL2vCJLciqnswKyQ4CzYgoeIdGJUDVald6Rg5VSEYVVLhPgj1lPj+h9t0rG6WJcAlC5GsroIaB30GW0ipQcq5p9Q9fPPJulG0Y7UucI8EMXsV58ruso3WCBbqdq/mthYWKXcRgx5Bx3CAIZnRvEGlVaG7gkyltCtMhLIMRjHfhinGgHFs1T/YbdwTE/6bwmSTDtZS1aJpOZgis4dyCbHMZmNVPPjDceqMLdzUwJwDAD+v0277hFvrfu/yK89YBTIFni5B/fYM+6t1FBPtXRAacQOlCNBDCexIeboRTeEnRFdtSu5qStFWNkYuaUte6Ru/lnLAouI5MCiS5/KBO9fJ4wxypyck/k1nojpUCPhvYeiL6PsCKVRGa7hqL2EgLEjAQ5UpIAJXaglUgAOURKOXokja0OADDFY5jNJfqUgcCQICzhOUBQzUCv6rsOfSzjIcaovGt7kTKWfTHS3hEVsrcO8BbwseHtwEenIzoLiO9/8ScgQeAKB1ljZSDtNNx6uwcD10/E/PYKiPdEtgM2U1VJw1Sal0uVo5YNU6XO5qgNOhb0jJ4lyRwIp6uCvtsOsI7Jwn1GbZBAO90HQDCkoHJPJqGdWpS83ERW9l8Ky2hh0VHbIxQq2Jg/sdtg1D6mwar1pOUNEFbHBP4bSb5zX0z2epgRNbtZR8i8a2UAoIhMfMRSxUmdJnqRHfbFCEhRTHFFCo/f9WCcufexAADJxR7vYfsUtp8yhjKFZMKiXgb/IrHarVuq+D2+NUMNcAxAklS7a5TEK3+jevgbO+RQy9SlQ
1CGMrXfe8jQS1S6BLL0OFgDNWRAQFzlWG9OGmoLzKEPyQOMi/LmoTkEZriwsCQkMcZ6PrThmVzPPeyMod7h3IeqKZVu3/FFrwNt5RadDgqdj84C4q5f3w+ZV3c0qu8mIE1GJzlGAFD9kDd5FDL85bsE5adjPnVbC7trSqvF1x9PimLtQBY6DITFGgFckBw2K50SZ08mNFhLtIXHfJqC6gGI+r6U6mWviUNyXOXLL9TFp65sFHWvS0qlL0FZNXVUanBjO5i0nu7snqjN5UMCAGEmyyBQ4SRM7OyT5ZNRb/qmga9Lqb24Q70tKVWDeZYL5LF9tQC9JFmfY4GfPnobXnzZ3+BTt10CQYTl4zsAUD4o7icc6Itf6lvOCaYzgUwKDHL1ySUhYQkispGUbV24iGg7kIkXadn8HZV3GuTs+sT4UoZIMZVPYFPWR26Mc06ZqnaA9yAE8K1MLkiX0WsVrXeW+8pC1fIlbRM74QD0lxjJYQYE7YxFS/+iWYkFRuejo9BZdBYQ/fEeiAiNG81tDslpY1moSWesScqcIQE3sKFNz4rwZFJZf0bUs/zW73R7dosGp2MsvR46QgIjjyTYTUGjsc3My3/F4B7d7LOpPlIRF297BEdVwCc2TTubGwg4YdhjusI5p7ez4IiRyuMakVccgrJqcORSFk1NanOj2B5hoT72tz4Smy4LNRDs6+ZcnzxnUCk/K6ITjFChxYVE0yNBlthH0zAB5PiP5AQk7N6m1qFZlaP2U0pI7bj+ubt+AiLCWU87CX9/+7/XtIHWP2J9yWQK4tzWTbJaZp7qhhVgJKSHJfPsMiMhwkBC+9QYl2XlB9QXhdecdUDWDU5Q0ZgT57zQRDfeJ6k7RIKQoYcpSVgkZuGvOVPfRMWDx6yenpwMiYu30oB7GKf4jvXmVlC2HKq+poHucxCYYQIxYxxA6vhCuiQHADD2fFC9WW+rQK7vkfnJ2ELKLCC2/iu1HeOY054ZWExILRnXy8bNsaYdzAGoPO4WDr36iMtAg0VFD/QEAGNpoZ9nUdHfU+HvS2UF1etsuzqXu9hlz0HXyPDKdl7Uy3INsdE+Qt6Ul/cJCnfFuYvLgMY4MWzKdbQLjRilbSlGeaV2ynflejuGG5HOUvPQGMQAuOb1hs2+X5H89r5os6Go9XHSH2NNsavLSlopwlJladJFV99S9qID3C5mTlxGxRmSSJzIw+r20RtNSu0Ibiw5IOvX4Or7xbt+ihN3fQYOXbJX1JiqjhnCFZ7X1gxOrOyUGKlg/VsRlCGnGHDq+FWQirAse8hYqPOyh4FMkXFaLOFm5S8jnaGdAeRI1HEdoFM0+GcZPZSuKdbli/RqsPqb3Sz9lkgwy33MyD421ayoisX/YZhICzpIIlLMcHM/GKvDHBEGXFi+DCSAGQDrZ76LYfbQZsjusBDoiM4C4qRXHYOdd99B/UhTUL9viQ71eiqSsRDtdjBP9Ejn5msiR4i9fWukAmY37dppKHMumMZS5CTSmwfllCwnkUhsBOCQg1aUZHldpzsiaxmsB9jKV3nX18fVLWLNaVr+zQJq76nAkmLJQorNftosIZQA544aYZwbQjFlFJARqevV9HJnDQHRk+qPAHlbM0TJkG3a8jWLxswxhC1ieTOC2VYOxcGq+rDYDLJTTf3NsnPSrLu43SnyMRYQfy+3Ief48arf4N5Na8rTOZbEst3KoWrqJpNCW2YQpCsaPvMutCYeMvFuDUWMVITkWdnDUCYYygS53XpB5ZMQOkIxVejlt5VyBi5Ix05jR3gWEcnkLWc3lqQyCAPuYU6WmTnrqMvekvgIoZrlXmWcoLJE3xl5CGAKwDQDMwxMMTANswpJ4oHHzoTk2SbBCwpD+ub3afs2tvWiIzoLiPHJMfzRZ39fkRNtwbEbchokiZ23qHxW3Tg5gCJGJVtrBQT5g5QgoK8sSgwUG3bWgKuK4SBNUAfXP8b2qyGZIeDP//x38C9//wa8/CVHVupARp4mNgRlI
GjjK1RSt2I/UeSauwV5PCuJXeXmFF1X/AhmYSIUe/JUTHuZ8twVbV5wQDMOxxbxBKSp9HGTmlDP7jU2ZMWzhmkPB+fia64Aa8Vp8oHy+YMpzTkWY0esLDumHWTzAA1ochJcExEQjwgPrlA4PMr42C3fxtrBHDKZOJFni0HaTIXVTbHZpmtIE1qEGP4eXATGnEwwRE+Xr4b6nIW2HBUpzfLyIupzvNQiAGDRBvfOPIihHEfOhDmkGCBVK7Y0iomteIWGSEsrh4zuhQxDqf10DMI6OdHKz6RXEbUyh41VWchlYJjdi/WbyqtmtyZ0PjoKHdFZYJz3+/8XJOJ7XbnLzJGk5T2uiHTwv8jIbKIgk/9WCbhWmwQY6wH9BNxPFMHpqWXkTPp8bBfxCoQEAIAK9OftBl7s8VSy3ujBMEkIO+20CC9+8eG48GvvxCknHQoiwotOPlT53sAbN1U5TYSipe5SuB2vfvMlIE+0xSbxVFUf02fXrTALFYa2EjXobLNY61rwqSoPKKbwyD9nf+aRg6MQL0NgXIIT0YnBIBm0Q5UFpw5m6biE3eusOr+ulElbbVy0U0YM1lNRQu1tpRlFiXhQtayYFuqOknraRwUONJt3Gp8fm76V7DY3e3ypuJtTEKPYqi5kuPDIjolvk1dGl9ZUrUTSCDlnWC9TzDkOagNOkWviFSMooeyhzsusVmwJSrFo7DlYOflKbZ2qHsokBNbKSczIQoYvnnU6xM9HYOq4dtMXmxN3WHB0zsgLiN/84i5seLx+F1wix89EOCFZ3R4lXAJuHAaE0PFiyr0np5rUGFnmOKBkpaos1ddzlIhZHQGwmS7S6QGAe4XvDovAFOLqYoQQ4Y//6GV40YsORxLx+Xnk0Q1qwHdIA7v9syvPfGlwyiYAuenb9cDLWq63FUKoO4Xf68uAdlTWhoYiuUt2YiJckjLqANuGDMVOcpAocqqQQ+Aq3xo3jZnj2gxCpUIVuGXozLkuu3Y1FdksnLOKlRS1mOgL4xyXkpAkEUdWRsPy8iIh6c03gWIJuSE/MajggGQtNoY8VFVrFKTkhs1TjsnVj0dB0dTVI/ss5UxeoECVuvmCZpyiTwW73iTHMEEDpI1+P7DTT8pfSGCOBDbO3IEHcRt2TfsoNkOrgto3S8gZ9KiwURgfHAG1VH+WgXEqbyeT6RwCaqdzO72V3deo+0Jiyzgjj/CwbqXoiM4C4kf/7+et0nkkw7XyAMqiE8aXMYECmYFeWiIgtXMqhuTodE39qUtSLPRKJ3bJCpFKHXtmdH7REzj55EM8kiMl4457HsXMzMAfgFt09AUxiPfmVpUk0N+8eBqSY74j8t2RVacSAeAcUV8dMrpEhBDgxQIiQ8QayqvSM9Q5KshEJw7OOTMZDn9g/7rEygk3ATXC3MCMdfCIVFDICI3BkkCQBVl2s5K9Cl457lSW69IVHisrBR2kD0HCYopFmmbx2pi0f41hxMpnJ7FkQOcOvWQjOvhbMOgVV/5j2gBlfRLEGLLQJEXpJ+0KLl09VLUFbIoBJ5C5AEE5djMEhjyBHnKMi2E0do6BZGAGOp4YGAkrEgkw1mcJliRZCx0YGQulP4w/D2Mx5pTnACkitYnJTuapFWR+hyPA6HOuI2wsamrEBYW7im4+MrZ1dFNXC4g8G8FbMvY6n4hi+idChtj9TeRNZ6njvjhrzWkBrwuN5in0tRaSRG/ImZL6LsjRX5GaN7/98/j+JTciyyW+f9lNeM1b/xW/d86X8M4//yo+/A/fq7UsWb3cfikvxsKY/tIEAjTHTV7zaXiZsR19jVqmfE7gk6ioZaE4b7d/iAisa4VYfdvA8pW6Ze8xkllTGFtnIT8dAd5mpJXWPo6QpFChaEyb+FVn6fuYFEaimAJmKissnOyxeLnqL0VvjqIBh3lip9T8uDSeZsik0D4qipYRCgtHHOUIv5WWoQZIEIZSYE6mztZ0ysF1yAlmZYLZiLNwHGrQJYLeGFS1xRAJNsoJT
MteRZ10vCHr06TKHmqn6Qw9rM8na0tWq7qAIRLMIcVQrybLkGAdJrFGTmJT3kMu1Wqxaf0ZRmJTSACzSCA5wdJFr2xZ9/8+2Geffayvqft5xzveAQA46aSTSufe/va3P6k6dRadBcRzX3wELvv61cGLpB78GcVqqwpiEvXNCVE34pmVWfb1FDUjbyCOAClEZR4CwFJNFcBsdGlfidkSCja+G7rKDz+8Dh/5u+/j/134C9yz6glLgsy4ZZ0Ow/7HDJhaH7XBJpR1iWFDrbiGARksYwd0Ojdhw+CQJgJzzrRMFTeQxiG4wvpQvO8HMygRYkFBdaPltRjULI8zRplgFVcp8rurCxe2CWds9y1A5MQTMulNGkPcpSPX/R4rs64mroXDXMDSlJrbynExIZTvjgnxoJNp/dV+VFX7kpO2OMRgfGUImVTbNfQqtngwVymTAj2h/HyIVBDAsSSDu72aid8DwIlKbMhRWZeGWV2YBmGoaadNOWFxOodEE0B2mHoO0laWKlnqAU6jwQDVj1nuI4HEGOVBPqBYUVUcESxhvO4lBGa4h8lIbB1LnlhtBxHTkaEcpcHlCFhjyILpNXVNBkiw45K3VVV4q4DcAntdyRFfm375y18iz4treNNNN+FFL3oRzjzzTHvsrW99Kz74wQ/a35OT9UR1vuiIzgLi+Jc/Gx9JE8gsh3Usdh2OmYE8LwZ7d2fz5l6qkBFD+LbJwShVJ9JsDdFk8ZAA+j7JYZPfHFPrTVG8qSrc98ATgBtd3bVuWIuDo3tCtg6SYB2EzQBu3+/NwFnjODxCU2CYS7WNhzvAB2k8f58aGNJhrDlWoSBNLH6IByqaNibDy2POhUvVw3yRa22CXSpSC99aBXNFnfSsfWoMDzH3hGR/I89wTBkZBbmKySg9EuTcDl4edUeyJEXqrdzC8ZZZDfIEdvx5CFTp/8Kl29eQl7r6WLUcBjyXp0hIkQfLFfVALfW0s2Cgl2Sw/na2Fmp1FbG0jryC2E6JuWlJW2FyJJjKx5BSDkFSbZeg/feGMkWaxAP4GWnxTUX9dLPcw5j1kvdJTqET6yuTgB2yMyP7YCZMiIH3/sJQjtUTNKwtX+pd9ULMcQpC5kwfKi1yAInYta5CC46F8NHZdVe/Tf7mb/4G++23H0488UR7bHJyEitWrJiXXqOgm7paQBAR9j5kD0VgTLA/P0GxqsrExnE/Jk0tONKzwx8xzbRWC29XMukrBmIjryAVxShioxUHerMgtU2DI08avxkqrByq57ZMpWAvjt8SQ1ty3NHLyRKL0xM2i/ejpjkY8JfIk7bcOG1jl7y3halahBgZdTgtzsVIjh2oGvRvrVfsWtsmN6N1JE1UVERxhhpfYjO5o/SxsfqW8ofsxzRWQMuYAUgIS8D8yjNIT7+oCy5EQXJsqkhbhO8XXnGNCAWaYIAphtJGgYHy81ExceZkgmGufs/lCTIpbIiEIQsMuGf9VgYyxRyneruI4sWkiHnDOqifsK1lrn+OxMa74dJDZB49bqgnIUeiybOKXWB8TAopivhI7YE0ROrIJMxyH2vzRdiQj2Eq72NOphhwD8Wu6PXll/1R1O8Bx6znErlc1yR0QTH/GDrCEuENGzZ4n7m5ucbyB4MBvvzlL+PNb36z53bwb//2b9hll11w+OGH49xzz8X09PST1gZAZ9FZcOy0YhnuuW1VM2Fh9qeqWlhzCutEPC1rb0hzA6r0rn0+SO8KLh30zxH0/lImmSUbETIHrai7cstZxl05Org6uINxSBJiUyIVIPiEy1oyIqTDO+8M9JWb/bawFHGQLuQDXqDXCAGxpHDUpdt1utWc86ejmgqIw67KCq8T63NqKVKFfD0Etqorw7UklRrOM+sUU09x2W7a8iCqnJjLx2Oiah5RP290KqywegzMknVNeRK9a3mOnr2xMgbmwJhIMkeeX3jGCXrInXZSAQCFnlNlBhJR3p5hyCnyXKBHGRLNWm1coObqWUzLMYyJDAnlj
v3LtoKjk3LanuE++sjUbuc6zZB7GIIxicFI5cfvUkWAJIcz3QlSsWPbam3z2Guvvbzf73vf+/D+97+/Ns+///u/Y926dXjjG99oj73uda/D3nvvjZUrV+KGG27An/zJn+C2227DRRdd9CRordARnQXEYHaIX191R9teujpdjSWGmLWvjJ+XANvDet1dzkBKdlm6250DAKfBWl5/bPDkFVswtKqc7kQZbHx6jKDm5SElddzfhjy0IXJMKE3BGJ+ZcFhgY3XSr71mP6WYIpZ0xpREpIPlkOQ5nwpYC1OsvQOyYuoTJa+uvg1lqnVMda/pXCaNUX0cshOStByqpyoRLi2s8f5y9KsgLazNHIVVhlssIWdIBlIRq395tVYVKYut7ArLIXDkMQidjpXeZpqnR6H/kPol7OqrWGFKRsbCWQVl5ALEynqj4gaXISEwx3192SWWJMVbv2SqaCu/PgBhwCn6UKQt3iikc6g8A+4BGDpkx69f25VDouZeZu8GBnacfAWEmGgld6GQc3nD182RAQAPPPAAli5dao+PjY015v3c5z6H0047DStXrrTH3va2wq/piCOOwO67747f+q3fwl133YX99ttvXrpWoSM6C4grf3gjsmFL27xxHG5JdhjQ2ziQipljDpqVUELoVU9FXkuGGOCQ5Aiq3bjTbJtg8wioQcuNGFwHU3QK5W/jDmJt/ZFqRFtLiRlImUt1ZFO+o4+BdN4KbXBCV6fYlEmosomlE5xyyYbhTcQwq4uL+D4mnk8oVwuo3IndFBhYikpEKszixP6pJztlS5QtJZTv6FEmUs497HITJnDO/vSYk7f61iDYENFt0noXUQ+5tbddcfcoVzMzTOvgelIgSaQljcpCEidFuRRIk5hfj0rfT/zYLjpMVkQXo4OsXEAZxsCJ6WO6ivItruqQM9l6xqGWt1u/cGi6Vkvo1PkBEqTIy89YWIK5gUmVN+AUibbguBoDsBaZapFK37p3Knfz00RMYOWyd1cn3kqQbwFn5FzXe+nSpR7RacJ9992HSy+9tNFSc8wxxwAA7rzzzo7obI9Ydf/j5f61Di0He0NMTAwdmzcJei1BQK/6IbAWjjAic6w8PZBIQbY3KY6PoLdRF45laNT8VavATDpLVBxuyChZcqxcQzRq9LCDmZFvFSrLM6e8w86AH3CSIkBijMiQk6apmRyS4xGMCrJDgHIyrrn8bE1l1bqV5FKR3HO6rrD2KAJNgOSiPENcKnULrDi1JGdzYS4MOcu/9Zu/ISK5gCRlRTHXKIydA6jl1sMcdp8rFwRGLs0zUVw4mTMSkXuPtYHa7DNe53bNELuYjJxVtzGUKXpplfOxSp9QHlhDCTkn6rirm/5hfIcA5UA7UWE1csHBzSchihVn0M8sM3IIbOJxLBGzZQKnf/ei+6LA6pVRgj5yMPVw0IrvYqy3T6N+/53xhS98Abvtthte9rKX1aa7/vrrAQC77777k6ZLR3QWEBvXTaveMG8ZT6fmNcybahIE9J1AgSbejmvxIahl3xUyw7G6UiVAWXqqXh9HJSmaVNmVSiOAoLZq8OQaOWQIYKAX6bZz9vzabJi3Vy7qY+rAZlAmFFsZuIrXwLPuuEq6Te4SlzaqxohOVdlAlMiYdTyeJcmm43q5TnJfD+c+LeUnSyjN+Fb2g3EUpSJdm2mo8DdLgGojOKjVVe5vN7+UpMtlu/JFElvrTGwKa5gLdVm1JSoVys+m2CC02IKBAWRSrTxKRBtn23hN26VSOpD+nrHAIBelupjvibYjSMfyQzCWlUS7uBZTVczlm7FuybohH8rxmqwspuLcACkGMsEkDcGkHrr1chKTYg49zq0RXIJADN3OZRIEAHNIMSt7GMcQO4wfjYneAa1acaEhWUDOc9WVHHHVFQBIKfGFL3wBZ599NtK0oBl33XUXvvKVr+ClL30pdt55Z9xwww34wz/8Q5xwwgl4xjOeMS8969ARnYWEIR+C4O22FyJNnNVGFdCrm
7iXBE69whlIIq8xTahQyw4nhEofGvclvQ3YnWGL6VynB8pxcaRA0cbWI7Fcb7PNhh3Uw3owWpEu23EGe3s5f5zEzfIMmABOg8tn/gstIk1yvamE5vQEqKCLZsDXBRfENFJ2Gz0CfSqPxxouiI/j3coVJKvJB8a3+BREq/4xMVMdsQSqUXJJSBN2ylZkppdIrZce7O2CAHOfqvyZTECUe5YcMCOBtAsaJQSkVJ45qSiWvVddg0wm6KV1L1fsr1JiRSiMUdHUb1b2kHOOfpIjsdYsIJeEjFMMKUGfhuiJ3NtFhYgwZAEpCWOiOqLxnOxhUTIoaxc8pwyCKaEnAQgzXUNIKdOrxJTOORJslJPaNqamFceF2j5ioxRYRLNes0kQBmphPABgFimWL35VTdttXdiSU1ej4NJLL8X999+PN7/5zd7xfr+PSy+9FJ/85CcxNTWFvfbaC2eccQbe+973zkvHJnREZwExNt4Dmf2o1OtPJFG/2JLBvsb4va+yVJC14lhESIgtoYWDb9PtLVUM9HoyUl6qEC2HUyri3YxAcry/jj8Pu3Wn5mkwz8JQWVADOZSA7KHaumLM6bbHry7Kjmma5ER1dixHrWBIjm5nkiiW+9f0hZbshFs51HHvtn1jNH+EIlcM3N7lKBsFigN6haF/CYsyyBCkERhosw1Q1SP0p2EQBrnaL0oId6dxdczVIBHG3dbXK4eAYOlMWyk5Q0noCYkMAj1UWI6goh33RLVPUEKGiBV51HF3eocw5BTDLEFCmb41zGo1Qs6MIadIZYaJZKDi9EBHmtbbN9R1DQNOkcocY6KY6vJJjl3kbo/Moo++ttYAjBTl1WE22KGeq845R0KMIRLMmKXoMOSOvHwggvhvtNJqc/HiF7+4WPjhYK+99sJPfvKTp1yfjugsIA559r7FzWDi6DAXVh43Xg6gX98iW15rkuP6kNi3RKcns4N/Sxt3ZLixv2VPFESC/Tzuocl+D9NZVjuEsACQ6A4/4sAQDv7uwSIuD1QFXbIU7+Xrx7LIqh8CCl+VuulDY+Gog+rfLZmLVMlPWuE3ZK+N2/YUsfQEwkuWKZO2inzp82pjTRTkJlS8gtjVtfXI74mu2S3mO6QHorhkMjtm6mSF1QZewD7nOJsBOY52j1Fst2/WM70hVVJTXAyp91Gqin+t6ig12QlXXWWS0EsYAynQj5AZZmCQCxDYC4JnyJbQZMY4WBviklAerQsA5Jwi1+ULZrWBqE6cIcFs3sd4ogP2aXl1K5xMfaZlH4wh+pRpy5Jb13J6ZaeRSEKrlKlPKb9yYh7jodbZ2LMi0nWWm9d8FLtMHg9BvUiqrQsSmPeqqxE2Ktpq0QUMXEA8+4SDsPPuOxQkhzS5MUECY07AQhMMQWpaKhFqBVUilLNxKoBU6BVSRTa1bDoir2b+tcpawNqSYwgFpwTuCXBKkCnZ7QdIAK8+87k44xVH+TKd71IA6ImC5MR00ETIG2T1MbNXiiz6rcqYPfolsvUIaw1obrmR0c22g2ufrwE5BCC0SqkEzvealVQu37AGCVepUE+z9Nx8qKJNYseoyGPviwoyZTPUtHWlDFeQqRSpDxMXZNL4OYX7XBkSFL3WpC6UDgVAonjsPH3MtWm4lu1uo3Iqd31AcAaAIhfG96h+lZhvcTDHzeomZlIBAl1tNHkZygSso+ZmUn1UFN0EQ9nHTD6GmbyPoY5OmSD3tkFgVpEo1NJ4/06UUBt4uoH8hkgwm6uPOWan2GohkEs15ZShCIpYDcaQE2R6Hyyv3l7bFRhygqG27lRt6OFiU/Ygrln97sZ0WwO2ZMDAbRnbfg22YSSJwMqnL0epR6sYUIvzZD9MBPSTaB7WUzaKLFT0rlXWich5OygnutzQCdmk1YRrctE4Xv7bz8JrzjzGz28+PRSrviiQYeA53JL/8ZLp6MoVcWQo+BuD5R8OweEExbSanhJjUkvMpVCbgnJPW16qjAkN8CxWLV++PHJlLi0DlMeNE
DadK9+QBldo+Ppm0ockLrTixRgb+8eNsdKerh3EoSoiImesXH0h8nCDS32RYhalCoIWfwzUjudAbEBuivJrFI3c0jVWIoOcY5agtjoooqQIjcBcnmIuU87DQ2m2BHCjDQcMz9VVpzI6K6IEZCj79bvlAyrysntsiD4G3NMz9Epe3eak5n7ZKMcxlMKRXA3jeTPHPUxx38quIjnmdxERukY2mzKA1dP/ibvWfalBmw5bCzqis4B44tENuOmae+clg3WsnBjJsWgxeMbGMHYIhRlXWJjjiL+aOtNkf/qnv42dd16M3XZZgoMOXgEYS1BKkH3yLFaxPoaNvBZzBAw9wBun7ToCFyMCgR4sSK1Kc+pYsqCEpIEilpVahVE3xtRmLfnKOF/teUcvmzFih67U2bRF8DuGqAxDNpzZVmM9abP3l1e/8JJaEhMqGQiIWpXIJ11M4FwPrE77qN8EWVp1XAisc1QGiuknUx4zkOXKEXczFrOUUSqf1VJ2JMhZQEIFjBtyYgmOSkUYhEsUY/pDgpkwkAnmcoEhk9rbi5sezcKyFGIqH7eWoJwLH6XQAgMoawtDYEM+ASlrjYRe2bolMGOJVXG8CrMoT0VJBuZkiinZxzSPYUr2kWtr1S1P/D2GclOjNgsJs9fVfD/bOrb9Gjho2h4+xPnnn19KOz4+/pTpu+rBJ6IOW23BgJqqisB7pFsQBZNCQllsZC9RhETATnlwKiDHUhuIsFqYOnfJZb+xh97+ppPUICxQRBSuyOcp1aJ9XEuMej2vUc18iXWamqhwjCREZFDEXcoqVKd2S1Iz2Uuw107Lyk0VEAAAnqUpFg/Ifo2QnbAN3BNclbCiXcjIlzqDswLNckYAwqSraicRFh4QJOEcZQCSSoOlXxef9JQtNbrxGIr0eIRHkZ08I+SZm0dNq/hyjKbmXYCU862nlIq7YwhPCawGaRlOy5VQ9nMxDr8FCSluEjVoFeUNWdjIFm4/ZKZsBSSYlIUkh0CG1JIltaFrnW6q7HISAkNgOu/ZPbUGUmDIRU3Mku8Bp3raRJGmdfkizOZp5T1TNiqSIiYt3ySYBYbOnlY5E2a4jyx4q5AgZEggeYhHNl3SKHchYfYKm+9nW8d25YzcZnv4EEuXLsVtt91mf1MLUrClsHhpRfjwmMk9lqzB2mHe7tn5XQUJgPvF0nTlwaiWqbBeOSVrggvG8Itr7sbp/9+nkGUSBx+wAq8542h85VvXwJARL1yKJSmb2f6edaO5Ab3dxqmwWFkZpvEqxFgjSUUaa1mJlGuPV10YLXMgJd5z2gn4w698FzZmjatf5Hed3lYnLqcxnNKzBhn9zLSV8NN6ulOQhzTJCUcft0xDTDw/JC7KdK9RqSyndS1LVzVkghP/JmgIDr6X2knJ8OPu+AOlzAlJaiw1AlludjM3S7u19U/A+tooh+CQ8Kjl50S+06z5MZQCY2n1LujlyMTSkpwydHkm5gxpMiEFUijLk9mdQUJFdR5CBchzgxiaqMrz9G9FjhQZ53pLCYGUlQtx3ZJECYEpHkcPU0gclpVBWa4EtBtx0M4b8nEsE7ONXQvplVck1fL9OVSESTdtCYHZ/NERa/7UYsvsXr7t20O2K6LTZnv4EET0lG4X72Kv/XbDxKIxzEwFu8DWBMFgQDkg1wXp0+nUtJZwZNakNauoYtNRgtRS8vB4A2ZmhpjOM4AI1/76PvzyuntB0NGTjSg9uBKjtB+XGlwbCAugSBMcS4zzJhnjEAAKq4d7zvqxjNaLh5ZxAjDRTzE9VPE50kTg2fvtgb132wFfu+rGaiIVKJ1Lxj677IB/Pvt0fPDfL8XD6zZ6573B30WN+iH5raoDAEts2FhmXH1dguJOOQBO24bsx5dREC8ubgZAWXOq6hAbc8IDzOAckWB/puHY08EHR5ahl2Vw4EOjpmnIcHiAVDTjRBgiU3WhVGDBJCk/n5ITDDKgF2z/YETZLRagBunmu5ZMDT29JQPMCfKSAKl2R
bdlFCG9Go05jlWr6vys7KmpPS9R3YVXN8+07KNPOQiMAVLvHMCKnDkr1nTUIYiK4IMmXYpcTdNRqqIr1054qL3ZElpUk6bD1oJtn6pVoGp7+BCbNm3C3nvvjb322guveMUr8Jvf/KYyrcHc3Fxpy/rNwYP3PIbpGRXm3Aw+LEh9EBmMCODxHngs1YEAq8mQChzoLCuJyLNpCaoHq5hOMkRkswgAA2BGLtmWb9+Pja+P+a3TSKMTaZN6BUmzxgIzfiVOfQjagbhs1WI3oB/0jIUJLjhCHa1c/4UfTMD0ILMFHrznrvjzM0/Ba094Fsb6adkKAud3cG7x+BiOP2gf/OiP34KXPOMAP0+VnDYglPQGUExrWTIVsZxUlO+RnJL1IkopiqPSzpc4wiJlmZNxkU6SqqmV4iDHprvgPTIVMPM98G4ugnsLKQWpMWIx2T2yYsg5wWyWeDoqnQUy7XdjSE7b28A0tSpb6CXfYSq2ix0lK1uJZMIgT+0ydIm69yeyy7WrzjMIU/m4ll/7LgZFydRfgDBEigFM0Cp4FRhwioyFk09iTqp3+qp7QujtHxhqmwoZMviKOizuPb0hzcLCBAyc72dbx7ZfgwrEtocPcdBBB+Hzn/88vvWtb+HLX/4ypJR43vOehwcffLBW9nnnnYdly5bZT7h9fVv84opb1U7hiX5VGktV0L9+4QfjWv55rOc81zXdmonBE3MShtc3q2OpqO1lyHqQNr/H+RnVoC+1Xw4n8JZgM6uNNVmo5eF2FZFuE7NEXunrl21+5amKRCxT+FN5kXGWEazK0sfJSW/bJphyMQSKtc+S3Voi9OeJDP43PbQaL//IF3H6R7+EuWEWN6kEEAQcvtdyrNhhiRJHhDefcLSdUolaYMLGqToVDmymfQieT406RSC9JNuTW7WUvnbEdRmR21Rh41O8jVy5te2nB6k8vGXLBImlsISnHTi+9VtAmLaIo7FFGMCoaDnpLqNuydOllad8emK6Go8Z89Kh3K7I+rAYPxr7mLD7YSTIvRg9cSgdZvOejtdTn5YgkdSSJyOTneXtKkrzNE9gfT5h/U2kBDIW1mIlAGScKvII4RClevTTHVqlWyhIpi3y2dax3RKd2PbwIY477ji84Q1vwDOf+UyceOKJuOiii7DrrrviX//1X2tln3vuuVi/fr39PPDAA5ul48P3P1HYg8OdwYlUbJw0UQN4xeqqGDiJEBeiYs8rszRdW4/qrEM2+wj1smQqLVtPXNJhrC4QUMQmpXJBlrwEpE8A+RjUwOyuBAuVdcYGThCdsiKg8Dtw48xYQoboWFMVzM9Aaj0rd/eoIyQMvOPU59nfqzdswps+/w3fOllFcir0se1XFa8ovFb2HHsrpyrLr3wJLtPrkMfYNW15UAc34UjkIWRcHNwf/nmX7FSTlMIPp9xM5OU1l6mZRCmZ9ag/7+5lVGMA9RyvM0l6o9Ca8qhIb+sHgU3ZmJannJSHTNb/XDnqUiWBKkNZdoac6Ck0p3xHF4KEIDUJ1UYmoFd0OaIy9LA2n8SGfAwbeQJTchzTPI4NchJP5IuwUY5hRqpl5k0OzOa2XNY/sE0lOywwtisfHYO228OH6PV6eNaznoU777yzNt3Y2BjGxsbmoyIAYMOGaVhngAjRMP0xm+mOmkEMcLuHGkIU9tJJtNeOy65wXAjHJUBZWTg11pUgT5gYzuAbnT4r0pvYNRWjTf0gb3on14CgrTO1shoIVGV5ARGq5AEB+mmCU56xPy6/5S5ceM0NeGT9Jtzx6BplDXLLjtwPBDhRnMvnvQ04XV0r0iuZepCT3OpeiUivPhIjtsxqPlGwn2k+L5axa1g6qQthZT+MOQBHrTmO8HCTUSkJomYncaApro6KXlz3HpKzQKqXgTPY297NSgmsTcpIKzCbE8aTobcDul9UeNOrfFPZGCbTOX0kXJlDmJYpFomstt5uHXNOkOuNPkUpj0TGolX8IV+qGyvIaA5w6W1HpZvjHgboY
U5KTNAsUjGo7WZyJmwcPoBl/X1H0uuphNwCU0/bQ8DA7ZLotN0ePkSe57jxxhvx0pe+9EnSzMfOuy5t7AUYUHbWNGKKCEBAsRqiRQ/DUJaUphHEdi96Xy7XqmDGH3dgMFs6tFoRxlw4ITcuizCkJNL7NGe1+hqC448INYP4iNMQlji04ZDakuQSkFnO8b0bb6smIOxc6zqyE5yvjV3ToChrRdkMNs50iauX3yc6DRcMpDVGE5VA6pTW1WIzSJZNH1oo4gWz3u7A7Foe3uui1knZKcc7IpDnjCQJyU6bm8oM03VESe8fxaSJgNpjyiUuMeNuQVqA2TzFJGUR+dUPhARBSgGRFBYW1yJGJLAx62FxOhzhspnlzKyeC2Y9yKqIyMM8xTT6GKMhJtNhC2l+HYxVqDq18REkTPEEZvM+dkymkZL01oeo9lb+L3dv+C6etcs7W9fwqcaW2b28IzpbHaq2hweAN7zhDdhjjz1w3nnnAQA++MEP4thjj8X++++PdevW4WMf+xjuu+8+/N7v/d5Touu+B+nVXi0IQWtrTq/hkrpyDGnwBFTI1U93TFXru+LKI7QjWyOu5moVaK4JBLuaywZWbCq/rv1jx0X1m2BomI9NgTEjGg/Hy1hjuTHNHy7T3vy2c8hgSG6CS14qJ1Jm5X3tDMJqI06VgEA6zAEKq08DcSkV2AQ7dULgjCF6roKadNTKiVE4Vn40OSMRjFAOkd7yQeoYOAERUhKVpcZ9pNy/qVDWnKGz3UNOKnCgeifgmllvNXWUs0CfAGmmhlrVU9qpMqmXeNtzrDbUnMpTLEqqdyh35RXdhVoJJu1cKusWUTF4ZpgwxhnCvazcNi93E21XpRWEV0Jgbb4IPcoxRgP0dC3NtNaQE0xlqxqldlh4bPtULUDV9vAAcP/99+ORRx6xv9euXYu3vvWtOOSQQ/DSl74UGzZswJVXXolDDz30KdH1pJc8o+itKlA6U/ciGC43D+VyeZBtDVLRUJXzsGOF0JYRinQtTRhlyTpDOxInFWlbEEGGo7e3XreFsuWmrCc+NY3r8YQqMtMgA0CxT1UE3nV2r3tVnqaygPiGoI5MZgC5c3JLkFKnAAJUkEbXQ7YqPUGRTfJENJdjbpISm5svCLkUyPIEmY5IXNz2OkqwBLIcALNRX/Nxk7dYncSA9rPRAex0FGS9lSVyFhjKVFt6mh+xp03ug2fveBg8x/A6MHQ5Onied7HV90xvrTngOmu0ujAMtT/WQCYYynB/JQIC+dN5X+Wu6CDDXcvJmciqR5HJ0KUhp9gkJ7A2X4wpOa51Vcvax5MdW8hcOOSgLfLZ1rHdWXSqtocHgCuuuML7/YlPfAKf+MQnngKt4hif7GOPvXfGQ/c+XpmGAMhEKGdku6whYoghslaK+Htl8bKc96hIpF+jvAHBLZ+CMcK8UUfu/SQhSGYkQiCTsrZbka6stmii5TFrgnNIVvkBNZCkmEwvenJD2ipxZnamUkZLOWwukJPHEBMGikB/FFxedshLTdvZFW8xnZx89r7LUenY3FiZGOGLHLL2jtCyY757/vVNFqCw8lw0oIlPY563WiNl7KmrTle2zgj0hB5W2FFbK8AAcpl4cgjsrIoJ/zIymaAnmh14c2b8+aFvR84Sv3v1ORjIQU1zse4T3LjMcaaec4KMlRNxj/wpOPNdODENVBDE5odxwD1M5cC4GJQ2G01JhT2f08vMlXFUYjKZQ9KGwTn6+/UBZrmPQZ6gR8r7ZZ8lpzXIW1h0U1cK2x3R2dZw5DH74aH71kR7UGs16Jl5ED1SSWdFA6HY0dzJb3kMYJ2C1SaVVF69xewNluHYZ0iJklOQqRCvPfMYEBFmZwa45c7VuOH2h6IcgqE79lFi1iQoggdGBmU74Fe8kNptLGL9XBs13OYyv6vytSBOtRahEWCKcuUV5BclPW3asA+vqI812ujVbbWKwB/EWhFINLSnmZ6rTE/+RTU3l5nmMqgkdM5Q7baTMLKh/
JL0O0ZSaZzg4LvzLFI5XZ4rJ2VbTT3WZyyKtoSalqqb8iniaVaxZbbXu3J9Agh7TqhgqwkJjNEYBhhUkzpSq5lypkoDq5GsuivCHFJkOaMncgh9vdQ5AUAFwOqLbASHY2VpyfIEIpd6fy9NlGytGAlJCAIyqJg9y9KZGplsqgf/evqQEJjjBJNiKXbqH9JS3w4LiW2fqm3jOPk0PX0VIzmCVHBAN/CfEGqKKhXgNFE+OZHl4Z4FRhCQ6OXqkUjDJpYNp/ojilkBKQAeF+C+0QFeID5TFhPwpYt+gS9+82rct2otZvLCWZCDD4DyNEgFGGoFl7v9QLmS/jFLePTHG6QNI9Jvpa2egHAQbtK9YYC3bdCCOLRBieQQ/NVkoZXO+bQiOW7sobb6SFehGtk15cczxNKS/7ERJKm4Ub29N0KhjmUFwFiaILHPkyMPZAlJSZK572D+uoOmm8aQj2I6Skqyj7ZbHwYwlAkGWbEnlV8WW2tPHbK8/uIxGCsndsVMNodHZ9di/yX7FM3tckivftT6Td9MAeUQmJU9TOV9TMueF88HUHUNy6yTCqj2VXFvlAmR7UYQpOP+pMjYlJ9gTiYV8q2NUKO8jxiccwAwLWdw79RP2yi7YMixJaavtn10Fp0FxuHP3huHH7UPbv71/ZBZMd0jUwH0/NfH4uVNLWM1Bh5rvRFU/DDBW0zQQWUjtvlDubZvJihCRKxCwqfkJwwymk5DOnfSL667V62mMrqatyxvkDIjS3UnraxRKJEzb+w2Orsn9UAf6/9Dy48dH10W5oj2zhu1GwZlAool3pE6Ad7YWS2kBazq7jjv6t0ki+N6WlS0Y5NOAKqXuZtrRvVl7zQ+gSeymfjIV6sTK+FemeTkY6eR2LuPP3Lsy3Dynk/H1+76NS644xo8NjtV5CdFVPJcesvMDdkwZImZkSSFYckpSd9/qlGM5YCtjLBipGtDyKReGC3MAN92q0XlR5PqapdmbXVTfP3+H+Or91+ibwcCUYqxJFORkSX0LuPkaUiCkEiln1nCLLQVxS1H7bcEfV4tIVdbT5BnwWEQZvIU4yJz2rW6XsYZGmA1JVWqH+nyE7XImhhTcgwkZtGz7VfcnGTLbPeWQRC4ce2FePqSE1ulXwh0U1cK234NtnEQEd7/ydfhyOfsCxAhTRMVN6cfWnKKAH82AjAZiwdB9oWyxujAe9wXav8q7URcF68HiLxUayuQVrIyAxMge9B6KYIhiX2iQQQpCDJVe2ZJ0i6PNTHk7ThUsddVWV/4fiER4uIJdxmiLcsxAphBOBjoW0e8ZZSiCUctJHUEqwk6jdU1JBNtYqvV1Mf2+aE5ri1ch+GIVSleflHYeJrit/Z6OnYen/Tr2qZtPLlOZm9/EfgXnAn3b1wHAYG3HPRcbBoOCkHBzSal0NYZfdp5VIRQzsVFoMiCJJTX/rQPrpdJQibdJtVbxdTmVRM6uSQnIJ8+w4qIEYAcefGSBUbOAtNZD4NcYCDVVhPMpl7G2tTDVDaGgXZ6lqycoud00D1TvqknQJAQyJAgY7IWHKNLxgIDTrEhH8fGbAxzeZ31he3jWxeCy6TPrD4Cm+SECgrIhRSX5BQu3fVgSKyeubkh1cLCbOo538+2js6isxVg8dIJnPNXr8SX/+9/4vJLb8JgxokRESUZmrQwF5txhmBW5Eh3ZKP4wwCuo2x1PgYUOTCWJDcujS4/6nQsAMmsQvRT8Lar/4K0NacBBO1kTJEOv6bKVQNmyWHXrWuDzFim0Nri5ldv9ygq7soPx4mSoo7spHSqyGqsKqNdfq9oQmFBjE8dBQV7amtyIdiri2tBLGQ6CQh4eNNGPDy1Qf3WJNZrszpEz2sdiMvtrfGPN/4cn73lF3ja0h0wNRgCBIioX04RJblUhj1WJjU6BaCXMAO17x9BXrXbeSKAXO+yylKgn1QxWiVfWU2EWrLtPCQM9hx5w/ISknovLI50MUVdMikCXyLGUCYgkUGUKqbvJQgMw
RiD3qYguPElgBnZR8YZJpOhs1hCWYOGUt34CUmMi6aYOuX1VjNyDAS1A3nOwBgyLElnYbqytqDOVrBNoCM6WwF+ctnNOO99F0Pmzkql2p5PdXq15MUZse2gMgJGJkeu1UiXb0lOVRYd4dgEvjMDmDSDd1MMHldZJ7k0Fo66fDUxalzeYWZB7AlnYG4abw3BITgyYmW5gy4Dh+yxK25//HHkQSTi8BJaS06FIs7YUC3H2eqilMYhZx7ZqapIKN9Nq1cwuce9ursEwfurGyVcyVXb+E5ZJSV9MuWDAQHMygx3rH+8nD6Uthnk0Slps+BHkFBWmkwSUlG1oklNjeVOjB0B1jF7qiI9s/6fkIOROvLKKDYgJucYUD21RtqZWTJhOkvQTyI3oP4+5BQDKdEXGQDSFobixs84gWTlxJxzEcvH7FlupsYYhIFM0KMcRMp3x8UcEozJTBE2LqJR182uEwT2WHRURS23DhhfpfnK2NbREZ0Fxm23PIwPv/ebLQmOAz39U9thhoNCxVssUCHHjEK1hMoRLeFbF6gmr6N/aerFKNMq4GBcp1okjuIVKOnmnmMo/5PEb1KvmR2SACC6eqhK3Vc/9whc/9AqfOs6ZRZnNxEF16qhrrX3hze9GNGL/bIt2XEqzYE1otry5WviGXKoLp+T2t1vK3EFVJdTeazKEmNIrLn12FggHFE1bW5WmzU9xq6NQTLVBL8rIBwLkCspkwKSGamQ3suNlKT5gM+GpS4zAUPKkOy40zhKfvNKKC752zRFjrD+TBBgrllZxowZ2YNk1tahMhnKOCn5ODEAiRQJ50hIWbwyqGmYcQyj5Q050fc8YUYKECQWiWFlN8SQOHKn11RXcivAlph62h6mrrb9GmzjOO99FwOo7jvZ+cTOtX41NBaeCDtyBxzzRBPQysfDPAO2C9KDgp36qstr/jMDrrUGFefDXcvdvOGGobadKoict0+WlyGil0FMFqHwQSI/j/W/CXiUE+aoEbssWYSzjjmikBuQEU9Mm+sfpLHX2jsQSQOUghISTHBIeJYtr5hwPAqOl64T1+RxM7O5Nwm1S0H0PWkcg0ttWPlQ+QqyWzE3vWbAeV4o7K66avOuwiCbx8hp8rUxkJIgJTllqhVQgzzBME/0jubBDepVXOkokWBOpshkobCv+ua/yVMLvyPlZthA8Ex/RFXzry4LKd/UORJnakzRy1gAQ4ayHs3IPuakiYGcYJqLwITuqjNlJd0RKyeeWV/JDlsFOovOAuKJNZvw8ANP+Af164OyJDijo4l1k49uSPQIeWwXasDbvsGzUthVU06nbs6ZmDxVq2IaenyPc7nenEDJmuNytFIf7shzuZkZ4DyrTPzVzJNlywktFa4cN3HF64I3JQVUr8QKyvnEpT/HPY8/4eWN1dVa0UbwwYlb7qrzRy110NYdcEF2KvRUMoppgGZC0wSX7HDga8Wg1K0jVxA6Q/pdBhcmZJjpNrUlQKRWUm8IEEwntWGfrkWHSO8bJaRHXtQ5dng52eXpAOzqLdca5JZsnJzDlURhXYYyBZAjFXn0fLNhlVD2gmkHaiHfTHPFrF6FFalKACNDgtQJSqiWuZtbUU/jgTGgnn37EiwxIYaQENjEY+ghR0o5iKF3bE8g8xk8PHML9pg8bLPq/lTAOInPV8a2jo7oLCCuv/Zeb4wp3rm4vJ0D6bOp2ofH9O9NsVjUIB+JuMY6Po2R41pyoPLJvjJLkFR78bi3u0yoejsGK7S+ByuRHC9/Yb7wQrK4gyqKNiM4x9mx+AgnxahkwOFc9kkJZZi3vIrjocUJwZSXSeMShbtXP1HIrLm+tro1RMXeU2Gaqu8V+UMYS5tPVmMSitUxje1fU48q7cghW4CzH5YhOYyAiETKi5apaTNV3cJKuJQCiZDOMdiprjYOxuT4kOTSnWJyb3pGIvKKJeg+CaBgminUoayTOjCUAgnlEZ8dZQ0RXL2vFFDegT1noSIUV8C8BOQkkNaY59T0F
mFG9iAgMSZyJN4UWRPBUtfJ74rIj+Gj27gHI5uQc4IpKTBOA6QCGCIt+fUAwHS2rqH8hUWO+e9ePt/8WwO2/Rpsw8h1FDD3UWVAbfcAVPdSSeEAWDcyFAHjvHdcAIrkcE8tSZdJsX+VFECeAnKM1KotUsvCuUeQ+sO9gOSQq89oqNsGx76IiuLjLf926yPgLdv24+i0Jzl2GyU9Laauh6tQ8dUxJJQRsbI4PKyoU1LoGszcjQ4u/zRkqTRIMcrkbASQ26belE54TLeUy+ZjqLA0BoVWH3RJUqADS3hTD17WujIbmYq2CYTtLg3hcY6x+ZhBt6CIPoGJm+dymXjL2X0diirHO/R2xCuX8dw5F8vYvbrqH27cHM+5HJE2N1rrZyuTxf5dJa3N9dO/JQgzMnWW7c8H5cYYcrEsXlkgCdNyTMcwimNJb9ctoUyHJxmdRWcBcdAhKwEEL9dUWFeicDpIJNUDuGctCdLIFP6u4aEMveLJewtv4Xhgpr8sQWkzkG4O1TZkpuK0XXU1wrIYZkU4rN8PO4TEqUvUesEAmSmcNoUFg35L40o93MGdHN2DcrxxiNHY/tH6MNS2CI54NkTFLcTqwp4elajZjb1MSrh0no0Vp2T5MKwuDM5XUVYVYo1Bsb2ZtAVUuH4q7kVn5Hq38viqp1IhUNYjQpLE7zATt4cTf2q7IXyWh5ypwgir4uOoYIAFgWEU0YNtVAkAWU7IOQUlEr2Eo+3mvqTNZinG0yyaLpewChlyOJAJxhNlBZKoi2DslNTCugaoVVwCmffSMSXHsJRmfKd7BiaSJVg+fkCT4AVFN3Wl0Fl0FhBP22cXPOPZe9vpJ0UsqPo1yIC0RcX1awn8WViQstgkKsigFMpKIfua5FQ8+aUpBr3SobYrCbwwpQA4hfbvqcmX1lsVWk13VGHEtb9qHzA3f1G2tmwX0z8BgYjqGBlAbX2aLAnmdAszWem0kR3qGPvZJqCgW5B9wy4rZfWVFVVrrK8eJWOzGF57aUXcY64lp0RyEEkYS1+hV2glCsGo2BaCwJJ0kL3yOUBFnM1zsh8p6x79ugGrsOoM8gS5ljOS5UMTl30n96wsI2cVPHA6SzCTJxjmKuruUKaYyxPMZgkGeYpc77kykH3M5j0VmM+xzhiSY6OqQ2Am79nAhDkTBlJgNk+RI0HO6qNWlxEyCGtdMnWsazfR+kY3e5wXVh2DIfvBDQFg/8UviJDcrQtSx6Ke72dbx7Zfg20cBx6+B2QvQZ5Se4sAKkgEOVGTDZlxPiqwX9k0bqY4pImq7FpzItNeJRAgSe+RpffLUo7U5XyFGVpZUKhmpCGdxq938dduYxSUMUrXY/NX+eCgxdhJ5cOhbuwcb6ugcTWpaviodanmuHu4bRtRKMe88UcIgElbZfVqLov8+yESmdoejxFGNyZQFK51IZgXiSbn+JRXmCyPkZkC5cFQDaXhhpzMxWqqGpViR+Gy26FMkEvl82Mcm5vqobZkEDh9j5NwxLL9vbLcvEUMG+XUW5C88l2Vs9GBYLZ8MOncdyPSN07GahXYnEzBLAzd8GSaLQ0kK2tOxoRhZNVasWJuFDZv2qKMzCE6EoRZ2cOGfHZk2R0WBt3U1QLiscc24hsX/pf6kQjlF9LSyk8wgz2VxhzuUWFHdo87BiA4eQwxKSwzQSJHiPsSbOVqx+RSQDkzIAVvqsbiYyM3c1kuQU9BOfLMlFQxkJKTiX1OENrga97ea6bgvey1DEEfLyJBB5nJUa6FJaFQrpAXJXMh+aqzMtWoVJUm/GbKKN0D5nysLVvWlcMbNpzKYm5+NastS9XY85ExjVDKpw6yZJCou2RmkNV2ihgZbqVw0aJSqp3N63z04zKK7xLqTUDmBJKMVORIRPmxULoX3//5zu9jOt8AQO+urp9NZvNi5evbNHWUs8BM1sdEmpWWnBNCJ2EFIy/uOK1i9szIntqlBhkkJZjLgL7IVcA/qOCBg1x5/Y8nQ6SUl9Z3VMHpU
mzNMlZWpyIAH+GWDb/AxuETWNLbqZ3gBYDa6mKUV7+4jG0dHdFZQFzyoxvLB3XH2zQWygTgVEBv2OLszeQ/zXYgSqGcnCXbB5gIdvsGNZBTyYpTTPIXwjwLDSmiw0BhxQlH5JjDsXmdNWWzn02Sb81hwFlWXjcCcHUaUwDpPX60I7DDCb005TIix0LxTWRohD7D3gOsTfQubwtlbUZf1Lr/qiBQJWNTbOm9Qd0yeMs5HCulNSHAJztNOrcgkXa5dSX5LGrV7pIxWBJI+88YC4hIYrdhGJCvpF1JB/O7OXCfk5rNfa1u7qHsQSJDTxQ6xsp+YrARfYIOG9Fc85zJOl8neqNMM/tuglJIEpjJBSaSMDBgTIk27cPIWMURSIjQ16u75qTAXMSAM50DRD0sxmzt9n22bBSEzBxR20UkQWqJ+6ZvxuHLXlCl7IKj89FR6IjOAuLR1eu91RchQr5gX3RTKlZmpdV+MDZ9D3pHcujVUqoXrHR6do9738vyrUXXEK1IukZUjCaEwkIyypRPVUI7pjGAFHb6K6pD+zGllToeOTSGL6oel0MrXC1JCDO6N0vVJUbVYBeIpvq2t0XVRFn2Yu5U6VTw3sJyYBoqZu6LoZHkNFTYsUhujuuFPz2kFI9ZLJplm9g9ZrRVehHF5cXgu+2Zt6cgyjPc88X3HBKildVRRdgyO5PnnEJAoqf3vTLTS4IZjBQzeY5UxwVKSUItXCjrM0rbJyQb2oTBUFGOp/Ix9CnHeJJF8ig9UuSWqEnvpquQvhlTY08leAvsXs7bQWTkjugsIJYtm0TMsTP2fFnS0hdOEMEgfQAmgPuK0HhWCyGqR7lWHVxkIA4HpKa8zGoXc6OXMcQ44sxCHm+gna/zn5Elgt9t0FA/q2sdhHN5tTz3MoZjOqCtcaHcOj0Mp6gKZGjSUaGDR45g9OLaskx9vaXFlWmpfK83kadQKUb91FXUOhOc9O6jdmy2LbkIC2VJIDE/xmzKToS/hLtu+sm5coX1Sv8/ivNssMagAv5JCeVI3BMS7F4/VpGYB/ommQOjT0OMpdXkzQQKNIQj0YH93Do0L64snioGMMcp8pwwkWRIgnsrpRxErHdnL87Vbc+x5+RBdYV32Eqw7VO1bRinvPAwyLy6I7QOt4mKdeORHKDayg3t7DtWkBwvT6RnadP9sfv4k1rBZbf7bUEYzFQHJ9ovxrXU6GkkaXQHyivfN5PkeM2jywqnykqoeAuvs57VyYtqHljALJk1393l7o3CyklYOm1epZ95IeXiWlhLl0uAIgI840UbcMXfCv1tGrfsmoYmpoqtIYoM9T40inX6t5n6UW39qiaDrjNveLzemqYsL2YVlnlk1dYPcXkhJAvk0rXgqCmMZkdns+ZItEgbu/AEhtl4k0q7u7h5BtzDTJaWLGGSgVwS5mSqY9sIveIrxUAmXvpmGhmwdxAyTrEx62Nj1sdcnmAgE8zp7R8GMkVmt9AwxCpWS4EDlxyNHfvLGzVYSOSgLfLZ1tERnQXE3nvvglNfckStOwmnBEkEKQiSnEHQoGpA7pH320tQQRgsV4n0Hgyo1ViJDijYM7Kq6xfmJ2hHZLsiDP5HQNkYTRC9zYAxhodVcPeg0gnbCNP11p+Ihc0bg2vIXkkfo4Mo/nKiiU0C1Q7mXCzzZhoKQnJCgL2pSpYg716pYHrhOFIDu4S8JcpGGfKIWTyDJjuRh8QacRouWLjayMxOlAf+guRUOQnLPFzxVE82CgXNdg8mb8GK1eoso1+MhRbKFCuvlIy8huy41h/1rZhYCvPUv3Owni5h9R5U0TaKdCSYzXuqBmx0Jm85t/sWxVAESC1FJy9fFeKT+4o8zckUxg9HQmCOUyt/OuvD3ZPMtINZ1n78Llv3hp6Anj7Ufjqb/1noWswfHdFZYJzznpdin/12i740y1RACgL3CegL5ZejoxhL8vtrhhrMd1+5I05+4aF+nB0XTb1CBRgA98xAHMgOB5AICMbBmNx+q
xoiIrJBd2txSlBEUYZqFxMM0CNQbSwjhoyEEeMD3byxqFIYatvIssGQU1Tlq5Bl2y2cnquwvFm+KlGyMvk/CsWs/JrI1jFQ7Eavq0PM6lLjFqGq56dRfC2ovHlryAHffKVuFt/1gsDaUlRawtx4L5PaE8vEyslDi069qYrZXyZuPswiiN8Tt64YGQaZLMhO+FHkySVXpMuviIzcAIHmjT0V2VErpKQuq3D6raqT+uSsrDFA1e3UVLiaFvN1JAylwIwcw4B7WDecwFTeR+4saVdLy/v43D1/i6EcNFWww1aAjugsMPJc4uHHNkL2hNpioS+Q9wXkWAKkBDJrIt1+UI9M7Cx7VYM44cHV63D1r+6uLnAzpn/soFZjDVKyg3ykVoflKZAneufwWKyTktWp+Otv5VAPAvlTPSZ/eMx8b9FpVxosYuShYgDnME+MKTWhqg2qrG+b4RjOjXFolMBKUmduxrhGVnj7W7CqMevJjgJFzJ/OOfdvNJ1hyootkyi+s9RWHhbgHHaLiUZ92AQQFJBSER+OOAiz91E6Znl5Kqm8Q3kc/qoZQiYTDPLEkp5cqlg0DAKRIh6MgvhIJuS5QK4D9rUhO+GeW00YyBSZFI7/TV2d1AuNZGAu72FOupvH+Q9h4+7ocDsD1qTOfXgE5mQf67MJrB0uwsZsHAMddGt9tg6/eOLHo1TzKUcRd2h+n1Hw/ve/H0TkfQ4++GB7fnZ2Fu94xzuw8847Y/HixTjjjDOwevXqLV11Dx3RWWA8/Mg6TM8M9NJsvdbZWD0A57XR/NZ/TRBAd+DW2LRxrrrAml7K7SLCMZsT073UQJ+W0FGYe2SJilrGHelx2gx6niLV725e3B3zXfeBboDBUGRjsS36Xs/q7771my/BtJm1oKDQz30a49aMZuy14zKImuCHrnxbrnlTb0UqubI9bBtwJH0wJrvtFU4JciikpFPbQVSTi4aIwwAC4lQu319N5d5oApChBSiGsGG0tSQXkFIRH4cKemUYspNrHx3XktOKYEW1UZGOMx2NWBE71jIVGRpygoFMMZQphqynihqmi5TVhzSfb3sDq3RzMtV1a5OvaJuZvI91w3EMpN2QwthqGmSoFpfWimWiItdYx4IO5srHL22h68JB6paY72dUHHbYYXjkkUfs52c/+5k994d/+If4zne+gwsvvBA/+clP8PDDD+OVr3zllqx2Cd2qqwXG7XeuahxISTsP24HJsVAwk2cpILdTZ5RnmCreuk0Ws5klaUdWaVf8tLvZ7XLwIGBFNPcIzw+xa4lx2kPLMVNMbh1te4Vlhv1fpE08gtKQ3T3n5m20qlChf03fOhL2X74T7tu4fjRLzihpzZc6C5PLYnxjggeCsoZUxlapM1Y0kDjvG6N6B3NXGMf2r2iyCOgCJEES1xg9qVJWsTaguiDWpE2tXWDkUkKIYl+5uuXVdefcUu00VjStIkaEHJCERJRXS5l+x/h0ZFLNuLeDEjSWLMKcnPKONeUxmMrGMJnMoSeMRzojIZT0LPRV1qs5pJjgod25vA6SyYtn9MTg8QYd/3siTVOsWLGidHz9+vX43Oc+h6985Ss45ZRTAABf+MIXcMghh+Dqq6/Gscce+6To01l0Fhjf/f4N6kvDM81QVhX0SL+omu0enLxCT93oGDGVBhTJiL2SqWkeAoTepbxPsN6E+o9rCQgtJPbjRHiOlR9/869IbOQ7lgBJgExYLbvu6fqKQC+XGLkFhx9bQKT8CgtH7FLZss3vhlkFl6RtKfx/xzwT/3nPfa3SUukLgovYJnPFKZO/gsDHimDWHlaasLazLtUo4U6dUBvLB5rvx9oCoU2KDmFgv0FrCVMdg3bUK5IoHx2ZU+PUTKVlI7Ck5d4Fi928xXEZWHYKkkNWN0aCTLP9Nu3PADYMJXq0JFJ2qEf82HTexyBXW0nknDrWGqccoyuKWDmzsof47vD15YqtfAg1kZHn+xkVd9xxB1auXImnP/3p+N3f/V3cf
//9AIBrr70Ww+EQL3zhC23agw8+GE972tNw1VVXbbF6h+gsOguIwSDDjb95sF2Hbjp/oAj0Z94CTSfJ0JYOVhZ1beFxYd/eYm842gfQG4BjfR3MQK4KZmNJ0odqd19H0J/XvEQxtIXJTOVJVhajRCtCRTrTB0f9byrqUKlb20GW/K/W6u7qFStPj+eb40cTggGkCeFFB++P4/bbG1+85vrR5DGqV7iZCozQfiVESE7sHCHYgqROnvkEg2dxK7GfFnUWjyYUejX6erCOkGxIlhOrp4nkjKKfiU9DmnBIWZCoaCTmwKphlpEDvsscQ4DtkrW4QkWTm2ksjt7oUq8myCHAOduIzE0Yco61wxwTSSxGTpPFRV3wGTkGQcBAaYlxMcCYDhJokEm1V5eqB8BQfYsotZOWrBl8SBr3XnRgq3otFDbHxyYmAwA2bNjgHR8bG8PY2Fgp/THHHIPzzz8fBx10EB555BF84AMfwPHHH4+bbroJq1atQr/fxw477ODlWb58OVatWjUvPevQEZ0FRJZFg36UYK0kiLz0RQmJNi0IAus1w6bvzQklX5mCKLgEKpTpH7I+HYZcpergZi3qqpg6UtN0zolIDKFKUhYSnhrrSilPg6qxY6HTtE3nbl+A4nvdih1bp4Z+PU0IyxZP4PGpaXz/1jvw/Vvv8Mpogh2Gm9rGkdeakESYSBXJaSrTg3Az+/LrxsTmoH81N27L9ixdNE0o6qfNNpfnGksFIcsF0tL2CkU6KaECFzptILUfjekYNB3T7VS2gph8OQsklDdMt/nWK4kEA5mjL9oQRiVzJu+hL3L0ENs2ornFjH4MYEaOgUEYS3JIBoYsIIhLPjkDCUwkQ50msXtaAYyEGSnl6An/zfHk3V7WqMv2gr322sv7/b73vQ/vf//7S+lOO+00+/0Zz3gGjjnmGOy99974+te/jomJiSdbzSg6orOAmJjoY7fdluLRRzc0JyZqPVAQHMuGWaEFRPty+zYXD1sRBQNFzJWw1zKmjZrOPRwCGf4QYUgOhyQBqNargpSNyrsIDgkJYPULrDYcroY1CWMWG6NUrG6hIjVgAENmrJmark7QJMPcF60IgDdy2aCLm0VYKvJYEpWjfA+6VhxyD5Z1Je23Fm4y20h2npRZiOa7cHPeDUw+s6VvLgWEkNZa6G55J5nUG47OZbadIXKXF5A5CzBDWsdekxYQpPJmUiAVsmQlIgJybdZMAgsOI8FQSvST+JYNoR4AqQCBYIyJYWuLUCHDv29nZU9No2lylnJWUmLIAokU2tLjtAkTMgCZ3uIiTVTM9hfscioOWHz4CHo99ZDYAntd6XZ44IEHsHTpUns8Zs2JYYcddsCBBx6IO++8Ey960YswGAywbt06z6qzevXqqE/PlsLWPcG4nYOIsNeeO43W0bWxTADx3tN0SqQC90kd+I9T/WazuT2ulU+2461Tyy1GQumS61g3Ui9DN7ubu3WJWh/atkUL4uChYqVOuKN6dPrJHZRjcNPOp825nL308lsFk7BxSbkzksGJjd3UpoxiD48RQPpfhaGgMXcoywu1zf41ZWbnA705ZewjWpRtCoic4fi95CYYNUZNUarJS5AygZQJcpkgy7QPj1TL2dkubU/0MSPBHIeOpaPj9uhzRRpn1RcEhjKxS82NA/NQKufecmwaBZWv8Nkx+ezLVkUdc04goeVupv3LlO8ROq+OCpnz1mICA0qdV0JgkxzDIE9xyq6/g1fu8caRttRYCJj1Z/P5mLZaunSp92lLdDZt2oS77roLu+++O4466ij0ej1cdtll9vxtt92G+++/H8cdd9yT0gZAZ9FZUFx9zd245ob7AETfm4sOwG5KhDZ230rrjSlA9qB3Jzby9dfcKb9pIKs4Z7ItWzKB9ZtmSkuH3fyWuKRUPh+WZU09kbKbrFuuYhU6h+WbMS+2ast2zhUOx81WEidhyUoRnKuAtSxVnLN6VCQYbUwtWJvXnjWpEaYbYTzwwhhUE
Z5KeX7tDfFmqUwdE6KHWQz19FvwtElUOvEDTb46umW4sG7YqR1Gw55XAiwZImm/aaeb11hnXDAAZuEc94UyC2Q5gyTrWCcql+QEBLVTuvCykD5PKhAgVFTjOLFj5CyRoOyArbZzILtvlajQryxTp6H5UB0rLVoeobhmzG7EAT/ttOyDaOlWT3KAhdm9/D3veQ9e/vKXY++998bDDz+M973vfUiSBK997WuxbNkyvOUtb8E555yDnXbaCUuXLsU73/lOHHfccU/aiiugIzoLigu/dQ0oUVs7QAKk973yCYGOSZHDEoJWSMppGSq2TcXLjIqVw1DT9g0bQlb5t5l58SemZoBQDEUsIgnQ2LvrfsnILvVTLUhME2nwXubNwTb2ziq5LaxIntHHSW+nBSvkt6lTK0RIWhzspWtNHs3yt5blqeFTBRiz007hQMpoEY2ZI7qpm2dGDvXxGFtmNW0W7RX1zWd8WqK3rJYhi+fDDpyyyBvVWA8mhhCNQng4kp4i38pnC5Lj+eYAensF1kuu/fpleSFTVCypz2QCkWTq1om0U64DOKUsHYIRg0pLukGlJPRFVrOZZxOFr9u/qiCa7BwNUwHANx/6Jp6z03OwYvzJm27ZVvHggw/ita99LdasWYNdd90VL3jBC3D11Vdj1113BQB84hOfgBACZ5xxBubm5nDqqafin//5n59UnbarqaumiIwxXHjhhTj44IMxPj6OI444At/73veeIm2BG25+UK30JgL3CHJcIB8XkOM6SrKgIpCc+6La4nX8RccfDAKQuK9lgpqXTJMmPJGiCgsTKnsmtueV3lJv4Ln3njupmDwplS0hm/Nm5JphGkhMoViFviahTuz64LSGQ5DIvVYNebxiQh3rSE5ss88GeNfU1bFWjrEkBh9PmTqoDMFK64qU5gLUitpMGAZfp4eja5UCOgBhNaqUJHjxByKQkpBnJmIyavQoI0zaKvaPk85Pr+oQj4Ls3wRSR3kub2lBGOSp8h2qvGiqjCZdWU+VDXVww7lcMdGyboXVMSpTz5WJirDa7ORranoBgf987D8bUi08FiIy8le/+lU8/PDDmJubw4MPPoivfvWr2G+//ez58fFxfOpTn8ITTzyBqakpXHTRRU+qfw6wnREdoD4iY4grr7wSr33ta/GWt7wF1113HU4//XScfvrpuOmmm54SXS0piG2imBDQJ3BKoJTw8pcciT9956mN2xdIAK8747l45hF74bdOPgT7H7Acu+y6BJM7jCl/HKdcNxaOGfzMVhKsI+C7ugIoYu1UgEya4OA9jzzh9IEUif7cDpUvcRUibL0i44xth9hT0MIaUxQASxg8VVoOyu7QUb4nfH0B51xNnWOnXDJbIixRWQ7J2VywI7zdjgVe0dHyG8k+Be3G1WXG5DTJbuw2uUKGx+6rPw6Zav1oRKYXmpbDC9Esv37awliADNHxyROgfHZevvJEzGUJZrMUs1mKQS7spqTDXGCY+7F2GnWCwHSeKterSNoqaw0IyFjYvcNKW2qgvAFrtQ4SD888XK/oVoD5b+g5/6mvrQHb3dRVVUTGGP7+7/8eL3nJS/BHf/RHAIC/+qu/wiWXXIJ/+qd/wqc//eknU00AwPLdluL+R9aqH+6qKmcpAwlg8eQ43vDq43DVDfeiN5ZiOJepZIE8SQBNJLjge9cUB8OB0s1kbdZBP0n6P1KDRMmMa6wAXD4knXg/Lmw/yOwEOiTnRA1iA51LWljpHwY2teOhsX5one1uvDUv31WI9oFVb4GRdvLkmLdpo2uVz4/7O9yUNFJnc7iRUHBxC6hMLiFwlKuQU2UM8trBHddl8LsO4ao0Nz2jhmtw8Itb8ZLNIXNVA7IJn1OefnJvvHq5RALMshQDpyYXpHQSCQaxv/oqTG91rQTpBXZNbKvw3SHnGABMijF87f6fw30AJQsMWECQqt9AEjKW6IncbgRaPTVlpKtVWT2RA056UdJVyVNTcQIpSbAQyFjF9vHbl
poWjFoICIwn4y1SdtgasN1ZdKoiMsZw1VVXeREaAeDUU09tjNA4NzeHDRs2eJ/Nwa67LlEEx5Ac87KoVyBxQpACeO5z98Vf/P138dHPXIIB55ooFFsgMPQU0ZhA7sY4qSI5ruXIm2B3j7v59fFgsAm7FBnuDu7oF7VW2HKoLKwODjFjQrGRpyPG1jeFXxeCimtSMYVXx7sYKHbsdgld+HYYWkxiMp3zEn4dSmUahE9rgxWrEgx/d28EJNEcqCE5VSp4nMFtc/PPMyE2oKkOngKO0OB6j3Jvkf2vDiFh8TOwmfvUq5RK90azBgCAnXuL0RdV0zShRr5gtfqqvBHoCIqgfcOR/d8lDczAtN3d27u5lI56NZj5Ppf3MJP1MZv3kNXqXsgnLW/5+G5Y1lsGFY1Zr5aSymI0k/cwJ/sYyB6m8zFsGvYxk/Uwl5WdvdTGpc01lpB4zo7PaU64wFiova62NmxXFp26iIxLliwppV+1ahWWL1/uHWsTofG8887DBz7wgXnrmyTCG5TMtlUGBICJ8MOf3aoP6G0fUtaxRszmecDY4h5mhln922nNK5INFB/mN46hxlky4CRZxQANQhE92QQSDAcgDXfYKKkfEgJDlMKGMnpxMdCXSFWQpbY/C8YyS3Jigup+B8c8i1DYbpG85lCt9ZiDNBGyRHDaw2WfsQLbkJtgX7GIOrAmoxqdN7sPDRl0pB2tLXKEMkykX0aV6jHG6v5V55WlgK1uIyyatPkfm50GiJGIQoab3/6u8XOJBRMMp4jqdBI1DtRVehe6UY01Sl24qttwIBOkosKXRjfrUPZwxp4n4thdDsMhS/bD44O1+MTtn8dtG+8GA8hYLU0v5YdaMZbnCWaksiSlJJEQQ0JgTvbAzJhMh0rTiII79nbCkcuObG6QBcZCrLraGrFdWXROO+00nHnmmXjGM56BU089Fd/73vewbt06fP3rX9+i5Zx77rlYv369/TzwwAObJWf9xln1RQ8usTGPgKLj1d9VJGK1r1WeAHkKTA+yxre+JlTzIypZTABVNhIonx1zXhMcz7LjTlVFSjFv0ezKcD+6LE6hqHnNsm5O9MdYXGrapNXja/LPd7sGd2APHYnbEIvGN/qG8yHpiz35wagTM7x4hI9qBskawtTkdDzSbewxRxTEZ7OfBSri7rjy7Qhu/mtZcZdRj6yTfpGpcWJWvjxVgpUufuwgbdALjpXkMnt/6+FQWzb7YFF0NVa5nGq2nTl7h1ndtf6ZFBAk8OD0ehy6dH8QEXYd2wl/fNDv4yXLX4x9Jg/EQKYVfSI5WgsMZYqZvI+ZXFl9Mk6QI8V03vMuv5GVS8JD05vw5fu/2bJ9Oiw0tiuLTgg3ImMMK1aswOrVq71jbSI0Vu3xMSo2TM1WmDEKeC++ztpHNWVDpRVZdV3wfHDWqc/GN3/ya8wN1Jw4G4ITey2MWnicikaUtONTMBizmTSvGjjNmOmeD9M55TV1S3ZccgYo9i7CZsKMVVVLoxuufx1C8mEO2npEnMOjsl2CELNCNehqTrXR2ca3CaIXWxmhBTAGl4SGy9ENe96sh4IAWVghyR4z5TUJc6w6ga71VpRypd2pIDVFWm5drl25xMglgYOrUsSLKfbC8qGm3YAEuZToJ1x6GXN1TqjYE6k43gT/bvG4JIDZvIe5nNGjHGQdpwmSGYIIOSR+/viNug6Er91/Kb547/cgtQM8g5CDIFgtky+vLDM+Oob4+BOAmUyxQQqMCYlEbxqYcWItHN9/5AocvPQAHLfzUS3qujDoLDoK25VFJ4QbkTGG4447zovQCACXXHLJkxqh0cVjazepLw0DRwhjuYhO4dQg7OxicqM6EPCVH/8KczIHpwD34Awu5OevHZziJxjKiZld3xdjjWmxUscrtypdOPBTOY8d513C1CQ3VKTK0mQsOVbhsAKRPK6+LpEJSUhddOaIfKuPK9v8DnUJ26AFkynG5hjjbPcGTIYZVmXhiu9GAbfuNfdEI
+p0qIRzA3FwvHY1VVzR2NLvuhuTS59yemHjAZWXshe/jWyBTApNtMJ0yvIylFTybWleIu8TC/aOFH+HnGIoE2SsPhJJYV3hHAzG9x6+Ep+/57vIWaJYQKHqK1G1A7e7rQ571iVjOSIQck4wkD0MZE+TOUOMCBc+8N26Ci44ulVXCtsV0XnPe96Dn/zkJ7j33ntx5ZVX4n/8j/9hIzICwBve8Aace+65Nv3//t//Gz/4wQ/w8Y9/HLfeeive//7345prrsEf/MEfPCX6Jo1vhjV9uEtyXBuxCPfXLYiEerDjPY89Ghmkc10ou2VV2aU345moWm1UVq4CLe5ir982312SY95WXT1GGNxteue7JRThSrRYPUN3BJOvKo+rY8VgHmvSEpGrS2wzbAasrBhTMG/cIwpvIh0u0XQtO+ycD9N6upYRj8fSRlmlSDnmTpnscIWlxhbXQBbcb+WkqgKSC+LBDGQ5MMyMfK2r3iYidjNIJgxygUwTGvUhDKWA1GRnkCd2uknJq2g/RzdyAjCGN7XbvYU65RBgCSwf2xlDmeGC+35QW05sWwqzGktK2DoMc+WMrLWq6VqUjg9Mr+qmr7YBbFdEx0RkPOigg/DqV78aO++8sxeR8f7778cjjzxi0z/vec/DV77yFXzmM5/BkUceiW984xv493//dxx++FOzUduzD39ahdm4GgVp0fCmjCrIDgFISE8LmbcReH9NuvA4k15NZRWY30MdNWZUrIAaiTQ1pa3zsXHIjvdEcJClqeqBbLtQrWFApeBc1KoUy9+ifdj+F+SL5m3Z4A3JOPItLLeKWFceG4Vo6hGfJBRLZxRE0mUDdcSpso5UnSeqNAUB9bQMqQgP53C87CMSGt+m210ztd+VIToCrOaekeUJsjwM+hcpRd+kkgWGeYJMB+9z3waUdSdBbnxr4JO5QhejubS/2ebwy6x2ZAYyJHhgZh1e+bMPYfXspsZVWu7qIVOmmd4yfkU5wxI+FaW7XiYA3D+99cbT6Sw6CtuVj85Xv/rV2vNXXHFF6diZZ56JM88880nSqB5nvfw5uPLau23fGX37dv6SWWVl953zcxQDMntvstKZZmK9PyHJQq7U00Qs4G0/kCcopjhaLBchOB1azaDu1jW6Q3kkfSxNW8pVsuZUFeTIdJPbmHd1ulaQh9oB3U0TWhi8RqrOX9c+3vnYgapBRN8/lYasmvyxVVjR3+7NHVqWwrIabz2uCKKonxcTv8dcw7BxK+pTecvHrk1UhntTcaQerpCKorRVJGYdsSu7UNHuQTnGRhJLlksBIolEVPuy2CN19zIzMml2LxcY5hKCgCQITjjM1QajghiJYD2YKguP2Wer3iJUnNiYzQAQYCL0RN64Ws7oUYR1KjKors5P29T1rR9uALBHfaIFguL48yMq24O9aruy6GxrePYRT8MfnH2Sdyz6suisYpL6d8whshhUyb7Eml3Bw/g4dnWSWQGkX85kojb9lD3AmY5uf7PXdLitX4QDxMY+6/NiVmFtKYT1dQcwLutvdYmQsLZ1ne/7Ul3+kOR4qtYp2PZc1DTYoBQQX4DXxPRj51w5FaySzDkRO+9c8KCc2hg0EirEQ6hLBYwvTB2q/HZkxK8nmrbxhiP7zlImM8rS4/vZbM4TS3pbCCCXACNBzgmGeWKjIA/zBKyXRUpOkMlUbzWgdk4f5oVVaJRyc71zetV5186dK3uNPuOCMcgF5vIEc3mCjJvi+QA7j+04oq5PHTqLjkJHdBYYZ/3Oc3D44XtA9hwLCpRlmwmQqSYjgpALQLqLvaj8cft62VN7aAFlv50YyP7ni68bb0oymAuHgIpCOCnq12aUtxYLV7ERl2i3Ud7qo52i7TJ5XZb5bo57Dr3ueGmOmeX3LconACcesC+EttqNwn4aq1YTS6iR0LhWJvfj5tV/PWueG7iyLUq6EEqkKvxNgL+3BwV/9ffaUAMRNt+CNHhsrbGedQkKElR+bFQlzdSSlLBBCBV5E
hGCUo3ay81AnidafvjAxQhSNebyxDoAB/aSihz+zZXJxI/07OjoftxjkoG5LEEu/S6IdYJc+xnNSeVcLdm/lRV3TZQ/D9RU15xM1XYTkYYjAPsv3gd7THQbe27t2K6mrrZFzA0z3HjHw6oHMVaWSDoGVByZ+rjo6g+zHWwZUH1y09tk03EB5VOABguCE3Mnet6QAteU3abzdMeiKnrusrLgsAk3UttRew7e/ikvLk9FGnssHFQr9PLkE/DTu+7FSQfti50nJ/H165391povua+TW1aFb5KXrHRR1Vlz77SF4RxtLByeDm75+nsxFaNTVZGQ2mkpk7wt6aIiwIx5cWgxdeHqvdlgAudqyxeGKZP98+w0Uswq0/CQxwIOxpDnhF6vnBdo2RZGjhQgISvz1E9LMTIWSLTZzBC73Hs70rurBzusz+U9x4AtkQoJKYvNIczUVIYEAhJjSR685fl/GYTZPMVEkjlyCQkJvHHfhXF7aItueblCZ9FZYLzvU//hePnHUZyj+oSuZSehUiySJjLjTlW5Im2iCjnmRTtqpQktAVWWgQrlPKJlSEQdAv0ltA+SaZcK+XWDuneuiXRUOT3Xla3rJZlxxe334LZHH8OJ++8DQU6DR9rHa5ugLO981UBTfar2TF0W/+0d9fdrVSnmTdtrICdleC2MGW1UZlarSfxZs5cidp/XgiuJBWAGfQJLAucEmQEsoX7bTShNIcac6CtWt5ybvRvdTVce3BnCTpc5Ggb5mmAsM9UPbJPzsHGgVtNgpJ2fPQkeQbE1cMkcCMM8caa03JVlyn9lLjfTU1UKEST8abF9Fu2J9x/2hzhoydPrKrHg6KauFDqLzgJianoO/3ntXa0sLu640XTbWauK6Rd0v0h5IcOItB13OBUEJ21kYLGzBq5+JlS9DF6Q3DTk9NFmYHQ74VjlXN3aPHP6TZwdndwKhcXImhF/pLHTNEgDGfLUCcqWzLjhodU47xWnYtPcANc+8LCfWeexWWqIlZuntn7hEZcz2CB58fxheZ68FjskNvF2a8ypql94MNL+yjsjcuErS3UajeFP4xBACay/f6kiVcwNiDoVu87G/kUzK39cvdTAbfmvJFDCNi9LtjuSRy03rPKXl0OXyU6WJ0icmAcqKGFunYrbWXY0meB4jMU2MrKcIASDSze677AcIzkmveIwKqHNbeugiF1zB8wYSoEcjPcc9CYcv9vWv89VhwId0VlA/PI390NqU3lVP1l6/JosClCOxCWBBDUt5lh6jQXG7F9VNXZ4r+pmYHatUOHLNDlWFKCYXjAkR0/t2I0kQ3kGLYIFxmBe8D2y59aH9fjtyo6QFGuh8RhdDRp09YhlTR4C8Bff/RGGsSi45i8FGWrKq4NPVrk6YwuSELUgtSB97P5okl6Xpk6G2oa7HSKDpVsG5xxMTzpvKiUmGxFRRUTcEqvqYfiaiebrZCZNdsi5531LTpGniE1TBU20zEsDAywTADkEwfrPJEnzg2GCDQJAItSqqjYkh1lFIjZeM6VVpvYnB79RSqdIY5Wu7cxUmVT7s3/6ru9gyAKnLH92q3wLiW7qSqEjOguI2YGO2BV2gua7OUjOB/X9uTf9FHsLpkKuN2iyf8weR5A4Itv2oUY/TTBslhSlaTSrh2t5cUmJWWlSUY+acSBuyQnrFWujGMmJnKsq135p6hcaBn8GFAGuIk4j6NWkU3Ht/M6ewrYwisUsNISKoYKKkTb2Ru/8bTXU1I/LAXPy0xNQbCdSizaEigBDdsg5ZqZSQj30OTP1RBROY8UetAYVCVpmOMBT2QoVrUzTjePraHcZl4nuF9hbhl0ryT5wbAMKpkJZh4AaUgdV1VwW00YE1mQpqE1ERhGfBw5BjJXRLrhhzmqZ/GNz63HeLRfg/ulVeOO+L63LtOAogkDOT8a2js5HZwGx3547Fz8cMsMUWCQIAJHaJkFfMY+QIOhTR4W2/bKrg1nC7viJhNYI73dQrpFldzuPEQ+3f
uFSdndjTpM2IHshLMmpeHO3+lYRQZOOm9OEci0aOu06vTyC9lT2LW5dawgJgNJ9ULrfNmNM5dKX4FwTE6q6VoHTChmBbcxctagiQ+bhReRmE9qEao4Vn3AVURvE0vqDdfDAwJRR7ytUlknaekMBsTGRlEfXEwByFqVNO8M8bMp32wpFUMI6f6QsJ2RSIJeJ+rBAHujLrHx/Mikwm6WVS9qLVV0q/1ASckn48r2X4PaNm7ehc4enFh3RWUAcsPduOPTpwdJEQqyPAgC7M7g0y53dcwJFTBwjp6FTJ/sfUNrawfXzEVArvjSBKA3sddaTWN3C84GFgiiSr0a297cuAnKNHt7pEYhGiehVtHmt0TwsLxjYvXEzJqDtNa7UgBvra0+b8gwxRqQNRtDPZKvKGpKs2owx4gGUyU5MVl05MdSSL4flA76pwCFCytkYBcs1ZMc5x3Ypebyc8FwdYSo28mwmKICKaaMIReHXE1qPmiIqm3RuY3lRlmU8To2/QKN8kXN2yY5PFs0UU5hPxexR5eVMet8sTZ5YYDrrY2aYeI7YRre5TG3mmeu3EUXCBL7z4M+aKr+gkKAt8tnW0RGdBcZ7f/9ULJroR8/ZAVwQMhMYT39kqoP7JZqEBGTD9p0haXDPA2AdXJCD49H0xlpiLEvmE5brR4avv8vqLCctCIfR1frctCiq3TxJszwvXSRttC2rLCAVg3aJRMQIbA0RrB3PiUfqAbxLxc6PuutUYiujQ9sTCnmubEar+6RSt6oDtfeIJiU1p+tOlMlI1UPg1DnMo60zYPKsOCbGjom3Y8oD3Bg8opYQ+ecINqJypF7GBySMbxOTH4vDw0wYygRzGWGYR3SqcVYyK7rcKarCnySeT0JbdkoPnvqdcYKBTGDi8gylwOwwBWuiNLQrtNQGFjdtuLdCv60D3aorhY7oLDCevucu+NwHXofluy7xyIbU1pusR8jNbuHhMxysMjX9vtkJnFNSn8RureN1zoYkcU99pLflg5MORfmeBSI2uDsB9tw0oUXfG8Dn+Rx5Afrakp15wiOSwq+fNyhRUNdARqN1qeqH+72JuEVJGJfvp0Bk7NO6TBelpcrxskKU1KpK1ERKTF4dzdibgZmnZadyAVOdPlGSowssDdTuQ+TfVyatRyxYgGWiPwJ5pi0YlkSQ/h0nXUWQPQRvSVUVI0gp7H5ZUk/rlHRHpK08qGkmE8aI7X9V0EH9rL46GnzDoMzMyLmuo1DEa9Owj6lhH3N5ikyTo5zVlFsRjJCxYThTW16HrQOdM/JWgK9edh0e3rAJcIJ0saD6TtN08PqvHYxSfSzc7FMUiXJjmSEqD54p1PJw8xYIdcz2WXn1S7olRA7sYB7RfRRfgToQ9BJx5/e84azSiVq4Qt8h8928XcbOh8JGsFh5edocM+pU9+d+IVQ+zbEfOp1dEt1qYGeMJQnm8rwxaeP5EeofT6AuEBnV9KHiIPy6VtaRYELrMmoMD006RW4udwm5nz5mcVI7pCv/mXikSjaxbAKZea6Wo4cbC3P40HLNfeSUZSw7ACBIsdtR2oUIyHKBXiJbXFOjqwCDtYN3vTVHldHIjKP52Wl/hoBkRgKJ8WSslHZrQueMrNBZdBYYa9ZP4eL/vFH90LZdJmp3ZcK+0lpSIjcmkdrVPCUgpTLJceR5L52udQbw9Cq96VesyLF/w4EqHFg2B819m4XVtc1z67atsRi5U3WE0rSdRZ2fECHeFm0xQltVvRX7h5yKhlMNVYLJ+RPmi9aN8NZnH43JXm8kQ5SvM1crNNL9Q9ZSwbI4VH5minS1c1RM/i7kjYiRlXKSypeA6HEzmMVYWTXTZRbIc7KDYTEoxm9cY8kw0zq5tt7kkpypMpU/tn1DE4zDr/ULqn1AVEOovbGE3UureVCul1oPv21yFti1v+NmS3sq0E1dKXQWnQXGVTfda2PpWNTuyxPAISWlwKERMBfpqG7UEbrfDlcwaWsQBwOAjAz6tVZv0y+bF
9HNRGkWgOPdvUfGGl7qPCfbiG61z/0IxIsQTEVUpWkSUic/OOYTPSqndBrPyxu2m3v9GqxGRMA//dcvbObJXg8SjNksa54ZqauMUXJz7h8mZeUMCFll2Y3KGblck8dpvArCYtOZ+7ikX5HRJbJUazqpeTBAkDrQYJXGJouUAoIkCkffwqTMptNipbNkAul4PTaoHxVkxhAhAiCE1GUp8jXIBYSU6KWyZtm3smTlcIMREhhNliQCUStG2gBV78GWMks/SegsOgod0VlgXHXTvV5nEp0uaAC7loamXlmnixEBq4NrvakayHRfVxqoRnkmzIDqxDeJZa+bPSBUk6zSO27MzylsY6OTsdhsDkZpg5qButW0VQ3RqB1LYxYMh+x4swAUJBtFP7hkTiWYzXJI5koi2Uamd3VNzKERr1ePEgw5byhnRJh7OSSE7o9WxClGTPTUScU92ySTXa9dQqtpHJeME6m3e38Ju68A2/QEzgVIMoRg9ZCSBHtvYypidS4Tq4e3IitDieyY7+7y8pxVMMBUMHIG0gRRguQuWy/KKqdRdSAkoqkDJvxm/QPIZI5UtI1G2WEh0BGdBcQwy/GzG+/xX7Dr3pAD2MfQ+Ns0PJdmYInyFvPm1a5ozzIQK7YN5/ITR8rwX3DLY4ZJEwugx4WOtVP3EYJUp1PjOSOkRd3Z/RIQiNKL/GYMxATfd8k7UZlDvZ+3Se/5doygnyI5LVh89P6qYF0GIzw/WS6LCMPE1e08gkyPeLk3rCW0VQw7Ise13mgdyhaeNrIi6c2Scfug1stQLyPcznLkWHmYGVLvmUlkYvFUXUP/u+QEw0xFXzZbcqrYN2FaVWomASESDHNGmsiApBVpJQskVLYWmbRZrkiLoLzRxyhniVk5wGIxUZ9wgcBbYOqps+h0mBd+dfuDmJoZ2N+2b3TN1jX9T2HNoHYWoIZ9aixXaujYzUqjqEJ6OqONHEB3LsYPyHnh9EiK+4LL/vnancyNDFTXOfbiHdvctJSprn0iacLk3qWKWEk8wtVUVpP8lrJUPienO1g33TMjwu4/Va+MX1BT2hz1VsgaCCZI5ojflcPmamSWoxNHLDKboRfgW3BYb6fiEY6m+7HkNe4/HHXXIVzSTo1Ws/hbBVHTthNky3O3dmAWkNJMSLlWn1BH7dMjJYQQGOaKVBl1jXdAT+8NNsyFirCMYnpN+QcZPx/GMBeWBwqKb12RUoLJrdghubCwzU/Gto6O6CwgNk7PVVogqroDr98hlDe8rLgrW1kq9GmZwhKWUqdCUIEJ6wQQWjlmxiwu3rhgZLn9cli/uk7ejDWiJpmWbx1T5xmZ2FjNwgEuOt0Qtq17DuXzUVQYNcw5S0RGrc8I6e2b84hlEFyrQosCKsnOqJaNMtjo04Z8NZL3yF5OI99PhQWj+Fn4wfiWNOUPEyP0ZZITK0ogz2Vp3ypvqflmt7G27kQsK21QDNRcEYfHLQeQnEDmrIO6U+BPx7o9lHktl0Dus+lg9Zla0QVW8XcARj8trDzMQMYSG4Yz2KG/aLSKdXhK0RGdBcTTlu+ovrhkx1hL9ApLS3pca4exZIRPvfMAupzHvJW0clZ2ZWkCYPNDy2gzqJmyHAtM+HILwI/mzEE3WvES6gmp00O3EzuNYbIwnHY0ViUjlspFhWJj3b0di2IZwkxNJKctzDWvOG6+slvxmnJaDfYBNpde1K6kKmEzmeeoiG7+aW5+58Ey90hAvj1y4BJ3NbaOgMiFteUGHQYYUkLtXO5KqLWg+OWwFGDKvXoV9THWls290jVvYDUwZatoxBK9NE7EYpAsbAJh98ZSsXbGRQ+z0oQ6KNqnTRDHQZagnxZhEnIGHppes9USHQkCzfPZ6SIjd5gXDtxrVxz0tN0gnO0WzMDPovCvsIOysaYkagm6CVjnQaeRifrLibLQcGpPV8L2aYElxfTx3vkmGH1FuYuLkpxAdquB3+0/wwEBgb4mqB8VwRjt3e8sB2cdV6TxzTPWy
bbdfiIMvhiT27ZvcdMH1602bUWCEvkYxVJTN5aViN0IA1+jDuYtvf5Qk06lvDH2GOhiLIFF4D0uHlhJ4Nw5pwMWqsidFNev8ViM/avvzORtGaG2l2jbzqyXmqssJsKyX4aw8jcHo1pzfH+g8j5V7rkgp/dX6uXvADDIE2wcAIuTCSTN83BRubl0gxMSJtKteOqKy+EDNuezraMjOguMP3/DC9HrJRCCnIFPj7SCwIn/CXuL6DhpogSbjzuQo74v9aw+plN3BudRYV8q9bYRZmNSG9iwLuNmDrCWADpEylqjTNtEpqjY/ucqX1FcIKNFrLLSOS+mY019ajEKEXHzaPZq/tWKqtInQi5Hwih6tyEu887L1npaItAu+QlDjLvnYnFobDwe52aUhnFHdGQ3jyOn8S3Fl6VeTto2csiU4w+gdDbi9Ke2muXHtoUopbKWpPIO725AQl/n+nJNXrs9BoB1cwNI3ryYOsqPRz38e07sjH0X7bYZUjo8leiIzgLj0H1W4AvnvhbHHrr3ZvXlpTwtn9wo2Qm3MjAEJwwaOKJ+Vi+XNEXS2S0VRinHWI3MxyV3rnWoiYiMcjxc5dVkSWmJaHaXqCEy/pEZ0OKftmVaslNhYSsJbEN+QhluWSO0lyUf9m9F4cbC16RnZdkUP81uNirITqtwLDFrg3PMEJ7oef29sZ1UOjO1xJL0R6u/uQTRgbvEnGXxlt9IdjRhyXOBXO8TJWV9vrIlScElWTqlXoVFyDKBLBO1u5pLxzqRA+jTOPqiV69/Cb5ev7f/ixpWoi0suoCBCh3R2QpwwJ67YJ+n7QIeUxabpvvKPpMicqwKQd/pvphKOJuDOkTBs66TM+CM0HFai04F7IuuG3nYtUI1wLNgRc1boTINuqIgDu4Jl4RVkoktMKCQ+8W5ZhT8RnBtqohcXCWtuRlHq4halemP65NYRNp7c/08vHuPSz/KhCx27UMrDZzfDeYGn4o4F6CpOjpjfDCMXNT5gAHOXXkCJkxQozXFkiQ/XTyfehBYCv2JW3mMRYn1cnBmQp6nlvCYtLI0/VcFQpYLJx/ZPbbMpSiOlfMWUzHqyIbhAK952vPtVFSbO9OkESD874N+G6fu/swWuRYO9jrM87Oto3NG3grw6f+4Chdcdq36oeJq+R2zBsPpCl1/EC7OV7wMFWmBwjHVtaCExMLpL81ChVIk5aZyKk6X0lb08ewkqTrnOY5WJRxxh25G0PyBjuGlKZGdJ+kFqHRLhP5PIdlh/1Td9ShxHLcRKupTsv6UEpR18fQYsa1sXuNdTs7BKlIWtkXsmFuBYCl53S3etv+ve+Fnd/qkKl3TPcxBIltH9YVz1VZUEV5C+ViTc5L9dKZTIY7mBwis12+bMoxcfyqv+JvnCUhI5Z8IQEqzX1V9ewGEXCZgN8pohKHnUoBIBk7V0GVpYgbCZ+74OdKk2ELDLDevLl1ZkDblAuNisk7RDlsROovOAuOJjdP47A/+qzhAxdS954ysT0tC2Xqh80ihLDMSRdyIEJYIGYdYl+SED3hgUbB5I8TE9GnG+diOK1WWAkZhjal62a1AaXxqGixHGFBNHT2S0+Sf1MYSUpW2ovxGGAJaV/+o4aHd8GzHyQodSyTQyxgkDmWH415LuMSXjGlNBgna3g+hn413nvx7tALk3Sibh9bTHnUmB7L/+eltYyjzbOg4XbbAUEFOWE1RgZU/CjvbNoRv+Go6SkBKgTxLIPMib93DrfIUx9jxo2lG04ZyKO23ZfbRkmz20SLkXnkE479dZcnIJezu5x++8Ttb/e7lnTOyQkd0Fhg/vPa2Ym4dmuSkgOwD3APytCAQ0pnScV9QcwFwX32QFp8wKm5ohfAsQA2DpTTLtMnRxxCrROnKzkovk6ZiBbwXCLAK5JaXwPoQWUFt9gQb4RmNDt4V+SMvtfOGV34FKPzRZlA3XwSXNyetyFJFuEKi3AqVhKJlfo0Sp2K1dJZcx655m9kLOSZYciMaymw1c
DdYMbwH3v1eWX7kIOuprLAj8KZ5SK9SIsicIHOo6SwJsKzerLOw3gDVG4OW6+SuiFKDqhhhQ9D6tw9p9VFHFIkKB29ClrtyRJQDMwOZJE1yFIYyw/cfuqGlrguDjugodFNXC4zH1k+BoF8ww5cU8zKm4+rYt3PdCTN0ZOTYVdSWH5mr8c3r24x8Y9VpNBfDWn5qiZF7TG9fE4YeKVmzG0Bh+jbkJoYGq44V6URpbqWbK9utaCif/Dxh0jYkx0s3Eng0nycHREAvTTDM83Y6jmA9e9IQXg+DUfUypKJNvqp0881vYUx48OtVaeUJhRV3HEv/tAmKB8AhPW5+LsrXliHp3sRGPSdXmz6F7LSVUwbYWnaIypXbXL/fYSYc65m/oivnBMhypImZtlIbTuTMuq3iHVZCAvdPrdk8hZ4iqM1V5/dAds7IHeaNXZaqed7K5cnuAG/eovXqImmsN7F8KPKZqTD74hu8kbfqPMj5tEnrdoDGMuPoZD8N8CwL7t8aUlEnjAF/lZarVyyuTwMqLTtOe7n9ROxlvGRJqrG0tDsY6BO5tzj4lLLpa3PUypX4+5e+FIIIiXV4aCivEuz92RLwHMfdAalkqYigiVg0pgG86auaesWme7yyqi6Em8Gdrquaeqt9RuMntV1MkZjoG3zx20xx2Rvbe4CcHCOPjf6DI6VapSWlmdIi5LlAlpuVVdWrq7SmAIAsExhmqrNj70Hzr0EuE8wNEwyGAoNM6O8JhlnqDfThzhuLtuIYOguB8847D0cffTSWLFmC3XbbDaeffjpuu+02L81JJ52k9z0rPm9/+9ufVL06orPAOP7wfWH3qKvqHFyiEA6GTeSDoAbwYEWVN+jVddAI+rGG8iyhcpeqE8Ap1PRWL8jfZGWpG/gjwQhdHey0mpn2MvV35RrSlzj6wv9eh9qxCYUlzCWpXgyf8Jgj1Fv94grW6VIiLOr32mnojP8lYuUcTwRZyx0RcM3DD+Mrv74BHz/tNOwwMVESW1VctaUB/vXcXNLjknYguEcizHLkgdchZQ06FtNnKAhIhEk2rSiqZJ4ltt+CcFaiPpPrvFvyVWEC9DRW6UaSxTlDQtpM2bllGP8cl5CoqSz1Ye03JHOBPG8iU5occdUbVSyz2jVd2v2uVL48F56uBjlLvHjl4c2VXEA81auufvKTn+Ad73gHrr76alxyySUYDod48YtfjKmpKS/dW9/6VjzyyCP289GPfnQL19xHN3W1gJieG+KPv/S9Vv4Opq8zU0FgtKepjuXDynHKM9Ng8Udf+/q4g3AkIcPRx03rDnwjDDbBy1f5XISoGSMPV63Eig2K7gtpqGOba9JSx0pdGoRzhU4JERaP9fGV3z0TL/vClyuGL3008OuKQjdeLrmU/qf334ef3n9fmLT6mkaP+6UTatp9VETzGotMA4uvtX6wnTKuLsfNYSwFbH+HEaDtppzRZ9d5wI1ulerrkyO3XVmoHcgIxZ5vARs1xLdeKa2/bjYRrPIKp7OKQTR0dGYIUax+koEPEQCQIEjJlWUYfxx4eqtrM5q1KU7aBAinrDgE+y9ZPoqwpxyFBW5+MtriBz/4gff7/PPPx2677YZrr70WJ5xwgj0+OTmJFStWzEuvUdBZdBYQ//aTX+HWBx8bLUCefrBH8XPxLBXawmEtCdr6AjhpnI/UlpC6ez1Kcsz30ALVRl/zJbQ86br8/+y9eaAnR1Uv/jnV33vv7JNkJnuGbEAChLCErCoEWQK4IZHHE/gFFIP4gKeEpxLluYC+qMjiQwT1AYoQ5fFkUxRkXyRsgZCELJCELCSZbCQzme3e77fr/P6opU9VV1X393vvzL0zfE/Sc/vbXXXqVHV3nU+dOnUqCGAoBmwMhH5OcZmZSrSMAdGRytbZHj2VY4laWZ3uJuCpDz8R//zi5+OYgw7CQI1RSOp5dMgqO7oO/FLgI5C2tQhQMf1SkEDAHUnaFx2CIG/JaLZvKJfqp4Iku2gaLa88CL3Ni
fLljD/cJLVvtpeR57MGFsYiADOH9haeJn17y4z0wzFWFQSrsmRa5xhdJ+IEma0smk4hBVT6Wizc/YUhoYLyy+GfeuSj8MeP+4V8xgOQtm/fHhzz8/OdebZt2wYAOOSQQ4Lr73vf+7B582accsopuPjii7Fr1669IrOjAwro9JkfjOnv/u7vWvOFq1at2ifyvv9L34a2I7iufs0PSlzf27WFgqWgH0xZOiw/P80j/Ff0QOQplFUMppcCHJEOSJ0nAwBKXjFfsSosK2uhDtlbHPzpl7EnuEvpJQfm3DNIAUhm4JFHHIa5wQBXb92KYamn7gn2/L1Em6dGwMWq9W2sTsXcg01REAGuShTL4R+CzBvx6ilzC/CIv0sWjC31EcV8I1ARLy0vN2pTd87FrYgL0zB7fWmCHil72GmnoV3NxUB6d3VjNWpWX6UfsvMVclNb7og7CbOyS/m9vAAEm7Hmpus0u8jCCpoHIL0aZx9yMj78pFfiDac9D6uqrmnj5aelXHW1ZcsWbNy40R+XXHJJsWytNX7jN34DP/ZjP4ZTTmmm+J7//Ofjve99Lz772c/i4osvxj/8wz/ghS984V5thwNq6srND55++ukYjUb4nd/5HTz96U/HNddcg7Vr87vLbtiwIQBE+yKk98JohHu2m3lLvyIn04fIvpYBDwL8NFYH9QErvi8rTGcH02ay309NWYnTVLEelEQjXe2UezQYD8rJyDeu5SiQJ3dDCNBKkyhrHEublFdOH8Tlymsu2Zu+8GW86YtfbvHpVeY493IPEE4dtWXsLwSDYLcu2CufnH1o4wznUlYGXz9qeBKKbSPJTGEl5HK/BJ/eQTkFd7Ri5xU6kmR79Hl4oWBd/jGNDCkA5UCkSZcKQxHyyJfRZ3WXJAOEXKdj3j0XByj46Ah21VfDfMdoAZ++8wZsntuAP3zcT63orR8cTTKOSPEAgNtuuw0bNmzw1+fmyo7YL3/5y3H11VfjS1/6UnD9pS99qT9/9KMfjSOPPBJPecpTcOONN+LEE09cpLRpOqCATt/5wZiIaJ/OFwLAQFUYVAqj2n50JJSGlM2dZABISUdwlDZH8aA1m0j2PQwf36aVNwN4gmROOQ/gpvXbClvolHEUS2cdcjTGPSdvsn+elFL6RrR59nmmdMpSyNODutRROkfqfILsvRJzuX36knx3JL+evAkhoGl/dPGHHIGqnExSjuDjKiDzGm2/wMV+W5IV9xksivsaCbCzFA8tVSZ7sKOUi+HTLkP69cT0/pu/iZ95yKNx+uZjl1C2lU8bNmwIgE6JXvGKV+Bf//Vf8YUvfAHHHHNMMe2ZZ54JALjhhhv2GtA5oKauYsrND8a0Y8cOHHvssdiyZQt+7ud+Dt/5zneK6efn51vzleOSUoSnPeZhZpWLv4jWihyW1xw5K4AFGrqywfvI/K1nzKEHdjqqT1/Rsz+RVv6+U0Uti1QqneDhZw4m6eMmGL5MNOKJ5A34dMidHGUlgJRsZ048/1J+d9FPT8TCLhG12C3FEHJcAVKHJ0o8oDjNIsrtya9ZucTpNuqSR9YhlT8JoKIj8DoWfHr4HTVymLzF2dLEu+inhFp+O3EnUebT5ptPlPYDcucqukdBklL9KlJ4//cv7xZuBdC+DhjIzHjFK16BD33oQ/jMZz6D448/vjPPFVdcAQA48sgjJ61mJx2wQCc3PxjTSSedhHe96134yEc+gve+973QWuOcc87BD37wg2yeSy65JJir3LJly0Qy/vJTTzcfswApegBoC1JYgh5EfR0hcMplBbOEO14+7RyOu4QZp+N3ZY8Tmdgp64zeAYRCF3F2uGpHeF4UceZnCbAhvJfqn5MWqRKrQtpWJyvTdo3ubWa52qfFb4mASBbY7SOAWiwuV2dOnPdW8q409yJTE6Sq8Fy8L4j9cJul6BQ2ogOlUcOy82eRUYpFWtbNvbwcLoN4cVNgLdcW4h4HK6TaxaQcf12nxQjL92AjztNrK4gyyOkiD
p5nxLnwDtesccP2e7sLWAmUGwiMe/Skl7/85Xjve9+LSy+9FOvXr8fWrVuxdetW7N5ttsq48cYb8frXvx6XX345br75Znz0ox/FBRdcgCc+8Yk49dRTl6bOCSKedDvhFU6/9mu/hn//93/Hl770pU7TmaThcIhHPOIR+MVf/EW8/vWvT6aZn58PPM63b9+OLVu2YNu2bb1Ne47e+NHP491f+Gbb/GzPqYZf/g0gtPIIyiodcbO40jajSINBD0fl9lVoIp+3AglZCHbWPLVyy6UTnXhyxCiYbVw1h217CisC4jr2ASgJ61SJZ0uuOG2hvHFAU7K0SH+25pd6ArpOGUvp+pbh+XG/YZeoi2unmF0oG6PdAAk5+rZ1rq6p/SLEd1vEp/Lbs3yCzSilsqEot/9t66jQ2ryTc98/cZhXvivxN9hqQg2q0HLqZW2Tk7jHADO1NxMFQHD1ZVAV1ZnY88lTOk0MFtslA8xmeXorDXVv7HnG5uPwnideUBIsS9u3b8fGjRsn0hfjlnHC3/0u1JrFLa7Ru/bgphf/cS95c1OW7373u/HiF78Yt912G174whfi6quvxs6dO7Flyxb8/M//PF772tfutbYADjAfHUfjzA/GNDMzg8c97nG44YYbsmnm5uY6HbH6EDPj3759fXq07jopZUZrzgqQUsoc543JdlKpgSfLk6iTC6xGXuaIdQkmy340BQzs/STIiTvcyiR0m4umsJAr5xfPOBXv+OLX0yiDonw9QU5LIaWAQ7azLacL8vSVqUQpBT4OKKXEeaKI4ggplzfJj4M/RVklQO5Vp1QDpEwRHeWW0sVAxFo6+7AM2tE9fM3tadskIwoRjctrCw5qWaPFM6mUYguPfBfkD1bgEQfvK8uKVGG+4sq9qKEc0CEQ8jumo9meQXRC0qKU9xMiv9oryztz3Yn7U1seleH9o01ddpMtW7bg85///D6SpqEDaupqkvnBmOq6xlVXXbVX5wsd/eC+bbhr285u02AczTg/SMlTAhwVMQohv4eWUMRFPq7TV+GllkIr/ZVKX7U7a1+2KOfj19yA5zzuke0IxBTlie+llEsK5JT+yuxupOnaoQPAFAehvYhCeRcDlvqVNuHNduKJxB3znS/mX3RbiZfFbpxZHATInC1jEDUvemfDJF7aOO6PjTUTxIFIUQxygmvu4+H2R+en8JoPiWu5iWZZfgZsxGNAj4y8bn8pTsxbm1VS5h6zgq4VRkOF0YJCPTS/HYhJV7LjwTjcnUhWEeHoNRvxM1seXeaxQiheMj/psb/TAWXRefnLX45LL70UH/nIR/z8IABs3LgRq234+gsuuABHH320jwHwute9DmeddRYe+tCH4oEHHsAb3vAG3HLLLfiVX/mVvS7vwsj2QKWOrHPo3J98bBZXnnBCbI137aacsWyBOK5f000+SmcLeccWhz5WB8lYRR9fMOoE7tmxA4844lBvQs8mTcD8pA7IyddDO4/z6LKPOqpfsSSZdjHKu+O9y4KyrmdY4DbRa86Jn73AQSLjkgBDwXsMfhx/PBJo9AFsrTQCmKSEaTW2LdTzSpk5EvxlXi+LNWnpFoJLkg8xUJt8oSXFApqWwOT9k+RHS64aNUBVuHmnuUdw05nmHjf3gjLZR6+WU3PrB6vx9z9xAdYMZrsrtgJoKXYfPxB2Lz+gLDpvf/vbsW3bNpx77rk48sgj/fH+97/fp7n11ltx5513+t/3338/LrzwQjziEY/As571LGzfvh1f/vKX8chHPnKvy3v0po29Y0AE45CURuijcGOrgrMUpXgVlEVwywGPlLUkEdk4K9y4b2JKRgtqdo2G+KP/+HzaAiQtLKJ4377uXmpPqr1MLRCYIn8/kDq6JxkuQphx6ywBYeroI9Ny9qk9Bvt7Sz4/A8Po3ow0RzK/hgEa2Y03jdUlzyfXELmPIfEx9mgsHzRQWpsoVXz4Mnk/Hr9jsTmaFV0EPbIgyTpw1yPTJOcf+xgTP6c2wQzlK
jC5UkvXQO15EOqhwr075/HRm6/prNeUVhYdUBadPn7Vn/vc54Lfb37zm/HmN795L0lUplUzA2zZtBG33Letn3U6NidT8yc3cPPJI+UeKKV4c8xJFJwYQKasOsUnU5VuCh5UFs2Niltb42QyJUf/PcFBn2TjNGPwjHIgQz6zwEzVjJxbci0GsMTydWSZGwywZzRqvZ9lWXqYjcaVvz34T1DBZDUu2MlZR/rK3WE966QApUeNJtu81f6h5aItSCpzh712oiB6cR5TTsl/Jt9hhVYptnFyXP94wvpD8OKHnon/e8PVcAl1XQE1g1TDlBSDbRBBjuT7m2svwy+fdAZWD1Z+ZOTJ43REPPZzOqAsOvsb3XzP/bjzwQcBpPs5P2iRS8eFFSbXN8Z4CED+SSdG3ot6rwl+p/Dklj2xhWVM9qX04/SxWetJqs/N5c0NYHOgo0SJ/FnytvTm6GOMiPP0oYBvqs4inZ+KzTHIAg6Y7VdkeqesxnwXffVYWEiSjcPhaTZdIluMBfqkKxQPoBm0xHKPXUai0WJ5/cHNqMBfE51AsKeLswoVHkjrAwytSXIcGvh+RHxjIJPyFcnvkk6tsiSfG+6/H//fZ/8R7a0nCKyVPQi6jkeGDe0YLeArd9+SvLfSaOqjY+iAsujsb/Se//wmarD5ntyyTHvP9wEKgNtU00Uvt0utORo4uj2SyL2cZAIGOsp2UWIg5+LYSF0aU9/3nu1KqQrAz5z6CLzgrMfihrvvw6evuxGfuvbGYLHIOIxTY8pAGffI3xtUlCg1wO0DnnLy9LYAUND7lAa3Y1MEXpJ8MxYIzYxHH344rrr7rk4rY0x+x+9JQGKBHHjK6ujgBjd/+jyLLiHd1gwlXi3rnODtnnP8nrXOcwXIDzviL37wyPRBcnNPdoBLZnJxLsb+ZozlKPnRenDl+Jo6m81B7Hsh38naViVafurSFS1ANs89u/bAVDifpzEfpyu7fWFPV6WntIJoCnSWkT5+5XdRa246Qxkky4KWWAGydRL2ACjRSbYsMmJEVeqjGAhCsaf6tazyk/ekzHYq5pt33IGr/+VunHX8Flz09B9HpQgfv+6GHKsW31Sf0+q7OwazLZoU5HQBG3v9qA3rwQDu3P5gN79xZHEPktLPv7ecJSXbJVcG5HmQY2+tnZ3FjuFCtiotQSZSpD3J8fYfQgaxdZXfC4VZ3hrhUmtXZI6HGGBwCuw40lGe0iiGRUKXNgY8UefgYuIAFACgVDnSkTq/ZxXZ4sVz1hZluMpy1Ga+/q4cN3rLVJVFVbXYXyzTNsx2RKdCB2h20Z+bLbFA0RYVf3vN1/D0Y05a+dNXpXdtHB77OU2BzjLSnuGw+UFo+aoku2G2YEdYJHy6Pkop8dJKcCWtLIxMelFWrBtTe+jUBNzywDYAwPfuuQ//8LUrcPpDjm4sw9z0abmPKh54s0jbGeywwHccGtfycnsC4EhRJtbnrUbvYDbpPVdG6b3KZHF/dyws9CsnValFN1SC3EOUWjBAyj2tFr0AWeLFKwGUQDNL5Uvp95fRb5rZgRDY1UupdnWKnQTIkdgzltOBbAEG8o4QNrO2GX1/FCErx5sJGLHRTrKs4vSZeHayc8w+qDiNLT4ABuQvmqkse1Uxrn3gbvz5tz+P/3naU3OVXhE0XXVlaOqjs4x0yNo1Rf2bvJcCI8UMTdoUacD4/8wYANWy2BDC8BuuQ5P9kkufMNPnfOG+fuvtLdliIBHIEjtSOzky8Wla/SEljp7k6zhGXp9ehYe00gVtOpY0lsbOOwZlwOPE1AtoNoli7LFXZViKUW+J3HRz/IJTlCb2i1kqCl6Z1Mdly9YAagKPYPddiR9+0+EEICe+nirbARX34hen28xfvxqqho0J1PXCxx98KFuaONzSImkGCv3HuCbUmvGPN1yBnZ3WyimtBJoCnWWkozdtLN4n/0+b+vgGphgGqzEJwAwCRdxilwIIToFX0ZEAOZ0+CrIMRP0MCR4RyToUn
Z4XqagDENenLujxXOJ27ODJzN7kH4CuHO/UeR9KAbllNFt75eJoEQAg9xonVWffduuSp4WOGwtBkD85FUNNwL8c9ZGT20lbYMfKQaz8R0UO+DjlLj9Mdy/gTD5QYgvsOKCHnt+G//iVXYmh0h85J47UEy22U+pmv49813CI7267pzvtclOqncY5DgCaTl0tI1WVHUnY3+7z0jADnsCR2HVGEIldf5CLhyHIv68zYjQm95+KKPWpJwc7ufIyAKXFTAISO3rPfVtucB/smh7xKfqsoM07TpI0dPcEOC2mfQBeB7F46N6/gTJy5vjGbT0OLRIoLpb8I44V3KRylfK6e+O0V1eauDwGvN9NsClubkST4CH4SnGTZWXFFtNYKYtPzENcdr47ic2zLKBpwvv5yOBA48szCUkHfAnQUhUeqwxOnpZFaDqqila2rWA6dWVoZT+lA5zu3rGzNQBhAGx3MJcjfq7sAMemCaaPUpthpkimi0GOGEQFnaeQK0epgdbESsLJkbh1ylGH42cefVI4XRVPXRXawsuVCGQo6xBYvHIgp6CX0maCVMruIZNXPHIKTHBoAd+cnIsBBks1qivyySupoGq92nZ8OUietV7mdJ7s/eQHEf8mMRWTq5BECfkyKVlGc4ESzzBwCkYC4NhrsfXnEQcdChU8jILcDHBN4BGB68YSVFquHCxrzqUNRjupe+IcCCxTSVmZoWsJYhIyaVsHbxI35Z900KH5yqwEWqw15wCx6kyBzjLSwmgU+GkwAXqAtnKSf5Wx9MSWC5ajw1anFvFxeeJv2vFPvBUu6dxM1e58ItDQKitFvawZzbF6boAr77oL/3Lt9e38sn3iUbmUK86jEPaXEegb15LTvz+YoOeInndwOYVMJU0IOjv12bgU8MkIvK861Vwn7hqUEcaY6eIlefYtqw9vcDvWTVwmRNMKlE4MkFTy9jzeTiEFchp2xnJxwsaD8ddPfjYenF+A7rIg+7KiUYffEysNYPw1H9mZzGakvt6ZEZCoCbvypRyazPYStQJG1G5Pbe9F+3I5gGOmz6J62Lyzqme00yktK02nrpaRjj/0ENz2w+1eOTuLcGmgJK0MgfOc/U323LHw32dJacfXxZS44z87UKjB2K3rYAdxzz9KPw4ldXgEPnYNR00E55zlgpt7olkSzMNrDLQhvxaW+Z6KXg0IWvdsgF48Iy0m6hfXd2IwUrJMLTVJeVsFd6G1FI8logRPp/zNdMsiWqPvO5Qqgvw/0X0rcLTsPFgGHiDg5oZX5HZgVAI5jg5fsw6/8dgfx3/71Eeh2US48VHos9lzozWYZZiDMPJxAHLid6GGtzT72qeMORbgsecjpJHptQ1IRrI8e14JuSy/XD3Ukr+Ie4NKCmUcHvs3TS06y0jnnnSCfw91TwtCS3Er8XcAv6qHAR9rJ+konGQY8XZ8FTCvNUZgAzZsOdGGxUH6PuRldHIKgJUSi5EeCXaV4erD3B5cB75Esk+oon4uRaLeTCZgXjusbVaiaBieT9dbl/Ql+bxEaS3Jx+kjl6QvFIg50STF5loExe+VrArJ8sYpN25IjTC/vC+tLskHEZN48LGVp/Vhh7Uh97LKaZgCEQg/cdRxuOhzH0PNwhaUaxMvQ46xfcbCcdkP2GqEwCKwokT542KTL25zcNwuWoUgx91zK7BqGCuPXDEXVImgGfjM7Tdm6rlCKPdejXvs5zQFOstIe0ajRplKhVug5Dsnv1Vh9XCLFbLUUzklyxTgZFwKBpzxX9sOvg6Aj/gMpEdynmSfKPpDv8FxtFlnUgZ53rHVBgAoK9CjjzxMpJRjT0kWwiX76kyvQm3xUvcnpdazGJfvUgwYc3xLtMSdbwL7iachAIUELCVlH3COGMaAJ0ciff4zE0g7kCfPmMR/fWT46A3XtQYY5MBbLKuUqcTUgYqRAF1xHmdVETJmBzotgNdFnN5Cgq1MfiWIaKPW58n41K03YEorn6ZAZxlpflSDrLWlN3Duo
+gr48xc2gQzGKn2KbJQ7rgDXc801R/KAR2hsUb16JPBbf7eqhWX5YBPT+FlsrlBhfVzs9iwag5nPeQYvP05P4u1c3MN4+Iz4rYcSdCTur6XKfdMSmnH4b0EFLBZIrDjYalTpol7LWAQ+++0RsER+CgqzAIJa8fPHX8yjttwUCGv7EjGHMXkwBoDQ63NlGwkuwc7bhm8O19KInSM1iCciPukaxgTkLa0tVY0RB2QeKYz1Qr30ZladABMfXSWlU4+YnP4XSn4peKpT7bP9xxMKQvFH2dheTIhiJH9aW4bnGT5XQq1YG3pGicCyO/SnimrSMI449p2T13j1594No7ZuBFnP2QLDlq9Gr/+rx/LCJq20IwrR/I5LIE1Z2I+He9NknqlT6CNKG9gL8u94BNQ/KqkjX12WTZgRxJCUKGQWzJSdHVMeTWAj9xwXV4woGDuKBGZiMUpnnJqJwBQ7L9fEls0uGTd70bUOZXIlUsC0CT3sun5QvpIzmwfl+EX7pSe4yVNTIZ+7Mhje1RiGWm6ezmAKdBZVvqJhx6HIzasw10P7vDvElcA1eGnNk73FXyKdisXksG6BN/4Ww7ARTysdf2Z2LbBgxW2faXOy5rbpiFbD0rzKgK3DMhbEmtCAMwYf/q5LwIEDIjwyCMOw5561Gq7QLd1UFc33e5iF0dLwmsSsNNJ/d/6NJhYIioAKAN2hBRRgJikGEspn5QtsEZQeK+P1c2DB/sjsno0p5F1yjm8RXMCne+pu1kToDoAX1BeKJLcrqIla47qJp2ptvtQx+ww7LM8ZG41nnLMQ7vLndKy03TqahmpUgpv+oWfglLU+KFUANtoxYHVUEzBdFpZJJEBT9LvkKNOkGFXUA6iw8buCSwsrg+IrSZKpIcYSBS2aSgRA01AVHGk6hnUOWV1LhbEpsOTR4t7fK3pLIfM+Padd7XbRuiOPiQVRKpuLSDXty3dM5btmLg/MS2ZaTtlwukWbsmt6sKC0KttggfTzhIYCeJpksUQwzv0hltMjFFeqrPIIaRk2gx/RvqzcefOqqIpv4SfMz8YwYvc7MnV0bDRbQKZSNAt6vdBrKoGeNdTfgEDtbJVaBCXaBHH/k4r+yn9CNDjH3IUtmw6KLaPG6Axa0APi9VUQPlTzFkR2O2zFIEOtkCoteLLpVFiMBqDh1SvbgETFLKOw10RUrWTScphz2Xcm6AftUCxpXe7hpepFVcKIeDxbdP03lmwED9Hyvf5LX0gwahIHwQuHMNC5I/U81pK68eS8MoxSSvU2HDhLy5jp5yz5Di3LPMK8dIDnhLFq4ZSaBooP8NsGZRs8+BTkecWfIX+ToR4KXjLSqXtlhTSF8jd62O6smUnl9IzmmdC5pw7NDuB8L/OOg+PPfSoYroVQamOZpJjP6cp0Flm2rUwxM0PPGB+pL5VYRURfYX5/t02MIj6r8jy4Z16ByYgYatv6Yr+22FFylGqn2PAbx6aGwwmoxvHsti+jYnBisF+jwz2Ju3nPPoReNKJx+WlywAAP2iU7ZsCLF3AI9evpm7F1prYGjYmJYGp5J+RbyLqejkWXU6aQbbYcV7W0otY4JN6XCU5GmBmU0qF3ZG3mKaPIorvj1PfTjnSz8a3ia0nWbAiwUYAdlqgyV5z8azspp5UK1BNaafnGMSlRg0pYjI8FwioyfgdJdOZtDwi/M8vfRo7FqYbeu4vNAU6y0z/fu13bfyVHondcnE7teR2w3bTTHKFpv+25RJwd29grvvgpX0U1bgK11l3org+flpOLFYIQJACsrZSN1RUFtxUbLzMZCwem0YT455du/D5738fuZ4uBoVIpYqBgTxK7ZbL48rpGowuAhy09MUi+XVSF++lGhUKPnGz9RjXp/nFvOW1DmbWANCraqGs4iPNTd30qcikgK70POI26UoP4CEbNuL0I47JvsaloIQkAU48v+qm5mRad6YJGNo0fndztKMeWx7FwIjByAkeRHGqLSz/ncMFfOiG7+R5rhSK5
/4nPfZzmgKdZaZrt97dO623zDiKlWXV9BGQaVOjeAuYOgFMrEHGfecdSHKBCwVPrmy4CvfbAZlSsJwYQMTngH+rv3jzzQnt5yw/HVUpadKJtKrg0zUNtUiQ43nsi/6pTxmLliWygRWU7iR4qiVayTIQpSMYi0M7HktXmaJUuTRbo5+1ZwxAFlBqGqvE353HfkAWEAy4wowe4MT1m8Q+UAXeibIaiw2CtmgsQKkXyE6AsYwKZDsRTcbZmcmnK5YvwZYmYOTKj9pKBDSsiPCtu+/sWcnlo9j9cNJjf6fpqqtlpu/ff3+vdL5/ySno+GIfACNGo/0GkGI43TVKEkw98Ir0lb/vAJBc5tpFXSAhLrv51dyOeLSsIHuLZDssQVlFK9TeIvc8+1D83MeiWKOX3xEvVp8yRR1cUl8ah/e7gcskRG2+7Iplo/DjoaiTy/q6+B3IcwLE4Iui36V3MbJiEIul9Xal0khr3PjA/bjpgfvBrsFimalj8MIEIp7MciCek/x+CfC+gKXmCUCOuNoF1mpm/HD3rvHl3dc0Dugs8djPaWrRWWa6Z0e/j8WtQCp2Be6LTmz5kEzeuV2BG6maqSLYKS938DhQv0v5ik5J+t0wMeJNCPtqlbjvkkfcp0rrdS9aSiAxIegJrHd9LEVLRZNY9RZVWA6Rtqk169nHalEurZXvIes3Ys1gJs17gk8iaySMLQoCeJCPR9EhsAVFxUB+MRiSv31cL/GSUvuFZSmM2xvGWVdSgK5V7oQgx1mb3N8R/A7pJNIU/Yt9+wrZeziMf+kHt+KmB344vtxT2uc0BTrLTNqq8a7v0I1SiiSVXZGfdd61+2NhAHDFLeDCzq6cA04lsDOu0gaHq4scD3utu5VifpEsCU3S4taX/WKBRCr/pOBh3OX0i6WERa6TuvF0R+b+REAz5ZDa9TtimaoCJ9JJPrdu24bdC8NJxOtNfiomnmqKQQ4QKnp5LurdimKcqpvM4/xeXN44bUbqJMXPQT6PEfpZ33LluWmq2vjtEAg0Ir9rexJAOllqW74HZtG9DpCstca7rrx8EsH3HU19dABMgc6y00mHbYaKHHNjcg7DXX1qnz6X5dcfkwUuAZ+clcCai42DNGfBRG/hZL4UL7lz+TjfXQ9QwZnrnfwmsJ60miHVrrm0MZ99CW6WylI0FjAoaMAOC0FWuXWAHEnSIpAtapJ3fQzyfihMiFctARb0MIxzrpXVn7sVT2gCHPqpGuEHRGj2bGuVHVttut6DFKhleEtLAKRGvqAQZCyGPPijNk9Xfg3ryBwBnIAsgIr9kgQvzcC/3nj9IgXey5SyCk5y7Oc0BTrLTM9//KnQDjDYa8G75XpbO4WjiaEVQ1d2eke8haa/sPfdEU/9SKtJanRewZSlAFRU7NjCRRJsZQ//83KlpqB8fUtDL3FtKUBOdG+sbzjHr6+VgwCS4QA6+Mn+P1ne3uiAYpDZo72r4k6rCeotd0+UmkSP5ghi2GQ45nK3yumZP86zt8mBkRiYyN+USme6FWycW4XXnP3EJZMGQGM98pYi8uCG6gi05UBOTtGWQJF8QDWZDTqltWoEsyu5VH25h8qwm45G+UdNpj2jUSbzlFYSTZ2Rl5lOO+Zo/NLpj8e7v/5NowSjj1eDfbRhP4Vk07ipHq4ZiglacRu6Wp5cMxSoyV/6uP2u3VxM5jWCU9428JYfCQPQUtPoBhC5To7H0SB7yXIhQcfERaQ0KUf3ozS5x+Dkke2YFG5vW3J6Us0sX8t9QLa02Grg7+VzdadKlORe0RQALX1Lfe7vJepcKGDTbJ5bg088/0U4ZPUarJ+dw+9+/pPQdfpJyrYo15n8oCscY6WtRkGAPhe4r+thlVal+QdkQU4tGLAze/V4KC5JbePsQPRXAFRFeNjBm7r5LCcthUVmatGZ0lLQxU95Il7/zKfi6IM3gMlYYWZmFWpnWYkGIK3RdgUDcqronvyWVU9Q0eP7L1ojXKwfGYnZWZCE/423+
LjrXeTyiY8uZSEPvsnSB8rij4015Ku0WMUkAGAp6F98KbD0kOirc5aVFdQBHbp27XgZFi17yvREwd2Nfkf5ds5xZkpIgk75CXWZ90uWt65pgb09ZWD537trFz7z/Zvw4Pw8jlm3Aat4AKUpmArzFhF3FEc/DflF3/EeXKl82lh6wDC+NjYoINVkLChDNNaUEshJyBEKRf1ATszP4SNnidKA1owLTnnceLz2NfWZlupz7Oc0tegsM+0eDvHWr3wFl155JR6cnwds3zyP2py7Tib3bbpe2wX+y1n7Cd7Xp2sAKs/i/ikAOSkLAyPUJPHIrGcnmb2fyO9l4jBpF6BjIHDmdavaGOP3hYEwhbxSNm8pQLsO/obNVASXS0WL4H/3zp37rKw+xACecvwJ+Podt+O27duCewOl8IjNh+LunTuxdeeOMWWxD07qS/d85LMvAR8ppPxm5PU4fe776Ct7PCKgJutvfeoT+J3PfBIj3SCIwHojrhEA7Za9p+rr+pdEXq80441AxbL1VORkGU2aiRseRfDfs2G6kmVAKoHwhCOOxnMe/sh+5UxpWWkKdJaR5kcjvOif/xnfuvNOEx05R6nOML4/BnVa02Mgw1GeHhahrv2s+lCg820buC0Z3BTZ3EBh7ewcfrh7d7izesSHovMkYFMA68nF9uVk6p7rgwNZpAJJ8Skp0sXQEgAPhe7BtqcleD+66IPXXRtdMQWONOM799wDzfmpWU9BO4sXxq1f9mBVxLRp5bDTMwLAuMWKHD/rGJCIc1+GZN7VjgEAIXsaf9AIQE5cNiFMqxjQdaLcBMhJYpHE9FSfqTYDzmwbaBhLbGxx6DOQ6gKV0T2/JUT0bJ938imoVvimnkuyauoAWHU1BTrLSP945ZX45h13lHWW61OXADTE33dqgOnKlDdbO1T0UFJENoRGd9KkrPF0nZta9527jQL9T7/4X7F2Zhbn/d3fpYEEJ/RHaQl+nLivzM5q1qpIW6TOskoWs/i8JGufuixhH9Yb5Oxzki+z+aOlhpOmNUlJkCNutEBNvjGD1UvC0YXAAIcgyQEL/85LPhTx6aNnuS0bSbAWpW3L3AZGfqeWnDUKS/6JteWyS9Njfz+Op/o5+uuFE5JIQOh2RqfMe7Ef0VJENj4QIiMvGRy99tprccIJJywVux8Jeu+3v90r3WJBDhBZMcQApdWXug1EhdIm+1s7/5se8rDY8qHXd+L2xCLk4/ZIAGDPP3PTTThx0yF4yRNOa5Xl6ut5uiPmE1tYKLpXqDO79Lld0CNZkuClo4xie0d5N8zNYsvGDek69ilrX1LyBdzL5OqeKjdnTSkizslEiNGs2MTAl9/o2WiFUixnqv1E2yZBjkyXOk/VOWmtasvQ6/ViLG4X90Td/EoyEQMotGi18/t6MUAjgqqVl0tpFS7Vj97XXcPhhMJPaV/TkgGdhYUF3HLLLUvFblH0tre9DccddxxWrVqFM888E1/72teK6T/wgQ/g5JNPxqpVq/DoRz8a//Zv/7ZP5Lz1gQeWpI/v0hf+ntsLyylmofT8KEj2vxWaqSKXVvXrm3wa1fxO6RW/PJ0TgEFSpuf8/M03AwAOX7/eW1hlWQFPWY8CqMh20jnAUNptvW+04qWwvBCwfWEBjzrssB6Je/JcKbSUgMhZIXz9M4q/N/octyF7IFcBcpLklHzKWlEAOVnyeXJoPF0OMULHZaAchTjmmJI/JVsXifoSTMBAsg7M5PatcuW2wGKzqsqkiYClXV5OzknbOmXvF7uX8xId+zn1nrq66KKLivfvueeeRQuzFPT+978fF110Ed7xjnfgzDPPxFve8hacd955uP7663FYQgF8+ctfxi/+4i/ikksuwU//9E/j0ksvxbOf/Wx885vfxCmnnLJXZV07O4vt8/PdCd032LYgh4BCd9y3wxr/ESf6QJ8+sExwg3YoThjmJSCcGnJAgBv5AbSWypvzWPqIEmXfu8s4wd667YG2g6LsRLv6+0QbM
9LVDe7FCWNahk7iG3fcse8L3ZsUK/GlAGgtPtELEJcbU/DwxygzVWQ2XXdFHdjps5Q7W5YfycRmj648SH8Y45DjlfuOHL8a7YFY4Tm1pt24yS9BFjNMFGUk2swCp2T8Lwb+4Yor8LLTzkgGW/xRp7e97W14wxvegK1bt+Ixj3kM3vrWt+KMM85YNnmIuQ/+BqqqwmMf+1hs2LAheX/Hjh345je/iboubaqy9+nMM8/E6aefjr/8y78EYMJ0b9myBa985Svxmte8ppX+ec97Hnbu3Il//dd/9dfOOussPPaxj8U73vGOXmVu374dGzduxLZt27Ltk6Jf+Kd/xLfu6LcDLrt/OFK+0hIjwr/7fkBabxxp88hznWHLCsIcdYhSAMBdDcorVUR2VsG9nlpMlHvk+nVQFeH27Q/mleAknW+ffL1s9D3L72PxOZD700mBYhfQzPEsAPbeCnvcMvvy8R97D9BfKq9PGhkkqLM+Nm3LGYvsXTuQ6ponkJjMRW52W9I07JpEsjzhgEw6ErggvwcrLr98xhqB75OUs4ve/Ixn4udOfkR3QkGT6otJyjj2T/8IatWqRfHSe/bglt9+bW953//+9+OCCy4IjA0f+MAHssaGfUG9LToPfehD8apXvQovfOELk/evuOIKnHbaaUsm2CS0sLCAyy+/HBdffLG/ppTCU5/6VFx22WXJPJdddlnLWnXeeefhwx/+cLac+fl5zAtLzPbt28eWlZnxg+3bA4NGkYRSDYJ2SWUrfhcHhWJ5DMc9Ygqo+CVPjnc08hMGH9lJpYAUE5vrKbmI+ikYUfCdO3YIu3SmFUtKrZS+ZV+PE4zBL8Ui7nBztBwgZ9w22xvUp+xxrHZ9efetc98yx32+BHSudCl0HG7s6vfEKrIao9GkJSeRzzs49+jUmv2EqbGcaPZ+fbK8wNm4NhaYcFuLbiOY56G5ZQ1azKf1zm9ePjbQOdDpTW96Ey688EL80i/9EgDgHe94Bz72sY/hXe96V9LYsC+ot4/OE57wBFx+eX4DM6IowuUy0L333ou6rnH44YcH1w8//HBs3bo1mWfr1q1jpQeASy65BBs3bvTHli1bxpZ113CIu3fvbDmscnBu/7OKPPidstQ4igFQREwAV4CuOEwfr1aIeVa2E61EWufgO0DojyICFDqZWRm5y7ueB71p+nZw4rRET41D0ZErT4Ic2TYEkW8Mcvnks5GBFIF2NVwxk/TEVDjidKm8MZ9c2qWiCZo0ySPmF1+L0y2W+oCYcfmNC/CivK0gfSl+XDhylNi1vM2Xwk08c3JrADrcooOsrwy5DTrjvb0YfprJSRFIkpNfXGtty6cL+XrQ7dsfnCzjvqJ9vKmnMzY89alP9de6jA37gnpbdN74xjcGVoyYHvOYx0DHsRgOULr44osDK9D27dvHBjszlR26eFAA/8EFoxNnxXEK15peWXPSYlKiVgwOGMDTmwuheWNiZRhfY9i4NJFJOmPpaXiVrTrBthR98Y0TKJbTymjaXbaNSODuyzK9JWaCIX0L5CVG3bKMOOlSKmnZHl1V2VcWpaUGO7FlSifu5fh01Vla5BbbPpPUO2URjMGNHAeEo6jgErF9nXOBR7tMJrIskJkej+Nauax276skJwnuo/yUywNRl9yzcHF/mICaGx9BF9MnNcBjhG4AiYHlwasXNy2012kJBxDxzMXc3BzmoujjJWPDddddt0hBJqfeFp0jjjgCxx57LD772c9m0/z1X//1kgg1KW3evBlVVeGuu+4Krt9111044ogjknmOOOKIsdID5gFv2LAhOMal2arC+rnZBsC4j00ug1ZkLCAuUzTybznJFfsgbnhEK6yYOW+N872H/bcvuHBp5LYUsSy5AgNLi9gilKK26rkCLAQaCRlb9RGaIHW/s/7REDm2pgTtkalBnHYcq0pXmtwz7NOY+wrwLBVZZdXq8LtATh+KwYPMv5SAVPJ12zAw2nXLlenSJWRrvQpyrBrz7VMnC
Yjkxp4MsTlmuvEby45N57Z8qOFXTnXFVfV10DaP2FjUWW8UE5TdmdxMt1GrHWlkD93kpZHgZ+nnH/GjExl5y5YtwUzGJZdcstwi9aaxl5c/4xnPwG/+5m9iKGII3HvvvfiZn/mZZZt/czQ7O4vTTjsNn/70p/01rTU+/elP4+yzz07mOfvss4P0APDJT34ym34p6aj16+3HmbO3CkgQjybExpsBUea6y5PiIwBPSwQynFqxZfpQKh11WHSkhSM+HLiRcX6ogUITyQOEo14KLxd52ZVsbmd2/9fmZRKyZtuuZN0qXM+07cRAZLHPdaXRYoFGFxDKWVFS50tBCX5FrGxBWMuAyOXXB0AbTI1TFwm+/Igs+gDG/FTHed0IBowoB6q0OScrW7ODBOGoNRswUMrcc3W256TTXRCJdAevWo3/esqjx5BuGSh+hpMeAG677TZs27bNH9IX1tEkxoZ9QWMDnc9+9rP40Ic+hNNPPx3XXHMNPvaxj+GUU07B9u3bccUVV+wFEcejiy66CH/7t3+Lv//7v8e1116LX/u1X8POnTu9Y9QFF1wQPKBf//Vfx8c//nG88Y1vxHXXXYc/+IM/wDe+8Q284hWv2OuyrpmdEe9RoUdxPjExZcCO84HRFUMrDT3Q4BkGV40yBtD0IMK3xoEdZgEfSr47E1AemIj6iz6y06Lhgg1OqlkS1pPe/brL4yxX0ndpbwOCJPgs9FQ/KjSW5aFwP5eGRJo4/VI1t+Sj0fYtSaWX+XT0SojgfL2sIiXrUOn6uBagghApa1NnlN6MtYrkPQZqZtz54IN+6x1vSXJWm5J4Gjhm/Xq8/7/8F2xas6arNstKsUvipAeA1ixGPG0FTGZs2Bc09hYQ55xzDq644gq87GUvw+Mf/3horfH6178ev/Vbv5VeoreP6XnPex7uuece/N7v/R62bt2Kxz72sfj4xz/u5wxvvfVWKLE/yTnnnINLL70Ur33ta/E7v/M7eNjDHoYPf/jDez2GDgBsWr3aj7yAxrDg/FDyVgBL1mJgTLrGZ8dNhXGFsFeQnTM16X35Yj68BYRSIEOa7cchIYepp7NpNR1OY5USefq8WrJeMc++cpUvBdRy0ZnAONM89aWg/PNeks3HYpr0HdibNI48fZrE8XN17VvnVvtnyop5uvPoei57gLkyIMuz6elCma2iBDAUXScE4IpTG39OQqJMgtgzLLUQg126NBG1QV68x2CfroYA/MG5T8FDD9nULf+PIF100UV40YtehCc84Qk444wz8Ja3vCUwNiwHTbTX1Xe/+1184xvfwDHHHIM77rgD119/PXbt2oW1a9cutXwT0Ste8YqsReZzn/tc69pzn/tcPPe5z93LUrXp+IMPAeimxoLA4rseR7l7YCABSkLpufMY7AiS+7tQsJ33GBXrIbPjy3178j484VhxVE8UO8CYAoCVS8PdaXrc7pWiHxWet/u7lGAn4Ls0LHtTqRrjypNS2l3XczLk3okYYEh0oqNruvn53Ec9ClfctRXfu/e+zirIk8V8Ri1+bkQvv6VUJPC6/dolX7eCjC0BEsDNgx0XQDB2eE49ewmWXHcj+qBugdq0a3+IigwsjYVxzPxdxobloLEnJP7kT/4EZ599Np72tKfh6quvxte+9jV861vfwqmnnrqsy8f2R3r2ScKRjZB0FO5FJZt2qdcjAwrkfwD81AtX3ICGXLmLISEzKzOtpmEOjuTsXa5rR/nXXu/zvUqQk+sjvGyLVfKu/Sk6kJvac6XLo80zm+1AotyDmaSecZP2xYMp5erAi/Bz8eZ/nbhvi/LTBK5oBj7+3e/hpY/vGZusBCA6XplcnagGlKuDvaaAZi8pe5BOl+vq1WoPKVOufOHETCyWy9vfANp+RCk53HW7TYUb2CkmzJIqL4Uv0I33/nC8DMtFqXd7kmNMesUrXoFbbrkF8/Pz+OpXv4ozzzxz0VVZDI0NdP7iL/4CH/7wh/HWt74Vq1atwimnnIKvf
e1reM5znoNzzz13L4h44NIjNh+Kc489DqC0YuvnZMuJF7HHsCk36o/DrHd0+C2QBPSQOcrvoqK6eDwOZCl7pNqhr8XLpfXlpQfngb9klCfON5EMCWqBOXce+ByF7RpeiQrv+7wPFJq0I47zScuFTCP/xufutw7/euwuAY4lr/g5TJskAh6cX8Bvfvw/MFdVuVSBXFklL2UR9W756gg+cbyamL+vY1myJq04ku0on0cP8OGBj9uHKidLUA8K/o5qDkNi2XNGom0iWf/yP7+KT3/vxryAU1pRNDbQueqqq/DMZz4zuDYzM4M3vOEN+I//+I8lE+xHhf7mp56NdauMU1ej0rVR/gMGZhg80GDV2DoCcKEQ9dCWxrUIOYfnBADKgjDiBpwMGnCSy5PikbRgxT2rs/rEPPsqOAtisnostoZFf5O6rmf7dg1cW9ouU3ZQd7/qbBFDrqWkfQGiuqo6ThOkpqTErZZCTijDOO24uLtXWptofliXlT4SXUChrcgCBEql1829KMpDkz81HdX3VZRgJrX8vIYPFBjt596uh1tCXngnuniALR8HBmF5SlkTsisi/OV/fqVc1xVAS+mMvD/T2D46mzdvzt570pOetChhfhTp/j278eBoXuzhYsGD20jTfaMDe7+2F+XUCQNh4h7kXt6uXtdvFho5+abyyR6/RpMWCDob6QeULTsGHbZ6gV+RrX9ueq3VwUnwIKadgo5blid/p+Tr6gBEmjGfTpOBy+3NourMCSUk+e1N6tMek9Je4itFTjVbrOyC8C+LBVxdL0MJHcdycOI+t5NGt9LOyWSuN++V+CDcO2m7H78hJjffGoPNQggnT+q91U16A65SH2IHsU3v/HXiTYK9yOXAqs7nh0Y2i60/NJrtKEQ/S7UpV4Nx9da7sfXBHThi/br+cu9rGjOycZbHfk4TOSNPaelop4tHlBoWpmzGsRU7ABbcdAAtL8dEPnme66AJAdjxVLIFWguRXyHBIm/KgbAPxQAEgq+zCEX8mpVozX1/jo5+VdxrOVVKJdKl4LvatitfDgxK+SLRUsnZ8gvvyZxL0JntDbCzN0AOB3/KJEEDN3larTWunDlGArgEzRm/CzG4ieTt9WpHQMe9Iw3PRCch5E7u+s0AjQCWK65Icokkcx/kuK+fbwcrl1vlBYTnfVjVTUsr21+Q7VNlnyfBmRN7xTslB89zETz2c5oCnWWmI9auw6pqgD31CNlhmCP/PTrzBpqOpILRyG5I4jfuTKk+ca1PB+PAjvto+iwbdXLJDllOUU3y8Qgrh/nJYYcWgxFhAeL4fo8qJPkCvbb8aeXnCfuLrjJSAA+ZZnbD8BT/4PoiQM9Sgp2l6KBTr74lb5XoWd0kNllKGSOQE5ObSorlCHBJX8XWAjMMYuqwgAQfn71C6d+6SccEkErw5KV5XfxKLBfFGARoNuV2PdxYbnfZvhfJjYntP6tmBjhi/fpFSj+lfUFLGAZuSpPQ6pkZPPfkU1C5D7KPYsudKxi/HrfBpn+6jHYPyHlrQUoOlza1tDRHsrg+K8kc/46VZ941uSSTACNB2lKde1Aya1+wKOvXlac3CkOyXi19Z+8XlcoSGHX2OXHiyKURFGDVUqNEwGhJQY6kyJk5LjN1LUkTy0T2X0LSv861ofOrEQEI09ya/5Tc6DMn66QDH1kekwE2ZK9Yx7wsa1unZFdX+BZc+z/n0Y/EmtmZCQTfdzT10TE0BTorgC46/cewZf1GqD5Dy3FeOreSSe4qXnG403iK+lzvI4fXJj0S9ynTD/+orQHk39Q1T41G7IfVSho0kq2PhasvTTrUjQGdLJcz14GorSbs2ZbCTD4hBdXJKdDIWkKtjAgVep9duBdLDO/P1pUsR74KIlGvR5EEiQmLTnDfAiK3s3ifghhmmqsupB0XZGdASoulU9Txe8DifAIaKIXfeOI5k2Xel5R6xpMc+zlNgc4KoINXrcaHnvMCPO/kU7sTE8SKG0mFZdhys9DQ0
pzknyIf70VF20h0ypuxHGWVbMd1v8qMy2CgpflEHtsmXcv3WxGix/jiA72QatO+VqB9Tdw6GTPfUsvRQQlLlVf6opNuYTuGDyTpM6c2/4wV4t7o+O173Km0S8RhHQkJBS/SStASnMWOp8k2HPPFtBmJYZx5E3GEksAjpWh7gCrZli3e8lln4v90lUMAnvvYU3Dw6tUdwkxppdAU6KwQOnjVapx73HHFEbVXzAMGlFTSE+xJZQELAANeZjR4Vpu/lQ3Zp4BNq9dgZqD8EnJ3KEVQiW4iWPrug1tkupOUFSaVJk7XJ58RpgF6sm0E2OkkUWZnB8/Bn27fj0VptgmoC+S2yu2p1ZcL5CTSe+wrr8WKLqVM+2yPsJh6lvI6zD4p/xxIyO1EHmH+1quWABbF13FMuUmbQIRKyJUFJCnLggw8KNOkgKqV3e8+7g5Gt6E5c39QKfzyGT0DOS43MRY/bbXU3/cy0NQZeYXQ//3ulfit//y42btKePu35swr8ZfR9I4iYlZqSbejwEqh2Djxxc7Fyt7TjE3rV+G++3eGTMhsGIoRB/L58uxbxT5Wh2Qe8ulNqSmYSSi2JCk0q8NKZaKjXeOlWX3l6wI7y9HJyHbeW5R7hnGZEzxrj627LAH7qm1jIBK/g33AhMsvE6Wm1tw7a51yWUfMOQ/Yc499LJCTApPiO2rxEntiBc/N/dUCBMaoTKRRojw/6JMVktYjx6/PQCfxjvzpT5+HYw85qEfmFUBLAVSmQGdKS0Hb5vfgtZd9EgBA7gO1ptVgWXb8oSsGamoiC0ugEsW+AZrfrLjpJHKOvPbe9fffi2RXRzBBAt1qB4KRg0T+CmCNJmR7iiYCBKLHLynCPrxlm+roeooXp9s1tJVj6TqHuOPflxSYR5bQxBQDkFzdOEqTo0nbZl+0acraEpVN8kff91nErwneR3agG63r44jb+5G7vipjSfGvD8NYOEvgVj4P67ckZ6ljoOh4J8V0AMrJlyo6BmMpkBYDJgbWzKxsB+QptWkKdFYA/c5ln8CCdh6JArQooNjbKIBJt9PIeBIRsbcZQ3QsZP53HUiuc46pNAXkhmFEPTrZMRWpglm+Ok6AsT4iQIiRYi07dZ9NZIzAoje2FYrLkcQ3S6qQC7xSBoe9SuO8Z108Ji17b5Kon3PcZWSsh+6FKcWYYuAxRx4BrRnX3X0vhlrb1y5soLF9aGKZI7Be/DrFNFLOYOVZlax4hCBacwC4Yr7iu0qx9LLkAI5LIwW2UZZbJAGTpV0Lw0TCFUpTiw6AKdBZdnrv9d/Cx26+zvwQH5T5EFko1oy5WVHjVOmuEYw1hdBMy8RWH58YAHMr3gQpY41Z1EsuQIMbZZoRqA47dKcIOgCPGaECUMC6uRnsmB/1K79PHfrqhlTwRF9ImklCd+TL5kK6xdISMFVE0BM7k1hK1LdI8cg7cb1l8OvLb29RHExPWG+SsWrcN64j66D8tplw5e13+SwpK0fAbwK8Q1Z2H/Cvi03uGaT4SrliS15kmSHR91BhimkiYy4j2B8rFklm9oBJRHv+58u/g58+5eRSCSuGlmJ5+HR5+ZQWRfP1CG/41hfMD9WG3kRkrmefUvoNDEYqZHnbFVcUHaackFcTEG+xb7jpPrwDdWX27Qri/Ni9sljuIpiol5zCYwA7FoZN/VLgTV7v6vDjpu9KL6cRg/ITtvtMEYGcmTx7k2JjSvZJE3DY2nUgAHPVAD/1sIfj9KOOaqXZm9Rq4hjYc/p6J9PFklWY8e7csNO1csftsOgUyHH1JJ+XtONhjy5ZwkImq4/96/Z7ygJIUa+Jm1Lu3i756mhctogB1+Fr1qBSogNl2P202ngy9Rmn3rvLbroVN95z32QCTWlZaGrRWUb60p03Y9vCngZQJKZiXAAs41QY2QUUe2sMW9N4kgJAE/NHuRMhRnsPhNg+kevqbG9SNSAlSwOAR+ybIun4a3u+1ugwECHqfX3d9oI2lmX4yNFRW1kQuXZmt
tnuow/fvTGKivgGIKdQ5kjX+PwvvQTHbNgIALh7xw6c9c6/2SsytYQrJeUJn2qs4WR5KctInM9dF1Yac4lBeunfs85vVMqUs5blTIrxdS2Aoxh7cGpQ0WelWkrGgiWOIp4ehLB4T8ege3bswtEHb8BtD2z3zFIsWtdSbUnwG3p+6YZbcOKhm8YTZkrLRlOLzjLSD/fsNieENAqxxMwWLIgeomJj0q3MOaqmhwosNSmrQUxdNucKoAGDrFUGyg1fu5hT86ePVaWCWeouAh36HdGrqIm8FSoaZhLCt9oHTCxoCkLTu/e1YqVwX7KOht98PRK/e5geSOZeQootUT0sXw/M78GfX/Yl//vQtWubSN6S71JRThGieVSUUViTlhMo/LjR5bXo8cmpqaLT/XKQAyvxK5erTxTxOHg1HAixaahegmcQW+IS5UKej2Oxs2k1A3fcv725pif8ppwlyP4c1j2iPK4ESrXxJMd+TlOgs4x09LoN9qzcXcipJKo0MNDNvLXtFYgAmjFAhJ0RRuSd1G+3pc9cL1QB2eUWQOF6riAIkBIBFsq0UGs4mODnyIGdoOdmv5TebJ8hyp+E4lGgkGXEGmNrhn2pNx3OC38a3cCMj333emyf32PEIsKTjzt+r4pTxMZ7q11yFhv3/rWsDXsLjS6SBGb3wNAFyNPNbxoBsEcMXHKf0sQ+HylgJYEYevLtm0ZYbzTD1FMAqXGIYNpH1eav1oxTjj5iTC7LQ9MtIAxNp66Wkc46/CE4cs163LlrO/qAHZoxIMZ3sIlhDzkAwk33REB588LMi2ymzdpbJRBZIFVBxPKQWj7Vc/XsXpzYgl2XwiMAihTqnC1d8mMYYOOnA+S5zBOBEq/4CvWQdvZIxtbUSJ/R/77qYDhxKuRmACNmXHvvPRgohbUzs3jAgp4Uj3HLzN0utlDqmS0R5UC1n8WVz3IpZLB8snUepwyO/orrFJ0zmhlXKUqHmAYwuT6gK6OsVC2ScvM34NGnm+DoPNFnOD8cD3gYoKHNWqE/ubKkP1ENfOCrV+LxW47E7GCqQvcHmj6lZSRFhP919nn4pU9/AOWundAOc5pJKxW66BGKFp1khy0uyJ3LBT/2Sj3W5miAAqOfUi9QUelZa9aJGw/GDdt+mF8RRHbFkFzVEt335Lwg44L98LAD7PS63sEnBh/iGcnHFYs5qxQW9CTOExHDxO//+s//VyinRTzTfTlC7PqscsAgk77BsEtvVlo0yBHpJajJWTJi641L2qurkO2W+lbEfYqciVsAZ1yylqDGnw8hSOJ2/Ul+O50oWuQFvGO2pI9f/V2sXzWHP3j2Uyerw76kA8Ais1iaTl0tMz356BPxm497kv2VeiPdlyqvdShab6/u+YazPIkAjp9KavMiiu4HZUtRI75BWTGCQthpRbMDfs8tsd8WAVg3O9e57JnB/TZOlbLE532mtrqKCHrhBAWBjNqaKhjQkvhLwG+e8xPYMDvXIUCBUkogpcAWw3+xlHqdcpSzusQ8uqyHUbolxTm5uoxryYlnRzMgJ8e2r0FFNkEAWuLXtjbufEHXUBKg4x4BzVYOYtZcWTBFqdlhB7Ti8vu0LcNP6bVuMfDPl1+Ne3fsbN9cSRR3s5Me+zlNgc4yEzOjqhikUqNw10tpULD8vMeb50DKgP3eWJ1DNfdXufQCZLnrqbLdqiphdQqWsPvl2OLLUQBV5gg26oQsJ1Dp3inZy2Odlx972JG44p470I5tE+SGBqNWZh8vhgUHcWToUm8fg7BcGnSkgShXPlMO69uSx8krD3GfAdz0wP04d1z/mQSwTKbBPurzSmWNK0DcpKnfqbFEH75L2BhJgx/CT6YTELk0cql7gvfEGC0jQ/BpS1+gSfjLv6n7EWiJQU1qPBWnS0aykG1cGzClMiDHUa0ZX7j+5nyCKa0YmgKdZab3fO8beMOVn4MaAGpgewjfWzBooKEGaJRcZ+/KOHhuNdbOzJj8pEFKGydmpRvAEwAQe
ODQctqNqYL4+hmkzCovsyILfgjHYmtoUgANzAH3VxBJeZgFqLLkAI7PgEC+b9xzO2okgrDFJK0yEliN0yOXmr8PEJLkwYqUg5u2CAI8cq+h+IxS+G+nndlTgASfrvspc0GfvBOSf4vGBRZxeuk3X7d59RLf8xyjsgnFG/DS0e+UzPJ3fE3waSn98STtph6flwc9k5adAh7ut/TxyZTt6JiDNqAitA16dvxG1rFYlumctOPPLkcEYE/fkBHLRFNnZENToLOMNF+P8Jarvuh/kwKqGUY1a48Zbq+aalkCJJlrTz/2RKyfnYWqjNL0PJRdtRVPLVGQPbxemr6JehwJWLylJgJnbkeIONqprB+Rve+WspesDUJOZ9GJLTvBb8Erjgbdm1JgJtWO47AnpNvc36deQOTvr7oCH7vxejzp2OPGKLwn2Xo1LlcTtF/fLFF7jm1tKZTtmlJOqRTzdd0r5WlpWgTKWwIDT26c48S2Ctindcu83VLvTPssGcgpWVoydVtU2VEQRhoCGMJPTRXlZOB/PO3H8Uc/9zSUXNUIFtTY6TVVaEe5Yisu7uGHb+5ZqWUiXqJjP6cp0FlG+srdt2D7cE8xjdTFfiqIZM8TvY3E+OQPvou75x8M8geRkKuEd2CsYLs6lNR2ElE5Af9UnXLXk7wYZIcXZA9zreFFFczGooGoouctAaa+JHn4KSQ37LEHid9dZe2FTuR/f/0ruPmBB5beyNJlrepT4GKsMotJT2GypIKO5Y/5MZB1rM8ph8SnKoFLLl3OcOaqMq7xcNE0BghctEzC4uKmj4LxXcnKwsDt92zDpjVrcNFTf8y3dyue0AgGKLol9jEokoCS22kUEY7bdBBOO+7oxdZ2SvuApkBnGWn7Qhnk5Mhv2SCUOlEzjfTAwh6oTHcjwZIC+cBvj9l0BELAVBLAnUSKnUq9UL/eMA2Smp6fWmlZ1AkefDiH5WCKriXRmD10snoZLRTIXuqZe5SbKieXxPK6efsDEwWuHY8SMi0VGErg0yWh2JIpn2kquF4gD4W/460fCs0hLUie5LSVKL+XhakHjT3dl2QSnUdbXaSsPb2K7AArQPu5l8Cf5Pv/vnEVnv0X/4Cbtv4Qh8yuMqBJWMFSzsnkLGSMxkdn1FiRFJtrrs6VIvzpf3nm5FbhfUTTqStD0+Xly0jHrj9ksowEA2oUQParZTfStH+028wzoSaYgaPWrsdZhx2H9TOzePpDHo6v3XUbrrz/DnCfEPZiW+44ngYpNBuJ9pnUT31EFN1zy9tTI25/Ym46wMNLreWTAEea26LbUqEyTG+hEwn7WNF8W6QaoVRmIsdiOq1ImRWfbu7Z9qHF5O2iJF9p7hFallP34cEII/wb8M++q2GxYyuRHP8E7wCElOTpAqYOgOn26+rbwJ1QlLdLvjhNDPYS2bz4MSi1N2u7HPyj37w2zCv7qQRP0qYechfz1mc9ArgCHrLxIDz6mP0gaOBSg939lKZAZxnp0QcfgYdvPBQ3bLsXOvE2pVZLN9M3IchoNuaksNNJxH2pFOGEjQfjjvn78fUf3Ib3fv8bhoeb+vFgJ2d2iWXqVd02pT4gaWO0nSHlRMkoRF//uDOX1y14Yh05MZO19DBa11uy99VSQZkZiCCVRGKUPDbtDbCQBGWZ+hRG3EtO4/DsbBdhtekoq+MrWbq6RmWyvJYqvE/bOwUoIwbG34hNV/KNCfycuBlgeLEIZksX+W4HCCnBMyN+UA2xsis5BskxKiB0164lkMUwQGh+OMqkmtJKpOnU1TISEeF/nf4szKj2Y0iHhGGbLwYXYZcb3E+s1NLMuOyeW3D5PT9olaAUQJXo5TIyuHJSRLLnC/K5XjO1fARoQs8mi8sUlim/lDYazskpLH+e8PcJ+Iz75cSj3b55HHllMcYQLQcOF0ud5pwlKD9VzfLrOB7PmH8fPsLY0zdtlmKLyxh8ihY6ASCS6Rih1SQ1/cRNutZnnJMDDc+4XJJ7aHGTLvU8e
o0dbDci5ZDlJp+PBFcZItguqFC0S3PImtU9BF0BxEt07Oc0BTrLTI/bfDR+7VFnZ77wdq/s/HAAiQnSb2LKMZgAv+IptiIFjssqLjvUCl1WHGr1dvB1bPxpop4t6ck8PrVAYgn4+AG8ja7jQAyhATuyJ12MeKmONmUpkn+z0wGL7IEWU4+WRhljWD4poEgp41zeHEhyACBQutQtU6ygF0uxspd/M5R8DXJ55Q7kEZiLV5q5z9IH3LNOwN6KkxiPBHLFVkgha1COhtlTixNl20PZPbdY5wZ6ghfSz4JyN8b4VPo84+/cfBf+97/9Z3+my0RTHx1DU6CzzMTM+Pfbv4PBQEMNaqhBDapqqKoW01EGeKgqBTDK2jeeKVGKUAqs1yxFB6gyDs5Nz2TOiWiMfqPJH6z8sufkd2Wf8GtK6VhXTh9gYgGNP2Ize+kLmdRC02cImrvWqleh3YS28K3cMWLtRRyz79kQfQvuAhQlcFDK6KdgimP/dlkCIPSiCFxkxgrh/Th9B8AA7HvugJtwpg3klD+6nn3qZp/Af5Q8Da759rP1UgBmQcbxd2hATtDWufpr9Pqss1RqAFt+3zb62099DZ++6oZJJdk3FL9/kx77OR0wQOfmm2/GS17yEhx//PFYvXo1TjzxRPz+7/8+FhYWivnOPfdcEFFwvOxlL9tHUgM/nN+FGx681ww4KTxUxagG5lAKizR2mN6jhu7W/YFlB9bxWaxuQnujz1Zp0ceRk52BJnJzUpguYTvuL4ZKMkNYgfpSLune6kjImDAC9tJoNmnbWctILyadcwoRcfAn8aMHSaAqQAPFDdElRw+wkcwny44PaS1h0TwJOXsbESkRfFzy5SBpkU+rLs7gFQOwFGjrw5thlmoPgdGQffwalySob2ZVVy8qVTTFq28bibSKCO/5/DfHk2tKy0IHjDPyddddB601/vqv/xoPfehDcfXVV+PCCy/Ezp078ed//ufFvBdeeCFe97rX+d9r1qzZ2+J6qrPLg/pqIfeFptMbsKKhlAEfuu7mLUFKftSeLzfc1bhDPpeHZMbEENH3ttQU4qIPSpnIMVs6avbUcpYslpvDg3UzDCxGZw4AQnQdmXslCviJtvHnrj2ae0vZMp2RqKVoXfcjVhNZT2IeSWXcwTny/UiI1imLAsZe9ReXMclzkp8QxUxyyj0FJCTicBYWkSTnxzL2mKTQuL48LeRwwCcTLiLgK86phq8Pl7612gzsvFixfFFbaWZ86/u3Q2uGUntzxLUIWgqLzAFg0TlggM4znvEMPOMZz/C/TzjhBFx//fV4+9vf3gl01qxZgyOOWJ6lgptXrcMRq9dj6+4HoztlgACgtbS7TezT+TyKwTpvyEtZYsJO2w1D2a7OavcGbtqonTdPpBxgSPB0GkuCHdcTefOTRGdC9t4fKWdGtOS3rJCAJiC3ckWHoKjFq48MfVRcoHUgKpkZZltAmOScA189iNFj2424jFx5UkH3K7yVvvysx1NEYxm9IozZ+52zr1PRB6IPeojAmTxxn0k4+EBoQYrysvsMCS2n31h+eaP49savqPxbADsAjG+PZNXdLBCrCAAAvIxJREFUNfrtIpQWr5vKvH7uXWKTz2M8FaWxbRb0p6ClcCvca7QUPjZTH50VTtu2bcMhh3THqnnf+96HzZs345RTTsHFF1+MXbt2FdPPz89j+/btwTEpKSK86KFnBt9ss2oJiD/L8kfVVnRKJbaRyMD8lCUnucRdGV+f5MaUgAc57rwvkdz3qXUTjQ+N4nCfqHjPrnE1N7F30KZIdoqDb0QbaSbli4njhF1pxiTKHAFzDmZQlo06TAG98WDvNKmHVaaxUstXX4JJLhyyoAx2bvHP3YvvU/pe0Api0WPuM1WiPqn28J9ZLENO3hjsLoZSuF6S29JBBgZkmCkzNBYqkudiiw2XV40ANbTHCKjqtuiPO/6oFR80cEoHkEUnphtuuAFvfetbO605z3/+83HsscfiqKOOw
pVXXonf/u3fxvXXX48PfvCD2TyXXHIJ/vAP/3DJZH3RQ8/EN++7DZ+843pjHCCNgWIz1aQrEwpHUGOpYHvuhnVNb+KmrMwn2HyIShn+unYamcW/voQQUBFAttcLt2QwYMfJ4spN0xjWCmI0laYwW58+RcHnJ6JodVpsMUoBQTj3liCfb/cuK4hCMMoOS+Uov5RnHFTYIYNLI89FTKWeT2NpqEsxAeX6yHuR8o6TtX7k4halaBIEaGVr4Uqk3jRxMk7jp5iI362xQcw/BiJCPiDfjoozojoQFL3K7KZ9YnljkCND0HS1Q2qgBRhLccsUZXjLfYulCADAI4SDEWelcayitmmV7YCpTfCIow/tqMAy01KMbKYWnb1Pr3nNa1rOwvFx3XXXBXluv/12POMZz8Bzn/tcXHjhhUX+L33pS3Heeefh0Y9+NF7wghfgPe95Dz70oQ/hxhtvzOa5+OKLsW3bNn/cdttti6rjQCn877N+Aa9/3DOxfk5hdqBRVYyZAWPVbI3VsyO86GGPx3lHn4wGUDRTRMZpGd5xuRo0K7RSwMM4Omu7ssv0WEoxqhltjoGLk26coJWCjcKclt9tP6HchqHiyyhZp5pE8QVu91Y+fzSMTG074SwvNk/o5M2NgzVxA156yVUCclENyPjxsLX0uKBpLDvnwILV0cN2yNWLIovZxP3XJBnHyMPZH4bGt9HESCRf6KLBXzRVG2NN93tGKbzp2c/sX5f4NZeOyymLUQe47FOmH79YMCA3Ek1Zg3x4rFjeVAydVkEZATKPzluUXJlDmNVbmfdFnvtl9NZiQ5FsOWAXt6li4Npb784IvzJourzc0Iq36Lz61a/Gi1/84mKaE044wZ/fcccdePKTn4xzzjkHf/M3fzN2eWeeeSYAYxE68cQTk2nm5uYwNzc3Nu8SKRD+/c4rMeShv2a+LfP1vf/Wr+JJh52M2QFjpDU0W6CX5GbBkD9vp2o2+EwNkQ1w0qPWjZC/i9LsrTpmqowZYOH0TGQ9V7z/jeAZsXZTRSyHTaK3k0Cj2WoiQRZgNL21uO5+WsvLRMot1SQ5Rm7kS5kkhdF6Md24JPkvtvMi2QEWKr8E5SyKxzj5xy2nkL70ThEArRnvvuzyMQu0pINPNU8ZsNPXQCoBDCeux2n9W6ATiR0Dsb1COnOUvsczobpfnXKvgvPjaa1hECAvEE01v7c+EPtWTmkl0ooHOoceeigOPbSfefD222/Hk5/8ZJx22ml497vfDZWIONxFV1xxBQDgyCOPHDvvYuiqB27H5ffdkr2vQPjKvTdCWyuLcfLNKRizPqiZ1ipp4VR2A6CqgUY9yrVhONRqBSas2IIdGCdADYDYylNABIIfB0tIMkEK3RwTExQR5lSFBV2jDvLGeTLF96SkFUgOb11zi2mT7BMIemjL1HWwJfPRJMo/0dNzD2NHqkzy/DKCcOa8g7cXcSnAWEwpnpFCHetLsfy8dWEMq5BmxtV3TmANGAMASCXNgLdy9q7jmHVK4nBh1fF7ZZG4laqHzNNVXhS9uUtOL2PCWqQ0GkswIw3KAFQWGFEFHLZxXUeJy0zSErUYHnuBbr75Zrz+9a/HZz7zGWzduhVHHXUUXvjCF+J3f/d3MTs769Mcf/zxrbyXXXYZzjrrrN5lrXig05duv/12nHvuuTj22GPx53/+57jnnnv8Pbei6vbbb8dTnvIUvOc978EZZ5yBG2+8EZdeeime9axnYdOmTbjyyivxqle9Ck984hNx6qmn7lP5/+HGLxfvazDm9RDmrVNQxNAeNJi3WQlriHOQmxjs2F6gGhh/Ho7WZZrIyWl+DgiQatYgOZNGA2BimUKQI69QZDv1/AWvQ+ZW4/876fE4fM06/M5XPi5Tp+vWljpZlxx1Tnm5qvWxGMWWFvdYE/uUBaIu1tIxIQ+KfwQai8YHOQEfge/2NtiRim6SsmLQsZcUgqTZSmFh1HcpY/OHAO/X4nE0UO4V7Ps7Dvm3QLZJ/A2kC
k0o5MBqVfqMGaFfUE9KbvIZPcuYpf+tjQXo2WedMl6h+5pWMNAZJyTMpz71KTzqUY/yvzdt2jRWWQcM0PnkJz+JG264ATfccAOOOeaY4J6zDAyHQ1x//fV+VdXs7Cw+9alP4S1veQt27tyJLVu24Pzzz8drX/vafSr7toXd+OSd30G52zF1GAwYym6vqzVQ18aOqig2AFhnZjs81jq3BXiOTDdICiBma26H3x4iZ1GSIMRNaTGbhcgOGZAiMe0U9iiBgcNOg0neqXIAxtq5Ae7ctQ1vv+Y//fYVLD9yP2+U6E1zcXcSHzgzRw7OGerspVNpxbnBsxHYEeeTAJWcNWNMatUoAAkOgU7ANHfNWU7GBSOp+op2aynfXNpUGpffpSlsS5ClceqjgRlSGC3odtt3FBGcy1g0lpJv6ITvRhBjJ2E1kfzbW9lEsuZkcgAstdN5nwfAaO3EHvCNFhOkiAActGY1nvX4k3sUOKUUjRMSZtOmTYsKAXPAAJ0Xv/jFnb48xx13XDMdAmDLli34/Oc/v5clK9NCPcKvfOVvzZdXCHSXc5ytKg4sOGEe639amWFVA3YAD2S88nfB8CINys7vxkZntoGxwrTtchuyq7PAYOUUYDrAVgBMIj45p2pnHPrBjgfwgR3b2mkcWAr8g2QCWxsOLiTIFDT2UlJrdeodXE+K4TUQh0PwWNQ+SikHcqiQJiVTLp2XjaLfPSmytKTuM4C/+Kln4jc+9u/d/AJQQo3sMSAR71wAJ8W7leLtL9cRv5wMksYFqbVxfB3ZqOYxruz9agng4VctUXhbApCJ/NcIoFFzXhTR3miNCWJsL2R323jEFpigTUpCl8CXI/eOxM8nyvTwIzdj1ezKVqHRI56YB4BWGJW94auaCwnzsz/7s9izZw8e/vCH47d+67fwsz/7s2PxXfGrrg50est1H8eNO+4W0zPpoW12CXSH8iVr7TGrpzSIGHPVADMVUFXaHmz+Kg2yq60IJp/jQQRUlYNDrqfgoJw0GCFfN59msV9eVL+ue+SHsrmEiHpOXpoewvEGMNZWEXF+QrR7ORzTVjlJCoqmQDmMPRouUSxfX54cHRmarSr845VXjSeT27RTKMgSyPHn8eco/vr7JStO6XHH9cyBULvSSeVkLJXl5HN/o+0sCGg27dQG+AT7UU0CckQ7F2Urye7kFFtluFVSVQ1UHIXNkv1OqQsd477c5FSJmDxy/y1FhA1rllbJ7xWKv61JDxjDwMaNG/1xySWXLKmoLiTMr/7qr/pr69atwxvf+EZ84AMfwMc+9jH8+I//OJ797Gfjox/96Fi8VzYcPcDpweFufPC2bwCwQEJp1K0pJrbgo/mg3Yqkvr43xuJjLCtKMUa8YDqIRNpKMbSG98mJAVYzZcRgETq0mUZKkSlXa3MeJmt6cRJ1C3L36nHziRhooheXsscsCOFy2aQZpIdY1hQeRxLujCwcW0W6LASdHX4PuUuWmF5KSyJxpC1RMr2QuSQdAXjaiSfi3677bocQoizdcMzxLpbJCKdOE2WMDXJyFH/Gwsk294VnH5UDHD3ASgD6dHix+LanrB1sAAmAYAqr039GAC5psYpj2iTZRGCVazSrLSmRLveeuXt1u1zPm229lPGZfPJjHlqo1MqgpYyMfNttt2HDhg3+es6a85rXvAZ/+qd/WuR57bXX4uSTm2m/XEiYzZs346KLLvK/Tz/9dNxxxx14wxveMJZVZwp0lpGuuP9WLDRruO3GnRq1Jq/wFRmLS+iA2/je9FFesW9DYz1qAwoHYlL3DbGx9JDbpyvWsmU5jI8LIOPfSIuUJvil6TI6cw7s9A23b9xGGMnpwRggIPE7aEOxpL7PvloO7DBa01i9t1GQcjrtl8sXd/CJdMUSS2Anl7cLhJXAUwfIcck2LWIPOqk45cXsm94DyCzKAOZefwmmBZEooBcYI3FYK8TYZPN6kJJapSXbUO5DJZ6jX/AogRMjP3/ACKIYy7+y/FY7R
FYqoJGjNe3IsJHXET50mS7aVDUm90yIgKM2b8TTHvuwTIUOTNqwYUMAdHK0t0PCnHnmmfjkJz/ZmU7SFOgsI6U29CQCBhVbXyL2m3G6e+5vRYy61lBEJvgd7MBIO0uPUKZB/vLwquQgaFM4rqhsDJ667rIsOUDDoCrNy/0mAmjAdvNRm6akdAmd+2n5zlMBXEdytoZtTqRMfYIhJxINVQAfNp/0E6MuoJQDHWM5/HYNqRdJk44YOwBVXMZ7vnVF/1pE1qL4UiyDv7fI0e/YVLK6lGRxVhvxW76OYz9xB1gi2RC79bnrESAI5BAyBCdy+ba8Xof50+979DueIpPE7WfJrnzZNbL4y431o9h2bJzC//qV52N2Zj9Qn7n2HJfHGLS3Q8JcccUVY4d/2Q+e1IFLj9hwFEzEm/abJAPnpawZzMCgAhoQYU4HFUMzm9VY9nMluxS9uCR6EWSmpcZx94or5DRes6JJVdzaaV22Rbtdurv25Kr2lGiRJScO++6vAybqsetM4tFlZIVrYRNuLDq9LDvxs5MxP0rKgd0/zYViaR3vSHaEPy6YmuBdzFpgunj2UZ65NBymWSrIWJxSEI3cel0tyPF8kD4fi6J3yFdb7BnV4p9om5iHvBVYsRy4iIMfcpg3AEvUTteqQ4Y88HFL7OUO6ALoxHIneRHh6E0bO1KtINrX4L0n9QkJ8/d///eYnZ3F4x73OADABz/4QbzrXe/C//k//2essqZAZxnp8NUbcdohx+EbP7wJqc+ryzeFoslvb/GB8/fxoYHtdQ1eEv/zlLrJqaC+X1nTa7biqLjRpjTEJC0xPcaxopMtzwmYju9XHnk67t+zB//vxqua/PIvG0uR58kuKGIoZ1Jer8g4AD1JmR21EFfEs5g31EpGhDF5xaN1X8SElikvTX8l7ZVW0YAoUEIfBVZ6TSPZO2WNeU2iaErttYRgy/EL/kZiFMuKLWIxT5EMQBCEL06fAn7B5yoKmaj+wiKlNKDdJrwWcPVtVzXuysspJalPSBgAeP3rX49bbrkFg8EAJ598Mt7//vfjF37hF8Yqawp0lplOPehoXP7D7wurTvdHlHLYldR8h4anUhqVjUqsR11dl1PUue68LSeRW4JOIhigTItWnlL5ki+n1sHGXMmBhQL/hKUmVWZzj/DzJzwKH7vtOuweDsM8uSG0KCO/L1iENyTPPn4VKQUYAcJknljWhH+Dpy6lL9PZKN2LUjx96i5BllRISSUtpOk5Su+kqN7JryMCVR57i7RLMrjOlT8hL/c35ucAZcuCF3cBFF1PWYZE/pZVSPIdA3cnqc8Axp26fbsAv9KqzzhwzcxMX2mWnZbSGXmpqU9ImBe96EV40YtetOiypkBnmWlQVZhVwJDN9JIijYEyw56hnvzxuFVc7hwwMXdGNSIwksqrwRwvXciiBF+GWYnVoHFm8lansabMbGdUVQAqhtZsdlvPAjznrJ0Ai5EViO1WFKFJJgQmbC0zH7n5O+FWEj1Hct2WOAF2xmyXrOWqhC0L4Me1GWXarFM+bfKO5VQdE0fnMZsWDhVmBH8/UXZfkDNhRx5MnyYsR6nhwqTgxPPpqlMXKsjcSyozW6d400sWeVpfXARy5F8PUDm8lgTisXxCnkUhPPk5M7x/H8l1FR304I557NqzgDWrZhchyD4iCUAXw2M/p2kcnWWmkzYchRoaioDZqsbcoDZxbwigRb6lSuw47v4qaOGUHJO5VilgZlBDKdHDAXaJelkml0YpjcGgtrF7ah+fp9NcEIEOJ3s1cEE1pEyi11IMNcNND9YyAlkH7zjIR87yoiJncQKKaK3LUFYir0R6MOhbRk6eeEROPcstlNMJcno89tZ5AQiy13ZkrXhlDVn8giaJaJyyXmQASCc27vN52/Lk69erV4gTRSDEfyo5Zg7VpGLwZM7jawG7XFuM0cWpGsCokKXU3gKsybS+/prL37jlMaw1Pv/tm/oJPKUVQVOLzjLTTxx6MjbPr
ccP5x/0e1W5DmGmqrFQ54JC9O+aGU3wv8GAMRyZ7OE3bX7IIIGDSkMTgaOyTJyd9NDbgJxwtOuA1WikYYJc5KRsn7r8zkG5mR6zpUdtpiqzN1fIiEQaGwFazH+kgJXP1mH9alGPPOzKlhF72SnwxQKPaIzdZeGxlrBkmV0KKGdFGoeHSBO0WkEpUq5uMc+usvtafBwJhR9h6DKPkrWtQzafrKuMXH7XqJKfjtIJ3Bjk0SZY4ThfQK90Qh4pWjEWl3ieCkA2skXLxBRej0EduX80NzuuICMIGyEVCPdt25kRdGXRSp662pc0tegsMw1UhT957AswSDwJZ+VJjxK7e3IHZJR4U521hcBQpKGI/VEpZ5ERPBJ881GcZTnNX3c+GHDCshP26LnIyQ0fbX2CjNUljshMClADDVK1GLaGTiSu/XLRnOM65C8kyD/H3LPhpo5ymUdlDwUwMXKRlPMDTjFcde9HZLxKirXYTmwMINNFydadBPNFxr5FwMaGXw7kAItrw9Rz8RYGeD+S3somAZDkcnDPzx2Z98LfW2oll/q2bfldFp/W6q/Yr8uhplz3ktpclAHoJjxHCAI5/AsAbAZKhx28wnctd8RLdOznNLXorAA69eCH4Ph1h+KmnVtb9xQBc4PaWlUJI63sR8l25xs5hGnGXsaoYD7geLVSpTQIBM0xumreaLYjm6aEhon0x1HKgCWzR2derXiLi2LUGgj2noqsMrkPK7mCKZdOGduN9IVxdQIMIOqklhxueNshgN8UMH42KWEZgRO1GKWyjoILcvyUE4LKBHEnlUStedEWRb6te8CMrvLHkTECOX3SFa0AIs2iAVNKhtxfESU4SO8uStni78bJGsssgwEK8mCnTlyLZcjUgeQ719NSFchgLUg+bk9Up1SUZwIay45cBu9ij1GTt1UPNy6w/UJgCK1NhVgMGEg3ewoOGHj8w8JVQlNa2TS16KwQWjezqnjf+O00k0hEDAWNipzFR/aSxnJCYGF9MfcUGJVizFQ1CDWaIV9oNakqjUHFmJ3RmB3UGFQjN+njoyfPDDQqxd7C0qUKZOBCUnZn9IRVpiv/ONRa1j3WNJTwSQrM5FwWxgKtpqh4uJxIn/pNUW7q0Pcyn7QYddBJh2wOfgs8uEjq39a9Uo4R7bcPv5ZZPz4XgImiZxJTZ3PFCcRrFRxWJgVhUbFHpQizgyqUw32+0mrjKxilczg9lo0SP/uYwjhqw7ALSqYP/qbuSYOva6M68ZxkfQXIcZYeErKZduW2DBoAcwMKOcyrtPEJUvZaNQLUkMEjxv/43x/BaCSQ4QqloB0WcezvNLXorBA6Z/PJuOqBW5Df/NGsypJd1UxlhjksnGaZzTSUAw/MQM02CB81q59IGcvOcMQYcRWUU1n4Ky0oCoAa1Bha/xfpy2PK7VdPIjZRnVt7evni07X3Smex42q2ciC9SismYb0KkAYBRaeCPqPbUtm+XVlK3ViLspswdbOWtHZmBucc8xBc/8P7movRqD/LJvdMPFLq/6yKqQXgCChl6el6D0vK1lHOQVmCnbiBciAiLqtPs2SAQl0znv6oE/G0Ux6GnXsW8Mmrvof//N4t0D0/PqfQ2xfF375KjZvNRhO38hc5Uf0oQ3b7CglwxKXkOIFhLKK1vVAB1uxsy7NvbcqM3KNbuOamrbj0E9/EBT91ej7RSqAc6ByXx35OU4vOCqGfOuoJ6BoKzaoRKsUYVDVmKrt6ipsPlaAxqDSUaiwRDlgMlPsd6uaZAWO2GmFG1RioGgO7w3nOQXfG73Ke0vHlL8LlkUvfm3x5DST78ZmUM1O2nKYNnHVmXLlder8CTY50FxM4rE/nkRrup66XqKO5do+GeM9V32qV25d9U49Ym4f8uqiYxAOLXhLlKbIUJO/1sRolrA4xq9Z1mdYaUruAXUq+f//2d/G0Rz4U5z/hFBy6fi20LrxIUgYhc7B4UVt5rD8QCetQiW9qCsxRYMiURyreUuKzZ6Adb
TzTbvk2ZDg3PcWAGpmD/K7oaelbskfnsry/+sAX8b1b78GKpvgZTHrs5zQFOiuEbtpxByrlNvhsv2UDpaEUUCmx/NwbGNhv/gmElhjz11xXkQ2y8ZsxilwRI/QGZhDMFJmZJtNQmWXiDYDo/ircVNeg0qhU42xNqEUP3KZj1h6EZx97SrL8uE7xbyKGqhjS2zCPUxpQGPDyQ9il7QVSjttdGKqz1J6iaQ3UEsAJ60QfAcJnPmZ7JAbRyTSebSIVRWmjSzEfX80YyVHzpwinhAiJ7G0xJcBw0yIlGePZkISS/bW/+zDe/qnL8MFvfKclU0AOILrXPgYJok1a1pkY9EWPtgtyutdC1cAqqvz0T3I848CHA1vytwRiHWUavgwwN7uop+Ty9cmDnSAdkPQR0gDe+k9f6JJoSiuApkBnhdDn7v42FMFYVqjGXDXCqsEQq6ohZtUIiowVpbI9zoAqvPJhP42NM2v8VBWQV5Q5IEIUdtJuNVZFJpqyXPLu0s5WBgDFFAKpbiDirTuVXf2lgEqZVWFu+MYAlCKsGczgf591Pv7ltquzjsS5abTAipMBQjFwKYMg2J2QU722ZJ7j4e5R8nkF11K9u6tnWUSTkMs8ckJ2KhQpQ8rCFAuX01JdCswq614WnxLGKt3LvBtJHgmgEmTn5m8n5kvhZsk7Ahbu75e+ewv+8lNfCS0vqU/PAYdRQnlHZbks8aMkoO2H0wPLMhpgM1yoQXbbBarRAJchoIZAJe4lHYctw2KxzGaJuANKMQ8HNHXjc+PzMRcfPTlLkD2kT9RXr74Fd//wwe4GWSaa+ugYmgKdFULa2moHqsaqGTOFVJFxHJ4daDOlBI3ZwQhrZoY4eu0qHLFmHd7/4xdhtlJ5V5EelhZ3X5EBGhTkSyvhQdW2b5PNr7zVpBs8NNNMzXSYUsBgYABQpWoctWY9/uFJF+C2XfdjQddRnbqXiUsyTtNhb+2CHOanttCY0hG1SwnsFGQim4cL/hXZqLGR2YBbJyKPdIqW2ss/PlnI5OSbp89WDu60C8BI5j3S9LIyTHSzvxzZT42ity5+bawFpQRy/G93IQ4XJQuom/NY4ZcoOVSxJ4rtUWbRlJlYFk+wYKNuP385mGrxQtfzazK2AI51KJbdkdIGwHhrV4rliKHmGWrEoNoAKQOmuJniA3D3/TtKki0v8RId+zlNnZFXCP34oY/CJ+76SsvJF2gsFHLV1d3zP8QfX/MenLj2IRhxl/c/W74MDQU5fmn0bN66kLSQAHCB/1v3qInAXGsKNrlMSscSuDRpicwU3UwFHLfuYHxv+93NPevLzDZ4IVmhU7hB1oEIoIrBtTUDiOjRbuuHxkzR4pSeGiNG4w8eJ3CVTNedCggtaPv4LzVKxKWj2L9bOltTxCOQqy1crw0dZLsFbAo5XZq+K6hi0WIHcMfHtUFPtpPSYvkHy5gXU558nkC7neL7Y5B7PVJWqz6ZSYCsgKnjlQJgHdSZNjdgKAFGWFlTyI0ZNApXuwe9kwiyefD61V3STWmZaWrRWSH0+IMfhoFq+4V4vQurFKM5mO8+eFvPEszS8rlBbffSEhYBDv/GJJ2I5Wqu9nYQbesGiX9LsuXG4wzgB7vux1M+8WbcM/8g5DCDiEGKrfN1eYuJ2EJFFbccE/xUkbf6hILkV8Q58TNWHXmIGyWQk+QjhCEHXhLgtJ2Xw+rktJj7W5k8xfpmZZMIHenH0WcTUGvhSCtwbu67Eh3I6YGxxr65FKNaZzlI8ZH8xymn1Ii5cvYm1cbht2Vds/WTPkF7A5B6gCXasmQ1TFnPzG8202qx3BFoIgBHblqPow87aGkqsBeImJfk2N9patFZATTUI3zgB58qpMh3C8qDjbLRXqEBUQOlwQyMtFlWzuhQlBG5QXWlGKw5AkgmglfgCF1cxp0yM7RpZ72Av7jm03BhRLQ26l4umTc7qAN13ZRVqlduKs1PGcWWrIJ8KctXlLn5Gxjg8vXmqNP2yrxrT
ixWzD54YarC5b+dq9hFW/nNNlMWJJG+16smR/0UXSfYaLaNp1jn+yssYLn7WSuTqEvxLS3hYOEcy/HzidtkCUDVUvCRFrJSvV3APZXzr7EXF21xk2bZ+DrDr7Ly1afu8vzzELyqYTM2aHUDsiGYsWHdCrfmLBFI399pCnSWmW7YcSv+6DvvwL3zD6K8D1T6kyWCiUvDyKRhn07SQGk75UygMQ17ktegCr+kUU1RWuN35JaIaSYbD6jFtXe5LqYNRdcdVRWjrlGwmAS9Vatsv5rNIR5K5Ys4cneasNOJNF2Up8VPittFce/cLM8Li+zi4RJyxzRWq07lNuiC5QHIScnFMNFvc5ahVFtJRZt6/H2tTC5LnDgHLgKAGorXBUj6tTnC+sRbVYwRZLFEJXBHMEAu+1wTg4aJBJBjOgl6tF06HiUdy4nWWX9GaL1/rddJVHRuJtdnT2kl0RToLCP9cGEbfu+qt2J3vQcUfMXj0YyqoetK9CVhz1IlehkiYLZyvWCNPaMBSls4TEbWN0jMLVTE0AwMR8r7+mgm8ER1b6dvlsxzwjeI7b3m3Cy6cGmb9ER2ROguKYB1h0XHlcFWE8ca1YNRFqgtlM2fCePI5Epikc9SyNAajceWHlGdlh9KJD/FJ+PWrwfwTPKMwY6jLpAjlmb7EmOwVCgz6ezaY6QdN2NwgxA4HMstEJbiCw6eN0WgMn4nM0u5W7SYd5nRTP3p5mGQ5uT0VIyLimK5unGal+RpRQFgVoOedNxhY1dlX9JSrJo6EFZdTYHOMtLH7/wSdtd7UNtehbjZv9oQ+5VXBEbNCiMdOhMDxmoyp0YYcYUROwUL73ZcWpHlyOyUPkBFJmAgAzZ6cZcFQ15zG4Ra2OYHXZG8AGYHGrU24IKUBuvSyCjsrrqcmwEDZuo6pY1CksvLY76BQ3jAIwWgXKRV2G28tDMLNXk84KPAMmE6b1FudCJ3ijfu34X6L3WnJJRTSmmkJGH7Lvu70tqQAk2ynB4AYCKKrUQlq5G4H1tEcmAvacmRdYrvZcrn6AflXl8ZcqpLmYmyPIjOpXMAIgaCYh8peR0Q95ZqjBTXJWpHDzQ5E0WZomyyHRPlyHeiqwpBs2jGz//kYzpyLDMtxfc0BTpTWgx98d7LoYUiHBBjaLd5IABz1QiKGsBQUY1ZVWO+rsS2DcbJeDBgszeLdltF9PlkmzSVYszRKEgxUBq1NuAqBBru3ZdIgFERQApg1qjZ+em0p8WkUzD7GBZtM4BxIG7uayY/bdWnV1WKoSMrTGmJuwdGiQ/bWXga8CIZUepPA1T8dEKUQLKQS4V9+S696JHrvmBnCbROroMrjXrd+5oZvUvrT3gx5N3Zt3YlkPdTgMNeS0LXCHB1tmTGkpO1dBT4BnV3ADhqn6D9S4osBjjuPOYphdEWeydAFNn7EfvmXpf5pM8zS9VHi/eGm7SUW2yawNgt2Tj6C4z9ybziF5+IE7ds7k44pWWnKdBZRtpTzwNgDEhjyAMQATPQqNlMK3l9GOIJzFU1uAZqrkAw00EAMCCN2oOAnPUBiHsS07e0l04DJl6Ose5UIjWs5cacm6kn8oYKpcx0mYnhZUBPLIcZ1RuVbSIza4xGynI3UaAdyHPKU1m7chP2vk/P1NS15LDqVpEZJSDBDgfuAHDOz/HS7RJ1aQE5ak9oeRIO16GFxcFNKiuZXsghIgm2Eoq5WFwWEGbyJRRcsUm9sk+kigAKA639kUJh0QAeFo+qS4YEj0AGSznvtz68ScoTP78uIBa/Q9E5Rw8imPZy4KajjNa1XKVK754Dc1I2wDibN8s7W3myIEfyjeUJvi+G9z1zZehurEYA5uYG+L1feyZ+8oyHdwix/DSdujI0BTrLSFvWHIEdo/sABoZWk80ojVlicKaLdN/+jNJ+bjmYglI1dBC7JjeUaX5xMl1DzrIjf7fi/BAC65OT1az24pZVyN2vbA/LAGZmGHUdgqI4tg+zAV+juvvVnVUV9uhmZ
/ZeZIetBuuQzWv+mnBFtrJJ00TERwAkzij+FmXi8fj6+/hBTUOz1OLuhbBmNw8GYmWcAz8phdotdZvk65e4HPDvUtoJebKyR3w8v1R940QeQGX495HLNXfJp6WDPNjIldMTEGbvUQNk+mD0ougSPMjz+Pnr6BsUaWMZaGi/PGLzvvsb8r3uQR7QoCk8szcYjQTfDHMG46D1a/CBN/0y1q9d1UeC5acUSJ6Ex35O0zg6y0gHzazyFpBZVWN1NcKMn3TOv11EwEBxsPWDu14pYyGiAIantEM8xst3Haa8GgQONvT0MXWsOk0NvpyVp4qGBS4SsbHmAJVqrESpTUUlP+NMXF5OokA4aHaVr1afUBCGt5GJFEQbyiX0Y6h8yv7oSN+RlmC3k0fz6PxBzblqrD5BmlQxyQ6R+krepkjxZ9P0teK0eFP4uwSWWqinLMdEYUMcDxuFN2hajtKgQ3ekFLkDBdHjzMrRRe676JChE5q7RG5rBBnLxllrRg2wCiiOK+S2cbBsyVpuaMRAzcCIzeoq3a+KAUoTD9XvqTViYMigBfs7V1m3TYQGHrxnF86/8G/w1nd/Fjt3zfeRYllpugWEoSnQWSbSrHHVtmu8QndAoPFJ6UPp4feqauTj5khlXc5bKIXhl4kTyiAkd10le7pEuqrb6mGmscoBAjUY98zvMG2g8rLJskuHLEc6MPemPr1FTzCWuYOg3dxpDHbk32x5BeDbw/LSAoVC4QRFTtyBUhKwFeF6H5w5CaITlhF3xG8lyRtuY08HAuQhwCHHOD4CObLsQBZEaUqUVOpNfXwSjo5Ueob1ETTxbNxu4cr6/Pj6yr2uGMZp3W6tAKC1Qs1jdg4D+PW26BCZLSAssPFbT2igGgKDkdxrywAfjBp5fP1GgFow5e7as4APfOybePlr/wm7di/0kWRKy0xToLNMtHO0C7vq3f5343jLrV3G22S+vthKAsBvjrnGbgiq4FZCacwqs1HobBXvkl4mY9ExtudUbJo+nU4KHEykWCwpRVDBbuIhf2pZtUrUbodgywhvRYrLWkQFFkG+XjGgaCVM3BLWnmAfLNkTRCYN3zpCm+fenCRMjRJOHGhVAIHUvU62PR/X2E9VKmcBIqVDr7sWAIdEHg+GEmXEIDHZzg4QlcAJEvc5uu7KEBthKrnD+Kg5VNYpmIMjMDxqBo3Y51eOZ9dDjPf3ypYLbyFy5NvLxt2JidhYgsneX1sNoOYZ1UITjNCz0Iybbr0X//iRr3cIvMyUes6THPs5TYHOMtGCHiJ+g1yc18oPa8pvWGq3cOMWbDcDrWqsGSxg7cwC1s6MMDfQGBBjoBgDNTKgqvNtzveUpiwtNvHMU6jcClYhUZccOV9hN9VEkfZQSkcgqO8QMCyj9VvFarxHD2B79nC380kp1kiugI4sJY9YEveVuC6Kaqxa7eJyOrRVTpeoBfAUMs0zmbh5c9N5JcqBEnctVhQxGJKHpNw2EYmkQTIdPsoW6xxPKaPcaJPht1RoLWNnIb57R2LGmv2ydHegZg8+PO9C/eLKOktMayVau1YGtNjd0YOtN7gBOS0W1gfIXd+zZ2gtomnSmvGhj19R3Jh3JdCP+rQVcIABneOOOw5krQ7u+JM/+ZNinj179uDlL385Nm3ahHXr1uH888/HXXfdtddl3aP32LNQcTmlMuOHSTGYid+8Jv9AaVTiiSoCBpWZFiPbgzoH4dlKY9VMjUppDChVVvO7Im03FG16vAGZHdUHijFT6aJ1xvUDM0obJ+rC19M1zeUGiK6bcmCnqppDBg10ZRFpkDJHa/jaQ7k1/kem1yY3vC2BnVQHWMrTOSXWZ44mx7PjvpheEuvo09kjJd2CyiTOE4/SNWOOWnAu98o7sk1a7JPzeH3yEWs0yo8BTXBvjF3dA7IApGVpQfs1SOHJJNhJtCclAJsEMklROfMq2iB+Seob2FDW1U2D1c1UWM6p2JvRbFBBN6XmrU/Djs9HoPo++OWB7buxe
8+wO+GUlpUOKKADAK973etw5513+uOVr3xlMf2rXvUq/Mu//As+8IEP4POf/zzuuOMOPOc5z9nrcq4frANFPY8ECxUxZlUtLC7uA21MwNouj1RkAIfv6CjkZZyUEdyvvBXE/B6QW87efN0EB3LMb2e5GUQWHGfZyWmMtr9LmZSSfkohKCFCZgsJhOnsqXOgdo7OihhV5YBKHuRkNzitAFKAqoBqAKiBteW3ZLUVl+1pnZzTnpmu8mE9WgchWhrcozceS8laAImObR9iq0TOSpFgkpU4ZS3iKEFGWTsf7KK8Eb5NFt1lWXL541VEQqYcOOx89WXbSSFkxgzwoPSttjzcvp5y4yPdQ9FToswmyFarvjFAypKQR0krkyhD1TA+NVJItyLLTYGJhiHrTNxrfMAM1Ayq7fSan2YzB3RTrlKE2ZW8DUQ0fTjxsZ/TAbe8fP369TjiiCN6pd22bRve+c534tJLL8VP/uRPAgDe/e534xGPeAS+8pWv4Kyzztprcm6YWY/HHnQKvv3ANTZoYPtlUmSmn6S/yFBb6MMEIuenU46ALPm5PbGY4R2W2Qb3G5BuLQ/3cjFZwNNerk0EzFYmkGHllTi14uckAw0miAioKiOLWdbeALImSnCg7WVuDyAZ0rcmpIrYRmZG4/gphqEE932HZaTaWFWA1jUQBCcsdA5uqJzaUoLQREuOWaQ0R1cf5B4fOoIMel6mXXOt24vkI0qVA+Q3eIwUvXmaAviSmUH0rCWfklUk1Z6xZcGdKDRTSKlHmlOYS6EPcgA7c+7ydD6rBMBp3aPw90TPv6MNejsSW15Jy5C9prT7drkBNO4+i9fZfsfpZ8aBAzRD/Hb8IiSpXLydGeCJZz4Mg8HKBTrTODqGDjiLzp/8yZ9g06ZNeNzjHoc3vOENGI0SXmeWLr/8cgyHQzz1qU/1104++WQ85CEPwWWXXZbNNz8/j+3btwfHJPS8h/wcBoqwSi0UP36/jFt08HNqGHZ+PXuPtq4MtUDb8tLsSe22mEmVRWQCGQ5Ubae0asxVI1Qispexxshc+S/IyVBV2i9Bd07Bg0r0RLFVzGqonGVbUqUYs2SwPqmwXFBjfcltmiyvmSXpqaXx7UyplVxhAjRKNmUlaWXN8Bmng/JpJ4Y3eb6L6ihN5sPWrsNLTn88/vyZT8fGORPDRBF1d8KREncY0ynclrVDLpFOyd9hFUiCiHHbIAPIkvx7Uus1Sr1PYoqnt7wpq04H9WbNBnhkivV//TNFeFHOEscWIWi2Vhs0z0dzsAM7xS9I3GYaeOFzzuhZmyktJx1QFp3//t//Ox7/+MfjkEMOwZe//GVcfPHFuPPOO/GmN70pmX7r1q2YnZ3FQQcdFFw//PDDsXXr1mw5l1xyCf7wD/9w0fIev/ZYnLP5VFx+/1cxUAvYVc9460pMDuTMkPZTR3NVjYXajXe7u0BnAZLB+4zvTW23lJBawWhasp5/ijgrW8O/fW2m0oCP4qwxoxg1czJacopfvIWDu26m0dxO6M6qZVac1W6U1wPHj3gUgJx2XTiwoEWShGntjtommF+5bGOYcshVDqUJD9u4Cd+7/758PpfUZRWWmNR0h5lBK7R1AScVn1BJY8VyJvJ08hfg5K4dO/DOr30TAPCwTZtw5Pp1uP6e+/rxaOPhNOUUo8vXwxrh5RFlZl6bdsYCddWRc3JLBrIMoeCZxdLtshghuUIZYOqY6ozlKD04grC2TQ68mypzGB27ZN3KiRW1HwFYNahw/DGbJpZvn9CiBxpLkH8F0Iq36LzmNa9pORjHx3XXXQcAuOiii3Duuefi1FNPxcte9jK88Y1vxFvf+lbMzy9tYKeLL74Y27Zt88dtt902EZ8FvYCrt10BwCij1dVQOOKGQ0oixgw5Z16TfoZGWD0YYVXV3xmOYIITKtJwux1UBMyp2vrp2FVbxJihEWaVGeIOlLYO0uO99cxmabpSI+s0bDb0nK0cYEvEw7EdqNYGiBk/odAHy
Fh5GDMDjUGlMTNgv9JKeXtzN+mO+rgOzSxXR2BdSvUi1PuLSmuVXzjxFPzbT/8SHrv5yGLWEMm460Ieqfg8EErUtQUAQlNHtnVKzUaJNHHxXY/HytDSiRq44Z77cO3d9/qtQHq9kV1KXCj6UrpiWZQAOX2YRNajlpWpq1xZdq48eUjHXgdyJiS/qsr5y4yDS1Ll9gSlRZFT7c8CcFKYdCwSeffMj3DjzfeMy2Gfklz1tphjf6cVb9F59atfjRe/+MXFNCeccELy+plnnonRaISbb74ZJ510Uuv+EUccgYWFBTzwwAOBVeeuu+4q+vnMzc1hbm6ul/wlumf+HszrBoQpC3aGWmEoLCy5XciJCBVGUFRhpM0KKjPwIIy0gk7gWOeXM1dpzLLGiBUWtALBbCo6iD59ZmDWRQlWDB4Ras7vah6Tn9pJ3FMKmGG2VuSIHzMGIpPzzxlpgmbVsr40vkX9LVz9KgAMMYKqnKUmsuLAyOW23GBnzBFpk2ytRpytZjCrFE4/7CH4wzOeii3rDwIA/PE55+GnPvp37Yy5njmoMtspFmpQggQyQSYOrT3xkNeCqnEwSqvMxY4IdabU2EqRk6WP/u3gUynCGccdgzWDGXz2upvSye17mF1yHZcXy5+Ts089XdkF3gCaXc9T/CWPLnIfnBZFiWXfxbc/J1uUpPSuZ6cs3fVgOX8ig5Ch/KWm8zjSB4Cj7o8CrXigc+ihh+LQQw+dKO8VV1wBpRQOO+yw5P3TTjsNMzMz+PSnP43zzz8fAHD99dfj1ltvxdlnnz2xzH1pQLETm8YMacwNGENdYV4PkNvzypBz0tRYPRgFjsSzgxpDrTCvB4CdglJACyAMoFFVGntqky6mGFzNVjV2j5y3bH8wEeg9aQImu/xdG0dpFSC6Nv+BYox04J4alQILOsaTrw+R39Y5BDttB25hMu+Yp7jwUU/Aqx/zJNy9ewdu37EdC7rGsesPxqMOORyPO/QoXHnvnaiDAoDsNJS0pFQA18IBOWVlCZAINU7Qkj2FAM/Vt58yTMiWulciHZ7nyk229DhAq6NOtWZ87+778MQTjgsXocTt2mfpdCxfXH7CEtYXqPl2SFnTdMZ6k7KipMBVYDmECRwYJ9fNhUAWm8AZcV0srPjByW0Y8kCJmtg1LLoL8Qx6AxeZr4vidMy45po7cMrJR/dksAwkn+1ieOzntOKBTl+67LLL8NWvfhVPfvKTsX79elx22WV41atehRe+8IU4+OCDAQC33347nvKUp+A973kPzjjjDGzcuBEveclLcNFFF+GQQw7Bhg0b8MpXvhJnn332Xl1x5ejQucOweXYz7l24FzMYmRVL9uObUzXmVI15PcAenQIhLq5NM5SPQcmANFiNMNLKTufIXkXkYTOdtaC7XwdFwOrBEPOjAXS/7gSuZADJlVcuRs2MMtNV8X1HznJTKY2RDkFie9PMPrIZIMA9QJG3GhEgwY6TPbVCy5URXg+tJe+87mt433e/hQcW9nh+q6oBXnLyGXjDjz0TP/+x92LHcD6hf2INgfZ5SVnFIub4MCwAEpafPhYGyTuHSbkAR0WFi7uHRLJ4EOaYxkpvXOUW0Ye/eU26bAFIxobXEQ8JbAL8IZV6nF+2gWwzGUWhpagRWmSi655kpdwS7jiGEES7W6sRkwU21JQv2fj08UuQmiqRG+m5zJk4Rm3Ay/nnPc57QEI2y/P9H/o6zv+501BVK9MLZLrqytABA3Tm5ubwT//0T/iDP/gDzM/P4/jjj8erXvUqXHTRRT7NcDjE9ddfj127dvlrb37zm6GUwvnnn4/5+Xmcd955+Ku/+qt9IrMihWcd+TO49Ja/xSAeannAY1aN7dEz4qaDAuzsNEn+RMAMbKA8Ih8Px4SJUNaR1zgYV7637O6mFQGrZ0aotVnuXrMDHe28ckrJpaiIQXbiVzNhxG5yDp1KtImXUpaVqJlOSlNkzegJdoC2BScO5QEApMRGoH4+q017RjX2Y
BSUPa9HeNt3voz/c83XMV/XTTgeC6bMaJiaKhTMHL2WlZdIPo++AEcqhBx/qzxLy4el4nd5ktYkCZrEqL5UfKsuOd6WFBE4FQVZnk+iEFwe4f6WBBBxevnKxu+BtZqoLt8K+R4nrpP8zQAzdy8P56bc5A4tKRliwKjt5p3umoLZxTz+0AigUQbsy3RdMneRH83A7M3lzFEauPe+Hbjp5nvwsBMPX0wJe4+WIg7OATA9d8AAncc//vH4yle+Ukxz3HHHtcJ1r1q1Cm9729vwtre9bW+Kl6WzDzkHH7jtb4IBiyQiA3bmdQXpe1KRdaMtKnN4Z+P4XXWBAI1lhDyACJOVlX+lACaG0hpDbXyK4pGoi3RcwbmtULB6y0Rprhufoo76+Hq1ZJV1ZjCXp/yatADYTYX1A3oliRqDD/kYPqbttVGUtpMMLUBNma5diIB5PQRINSZ+L3tH5x6KVKa+fVisjLp49uFrV6nlwE4TQVf4CDl8FysvK99Yo88xQJzWjPsf3JVPUCo7xVs+Rk69CYnfzgdGAl8Z80UuQ+eOt5nDcrOgJ5KlE+TktldI8GpNATJAI0ZVR81jHWLZ7c8GmF3iR5aJauriLEhMNj1glpATTMRm+b4RoOV8vhy1BBYkm1ecy7ZbGOY2+5rSSqEDBujsr/SVH34eQBrkOHIrskb2y62tM+44QFvGbWnmtMkCJrlM2wzHQ8CSFs5NFw0U46HrD8cPdm/FiEf+Xs2EWjcbgSrLUQuerh4DpbGgHdoqd5MpS7RsD1OTJhhilFvwcTKQtwiMS82AqSm87TRu+003KiWyCiqvioK4iA2niHGHcISyNWdcK0SftFL5ut8Fi1OgtC2FU1Uk/hWUAQ9jQdVI0W5cM4eFYY2FUR06mToQkQFkvdoxB3bgwHY3iPAWsBSfVPBDbt6jFG/RJQT3+zyyFPXa2iEuH2jeF21ATszD90yaQQsIrErMgAjV5QGU2xdL2++R3P5aCPNWNaCJw9Gga99htwVrMFB4yNGH9Kzxvqfp1JWhlTmx+CNE987n4/U4YhggYJaFNwHpxrM9mBGJnOgiMkHz3NLxVYMhZtUQq6saqwfmWFWNxCajkptZ8j6rGEQ1bt71Aw9yPG8ye2rFAIt8rczh4tOYPblkrRO1YBcMMOya5XJvpRiDyi6Vt+1lDhZTX02bELHddqJba6WmqUJrXHrcS6G4Im2a+gaA7CK2/6ULcSL0KExYc5Kpo6YLmiNHQvkTopZz06oFcbh4oQdFzH/r6U/Ep3/jJfjvP3kOzjjuGBy2bq1XmC3LhwUXRf+hXHkc1bcL5Li8qUQpkCPz5KJsI3MNaAfKkzcKD6T3FgsJMQlNxOFsGm6LQPJ+4lw5a0zdbsIgTc3eUgMLnhTDTFVpbYGSDradUIrwtCc/EuvXr5qg1vuIeImOvUR99qe88sor8RM/8RNYtWoVtmzZgj/7sz8bu5ypRWeZ6YcL3UDH2FggAugxKjBIMXQtd78KiUV+eS313hIBs0pDk7Y+N81I2gUmHHlfnKZEY11Jd2/eeZhMgEBviaHQb8RZRAgOiDj/mnj8yBZAmaFauCTdWXDYW5AGAw6Wfrs0o5GRvrLL5h1QUWSCDWourCrjGNiYDk/Wo9jdp0b2CYp9m0JvaPEzV5zLb6vC4GbJeSxQSSYO/2ZBDgQb+XK4peFSTm4SO6AQVMPKW7JGeXwmX+g+gCFRLzOgJ/zPD38Ss4MBznvkw/DTp5yEP/qXz6ZjiIgyU99YluoEgBV8ijxyH24fK4p9V2T7BtNcpbwZUYBopD9O28uCI+tfFw8uYK0U+6IjsruvgapuXggmAAPy0ZPN4NANF8z0FxPjqGM34dde8uSe0kwpR6973etw4YUX+t/r16/359u3b8fTn/50PPWpT8U73vEOXHXVVfjlX/5lHHTQQXjpS1/au4wp0FlGqrnG93d+BwTdCVYAo8QHVGNGWHVmSGPHaA7tlUPmNyM022l7z23sYPg3y
rMiQNeiM3PlKI1RTZ4b2Xut+DcRERk/nNor6TwoImjM2roNa8JIy6g+dhRle1dV1RgAVqYGFNUUghRnyWmXFf729VdmBKcFOPKawtVc/kwBh0JbSDYSuHQSZdKm8WBbHNtMwZJz91cCnRiQBPwSMkRKhNw1jhNReC3nmzMJOdDUN32koJmBEZsvcH40wr9eeS3+9VvX9stvyx9ryX3cvn0sIYQ26OL8a1FkFU31TEKpQHJjiSLwe3+LWPDhNZf7ZE3KYDfpjEE8AxgyWIfBPYI+g4FHnng4Nm5Y3aP05aP9YeqqtD/l+973PiwsLOBd73oXZmdn8ahHPQpXXHEF3vSmN40FdKZTV8tIu+ud2KN3YeCXR4RvlOgHAcBGKw73UqqIsa7ag1mKN+gJBs72t7EEDSz4UGAMiFGJ2PfMgKIaAzVCRSMoGsHtHzUga3VBYmRaINc/5b4X75gsQMlAsZlKqxagaGQcmlVYdwIwUwEzlQkuaCIjSzDF4oCvnwQ2KVkq5erp4Gcz/RXurF4ypSTupABIIn3W94oYcgicXWrcyifOK5ckyix7ghio5PFpKJrPJzJEiozE4bGkA2GOBQGdu6dDtNNYZpVY8CgrA1yXniLCSvQsc/XMAFsO3uDLcAfVUdtlrDbBnkxOMU8SsVb4qXS2b+uCsX6oEQJ/nPiL682UgaedeRI2rF0FgvF3KToeMi9plF4zDdVuC2cJUvG7EdGnP30N7rjzgaUTaG+QcyJc7AG09ndcqh0HSvtTXnbZZXjiE5+I2dlZf+28887D9ddfj/vvv793GVOLzjLSDJmHpwiYUWblUayEzCICp3RV4quzSp4WMNAKu/UsAPLTOPAKIxqdRHwqsO0DzZSOAwSGj4ZmsxS8jqIS96E+TtPx0m33uwIA5XZdj3kJyBEoVOubAwmyzBRW44Bclkdl+lzTLulVXWnn57a0zvzObpdsiGcVaIwEGGFjdatrIRwl0uYEYBjng9zO6VmQ5eRqj9v9iK8lc2jFCXKRsEYkgEqlCLVOQjIvRnDdAoBOq0IOCEagKfGZpfPmrkfl7V4Y4Qe7t7ctXqIeHOfjZkTeqhcn2iBHUf2cIu/6CII97B3IScTPCYUGkpOO7IQ2f6pK4bijN+F1L38WiAh79izgmb/8NtQ5mdzHKKIvS9Z9QFvYfly0pFHcITE3OxJbk7CqFL7whevwX5+392OurQTasmVL8Pv3f//38Qd/8AeL4tm1P+XWrVtx/PHHB3kOP/xwf8/FyOuiKdBZRpqrVuHk9Y/Ddx/8NkBm/ydOdGoj7eIayztmDygJCAZKQ9fKBv4jMw0VKaacFcN1mpxJpwiYqWofw8WAMoKCRo04wnNDTf8wyXDbWlggp77yvMwITDcjMfnXARRCx9JzV4Jcct5cbdolZ0bIXE8ANSL4+CCEYPCU4NtcrqHtNhPl9sjlN+JE6qhLYdu8BVWWlSMrXQZYHbZuLfSCxn2793jRJA9O5B13xU+LnA7rAcplnhiEZJWu4xuvSBN5fJUcyOnwowniK+UqLstlVz8WBQFFwFMzZhVhNOrTNlYgH2tGZGBADRtMPjtT4XUve6YHFJ++7LtYWLAoShECIGb5kPNxikBaVvpgFZXzxeuRL8jPwIib/XcbhgAxduxY2n0Ul5qWcurqtttuw4YNG/z13DZIr3nNa/Cnf/qnRZ7XXnstTj755CDO3amnnorZ2Vn86q/+Ki655JIl2WbJ0RToLDM97fDnGKBjSfqPSOfdcLSsTRTlBK2tFrBaLUBDYUFX2FPP9BrxAGY6ixOdngcMDFuu2Uyz1sCIFbROO0T7QVgwRzE+eQflbnViVomi3Xc3YK5LMzQluH+b/laCmLZ6I2uiaDklByAnsobYZjNLYhmsVTfg8JkZcin+uCEHWjLmLBZx8sgjVC7rT+XvbG2psBm458Fdrei4LJIBoRVJAoIWzIwzJuRzsVXGbbrUnlYtK1MPSuYvtalLQoisaVEGV/eRC
B5IaAIf+rYJgYMni6dHI263Z64iTHaGVVReC1Bmfw93DPE/3/wvuOTVP4fjjtmEr3zzJsOCAR6xnUrlJn/NIoBgCISaRywAuAQ5zKAhwwXzotTexLlxRcqHx2bXQ416tMJj6PQZwPThAWDDhg0B0MnRUu5PecQRR+Cuu+4K0rjfpf0oY5oCnWWmY9c+HC867n/gH2/9S+zRu4IpDIZZ9WhsOTUIZJxx/RfXVprGxwYAa6xSjBmqsX202t/PUZeS9FNZ4poi2B3ICUOtoENVZCwo2mwWChrZfbfacotSsvdmlMYwA6hkblChHyYzJVXrRsZ0OgYxJQGTK4lI7LfTlO6XsAOwu2sbebv6Gg94OjYDbTK4vx2Rj9uiG3LTZg2aDvnKtME5tdukMrzYoppYnk6QA6vgZF77KsV5W7yce1kkt78Uo4j4QcSAxRklumTvAnApRZp4CUr5O5+qaKPs9mfOEiNHTy6fbfOWp2bKoa6zQdqJlVWyDCGDAKW3/uB+vOBVf4dTTzoKq+aayO8EWDASyuGbUIvGsUCtmcaTAAegkYZywEZsdxH2VEg/TPFuxOSSXnfN7eWm+BGkpdyf8uyzz8bv/u7vYjgcYmbGvCOf/OQncdJJJ/WetgKmzsgrgh618TT81slvxByZVVWzaog1tAfrqz3YWO3GarWAddUQq6uhBxZ5JR0eFTHWDZp9lHLUtQrTpmqVZQKLMmaUxgyZmDsVudVhGnODGrNVjRmlQWXV0HHP+DE1FplUHUpD90bmJk2uN7dOx4nVWu6vATTUau+g7Sugqhiq0qCUc0FSwDGGX1YzB4BrDMzTyiSVhzsUgost9lJzZF7LPjWaHVRoTaXl8rqLHYPpIG/q1UisHPJpYx6JevVuahZH3/R9kwug6H3VR+ag1HSTG61IcKfRxJGpudkfKwa6XXWQjUXtU/d+xO125fV34KrrBWBw890cZAvPNUAjs2qKamOxInkwQFza+jfxugb1NXFzup7Bld++DQ8+uLsj1fKRe46LPfYGXXbZZXjLW96Cb3/727jpppvwvve9r7U/5fOf/3zMzs7iJS95Cb7zne/g/e9/P/7iL/4imPLqQ1OLzgqh7z74DQxoD+YE9GQGhlC+X3JQoQ+50Q0RMEs19tAIuvC4iYBaJ3qhBNc4n/dTIOs8nABEDLNlRK012juyG03JyeGl4MHAQNUYBvt+yUScH9omeKUpDMiY0hPOIRnQ/fx9qKvMtgx91Si5AiL5smzt38Da4fyfUuYT90jqguVIgh0dTSFQv45yflSH/Ev5pGUg1VTiwQW33YncNiEnkOChiJpIyRN2+jQaD4OOBaJkvr6KKQY7CV4+qYwKbS0oaWc/BBYb3/byeSiEm3HaEdaeXUPvP5R0vo6LKj07n2isjw6N+Wm80cLu3UOsX79Cl5lrDqcRJ+WxF6jP/pQbN27Ef/zHf+DlL385TjvtNGzevBm/93u/N9bScmAKdFYM3bLjGlTR92WAjVlppXso8BwpAtZUC9hRpzffZDaqqStIW06G0H8mQ/ZbacCOHFcBikzsmtJO4s6CFG76I+4zWtcCEaThg9y1tNWmixrrjrZBHMsZScFt9tXJl50yMVdCICH/eor4krgsk3jF1oAQ36/nYvpIsNNzaW/8DkkdmaWEca1T2ZWYptogTtJVH8vjUUcdhu/deS/21KEJqSgfAZvWrsHOXfOYn6+z6XI8ekHdWNk7i86kJC02DmjGoN9PfQmw48617T8I7iUO+ccVcntPQdTXvpDUBVD8PmkZ0BUB0+7PWpQXTG2X5ZibG+Cgg9Z0cl826rLC9eWxF6jP/pSAcVL+4he/uKiyplNXK4Ru23Vla/BRJzoLZ6npM0XjDgCYUYzVaqGd1yo4079QS4bQoTjdXcSgJUVKAS4eT6VMnJyKzMqxgWIbQ6dYpaZeomQpZzMtlW4bonY94ummsF7dspi/3T2B8cHp8dwUoCoGVRklEbeRjx0UlkXx47Igs
OU/Q/FJSqhEuQUKtpwQeVtYhuEXtqSaZXJo3009cJCnF5zzeMwPa1+VlBNyTLODChgyFgogx1FLhkx7tTKlEvRptMSL7qd/rEVIOSuOtIy5ODY+pg97iwzVbPL4bRQK0ms75aQTr3XqIyxZ9pAoy3YGVHcOvxpW1uxMWngB2gedrQkDG9atxuzs1F6w0mkKdFYI7Rg9kPjGE1YLMsut+4w24nyrqhFWqSFmaYQKIwwwwqwy+1mtqUamROKWlaPWlJhuMhKafq7HeImNQ3EKZJj7/epicJnzn2l6QSKGIrMfWDPBx8GhdRNHxwx9teATltF3SXxTh/RQWsbcMgEN/Z3uysbe38G9oJQ8P4Lf+mGxwKHXE6K2RQcEv8maWd6P0BrhVwFFzyElQAEYdcoty+090gd+8qTjwm+z8Fgc/59/3KNw/44O3w3BZGzMQgmLVB9AKsCJlzdaxh6/WnKRnXuUysbUUQIgNe3M6Sk0htlSode2FR2jDleP1AoqADTUrdV4pVeGGRi0EpH39UmPABn33rUde/YMSzVZVnLAfFHHcldiCWgKRVcMtRWlgTIG1Ciw31NcEQOs0YwVJfDJv5ZEwJzSYGjM6woxzq1gdxYXa0HNZNIgwdvcr6jGkN1WofnyzbSTWQU2bPm1UHn6RJRJBKwejDDSysQX4goA+60hjE+SqeNIKyEqQdkeVltwJh2TtQZq3cjl9hSL5UnFuXGrubTWkG0a941EBFIaBIL2u5dnxhrcyJZtE3fZTys18rrVe36vqU4qtH08sk/KKn8mpkA5nZbsP37VjCuAEEVbFunFZcqJnVKy8m/PDSjXzc1i3apVeOhhm/C9u+4LBUmVwQDXwP+77Kr8WMSBLG5l9Z8eRWnjT52KgfsQJo55RV1NCUvLZuvynQmuOwAidxEGN7F0+lAiFk+DuAg0YrshJ8A1B20UgKwu850FfnqhDvZADZbkq3Z6Z5EaLoywalXGb3C5KQXSJuGxn9PUorNCaMPM5tY1JYYkLpaM+1oVMQbQqFDb7Rz6Ts6brR/WVkM/leRoQEYJK5gXo7LB92bIlCG7PQdaBspsIaGCLjEqkRtFNlDaWJbsMadGmFNDzKkaawYjzKg6w6epNxyfQQ1FjYu2XBWlCJitzAowszqKvVN0apqGCBjYndaJjC+RLJftEtYSKRn7AzB+OU5pKwBu6TkBqjIHueGwL6fpV0g1+YrjUUKzlhdit3Kfhf1/acEdkwTFj1X+TYnlLDfgLBBolWTNBByllYaHOI8bqWYBhwMMrrwYoyfEaxEDO/Ys4P1f/TaedPIJ7frG5g+7VYNiBzQTgluZVHTbPeakvBH5bSMS8vpPWgPGfBal0QhG6V3xg3wVxxnW1039ZBfjHLKz72GXDEDzcdTctkQF7ScBHtvRjf3Gmhv+mho2w0ZntXIXiG1E6HiVF4DNh67HupW8e/mUAEwtOiuGHrXxbPznvR8JrjXbMpie2/22at0obzs3oaCxwDM9uhD2UyhrBkNj/WXCgh6gIgXNtY10rMUKIwOCUs68ADzY0QwMdYWwOwl7yHg2xo1ijUO02SldEWO+HkSpDMiJMcqMqjHU6dfYLDhwiEXsno5mh3MpF7NZDq61A0bmXLuAZ0hv/SDzu5VfTte2AhcqtGLlkO2ddQ24VWPxyJmUVZ7SIZyjEyV+EsyomsThFDAD8QqnZsWaKDkANyR4JPRecgTNJnpzRucHFL8U7pJUyokspRVmvjYOoEgrjt/VPSwjkMGCgtd9+DOYG1RtE4cUqrRJaSof2vUiyyfekZzFdRVnjuVNRF72kYVtGuXqbdugF4YhUb9cBvfchPxeduff4xq9T6Hi4bJ76TSgxB5VnWKPtAEnThaCGcVYXlRrk0bUM/xonU29PfVLBPzc+U9o9SUriZZiefjeWl6+L2kKdFYAfXbrX+Cq+/8FFWYtyDAfDhEwgxGGPLC/GQO4UObm41VkQAYRsJpH2K1nMM8pM6p5W6uop
3Q+P6urIfbUM4BiEGtjo4lWWeUWN7jvQJEBKm5fLCLGKlVh+zARZC7iKUHVbKUxoAUMtcKIq2LZlWKMMlNe8fRT872mhXHTBoqaVWFKAW5vqzoznSXzy6X2qfteVG6uubrIPbRk/rCNLIO48/E8bTBDOZWV0KisxfSSr5LQ/M4awDJjhhIdIblhce3URA9yyMWWLZsqBUSCpoxkcFYICXLS5Zm/wdSZAEWuXLP83WJBubRdKtyS8qXkaVAft+w6qC8LnCoBU1xnAXJaGMgqO6VDfMZun7WcTC4zzPYN5Qdi+UZTgg2AIFNgX2DlwJnkY32CehEzMNQGFEkeDNBCbZfMuwEP+fJYIl8lvrN4lRmAkx55NM5/3pk9BVom8t/wInns5zQFOstMV97/L7jygY8CIKxWQywwY8gDr2grYhBGMC62yl7Tdgon1GBEwJpqCK6BhQDsuE7DLQNvAxhmYK4aYnc9gxkCRmw20uRCz9aAnHAEpAh+H64FZhANkvljXqYkU5ZSxp+oHjmN2aZmCt+BvkabaVa2vcYbbdlVsvacfBnMGrVuQGi2Hj3KS4X3IAK0jvczi3gnrAJR4U26HBv5OOMYhvLV0Jzd+LMVSE5kb5GNm9KbLNiR0KirRTtb3D3PUmYZWycHGOSPlI+PA0yF51MCObFIQZK4vMhyU9rri0h+We1yOttX+AMFQF2+KxJQJlD+rCL4PalLFpDYJ0fkob7xXJhBw9pEV07ds4MAikcTzGE9m06puWjzVJXC7/zeszEzk9/nb0orh6Y+OstIzIzL7vk//jcRYU7VWKvmsVbtsccC1lYLHjzM0CgCOS2uWK2GkD23AmMGdVHxO78Wa08wsEqxKCvuZMzvEUv3PcsL4RRTH6iRWt4NND45KXL+LANiDJT2gzCC8eGZnNpA0Bzl4VHWLyPiNU7Zrbz5x57gkxt2m6PlR8OIH2X7JqV9fbIgwvotFVtOArOE2Ml81PFeyQK7rAjO8CVASt7Cgbwjc6mSWvitpPLlyky0B1lgE0QDLskseWWKTz9yDkAU3DfqEttl4moIVCOgcr5Dzh9Ga2CkoRY0Rnvq5jnk5htl7JvAUsZBpOQk1dqAm6E2FpuCs7ZZReZHMBbUa6BuDtLaWCO9Y18k47DGb/+392D7tpUbFRmACQmwBMf+TlOgs4x03/xN2KN3QIyXAAjQYTuWihjr1DwGqIuK3xBBEbCW5jGDEWZgtl9IbdaZIkUMgrZOwWZvLdUaGhkZ1s+sRgVlgA0YhBoEZ22yJu9Ae47/wfR5QRU5J2IO/lZBSNduRdA4AqetXpWKTSBopTGMymVMSj5vChB4vj2EkMlc0iC/vRk5ZLcF6qFgGXCzsS24HIERoLFGsWCeAwDFN0o0w3g2vY70JfxcAjJUstV1ECP4EFjIMA6/bMxR3+byGhtwlojoTGymsqpheypJghMaAVVO1txqIEazdYNIQ8OMrZQZtFCjGhonYRfPp9guxMZPLn7/WIBdDWNB0m5+j/1Boxo8rHHPXdvwLx/4Wqmk5Se9RMd+TlOgs4y0q/5hdCX/eVbEGKi6N1aoFEMptxLIWFn6kbZgx6y6gpiKqqj2x0DVeMjqw238GgOIZpTGXKXtvlcmPRH7Hc9L9cv1e4oqpCrtjSdOGVpHZYnnZquRrbuGIo1KxavH2vxK1nGl5PRYnNACu47AE82IOMzvHZm7nlN8O0APE6jRgF+U3yy9Q8sQSJRc0FMkEoo2rmYM4IBgWiHXKoHi5szfceW0fLN5co+3BPqo4Zukrl6YxN8E+Ov11InKTqUMM9VTA3BWmpRVxPnJiI+1BYRgX2UlfotywnI5PDRb5WqtSTWDFtrAx5cl4uWYcrufNo0SFyXwkWBnyMa6A5iVXgs1yPr96Frj3z/8zc7yprT8NPXRWUZaO9hs9ZQDAXkwYG0V9lenbSLoOwkac1RjxBUy4yKfryKN2uaeUTWgFWqoQPEQgFk1wq27r8PqSmF3PYuBC
pdfExlTvSKNwWABu0cDDLksP0f3CEDNI7gdtGBXZrnl4swSvol85JMbsENNxOdBVUNrYL6W/jbmb1eQQCJgMNAYjeLtMBpLXEXGCXpU6nATRhcigBSD6/R7kGQXKPUxQE6LVyKv94BGsEQ4MM24MPx9y3XPBUjHOXHiKQTTJVLmABPFJqIY5Aj+yZVisnw7hqBS3Uo8uAPKUyGN/PwzvItpS3mB5uXpeiUJ5ak7RtEXKGQY9mxhQXkZ1Yj7r/LRuuX83CnWKO5lYKbgYouWS8OwG53qZDn337djjNL3PS3F1NN06mpKi6JNc8dj0+wWAECzjUD6pXJ+Ec20ULlXHJCNdAyNWarNiihViuBpfHkqwdetgFqlRpghc5i4NyM/rTYg7XcVT+327ZaTr50ZYVW10Kpj+xtyHRGjUhqrBzVmlI2XE1hs0iBH8q3tPmFSFne+alBjQIZvI0O/LlMpI5s5jNXKrM4CmBgjDqfMYmpkabxdnJ9js/VD4jk7QCO1faKYpo0yMth8DXgtdGSBphLtI6wLJetHkn2p3/Q8uX1NFM92xN2LBMBKyqgjo5U7iWP75Hj0pQkMbj5fwuqVpPiDclM/ndM5LjmLLR4QPMOkc2+RZ6FERuPjM89QC4BaSKCNZN6mTmMRczvSr7Mioe/wK6SZlb79Ay/RsZ/TFOgsM517xKtQgVChxjq1G7Perhq+acoeRDI4YLobVna5+QzVmLG9k4kYXFuw4/hqKNSYoRFmaYQZjJAaThEx5tQIq5RJV5EGM/sIwyb43xCzJHnL/ObvqkpjdbUHAzK+PLD18ecwPj0DqlGppoNZVdVmXyzlggMWzAGWmvFX+76TZ1AxZhR7yEQFfgBaFis3XSa3dqDEWYoag4mzhBkN5sAOtbaLEIhDsqb4Qqp48S5Z3wviFpOMoIUy0BfkUHC9j75vTfNQdKTkLJEDC4lmzSpMaT1hjLd6LMevJFsuTx9FIzFrPL0Tx9eRssjnIQBekK4G7LhpPKxWsgSwjZJcN0UHsWpKdXaOzmPL0/NaQFRMNqhWuAqNpwYnPfZzWuFw9MCno9c8Bmdv/q+4+v53makWNY+aFzCvZ2DiFJtl3jUGmKEa8zyDSjFIu8B+sjc2IMdYPwhztIARmmXmRMAqC2qGXAFsHJ69XZ+ANTwCacI8myXhSkQ99spZM9zkmBy0aS9HRikyMKsARSNoJtQsd1Nnn8bwMtDHLcWeVSOzZJwq1D2+u3F2e5+paizUAzsd5q6G+eW3XlUcOi6Lgeg4XYKy1a8jnwG30IPEhp08yrQtA02wv5CHmbIRwhESIKeDuhSt30U6ca/39hNjyOLqYFn3DvUvlaK7H0UJbpGwBHVO2ZTAiiizlT++Lh9xCox4zBqhbsuExP3kXlNSgMTUXGs+kMVWCOOQW7GVvAfvJ9O67wNqwVhaVOJ9h7mXtDClYjd0ydlJ+Yc7XV6+f9AU6CwzLdQ7cO0D74MZzZuPqSLGGjvNwwzs0rOo2Vx3AQSVYhDXkHqsIg0W/jRuR96YFAEVs9WN1DIxr1ZDszKUK2tFCvNrP+QLb8woI59mk7fm0IvV8VHMGPEgycP1UwouKrQBO2YRu7O+yDx5YFVSsjJAodnywu3Q5PwKTMNpBohUOQCgk4SBGaVQk7Z7cE1KEpnI+o0PGuSS9GZFU8yt0IYWWJQLaXRTk5bSwEOmLfDL+mnUURw3oL3fVaTM5aWJYJdtty6Zs/pQCOtFlelqeNDB0b2gTOtLEsS1kYwtKJA+7f4N4g7eKSDGBWtXCVDEU0vOiiiXqRcBZvPCUs1mnykJ5kYMNeLo06Awf/xh2gq13v0+wChznwCcfMrR5bzLTNPIyIamQGeZ6aYHP4GaF5DrIY0idrtsE2ZIQ2GIERuA4nw9BqShASzwAE00nPQb6qacUuT4rasWsKANWHGrsDTMxpd1RyA+gtsfS9uozpF1BJQtv5EBYadse+yB0hjVlS/Hd/QxYILr1vLWJfnX+
BJxewDJAKPOgjZtt3Ngq0WGWpsNPutWl1qUQ1wJK+ETFYAII9z+oaRsK7IKk6IBfMZa5MsRCClKt6qqMDuosGN+IXm/BUImWaLt8ipgrhpgfjjyUYo92HFpGPLl6AdUCrL07uhzQMdebzlP26W7lHjkLTkIPnZO7pWhmj3IaeG+EngM2omD35MCQ+8TpDlY5UQQ3x4K/Jl9e5EGWE7l1o1s/tPQMuyzaPBh3fiiZYI8luvCYTLxwTKAn13xkZF5PAtXjsd+Tit8gvHApwcWvg+FePRveisTC9nFg2nuzdIIG9Q8NlZ7sKHag/VqHrM0wioaYbX3k3HKu+0Yy61Y8jGZ4SnZQHzO8VihcU4u5XfOtgMVyt5M+fbrPgO7jS3OLXV3IDC0SjTn5aCK4QCwTkUAlnUBWjF5nExNJOXwXuXt/fl2chaWlOlDGGLsjwSCCbCU03BslrslhszkMqlmbMuEaDl11DHK0EEe64VyHL1hA6582Svx8Re+CN2aAyEoS9Wn49XUxHjMkUe0rDU5HnmrREdxApT1VvgJZgTxvrm/1sIRb+6ZlcPGh4nTkjiyIDdOHH4q/q8BROEy9KKKi+M51E0AQbNUnYMpqrgJOvnLPPbzoxGgRAwIYm72s6rNoUYaVc2gPSOourleauOkHO47GNXmWBgBC7W/fv4Lz8HjzjihowZTWgk0BTrLTAO12isdFexG3nyYc35IpDGDGgPrbOyIyOxXVZHZUXxWTF4PsnFf+lLbijGrtHdyLhEzAqDjZ9LGHCY2g0yy5de2Ximwwzh0bgMuOuk5yPX8sY9dlz+PC+AYp4ojQMdtlXaebmQAXDyyVBpnlQMIBFVxJl2CPMZrp/cRnlvaksESEbi4IintFDXEjQ/cjyvv3oqeO1oBElilgAnQgDFXBRe4jIGFUY2r7rgrVPC5ooSza06WFgv3w2HbFCCTwdS4+SuBRwBWXVkSWPQBOTZtZwitLqtQnE7IbqY37AcRRSDu9UQ1Qy0wKrGrNwFJYDYWY9E+BARTdj67i7eDsL01A+TMs7H51p2L61TrdscAgBZGUPMjqIUaaqShag21Z4SHn3gYXnDhk3pUYnnJR85e5LG/0xToLDM9ZO0TobCANbQHa9U8VlN7GosIWEt7sIYWsFbtwRo1j1W0gIHcy5yaD31W2IqVBT8U8euiZiVQ+t6sasrOkbECsQAUhmcYLTnN34GbFBhxy95XV3a5ezXCqmqE2arGqgr46ye8GlvWHJa09sQ+nCMNpHYyT8mk4i+eAGa2Fqp2JiLjuFwpnYguDePEa6cfgzK9jCFfNQjrkqUcOJFKPdbGcs4uU5+QZyjH127/AX793z/WLVsklwQqvlRp4IpfEwsq6uBBZsrhNquWHCKoXZDI+c04XSjKjgFKKzJEChixkEeAnE7qCzqoRxqRNgXE5HYSLl0RWltg1I6e3DRaH5mTfOO8KV6p5eIxa2XhdwHskNZmF/NhDRrZY2EI2r0ANUwP6G64+na86nl/hbpe4ShguuoKwAEEdD73uc9ZpdE+vv71r2fznXvuua30L3vZy/ad4Lwba9TQr2zK+b/MqRpz1MSvIbCJayOmqhh2N/NomsOAnaE9Rqj8Otk80DCfb7onMoHxnGNwv49ABuMz8XdqhEPcpmyiZnk4A6hLQEKxnSIzfkrnH3MuDp5bj4dvOBpz1cBbYpysZkm4mdqqFGc/AB8DiJrfsHzcAbQXhZR4lUlq1LZUDoypAfCoQw5D1QutIkQSgGjGGD3EGr1ACSDyz9dcg+vuu7dbJsGCEucArDNYJqEjJfrf3Gvo3DYikBckdW3kQIw7HBiRoIbbokiwQ0AY/Tkux/ESq8Y6ycqVYtkiagYInTz9uVVkNRKggcG2EThWds53ZZSbEhpDOSZASGsDz6gQBgCd2nVN5mn6EAKE1Ur7bSb8NWKgFkBnVHsZco/ptu/fi4+898u9qjil5aUDBuicc845uPPOO4PjV37lV3D88cfjC
U94QjHvhRdeGOT7sz/7s30iMzPjinv/F4BGETrIYpZ1h34h8XSVs5j4WDkh9+BcEWM1LWC1WrBTPzmwY36n44CG1MTzydXPAIOmBOOzM6DaWGCCAIYiFdupO9a9l4m76aXj1x0OAFg3WI1nHPEEf8/ddwfgzNth++ZWVwFNPrEApJkKKnS5qUERkVleTsrGzPFaMl9fV4e/euLPY001m03XZEizSwYU1Og/9eTSW/1ANXD9vfeMpduyJBR7II9AFv6ViHsvWX5HgDyfVACTIL18xmhkylLc1q4eYkoLtq16BzrUJn2v5d32FWQq1DtedmdfSsWEQUVBOqrFNhCZtvTOz+07ACi/5YWUR5puGcaykvKnETLzOForttZCYn1uns0oauSe7/I/v+sLYwizDMRLdOzndMCsupqdncURRxzhfw+HQ3zkIx/BK1/5ytbURExr1qwJ8u4run/+auwY3iKuMAbQmFXzXqkuaIU9nFdqRMCAawztzokExojJ6gADlgakvVJmZqxWC5hTQ2wfrTbxdET5hilQ7k0YmgkVwca08aaDII2bTpurRlioK8xEPfYqO/U0rxWYFYY840GDIg2tK1Rga9ER8hVU2Cfu/DKecrhZCXHI7Bohm/3LDQ8DxBqrEjNlLS+NZcfwEStgMVAawzr/KRGZ1Wolvk3tOt7VwQyOXLMBZx/xEHzytu/174NaCUWbSpATmCX68nIsY3NCgk80eG+lSIGcVNnyNonrceccp5M83HkhkLV8s3vDQKdNrRw+n9vdWzLPUQbgpL60oO6At4oGiV2D12iF9H/a2Q/H/PwIX/zmjWDrQByvBGON5vmy35Q+vVVG9Lll243IOBKLbElyy9Nr9526CsrKFvKm0sj59A7LTYl+eM8OLMwPMTs30514GWi6BYShA8aiE9NHP/pR3HffffilX/qlzrTve9/7sHnzZpxyyim4+OKLsWvXrmL6+fl5bN++PTgmoV2jO8QvM41SUbjEWQe9eJrcVBZgpm+IzVYOAxphjkY+QvIAxpJT2fMN1W4fpZhsSYriHceTJaK2oCB0Sg7zOWfdGaoxWzURmuNjVaUxW42wSi1gdTXEXDXyu7S7TUEb/5hyd3THnrsBAO+7+T/wj7f9B2Zsfb0Rm6xuY8PLTcP17eaMk3ijGY0fDlCplIXMdaYAkQnkaII5tjWriXfTbRnaMRziCR98M1bPVugw3Puy25YiM4L/Lyc9GsevOxgDUpHoBTnckmhQo9BVzsqS5xE6kYfFFi1L7lZsGYmMP72Vlnv0tJcHrmxWWHH5Efu0FC8jjxS1M4J4spttKudDJGPYOMvFyC7IQ3h8+ivfxe+89Dz80ct/GpvXrgnSyOIJ0XOTps2YWoml8FbwETfOrtoCmJRvSM2gBd2sqqoZamEUgKQkWR8ico0Vm1ad5UjrVp5eL4PlN7+7tLXOlFYCHTAWnZje+c534rzzzsMxxxxTTPf85z8fxx57LI466ihceeWV+O3f/m1cf/31+OAHP5jNc8kll+AP//APFy3jrDrIn7t+QVLf782Q6bEVa8yqGppHqORGCGJ0RTBLdGepxlqex049Z4MANtYKF5gwHBqb+2aDYcNZETCL2kAldtfM1NPIar2a21GUY8kVAFba1oHA0KjIWKeIDHAbso/RLHLKFgTWVKvwya1fx9/d/O9GFuWAmwEiWsNHfXakFEP5abJ81ylHyoqaLTCYzXYSRDXqOtyDi60iVQKwkv1HR47MSjF03aSLZXHPcMdoAR+77VocsW4ttu7YmZeZEVlnmvOaGR+44Sr88dlPx8dv/C6+ePstTR63PI4iXp5nokiZRYl0sTzRT/9WSeXch1L4317LWotkOgAs/VJSrxTaTTDJqB9As8y6j4UptWu4y8wi6J1tM4qfia0TM4KVSkli8y78zT9+CR/5xLe7KyIfmq9QrqMSFwMnYECNdLCahzRDaTbRDBTArqFqbZaISz7WAZgYYM0mNlRstfE+R
GKo2DBpLG0OBApQ1FSt42kTYdXqGaxZv6qcbjlpKZyJpxadvU+vec1rsk7G7rjuuuuCPD/4wQ/wiU98Ai95yUs6+b/0pS/Feeedh0c/+tF4wQtegPe85z340Ic+hBtvvDGb5+KLL8a2bdv8cdttt01Ut82rT8NcdQiAxiLjyE1dV0EgkzZ5yyyA1bTQONqiRgWNilywwcaCAjQPfk01tFYPbZe1u+kuxmo1DPbVIrBf/i5fHWcVmVEaM6rZMsKRToyx40UzSgEDu4+XseLA+vA0S+0rsbxFkQFCla2vIrOU+/i1R+Efbv5Etr0o0GquC7T7aJmeL9vOIQ/YQIrOwdk4Rs/OaP87Ubo4wiXrXjkrhrJ7XZHKWH5gQNI98zsQLgtCcx68Tvn35+1XfQXvedZz8aZznxXIlgIyznrjI27HYCa2igQj6SidswTF9e+CEik+MXjog0Ziq0Ukj+fvioyNXim5UjcFgOsCXpmf0U1qDhYWGlmZBE7N8zNCfuzTV/VJXcTUqYvkTtjsTF4tMKohh0uWLcgBbKgmDWO1cQBHPhdXX9feGgn/Gvu+LdSySRqfMgly3M26eUd9U3LCCVuWAeCnf/EsVCt5vyvXRos59n+cs/ItOq9+9avx4he/uJjmhBPCoE3vfve7sWnTJvzsz/7s2OWdeabx77jhhhtw4oknJtPMzc1hbm5ubN4xKRrglEN+HVfc89qop2rerYpgrA2FHpyIsZ7mQQQMNWHEFdaoZom5WVRBGDk/HqFdNQNr1R5s02uTyrkitmBLlAeNeeHrkqK2vM2oTNg1WmWaaTsNDYUKhLXVAvbUMyAi668TTu0Feo8YX/nht42TMQbIrQdpD0sN0KqgrS9NqCZKfZ1xtg7rooihtR1ldmheIjbRlSOrl7fAKTb+EYKHu8cAqAJ4FI1mG/zWST/YsR3fuOsH+Purv9WvQ1Mwfhwl8A0IRVZQt+IxGOXT014iH1/81xZZXL5t28ZjXhbXIz5QaKIvJ0TwPyB4CvJWl646ERqLVvhqFrNkL4YGlHz5ti1GDizE5cYZ4/vOcpK4lrLopWQhnY9hTu5Fd6uwtPaLpHy5bL8Bb8qyA0dFTb6Y5EqvwvNpukpuLDw270GHrMV/ufDcTM6VQVMfHUMrHugceuihOPTQQ3unZ2a8+93vxgUXXICZmfEdxK644goAwJFHHjl23kno6HVPwffu+5/YGb1LsvtYRUPs5hmrUMPeXZE2Kt0qvyEG7Y6EzN5WhMZp2ZEiGAdoDLFgNwD1O4iDsboCRqwCp2VNhNVYwG49i7DrMuemb2/KqFAba43oJ2pSGCa2VWim14zyVgBmVI0RD7zVxEVWzm5jAePovKBVcuuGeB8tL6cy/je1bmxSzLkuGAFgdCUzWz4W7HSF2TCW//TeYa49StsvkLNqcPteX4V50Rf+Dbc94PzMxrCopMgr7AIf8R603JJSwAPhNYrS+mQurxyFZhR1azNQjtJQdF8jLC+hxFs1drq3r54QALU4jMgBCVluzNPJKNrGfRpUComVEiRR99a5Aw9dSpILEYs1I9jBV0xPcZQvBJ22AYd1O9Kn+6g0e0sQcWmzGFsuR3IAeMXvPxvrN64u129KK4JWsM1tMvrMZz6D73//+/iVX/mV1r3bb78dJ598Mr72ta8BAG688Ua8/vWvx+WXX46bb74ZH/3oR3HBBRfgiU98Ik499dR9Iu/WHR8DY6G4VJsIWE1DzGIIGcdlgNqDHAAYooLb8TvFo7K+M/IaYP1slMYcjTBAjVVqZDg5PgTMKLN6a0Aac6rGumoBa9W8dUY22sVMfdWQ4QkJjFnFrb6mgrYRn8OOsHF6bDTgwC4/YavM3cxJrnd2lv1ZZWQNNSdjpgqtXZKUYswMapumPG3Y1DGdqo9+c3VKcQj6V9GTS3O6B0L+Bzd/y923F/K2bdvQ0dUH6YuWF//YOmrvqhwDuJQSlYra/S6Z1VPLzgWf1nLplKIWeUg3Uyokw
EJrKiSmcaLKynqK16HlcO6WhHf53jhyiWoYPyHh06PEtX7ypWVpASvrFF0p41qA2MeG2TjL1drHwmk9Rt1s2+Cbw0/boSOAlUlHzp9nWAPzI2DPCNg9BPaMQAsmTg71dIQkomYriFrj4E3rcNZPPqI743KTe26LOpa7EounFW/RGZfe+c534pxzzsHJJ5/cujccDnH99df7VVWzs7P41Kc+hbe85S3YuXMntmzZgvPPPx+vfe1r95m82xeuAWGAWQyxR+wKbnRAo3zIWl4aT882jTi01sTEbACGjvxrmIEZGkGDfCA6d30UpZW0phqCNGPANsyh4wWNnfVsoDrjvM6HcUDayt2+BzDkDuxeB3lwUCZnvTFlKAugTB+5ZjDE7nomy6fLD7GRA61n1UxlMbrGEqVpseYe+T/ewMSNZYuIbGA3CrFNADgyUMw6uRC60lrSpq5ZsEONnL0UsSw9ziPa1//mBh/Fxp4Y0HhA1DSfzx+X4RM4fxoHqrIy96AYjOSwZApo2WecWhBI8TL1gFf8DM3DiJeqS5ZV3wdl5fOWGvE8vOzMfv+udavnoEYaDyzsAYjBML448vn5T5lF8zADdQF6U/zF5YX1xlAXIXm8FR6WjQVM2i0XJDzvV5+8sn1zHE2dkQEcgEDn0ksvzd477rjjgtHwli1b8PnPf35fiJUlRTMAGHOkAV7AHsQxc+LPOf95d72OzpIyjKZjHIgasQ4iM5ciXBABI220mgM5nhdrbKj2YGc91+FbZNKOomjQ4XdlfswoE4tnXCJrjarrUA4HgnIRoJ3SZcSKI+TdpCcvd7jqJ5+/1H+0QI4oU4IduHLd6iqZXMF27DE0aORisqCF4P1RFk3UYfVpim/KjJWd45NpI8qcy98EU/XsnlLc/tuA7EyhAnR34YNUqweZU2gtBj01QODWIwz4upVDtWTHcB+m2yeLYv72sXMv6xADQyOLrF8gv/V3cbT9wT2gofbAR/rL5HsWtMFoioiMJ3bufRWWiIBXq70dEEg8KWa7mWe0fFwpPO6ch3ZJOKUVRPsBJD2w6dDV54KwgAoa69QC1mMnVmMeB6ud2Kx24BC1E2toD9wX3cS4aVOfzspNeaUoBiWlKBXMwAIbnJyy1hDcqqQypeJ4aQaaZdfG/D0gjTWDIVSwfUX/kYbZGqPZZJOomRLL5lFmKqspD+K8Kd/JDLid0J2lBRhUOpG/ATJcuFcCiE27kX01Mm2hINCCK42DgR4nNUKGVAeIkTyzVofEhRSioegp93ncDhAkqhTM7OV4OovUOFaODnFasslydXgeADjtxYGyuEW5LSrc1F3NJnpy4pMmd981b9wm43xC7AYHzRHwYLelQiyES8dlAENNsn7ydIwS6kzvJWP71BoYjsx2DzIYoaPhqA1yAIA1XvvCt+OBex/sKewy0mJXXLljP6cp0FlmmiGFdWqIVZXxj5lVJnKxWRZu/GrW0BAbaY/taNKKEwAGCZ+XkEzk5Bmx6SfQ1bmku6Y640DrcxEQLzNPkSmbxbnrO9sWGMBMORH12aDC9XcmuKGzGSmw9WRiDJQIJBiRtkNoRc5JuZHMyGN4uEEss3M8bltgBlW4i7msJ2VNCOUaNgu68hajgJWyvhCpOXe3U7g9L75DZIFSLk0wYg7QXIstxSepgIlCAeZtgxFllS0aTJXy74lBd8yrwd5tsJBRDq1WIpGew/wty1QMDBzIidozZbEKrFqZ673JgRRy7w+DtfCfcY7BqdVLovBuK43L0APt+Cltkdb7/zBolB7MybRU18HvABxpA4LSeYH77tqOD7z9091yLjO5VVeLPfZ3mgKdZaSdC1fhu/dc4OPcjCAD4oVK3kU2JootMk1vOYORuBaTWVHkFL0kM8hhzCBcflHatLMP1JhRbuPOHJnVSWvUEHO0AEXalpje2NT51wx6xtGXgEmGH/GO2QSsGgyFOT6ub2OZqSrGQByVMhYfHzMnsOO35R4MBNARcjXFjDO8N
uQ2eO1DoU6g6EAIdrqoawaRHYAjBE0r/vrmkjiNEtYig07H08zp179fPre3kwTdCaTgrycsJRJ/pq0KJo/cPDRIl/Kr4JAnwb7LOm0gk4cbk7SAlDgpPnsi47g7ZOPA7LanGHHj8xL7DWnr05Jb3p0rStsYOyXfkthRVqbVDBqOul8XNn1Nq91rbaar5svRjnWt8fF//Aq0PgDMHT8CNAU6y0i3P/AmsIUMxjKQH7MSARvVbt/BDVCjwsgG+KsxSyOsUjXmsABqdVou2J+2AyFrn/DKx9yfUyFQUu2uoJGnh+aooO1O6XnN47aJqGyAwj4BEgdU++jLYa8epuvaENRt5zCnTBRpZdupa7NSxx8AKmV1cTzMTqSvFGP9zACb59YFNXSAKcxfBj5eqfpXpuN5BCAnx6ybTTOX0pEulUeKk7JilKxF1AMKOvkpUcue+jZpgaEI1DigEgEPIFUfwVP+iKxMzWO330VkEXK/U290YF2KVoQ18nLycIDTb5PQahC24CHROMx+W4bGEsKgYQ01tIEBHQ/uCaOdbG5ZeQvwCUDDbchOdZ1so4CITP4+8hRo14492L1jfpFc9jJlnvvYx35OB5wz8v5CI70N2/Z8DnKUP7C9VO1jAYefIoGxCguYx6z3IRhgBBCg7TB7VmlorsGsPaBxYAIw7+xQbPcAbva4AhhrldkSwt0naGthkaDHRCQeFoGEKXPdYB47R3M2WGHYy0u5nJPt6moI1M7/J2XVMXt4LdAAZOPsMMcbf7o908va2PkcQBlww6wigJQHer6WbK06IAwqjbpWiNeDeOdkAnbreezSC/bZmHZlv+U0A2JjUe7hKMIME1SwztS1bx8lLBWdjsRWdDN9ESMYcRJbuShKE8vmmkQo+djnoyhZBESCMvqAODTvYctAF+VdMzeDPf9/e28eZ0Vx7g9/q/pssw8MMwyj7Ci4gEtQAhqXiCKSqLnRqCHuSzSo0XB9o/ldJeqbYNTEm/h6o7lR8b0ajXqzmJhIcCHGgChGoyKoEFaBQUUYlpk5p7uf3x9V1V3dp7vPGRlmYKzv53MYTnctT9XprvrWU089T0eJGEccXpgJ3TuyKirNGAreKSTx2wOahifkrLCstkekY0laFRLuCpg60Cmv6eXyzqjRSGrt9LQugReCul6vvUwE8PRJlbgZiB3skTPZJw7JgYkQUF06FFxoeXvB/rsf21caUUpEGSwolbaQq4wPuLxHoDuIiiE6Bp8WtrsVkBqENBzf8RsAQJykscHQSSnvOLjvRoKQQ0GcfCIuSYRAmjnoJCt6VavIB/JgPA+XgDxZ6PScEQqj3xrejjyl0Ik0QMJPcXjoEIFBfW/L4XrEdCnrS3ei3UnDJi6CgSptTlBvLI5JE5CzCkiRi51OJqJsBosTuKuOycvAnFIToGxryvGyKzTfwitRirsAuci7HC5xET2dir0kq3xREFtUwnOr64oB13WL93kYB9KMYDt+vwqNQXg6iapIG/glyQJnIuZP1OmqyFknHmV7J45KpiawMh0GRkJuU3nel5W8pZRssm4WvqaqU12ra0+iZJcyBCJya+SJM4b7LvsKFr27FnOeWww39mHw84a1MYrTFjTtJfPURtFlxEInZijuWkbCWDkJDAwcygM3hWSn5OWCeoldF7wQLSiT95WWyetOgncijDiEQbDvhVPmU5ogCrYvXJWMQs6StpJU/5YKBgoINWuSt08ijDpoH1iprp8ENeh5mK2rXkKaN4CDIyPtbQgkHe758ZNSEIbIablFpU4EKMeBDoo3kBgDqnheMwQOjuwptX1FIiwEl1tGFSyPDCuAwRW2K9xGDe+Q0bYBEd3ckR/hOLBfxkL/THOoHjUgBeM9cS0ulX9qyHd+yBm8dottNkKOh1fMonxxZDwYLdyz3+GEFHdhlThRRQQUiGvaG8EMMxYha4mtsWAZWts0A2DRl8FhU9n0cG9fKfhRpgUijTzgrp8GkfVw7peny6H6rmhW9
3T5xbM5YxBHd8pW8XQRavZSRKU0o0omPIq4MGjPS4z0MdqbOC4Wycv1NgDB+EraI+KCYNsunlq0JJ7kaOXyuC0nxb4CRDSUijxR4ruqBBlK2vIKV+Q9odqjVPbjQsk/qU62okgZCzkVJPk3IFegQCZsaQo2UJDO/9R1hzxnhEFtD8Q9dT3RBgjCxihuOw/AhpWtcEq5Pu9tmK0rAIbo9BosXoUKqwmQk3qa+cc2LQiSA0YgxpDhLiq4jUpeQAUvIMVITrYsUknLJdmp5HlkmIMMc5CCjbQkMS6J7StdIa187ORYARnWiTSzYTEHtbwDNbwDOWYjy2zkWAGVLA/OgFHVR+Bz9RMCdMsblIrG7OLZxRvwmP+XQRnZEtIseFqJAUjDEStBRsgEbIqCk7gijHEjtR04NRYcegVhcr2/KjCqID+O/C6IpxoTAy1TY6obVKGQGmO1er1TWZJQhvdqhP2OOupeTH5coYjyO099eHFZZTlBLGdmI0j7Gk3jEpi1ShfhlRN1jUTZRat3FkgS/NV1DYw29xRpOPSfWq8/yltyRDs4Adf88il8vG1ndJv0upxoGQLCsVA3SFbnXSsVTFTejCUZXigGRL0mpVHub6nXUXSdYomnqiIQKdx7lmNyuC5YZ0EeC3eFFsdxpGqLgbjsVJe8I/jeXwBwQ6etwrICgF3wy4ya8B0HWz/ajuVvrolp1R6CPfh4+fz58xEXqPvVV18FAKxatSry/ssvv9yluszWVS/BcdvguB8EVmtBB3RAGoSC3DZS424N78QO10WeUrCRMHgwob1JySVdp2t51MKBPskH8xAJT6lEDiwuvBYzIqQtx5vUC2SBAXhv23yAAIvlPBuhMJSRtQpNIVTXxSSnWAYXDiykmAObuBddnWSZDAwpRrB4AbammXHlBMkYkLFsdDophGctQTbiR3C1FeYQyRcrpL5nEPYp3JVODKP7kTQDhCAhYlpaPz6W7bhgLMkEPNhn4qgv9zQfOoTtDoSdQ6QKI6YGGUSUlbIPShr8ypkc40gOEPQoHJ48FUFh/r2okA6JIugkRxnvavUHAsKH5GQuUCjnpA2hvPAPiT80AEd+pCjCG4EvoNf2MHHT5PDapV0K1i/tWqIeC6Y9LfpDrBMREieyAnXqFUYdO0+C/wL5//euAUyeiAr+5iSIiUtgjIsVAOfB+y4Bju3z/3AaaNe91Ypiqywol0Rne74rLetx7MlBPSdNmoQNGzYErt1444147rnnMH78+MD1Z599FgcddJD3vaGhoUt1GaLTS7CdjwCpYYmCeqdSINiQRrdMqM4rWUEGxASKN6+CICKos0RAeZO8LBiACJ/gkti48l2+iNNKaoCttvJoc3J6RgByG44J42qLCnDAwRmDI0M+xC3Y/G16Ub8eDNQlJm1n/LRpJuxiHOKSSIkMKUZglvCo7GpyBZ3Qx8sQN3cE+0kf+MoZzovTqLI4I81GshTd0auNLpMIIe/IZYgWp5mhmL8J5YjAixH1elqbCIWQA/Cw6iaq/ND/iXyCUlYQTdVWjeQo0qCyB4KtSFkoirzoP5VOwsqBIg9R5EARPs9IGL7/GjemGv0iUewx90CcWNJ+VI9N+QUSc6Fcb+kEVHlfZnaMHU/pRzgij9Yi9RC75AnM8hEkR0/LmW9bo28rRZ3GYixIZAJ1cwAhzU9owmeMYZ8RTV1soIFCJpNBc3Oz971QKOD3v/89rrrqqqCGD4LY6Gm7CrN11UtgcjhPIsvK7kQnM+IHI+SYAw65pRLvB11uWftDdrmHKr15gEQkc10mNzR9pZiLKt4BNfxwiICdGbnNk2YOKiwb1VYBtSlpC1RCDJKEShEU1U/hiOcKLoSXILCgz5wUJ+RSNqpSKU8rlBgPsAwI9yAMLjGkOZCxXLnVFf4d/BmsnHmPiyNk8lsZQpYgL7ptCxgAz2tzQj655VW0haWXo01gFMcqAotpXUUC7/+Bw21SRc6IBTUpLOav/n+N5GjFl4ZsB
4W0L1E9pGt8AvnDjv8Ini8esBK/okNgdkh2RQJjGqCTHE8uhL7Iv3FbZ+G83BZHxMW8zoJklsjzuhzVFnW8PBblkh19e6jgCi2C1OQw1wWzXXDbSX56/dMavsBSoxGeOL30nIt6bFsG7HSkpkeNO9FtY5zh81PGoqG5vozG9SK60Uanra0t8Ons7N6j9U899RQ+/vhjXHjhhUX3Tj31VDQ1NeHoo4/GU0891eWyDdHpJXBe1eU8+jTFGCErVxzKl054JGYgVLA8cqwQuFYuCELLwBlgwYZLQuOQYoQ0KyDFbDA50me56xkzp+UIq7at3NBjlilhKAxAHSrSJCF0uhb05a0wnBYG0orMcebAkh/OxB573rXQ6eZ9W5dIHX1x66PsVVxXEEciuUUmZeOMkOIizITfhnD+MjU+gZk+Rrryf0YAwMxDv4CclS6dUGqBoicG+ERHypgY3NMCSIXQUPKqrSJdo+IyeCfOlDGz/vuXIDuMB76W10ZtMo8kDdq1SOfTWhkqjR6yIZAs6rciChgKhz9gULuSAWHK2g6Tleqxr+KSiaPnkhxw+E4AHalBsX1/NWF412JnEcXalEgxD62cTFmnA9bhgNuC2MAWBsVUjnGZXha051ffAouC4wCdeWHUbNte2AfqzINsMX6QbYMK8uO6INeF67gYcWBL+XL1FuRptF3+QMSGrKur8z6zZ8/uVlHvv/9+TJkyBfvuu693rbq6Gj/+8Y/xxBNP4Omnn8bRRx+N008/vctkx2xd9RIs3gALlXBZslFjka0c1EJFnpiiAvIsBQcuOKm40sJ7ckqdmGJ57CQVTTy0JC9XXgbk5YimrEiIhHGuQww2WajieexwhV8JxtQ2WbE9kLAbKiUDIcMJRLaIbq5Gfzn064TCBQeTZEPVHeisUD3KUJlQfE9lVON+4NwVRU/tnKl7wmOy6xCCB1jLI1YqTViLHkhF2l9GWuLo8omAzzW14KpxR+GXb72GdjvGrb1C+WKWkVilUpOOn2vf2lq0VNfg1dXri6vWmEfgmLcugwaXQieTWHGaYAYE7UYUqXBj5mytPNXteluKGqC/YvpD5MlEpQ2VI26qk0UlOa4ikKWSeTY35LeLCS0TOAO4kDMQrZ5IkCPVH5KURtclnk2mNDS6vY0sy0O7HdVkMMftwtJMQv02amWT1+xolHW/0uJExLIiqQEiEFAInc13fXb66J1P42tXnYxMrowFRB/A2rVrUVtb633PZrOR6a6//nr86Ec/Sixr6dKlGDNmjPd93bp1mDt3Lh5//PFAugEDBuA73/mO9/2II47A+vXrcccdd+DUU08tW3ZDdHoJjHE01FyGD7f9Z+yCQ2hEADVyiXGTtIjM4ghzDraYaKURjdpeUrC4hcbUQGzKfygMbUkcTS8HBGEs20HqUfHzebYlEF6XHXBkmS0PonOpySmehBkDstxGpxvtFFCNVAxAznLQ4QIOWUhzB3mXw9IIjRor0zJuFSmZIQY62ymWQWwJuoEtveDsJDQ1actBh80DNojqKHmcwsMlQbgcz/GfNiMkEh7lX8hPowZcr1dItdef7pjUmJCr6gh2o8WARyZ/HQAwefBIPLl8SUz9XUC5HDlGE0MAPrdvC5at/yi+SNXEqIjqKrHiy+H8oXk0cM+JKE+BS9JUamaNKjd8P/Rzk9rK0mxySpIcXQ5SpKOM7pd1lExLBB61dSYeZLEtJV4kcc8lz/mg1zyN9EXKIfOIxZka6EIZOuwiTVigDggZhM1XGXBl5zImyIySBRAaHNWeqICdgDwgQIBlxfvSIcCxHdz//SdxxW3nlCNV7yDCtuhTlQGgtrY2QHTiMHPmTFxwwQWJaUaMGBH4/uCDD6KhoaEs8jJhwgTMmzevZDodhuj0IhpqLsJH2+4SL3FIV6CeTVdOgJYkMOp6ndUJmxi2ujmh0WCA2pYq9g/moCZViXGN9+O9rU/gk45l2FJY4212lXISJ/z1JO9yWsyFCyAj/eS4B
LQ5lYgbarPMhssYCqRORSmI9Bw+kUkzFw6JE1gOC2o9CMzTqKgxlDNx7DvJT5o6WUVQnpCFDFwGDFWnWwiaFkXTuESVp6RnIHAiuNp1i7tw3Gjip8+cTMoOsEA9xeOV7kFZEJ4issPEnL4134FcKo2LDhqP/12xJHncS5q4tDQsuLfYZWxub8eKjzeXpdGInbCTCIeaOVUsKBdFHoqj6gJkf0bUAyRpL0JlqTzkEwmmjjvHyZyEMjQ0ARFKnXQiQpGbKgXlg8L104IAHvImrMqPfQwYA9OIgiI7jLTHlHwiFFmEql9qZ7K5NDp3xtiGKGKlf1dHxMOekEt0plpgUJJ6FcC8xxbi8tlnR2/17hHoBqLTxRe9sbERjY2N5ZdOhAcffBDnnXce0unS2rE33ngDgwYN6pJMxkanF9He+TdkFInRritNjgpNpwfhZJ4PHZGvnrdDKJflpC1SFdW1Pf8Wln80Ey0V+yHHWtGQ2ob+fDtyrCDte7T6vb9itnADmo9i6Ma/wQnfjc3HGFBpFZBGIbDg55JSeeUwYVCsNFXcaz8CvaaPM0qzzkFI8/iI7kzaHwkng+KjAmWqyS5nCeeMSZocBf3wik88fe1TtBNDf4XMAPBEdULUulfvK00ADXe/vQC262LDzm0YUlOHRDbjkYMEOcr1q5FQzUsrV8MuNyBiVJ9rJCcysKWC2tMKPJfJdRXZeOvEJS6zevn0R54gtoEUSSmlCQqXp0hSV0iO2sbU+yRiolPHwYsJn9ZR5F9jbvwTEU1Cg9qccFpPE1SOl2JARBYnwlFfPEBoWSKMZT3bIqiyHd9xYJF8WluTkPjCAzu37sTa9zbEpzEoieeffx4rV67EJZdcUnTvoYcewqOPPoply5Zh2bJl+OEPf4gHHngAV111VZfqMBqdXkRH+1ykGFDJAJsIttQgdIjzVACUPYyclBFcRZFc/VcxscIRcxSDQxwFMDgyRCbk9lObvQ5bNt/m1Z/ihFrqAGNAm5NFB/l7rnqA0U+zHmAMyDAHHRTPpUlqBkp7jhH94A+JTBu7g1owjRICIKQYoTqVx04nDZd8Xz8W0+NjRcvvyFVxxnLgui7ybunVhichE84GC5oWhzHhHNAbn+WsGnbPwS0XrsMQ9sOTtBmhjHuJXIACsxQeee8f+M2Kt7Azb8f7x1FFuwieOFGOARW6QnKi0ktCQED0tlRSWfr30ORfUjOkd0kpyLQEbUJWecO2NbI94YUKAEA/kq2MZMsUAQzeaadAmWVkZLY2P+v5NAOvOANjHcQFiVR2KcnpZYe5kuWDwPJOvLaYyDtNVXavFBy89+Y6MIf8OFgqqwwxEZCnUCIWmWX5W1txKKPjt29tL5mm19CNW1e7C/fffz8mTZoUsNnRceutt2L16tVIpVIYM2YMfv3rX+OMM87oUh2G6PQibGcDUhATippnOAMq4SIPhk7iwlsy88Nq6mAkvSkzV258MXASJ4/SAFwmDIU7KS1tZuK1ATW8E51OWjoTDK57SxMRkuncgOPALC+g00kjHOQS0Mbc5C7y0lZYNnY6aQSNiJNIjvirtrOqrAK22UJnpGR1Epwcir+et6CgvU1CXxCE8z/IWFxp7sJ2GfRpzvc9Jk6xCQ/K8LVGEOEhxBis913pCaHIzlOu8HfaBSgdU6w23o2owwveCXE8Xc8XRR707xEkKeC/hQnNUeLWKZXx/1LYlXFayqx+/sC2ltLihOB1i4Xk/dMoyJ+bqWCgqixW4unTyJV3X2kmJQHxyE+5DvxkDLWy/BJJGbgjWKF4zni8XQ1jYLZ6GMi7FgnGoLwZf7D6I1RUZdG+ozNaU6NQsJPvA2qlFfkykEYKS+UfOLhrzut6FK7O0neljN2HX/3qV7H3zj//fJx//vm7XIfZuupFWLxaxkMKalIZA7KMUMOEdUzUNMdkOg7BVlloEgXzt4EqWAFO5KwURDXvRNQQKGSI34YCRPiIdCh6IGdAtdWhXQm+dHnpYdm/F
w3VLxnuaDG8gigmOcX5s1xZJYktqgpeQL90VWAsUwsg20VAg6Q0NHEIG75aXG4nMkFaLHn0nMlwEgDgygCiIn/Q/w9jgGUF43mVBPm/vUd4lGykCgZgAcyC3AaFtt0S6jv1lWu3vZAEoQaHxfQIgTZbh7MoLVRc+yKqiNKg6CQgDkzZ6JSCJi7TSY68yCPSRk3PTP2jq/hQQk5VnuPHyPLLZ3C5T7hi84Wvaw+3HsOqrCeKtAaTNvnHQPegW9T+MIQzqtBvG1E+kdy28r925hPYI5H0AeGU1kT4Kw4tO4HyBXHkvKMDifGuGMOEk8aiYVB9cj0GvQ5DdHoRFbkTS6YpZxs5HbGs9MdX8ZJmWVLACFFPijki6GeAkLjIcBtVrFObuDx1AQD/1FWKqfNOPlJwkEZeEiUlmzrezSVZC5YXL58gCZYnQzhfcmeluYOcJT5Zy0EuxTAgW+H1keOSp3lPceGVWS/R0mJvUWBwDNZDxGC73OMVSn7G5ald6QFZnxaifmfOBdlhRfsnydAJs4hHpW166vVwNXklzEiByVp+pB7YswxTWoNwSR4xoljRA/4ftRNJfgKtUG2iLioDMVWoCbIr8XoUKdI1KlFylfo5CEF7n6SJX5bFQyTHyyrnZDfuZ1IaICLpXI/AHW2Liul8tgzSpQRmzC83bjDyTjSVWoRocKMXcF55ukYldPLJcV0ccZzY5uC623SVpzOvbaElIPTikm0DO9vFlpcMJ1FMmsgjOdW1FbjsB2cl19HbILd7Pns5DNHpRVRWng0R5CEepRz8CQNdIGo40SfZLCuxFw1CGg5SrIAqnkcV60AOnSIQKMTEX8PbpfGy0PBYcJFGASk4mg1jeDYAaq08LLjeVpsa77kcenniGdVQWyGDbjIXHC4slGqXnzcMh1x8kt/qyZpiQJqTpwmzGHnenQGh/chYKqgnC5Stvtoul8fWARWY0mKqX/ztg5RV3uAhNDsuUukSZCdm0UmxM2PobxIU2dDTqxBfFF2U0iwlTu4yE3m2OlrCCG1KbDmqDGjdoHeVXcaRcZmHOYCl+9nxtE6xVccjpu0ULk/WW3TUO6pAzsR2mA1hi6M+HskJksoozRc5qmMpWksjCSeT//dMxaK2c5TtUblGxeH8YTlDZQeimnuJGdav2oQBA6rg2o6vdSnYQHtHsXxKy6M+nvqVeQbLVJBanDg4jpz0Rb6JUw/BT5/7P9hn5MByW907CBttf9rPXg5jo9OL4DyL6ppvY/u2H0vfNkEwhqBNQwzKeQ4ZgAqWB4GhQJYW2FOThxEarB1odzNwGfNsWJh3H8gxGzmNXORdUVaxToa8MkFAjhew0/WX74wBWdjooDQILniMDVEYGW7DIXGkXM1ERDa22dnYE2eAGruLdAHodNvAkA3kU8fjlUG2fxRdGERb3IXjBO17xHaX3qe+wbRu10GQgUIBeZQ8uc26Zt1KERybFU/6gd9fM9Qmhrj+kEnLIzqqjjDPksaqiXY2enoWcU2/lcz4/frDVblCjkiNC/lZE5tKkjCo6sL9Kx0KFj9BCeXK+r00ymAsIo2nxSmh8QEEqdN3iXUyGMsFlbZM9pXX37o2TT1srvDarHzmBF5u9TBrJ508fztJ0MkFAFaI10JH5gtdW/fuBn+rjGLSqfxRBseWBajgn4AgPCXQPKQBJ00/Cieffyz6D6wvmX6PwF5go9MTMBqdXkZNzbeRy00LenbVUBwbOwgiIC4CDGlpACADB1nYqOGd8qRWkJ5wSQXSzIYNHkmGwnWr+E5EgEPK306wPMaANHOQ5jZEzHW5CmRAlhWQZi5ScJJbSiTrIxFbynJF7ComtnZyvJCQX9TnBE6AiWsWF7IpOQGd5PhQ21DKq7IXYkKWI1vkyRA17mp3vbaUgm5kLba/XF38ohpIG9f8k1oJ8Ga8EuniTLQI8SRHy6tv4xU97Az+NlgMvDktzBVcOelTiLfpX8rUWsW9gwEtVlegyUpqywPwHjQii
LhYpeZY7UVmBZ85BfgHEH0EXLyYYLbcFgsQT6UdItGPNoHlCVaeYNmK7Ig8RcSPIEiOXSbJkUe/Wd4G67BL/yQqT9T1QiHgeDHQHh2uG3+qynGCp7JKuDogImxY0Yo5/+dRXD3pP/DKM6+XaoHBHgRDdHoZjKXQr/996N//XlRmjgTXRlO5kETcRKTGzULEz+jnYN53fYslBRdVzFfVpuB69y0QUii1xFSTl4q4zWATl/6Qi30DMYjYXFbkqMhgccCKiNfltwYBB3x+/cJ2Js0dZLztufDSXjcf8O8xSTj0E1XkkRzSeo48wqGiuKe4i4zlImvZsGT0dMUGShptAtLupvQMLBbZ/sTIeZDskNzyCWuGqMw6xBaTShc1saD49JR+D4gnKHIi9ysLiaNpeESUD+2mrNcP9unn8ciOIjmIaaVgpuVpPJN88eiy668i80Utgsc+QsIpgkCI9iocvqb9nxUQMKou2o6lUD+QjHcVnsM1LY6Sg0sNju4UMCg6K+5k+QKW6l7mEFjBEZ+oragwPJufCPLhusKjcbjScGfEaXKCiYTxsVNam6NvVX+49mPceNrteO3ZN0vm63WYrSsAhujsEWCMo6LyNDQ2/R4N/e6ExaQtCgCw8P4ieR/GCCKspj9JBcdIFvuMCi2LizRsj+ToyEGtdpJX2WowLJDQdaTgIMsKyLICMqwAC44YTBRBUGGdJVxNA5RmjoyUHqyTgWK1VkpuiwutTgVXXlP9GcNiwvjYp37CqJggtDwpRshySVggjJAznJDmLjLcRYq5cn7yLYz0/kpbDtKWg0zKRSalGw8XQ99mEOM4hT5+3wKA6wW8lPOsq4gPeQWSVOMIz8jhMkKyaFV5Y5jaI9UT6CLFLXYZkOAmSfMCHDG1hS/pc01E9wW+SgJTlUmV3n1joU+4UNWNZY7lUXKAheYCrSuZdkHZHDFJ0LjruSwqbrJ+Qf5QXv5AU8hPEy7D9WUI80u1q6lTYT28QxF0ladeX6IhOPlGx7qzqKQJVGly7FBEUmVn05lP/q0iTlIlpaV8XnwoeYFCaqXDmPf/+/79f0ouanodhG4gOr3diF2HITp7GJyO58HkRMolybHUX5A3aKUBVDCghrmoYTYySvMAsZXlaqv8+OGLkGFO8cqQ+dqaJAjjYBeVvAMuODLM9k4qKW1LirnIctvTj3DPD78+MDOvvDRzkZMkKcMKyLJ8WXIwEDgnZLgrI64TLEZIcwcpLgyLc5btbRc5xD0HgqqvRdqIFSepMBHRdRMBaa61qZRtjNccJk9haROYPLmmyBKBQrMw8/9IL9GMk/ddbUUpTY9fZ/GA5WVhJCKAB1wokdSYKJPqiDZos2SkVodiSE64G2xNnxTDEVn4Py6ws8MubwxWcx8Q3J+Sj2LJrSNNrtjWSBse79EmCtiQeFoVF0UaFqbJFG6P5zXKjqlbvWgkSEeA0CTsxjBXbmd5FVFy+5RwDF5kc8mS49O74gQYHPGJTKkTBSKg4IiwEap89ey7BOxsD4SUiJezCzOzdmqrVBgHchz5Ywnis2rJWqx6e235dRn0Gowx8h4E1/0Ehc65yIGhE4QUODKMw5KriAIIW0k4AwT8xUsOhBwcuC7wEYlVrr8Fk/zyWprO3h8flAGuAxsqFlXUclgduRY+aVwKajqYNqHkeAHtbhZEwlaogBSEnqc4irjaPlOZSxnsenXJLCk5wodlScFFHuT5rlFtVffzjlWUDyjdj3o7g0Os3zL/uvSE7AYXyfoilEU4a3Hlvp3r+v529PFcj5ElbD98TVBJyAeGqa0IgrBHccXvHoxerTdGtMzzoAutDWVUrX4zxoXM6lEsOdkieRIvVZ+naSpFXsL5EzRbqvywYiywVaMRnqI61cmzMBkleRorrm61PSXrIygZEraIXAJ3iklV6X5Qna8cA8bLBMCzxWGliIe2VeUZQKuXwwXQ2Qlm27tHsxDQgGpvqWdHJd8LV5y0DGtwPt74CYaPHbIbB
OsmdMfW056utSoDRqOzB8F11gNwwBhDFVLIgXs/EGMMaTBUaBNzGJwDFXJbpsTaBGogtEDiyLjY9wgY4VawgjwCHp2fy3CfLilPqNG1qS0r4TmZCc2L1NikWXwsKlVTOQOcep8dFeA0Qhbls4ciiIvwa1OaGCYhaBMjnBJaXPrD4UpzQx4JFeREH1yDchPBi72liE2RjZKWJ5ZcJPUfC/3V5UgBpDt1KSI58q/kVLoSSbneSPzpQoqqEiHVAtXqycqqgyAMf/XmiBdAHPdmxVm8/6t2J5ErKuaVRYE1leItrgy1DQaAXPFATzhwSDTJIWEIbMmj5QwAmKY/K1bg+flioqCXNZ1JWx6K0byJNC74TmE5yIJsvFgcKRM6C0De9o+A24447t3eAVaGDU0RSmln1FaVriEKkQKS38kWPnX8LWI/z4CW/l2XrSehH6vflc9eDkN09iAwVgcA4FCrdoagvxYGV3mzjUEVk/5tmKs51tMhpnOuTY4puKiyCqjgQeO9LLORgQ3LIzXKd42DDGxkmHASmGaEyjK2mATREY4Js9xGhWWjNtWJ/qkd0rYmTtagXU8Y+q6MnWg0Ek9mkqhhudRHaZ4sGRw0CpwLQqS2wpLU5eqWsrVxXaGSiCNxMrXWjWWuxLwuCaV3IY6QFxUXFoApFhm4FRvpW5EGjzzoq/jypC6bjjJJbtzApYA2zb8QRw4Qv3XURZRsGwdABBnHFicfcyAsS3umld2LFiIiwFXVVpbSSITLj9MolWtYXHCDBE4fYkjIxzodbasxxMbD5XlyBaOOM9v2t6kS8sdCPy4e0kgoLQ3lY/zmkCQzKiioTn7UdwaMOnQYhh00uGtyGfQKDNHZg2Cl9oWVGieITsywWnKgZMJup5o7mr8bfeNESxrSBKTgooIVAvereScqWYcgHCBYcJAOeQxWx8RrZCT1OBAIaSa87ujjFmdAJc+jindAHzkZCBnYqLE6ZYT14rKVlkNEIRdenIvX5HIlRhRra8MhDJKz3EZKht7w5VMen6Pbpuwng8bfxYREaWXiSFAU1IkwZfdTyo7Ay8cJzPqUKmd9YUtM+jhQOoA4tZ346Ct9Fu41RWw8OxZ5navyqfRuG2l/NHFiW6qfUopLo5Mc7f8BbU4ZCJpSMRAvO6sPIs8nEHOBW+95BrbjCg2P9G/jHfdmMeJ5ZCeSksZXrYK5RsgEINmfiiI5OwuljYVLXVOeiJWdU7lbJzLd6HGDNc/G0Xndzs4yfOf4dSvtjoq5xQBc/uPzypOrN7HLhshd6P89GIbo7GGoqLo4cTBKcuUhtm7gjWY57moTtkYg4uYqJiKOBzQzTMbMYsIbsjLmjZrELbioYHHeRckzEo4b6yosG1lp0JxhNjLMAZdPaL3Vnkg2VBlp5qDSynvtFQTN/2S5jbg9CLVwVB6RmdZfGS+UdHh1KP4WHN3jUTwhUXXwgDFHaXRprCH99ylzpg4lYfJIuZpQy1dryfzwJ1uPRBCKTg4Fukn9WBHyBK6Rli6O7BACR6tLiq8ShDQUjFDaHijup+QhFUkpIVwCzweJGQEA146jh6FtdxXfi2A6JUgIWVKzEzASBmC7wmg7jqXbDnh7obxHJfxuhA2S47ZKShXMGGDbWPPueow/8WCtTIAKNtzOTrgdnaCd7ckkJ2qCJwIV/LHt5IuOx7hjDiwh0B4AQ3QA7EVE5wc/+AEmTZqEyspK1NfXR6ZZs2YNpk2bhsrKSjQ1NeG6666DXcKXwubNmzF9+nTU1taivr4eF198MbZv374bWlAe0rljEu9nEqhO+NUVnoxdSVIiJpYIKMIiIG1vOCGDQkkHd0qzUzyaEkCIjRbupSJhtJxSMmsaJ8aASq48BvnlRxEuEcLBLXq4FWGpTnUiTHaijKgz3NciWTL2VZHMEEbM1MVXqRRp88r3xhlv2kuGZijDGEJanej8ahzzjL67oMWIQkAbqc98lFCsxxGpyKDX+79KEp5NFUFi8O1tZF4Wy
lsWQnmj4msF0gLF9jhA7AQRedUlcKlM1ZunZOCh6/7N8MVQPZp2R10rqTQjCMeEBRcs74LZFBl/S8+gPB13QVnpV6afyCpljxNXgTYpt2/vwOLn38Fhx46Rt+Q92yldvlIRS7JFRCDXCZAcABh79AHJ5RjsUdhriE4+n8eZZ56JK664IvK+4ziYNm0a8vk8FixYgIceeghz5szBTTfdlFju9OnTsWTJEsybNw9//OMf8eKLL+Kyyy7bHU0oC9xqBE8dhLg3Og2GVMQ9iplEqpiNjOcMsLwRXymwhf8aMcSmuRsZPLRIfqYmcVGG/kcvPbJeBrCE5XNKHoWPUbB7cMhXDRSp7hmQkhqbJHiaFwjDS9sVFae48IpsMQdELgqupc2m5c+oyq9QYhpv7Obebxfnt6OIrOjtSCl1QDFZ8vKpLR4HxXGnvK8l2henddCKSGxy3MRNxUlKVeA9gV3VhFFQzpKvjK4JCpRFQaKl+pnDP/0V+sT1T9QJqbDckQ+6qlazZi8n5pdVEMSDh/sC4f9IhKOQlwv14yTY0wSg2+qEtUCA7yBQpvm4tQ3D9m8CHCf2vYmE3DrzjI9Di+VcVRZHfeXI8svrTajtu1397OXYa4jOzTffjGuvvRZjx46NvP+Xv/wF77zzDh5++GEceuihmDp1Km699Vbcc889yMcYnS1duhTPPPMMfvnLX2LChAk4+uijcffdd+Oxxx7D+vXrd2dzEpGtuQpxQxtj4jRWmOzEPYqMATXcRh3LI1Ok8ykGEZBiBVSyPOwip/jlaSCYfk5WDjDF+pXovHFHyVnRJBY/tNpklZQ1y0t5TVWLO4JDekgIeZqKEThTVihKqqj9j6JSPZueJKKjxmVHC8ppWa5n51MsJ0D6vqUGxgCeIjCr+PdnAGADcJgIABrnA8jjjjHt0uedT6sOCvFiFvHxEiRVweQkrYgGK4OGqkk9/JiW05QiYiaf0KjXjQFIMeTSKd+hn1uCzCRplOLqp1Am+b1kc0posER/hh5CFXqClamnJO2HLtiaBo/KY6Y62VFbXToZkdfXvLcB27fsEAE7C4XyJmwtUjljTPjOCWW79EfnoqIqV7qsPQBEbrd89nbsNUSnFBYuXIixY8di4EA/muyUKVPQ1taGJUuWxOapr6/H+PHjvWuTJ08G5xyLFi2KrauzsxNtbW2BT3ciXXEKsrX/ATGsWNpfgKf2g80IKcaQhfgwFG9bhZFihBoOpFllbBqlFWJg3hkrfYqwWPLKTQx2yh6GPJrkyghRVmzAJAHGFEmJls2PCu7VFpM2WXeg7HDKAwNnDClG3kf1g4hirtdEJexvRO9SwMgkSn6l0Wfy5J2mYeIufJ85PsT4nLyuZhy4ZcJkjOvvvyP7VtdCtaDkqjy8DVZkx5BQSDmTbLHCqevp1OMaE/YgLlvOstC/phJnTByHJ6/7BuZc9TX812VfwTeOOxy8pOotQsSEU1pVuQzOOvEwcJKhF3Z1wRz4WXwSEUF5wRIMpBkDWJJ/nGBRAULFQrei5VQPtuM5BiwZJy26IEFs1Cds06P9XrUNteCWbHQMkfK0Pdq2lnf8XCNH9U11mPnLK3Dqt6Z8Cpl7CaoNu/LpAzY6fcZh4MaNGwMkB4D3fePGjbF5mpqaAtdSqRT69+8fmwcAZs+ejZtvvnkXJU5GtvoypHNTkd/5GFx7ORirRKriFKSyX0T+o1PgFN7xtnmyYEiD0EHFEdCDcNBcewnWbv2p/C4pA+kphD7GhoUsbBTkgXKRWhgkRx3RVkU4xJCWoRQac5/Dqp1veWlTcOVWWNSMyOAQIgOJeppp4uBwQB6NioOa7UpM/DFt8e4zwHESymBAzrLhEsOBtaPQbjt4p+0Db8sr6oSX8Fmtz9Tq//5vIU7VRrfP4sDougZMbNgP97+/CEG7l+T2NlVUY/r+h+O8MePR4diwXQeVVhqj5vwYjuMH1YyafDwFgQU/cJgSXbdPYQDTQ1Z4cgvXCHGzrJo3m
VO8O6HryRiAAwY2YmXrZuRtJ1icSu8G04cLUdfV36baKjz7vUvBIwxt92tuwKPzX48WWpbnbXcxgKTH4bhfwuIMJ39+DC467fN4+sUl2Lx1pydfHOEh6Uww8ddVE7mSSV7+xhlH4oRjD0S+o4CFr6zAtu0dePutdVi+YhM4Z3Bd8hQ0++zTD811VXjj1VVJNQnIqOWiPl+Hp5zqBZ9q7f8FJ9gOxsRDXY7HY1VQkp1NaFI+/swJWP6PfwXkBgjg2pELkvGuNE0OAOE/hzGcNmMKjjr9CIz9wgFIpfvMlPmZQq9qdK6//nrPV0zcZ9myZb0pYiRuuOEGbN261fusXbt73IDz1GDkaq9DZf/7UNHvLqRzJ4IxC5VVlyO8ZOWMoYKxxB+0vuZaDKy9Clne6E+2ah8aACR9KCANkl6O09JvDiC2cfRTWaR9AOUEjyHDbWR5AeMbr0ClNcCrnzEgAz1ysZ97ZPUkDK06HkUEivyys9xBlWVjdPUw1KX6RbZRbL2V9rvjgMmwDdHphAYp3kec8pZsMYZx9cPx48O+iXuO+BZq05VeW8UpMxeMueBMBjvlHEcNOBiVqZQX/sHvC7mqTlBZEIBrDjoBNxx2Ih47/nwc2rAvAHXKK3nldfmBE2HJY2w5K4XqdBaccxzVMhSW5S/Lw1tPBILFmdjiUl0mP5wzVGZSOHnkfjjrwLH4+SmnYumVV+PWL56Akf2FMzXOGI4dNhz/+aVTUJ/LFWlILMaQtiy0VNb4JEvjn/oz9uWDx+B3l30DT37z6xjT3FjUZGUUHA6H4JUZEZvp36cdE0lyAGBgvxpcdepRXvaA3JyhobYSR+y3L/Yf3IjjDx2F/5xxGg4Y2iT6KwTOGLKZFKaf9DlU5NL475vORi7rT5x6O3UoJWeCDhOKwenbTpUVGZx/9iSMHNaIA8a04KLzvoBvf+tE3HfPBZj9/56BSZ8fhWFDB2Dc2MH492tPxn//14W49YdfQyabPJlnMilMPnkcmPLvI/07+KSy+Kyn2PLyy2B6/+i2NzxhBCO/nfH3xV/GGYYfuA9Ov3wyDjvuQBFHR4dLwgtzwQZ15n2iQwTXdkC2A84Z6gbU4LLbv4HDvjh27yQ55tQVgF7W6MycORMXXHBBYpoRI0aUVVZzczNeeeWVwLXW1lbvXlyeTZs2Ba7Zto3NmzfH5gGAbDaLbDZblly7A9mK02EX3kL7jvsgl9gAAMY4KgDspOBKN2UNQ7/ab6O68iwwxjCqaQ7ebz0HDm3zl7/SB319xVRU5k7G+5/MRsHdLBZcJGY3QhYuKyDLHNguoSC3pOBRH7WS52isGI+G3MH43IAL8LfWH3uycAZkyJa6IYZ9Kg/Hcc0zUZ9pAQAsa1uAv7b+Dz4pbAARYFlp7HQ6AHBUWXU4smEaJg34CvJuHv+z+md4d9ub/spfjZXMBaMYzZPSUhOHxVykmAObLDD4oQ7URG+7YRsllVfotrI8jamDJuLC4V9CmotX6T8O+hquf+MhuY3n9woRkLFSuOOwC3FEwygpCyHv2pjf+i5++d7fsKxNaBE5JxBxuCScR3LGYJOLCiuNmw6dhi8OGg0AGN84GE9OvgBrtn+Cjzp2oCqVwY2vPoPFH66DxRgcIliMwyEXl4yZgPP3Hx9uCgDgm2OPxIsfrAppPfywD1nLwtOnn4f5a1bhntcWYUtnh5d30r5D8INjT8TQuvpAmdPHHYLp4w5BwXFgce6Rm8MHteBnCxbiqXeWouAKunj8yBG4atJE1GYyOPHnc+Dqqhyt+2tzWdwybTIAYL+mAfj1pefgxJ/cj4+274BDvkaEZF6SWhKLMzi6fYbUkFSkU/jul4/FKYeNiewXhQtPPAKNdVX4xZ8XYc2HWwAAmZSFLx15AK469Wj0q64IpB83ogXX//yPWLxsLTgXPrEc10Vjv2rc/q0vY99G0Vf7DKzHQ7d+A//x/z2N99d8CMZFjDJd1yf+w4AUAVFmZcroOULjc97XP
o9sNl2UhXOGzx85Ep8/cmRke8+/+Bj89389H9sfZ54zARdcehwuu2oyFv7tPbzx6r+w8v1WtG3ZiY7tnehoL3gv47CRTdj/wEFIp1MYMLAWx04ZixXvfIC/zX0bO7d3YPDIJlRWpPHbB/6GjvY8WDoFyheKJ1dvxUNqDzf6GDqRt0j+5q1nIpVO4eYnrsUDNz2Op+6dB9cO5VHliJdN+CsCwC2ObEUGt/zu/0Eml4ntiz0ebkRwta6iD9joMNrjw68GMWfOHFxzzTXYsmVL4Pqf//xnfOlLX8KGDRu87ahf/OIXuO6667Bp06ZIYrJ06VIceOCBWLx4MT73uc8BEEbNJ598MtatW4eWlpayZGpra0NdXR22bt2K2traXWtgF5DvXID2Hf8/7MJbYKwC2YppqKicDrAquNQJ1/0IjGWRsoYU+XXJ2xvx0fZHsHnn7+C6O5BL74/GmnNRXzEVTFpk7iisRHthDSrTI1GZ3hcA4JINBobthbVY1Po9fJJ/F/6aTWgVGnKH4Ojm/0TGqgUR4Y3Nv8KrH90PoQexQHBBcHFg3Wk4auDV4CyZb3c4O2BTAZVWDTgL2vCs27kSj6+9H6t3roA+RVjIoDLVhE2dm7xAGi5cuAQ4lJJOf4Wv5mMbjwOjLNa1b0RlqgITGw7H6JoR2G63ozZVjU2dn6C142PUpKswumYo1uzcCJcIw6tbUGEVP1f//GQlHvjXs1i8eTkAIM0sHD9wLL613zQ05uKfj5XbP8I7W9YjxS0c0TAMnY6NuR+8g62Fdgyu6ocp+xyEqlTyoOsS4aUNK/GH1UuwJd+BwdX1OGvkIRhd35SY78Elr+Hml58DlwRJbX/UZrN4ZOpZGDtAEP+84+C1jR+gvVDAqH4NGBIiOOViZ76Aze07UZfLoUZ7N5dsaMWFj/4GW9o7AumH9KvDr879GppqqgPXV364GRfO+V+0tm33tmAsxuAS4YZTjsNBg5rwxCtvYeWmT5DLpDCotgaD6mswuKEeJ43dD1VdmMSICKs3fYKOgo19G+pQXZG82Hl3zSb8/a2VKNgODhrWjIljh3katXC5S1ZsxNJ/bYRtO/hw83a8/M9VaO8oYOTgAfi3Ew/BxEOHo1Bw8OIry/GPt9egbVsH9hvehPadHfj1bxfD1QgdZ8A3zvg8Lvr6UbH+nEq188FfzMdjDy8AY8zb4nJdwulnjMcVV58UqwEjIqx4dyO2bN6BxoG1GDoy+blTaN/RiQXz3sbHrW2oqa/E5o1bMPexl/Hhhi1gjOGQSfshl0tj7fKNcGwHIw7YB2dddRJWvLkGD/3oKWz9cJtX1r6jBuJbs8/GYccECWzb5u2464pfYuEfXgMYA7c4HNtBOpPCCedMwtqlH2DVkrXIVWZx3FmTcNqMKRg4tLHL/VcKPTFfqDpOqJmOFNs1omZTHs9te6TH57fuxF5DdNasWYPNmzfjqaeewh133IG//e1vAIBRo0ahuroajuPg0EMPRUtLC26//XZs3LgR5557Li655BL88Ic/BAC88sorOO+88/Dcc89hn332AQBMnToVra2tuPfee1EoFHDhhRdi/Pjx+NWvflW2bL1FdPYEbMuvwb/afoN2pxVpXoPB1SehMfe5ogF2p70Zy9vmYVuhFRWpeoyqmYzaTHlEshxsyW/GW1sXo8NpR1NuEA6uOxwWS2HVjn9hyda34JCLYVXDUXBdvLJ5MdqdnWjONeO4pmPQUtF9cujYmt+B7XYH+mdrUGHt+avC97d8jEeWvoE3PtyAXMrCiUP2wxn7HYy6bM+fMFm4ag2ee3cFUpaF08aOwQED4yfM9nwBf3rrXTy3dAXaCwUcOKgJXztiHIY21PecwL2IzVt24Lm/LcNHm7ejoV8VTvjCGDT0qy6dsQQ2tbbhub+8hY8+3Ib+/avxxZMOxqCW+l0XuEwQETrb80ilU0ilE/yHOS6WLFqOrR9vR9M+/bH/YUMTCd6mtR/jxf9dhLbN29E8rBHHn
vF5VNVWxKbvbvQo0an+evcQne2/2qvnt72G6FxwwQV46KGHiq6/8MILOO644wAAq1evxhVXXIH58+ejqqoK559/Pm677TakUkJjMH/+fBx//PFYuXIlhg0bBkA4DLzyyivxhz/8AZxzfPWrX8XPfvYzVFeXP1B8lomOgYGBgUH56Emi88XKs7uF6Dy/87G9en7ba4jOngxDdAwMDAwMyoEhOj2PvdCM3MDAwMDAwKAk1Em1XS5j74YhOgYGBgYGBn0RrnYc8dOiDxCdPuMZ2cDAwMDAwMAgDKPRMTAwMDAw6IsgQrzb066UsXfDEB0DAwMDA4M+CHIJtItbV33hvJIhOgYGBgYGBn0R5GLXNTp7v2dkY6NjYGBgYGBg0GdhNDoGBgYGBgZ9EGbrSsAQHQMDAwMDg74Is3UFwBCdboFivG1tbb0siYGBgYHBngw1T/SEpsRGYZf9BdoodI8wvQhDdLoB27aJyLmDBw/uZUkMDAwMDPYGbNu2DXV1dbul7Ewmg+bmZry08U/dUl5zczMymT0/MHEcTKyrboDruli/fj1qamoSo+aWQltbGwYPHoy1a9fuNTFFjMw9AyNzz8DI3DP4LMtMRNi2bRtaWlrA+e47D9TR0YF8Pt8tZWUyGeRyuW4pqzdgNDrdAM459t13324rr7a2dq95+RWMzD0DI3PPwMjcM/isyry7NDk6crncXk1OuhPmeLmBgYGBgYFBn4UhOgYGBgYGBgZ9Fobo7EHIZrOYNWsWstlsb4tSNozMPQMjc8/AyNwzMDIb9CSMMbKBgYGBgYFBn4XR6BgYGBgYGBj0WRiiY2BgYGBgYNBnYYiOgYGBgYGBQZ+FIToGBgYGBgYGfRaG6PQgfvCDH2DSpEmorKxEfX19ZJo1a9Zg2rRpqKysRFNTE6677jrYtp1Y7ubNmzF9+nTU1taivr4eF198MbZv397t8s+fPx+MscjPq6++GpvvuOOOK0p/+eWXd7t8SRg2bFiRDLfddltino6ODsyYMQMNDQ2orq7GV7/6VbS2tvaIvKtWrcLFF1+M4cOHo6KiAiNHjsSsWbNKejrt6b6+5557MGzYMORyOUyYMAGvvPJKYvonnngCY8aMQS6Xw9ixY/GnP3WPi/pyMHv2bBxxxBGoqalBU1MTTj/9dLz77ruJeebMmVPUnz3phO373/9+Uf1jxoxJzNObfawQ9b4xxjBjxozI9L3Rzy+++CK+/OUvo6WlBYwx/O53vwvcJyLcdNNNGDRoECoqKjB58mS8//77Jcvt6jthsPthiE4PIp/P48wzz8QVV1wRed9xHEybNg35fB4LFizAQw89hDlz5uCmm25KLHf69OlYsmQJ5s2bhz/+8Y948cUXcdlll3W7/JMmTcKGDRsCn0suuQTDhw/H+PHjE/NeeumlgXy33357t8tXCrfccktAhquuuiox/bXXXos//OEPeOKJJ/DXv/4V69evx7/927/1iKzLli2D67q47777sGTJEtx1112499578b3vfa9k3p7q61//+tf4zne+g1mzZuEf//gHDjnkEEyZMgWbNm2KTL9gwQKcc845uPjii/H666/j9NNPx+mnn4633357t8gXxl//+lfMmDEDL7/8MubNm4dCoYCTTjoJO3bsSMxXW1sb6M/Vq1f3iLwKBx10UKD+l156KTZtb/exwquvvhqQed68eQCAM888MzZPT/fzjh07cMghh+Cee+6JvH/77bfjZz/7Ge69914sWrQIVVVVmDJlCjo6OmLL7Oo7YdBDIIMex4MPPkh1dXVF1//0pz8R55w2btzoXfv5z39OtbW11NnZGVnWO++8QwDo1Vdf9a79+c9/JsYYffDBB90uu458Pk+NjY10yy23JKY79thj6dvf/vZulaUUhg4dSnfddVfZ6bds2ULpdJqeeOIJ79rSpUsJAC1cuHA3SFgat99+Ow0fPjwxTU/29ZFHHkkzZszwvjuOQy0tLTR79uzI9F/72tdo2rRpg
WsTJkygb37zm7tVzjhs2rSJANBf//rX2DRx72pPYdasWXTIIYeUnX5P62OFb3/72zRy5EhyXTfyfm/3MwD67W9/6313XZeam5vpjjvu8K5t2bKFstksPfroo7HldPWdMOgZGI3OHoSFCxdi7NixGDhwoHdtypQpaGtrw5IlS2Lz1NfXBzQqkydPBuccixYt2q3yPvXUU/j4449x4YUXlkz7yCOPYMCAATj44INxww03YOfOnbtVtijcdtttaGhowGGHHYY77rgjcUvwtddeQ6FQwOTJk71rY8aMwZAhQ7Bw4cKeELcIW7duRf/+/Uum64m+zufzeO211wL9wznH5MmTY/tn4cKFgfSAeL57sz8BlOzT7du3Y+jQoRg8eDBOO+202Hdxd+H9999HS0sLRowYgenTp2PNmjWxafe0PgbEs/Lwww/joosuSgx63Nv9rGPlypXYuHFjoC/r6uowYcKE2L78NO+EQc/ABPXcg7Bx48YAyQHgfd+4cWNsnqampsC1VCqF/v37x+bpLtx///2YMmVKyYCmX//61zF06FC0tLTgzTffxHe/+128++67+M1vfrNb5dNx9dVX4/DDD0f//v2xYMEC3HDDDdiwYQN+8pOfRKbfuHEjMplMkS3VwIEDd3u/RmH58uW4++67ceeddyam66m+/uijj+A4TuTzumzZssg8cc93b/Sn67q45pprcNRRR+Hggw+OTTd69Gg88MADGDduHLZu3Yo777wTkyZNwpIlS7o1kG8cJkyYgDlz5mD06NHYsGEDbr75ZnzhC1/A22+/jZqamqL0e1IfK/zud7/Dli1bcMEFF8Sm6e1+DkP1V1f68tO8EwY9A0N0dhHXX389fvSjHyWmWbp0aUkDwt7Ep2nDunXrMHfuXDz++OMly9fthcaOHYtBgwbhhBNOwIoVKzBy5Mgekfs73/mOd23cuHHIZDL45je/idmzZ/eoS/dP09cffPABTj75ZJx55pm49NJLE/Purr7ua5gxYwbefvvtRHsXAJg4cSImTpzofZ80aRIOOOAA3Hfffbj11lt3t5iYOnWq9/9x48ZhwoQJGDp0KB5//HFcfPHFu73+7sD999+PqVOnoqWlJTZNb/ezQd+GITq7iJkzZyauVABgxIgRZZXV3NxcZKGvTvk0NzfH5gkbutm2jc2bN8fmCePTtOHBBx9EQ0MDTj311LLq0DFhwgQAQkuxK5PvrvT9hAkTYNs2Vq1ahdGjRxfdb25uRj6fx5YtWwJandbW1rL7tTtkXr9+PY4//nhMmjQJv/jFL7pcX3f1dRgDBgyAZVlFp9CS+qe5ublL6XcXrrzySs9ov6vagnQ6jcMOOwzLly/fTdIlo76+Hvvvv39s/XtKHyusXr0azz77bJc1ir3dz6q/WltbMWjQIO96a2srDj300Mg8n+adMOgh9LaR0GcRpYyRW1tbvWv33Xcf1dbWUkdHR2RZyhh58eLF3rW5c+fuVmNk13Vp+PDhNHPmzE+V/6WXXiIA9M9//rObJSsfDz/8MHHOafPmzZH3lTHyk08+6V1btmxZjxojr1u3jvbbbz86++yzybbtT1XG7uzrI488kq688krvu+M4tM8++yQaI3/pS18KXJs4cWKPGcq6rkszZsyglpYWeu+99z5VGbZt0+jRo+naa6/tZunKw7Zt26hfv37005/+NPJ+b/dxGLNmzaLm5mYqFApdytfT/YwYY+Q777zTu7Z169ayjJG78k4Y9AwM0elBrF69ml5//XW6+eabqbq6ml5//XV6/fXXadu2bUQkXu6DDz6YTjrpJHrjjTfomWeeocbGRrrhhhu8MhYtWkSjR4+mdevWeddOPvlkOuyww2jRokX00ksv0X777UfnnHPObmvHs88+SwBo6dKlRffWrVtHo0ePpkWLFhER0fLly+mWW26hxYsX08qVK+n3v/89jRgxgo455pjdJl8YCxYsoLvuuoveeOMNWrFiBT388MPU2NhI5513XqzcR
ESXX345DRkyhJ5//nlavHgxTZw4kSZOnNgjMq9bt45GjRpFJ5xwAq1bt442bNjgfeJk7um+fuyxxyibzdKcOXPonXfeocsuu4zq6+u9U4PnnnsuXX/99V76v//975RKpejOO++kpUuX0qxZsyidTtNbb721W+QL44orrqC6ujqaP39+oD937tzppQnLfPPNN9PcuXNpxYoV9Nprr9HZZ59NuVyOlixZ0iMyz5w5k+bPn08rV66kv//97zR58mQaMGAAbdq0KVLe3u5jHY7j0JAhQ+i73/1u0b09oZ+3bdvmjcEA6Cc/+Qm9/vrrtHr1aiIiuu2226i+vp5+//vf05tvvkmnnXYaDR8+nNrb270yvvjFL9Ldd9/tfS/1Thj0DgzR6UGcf/75BKDo88ILL3hpVq1aRVOnTqWKigoaMGAAzZw5M7AaeuGFFwgArVy50rv28ccf0znnnEPV1dVUW1tLF154oUeedgfOOeccmjRpUuS9lStXBtq0Zs0aOuaYY6h///6UzWZp1KhRdN1119HWrVt3m3xhvPbaazRhwgSqq6ujXC5HBxxwAP3whz8MaMnCchMRtbe307e+9S3q168fVVZW0le+8pUA0didePDBByOfFV0Juyf09d13301DhgyhTCZDRx55JL388svevWOPPZbOP//8QPrHH3+c9t9/f8pkMnTQQQfR008/vdtkCyOuPx988MFYma+55hqvfQMHDqRTTjmF/vGPf/SYzGeddRYNGjSIMpkM7bPPPnTWWWfR8uXLY+Ul6t0+1jF37lwCQO+++27RvT2hn9VYGv4ouVzXpRtvvJEGDhxI2WyWTjjhhKK2DB06lGbNmhW4lvROGPQOGBHR7t4eMzAwMDAwMDDoDRg/OgYGBgYGBgZ9FoboGBgYGBgYGPRZGKJjYGBgYGBg0GdhiI6BgYGBgYFBn4UhOgYGBgYGBgZ9FoboGBgYGBgYGPRZGKJjYGBgYGBg0GdhiI6BgYGBgYFBn4UhOgYGBrsV8+fPx+GHH45sNotRo0Zhzpw5vS2SgYHBZwiG6BgYGOw2rFy5EtOmTcPxxx+PN954A9dccw0uueQSzJ07t7dFMzAw+IzAhIAwMDD41Pjwww8xduxYXH311fje974HAFiwYAGOO+44/PnPf8Zf/vIXPP3003j77be9PGeffTa2bNmCZ555prfENjAw+AzBaHQMDAw+NRobG/HAAw/g+9//PhYvXoxt27bh3HPPxZVXXokTTjgBCxcuxOTJkwN5pkyZgoULF/aSxAYGBp81pHpbAAMDg70bp5xyCi699FJMnz4d48ePR1VVFWbPng0A2LhxIwYOHBhIP3DgQLS1taG9vR0VFRW9IbKBgcFnCEajY2BgsMu48847Yds2nnjiCTzyyCPIZrO9LZKBgYEBAEN0DAwMugErVqzA+vXr4bouVq1a5V1vbm5Ga2trIG1raytqa2uNNsfAwKBHYLauDAwMdgn5fB7f+MY3cNZZZ2H06NG45JJL8NZbb6GpqQkTJ07En/70p0D6efPmYeLEib0krYGBwWcN5tSVgYHBLuG6667Dk08+iX/+85+orq7Gsccei7q6Ovzxj3/EypUrcfDBB2PGjBm46KKL8Pzzz+Pqq6/G008/jSlTpvS26AYGBp8BGKJjYGDwqTF//nyceOKJeOGFF3D00UcDAFatWoVDDjkEt912G6644grMnz8f1157Ld555x3su+++uPHGG3HBBRf0ruAGBgafGRiiY2BgYGBgYNBnYYyRDQwMDAwMDPosDNExMDAwMDAw6LMwRMfAwMDAwMCgz8IQHQMDAwMDA4M+C0N0DAwMDAwMDPosDNExMDAwMDAw6LMwRMfAwMDAwMCgz8IQHQMDAwMDA4M+C0N0DAwMDAwMDPosDNExMDAwMDAw6LMwRMfAwMDAwMCgz8IQHQMDAwMDA4M+i/8LcYzCBOdGPCYAAAAASUVORK5CYII=" + }, + "metadata": {}, 
+ "output_type": "display_data" + } + ], + "execution_count": 3 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": [ + "Let's create a simple KAN model to learn the `xy` function. Simple application of KA theorem give us that the target function can be represented as follows\n", + "$$ \n", + "xy(x) = \\dfrac{1}{2}\\left( \\left(x_0 + x_1\\right)^2 - x_0^2 - x_1^2 \\right)\n", + "$$\n", + "So a simple `[2, 3, 3, 1]` KAN model should be able to learn this function." + ], + "id": "e0e0f63562a5e604" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-09T06:27:14.011743Z", + "start_time": "2024-05-09T06:27:13.887273Z" + } + }, + "cell_type": "code", + "source": "model = KAN(width=[2, 3, 3, 1], grid=100, k=3, device=device)", + "id": "7ea1f4706110b0d6", + "outputs": [], + "execution_count": 4 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "Since we know its explicit form, we can use it to initialize the model.", + "id": "b774a22986c1528e" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-09T06:27:17.054151Z", + "start_time": "2024-05-09T06:27:14.013705Z" + } + }, + "cell_type": "code", + "source": [ + "model(dataset['train_input'])\n", + "model.fix_symbolic(0, 0, 0, 'x')\n", + "model.fix_symbolic(0, 1, 0, 'x')\n", + "model.fix_symbolic(0, 0, 1, 'x^2')\n", + "model.remove_edge(0, 1, 1)\n", + "model.remove_edge(0, 0, 2)\n", + "model.fix_symbolic(0, 1, 2, 'x^2')\n", + "model.fix_symbolic(1, 0, 0, 'x^2')\n", + "model.remove_edge(1, 1, 0)\n", + "model.remove_edge(1, 2, 0)\n", + "model.remove_edge(1, 0, 1)\n", + "model.fix_symbolic(1, 1, 1, 'x')\n", + "model.remove_edge(1, 2, 1)\n", + "model.remove_edge(1, 0, 2)\n", + "model.remove_edge(1, 1, 2)\n", + "model.fix_symbolic(1, 2, 2, 'x')\n", + "model.fix_symbolic(2, 0, 0, 'x')\n", + "model.fix_symbolic(2, 1, 0, 'x')\n", + "model.fix_symbolic(2, 2, 0, 'x')\n", + "model.plot()" + ], + "id": "f06862a421ec65f7", + "outputs": [ + { + "name": "stdout", + "output_type": 
"stream", + "text": [ + "r2 is 0.7779091596603394\n", + "r2 is not very high, please double check if you are choosing the correct symbolic function.\n", + "r2 is 0.7826252579689026\n", + "r2 is not very high, please double check if you are choosing the correct symbolic function.\n", + "r2 is 0.9846869111061096\n", + "r2 is 0.9848194122314453\n", + "Best value at boundary.\n", + "r2 is 0.9986667633056641\n", + "r2 is 0.9976706504821777\n", + "Best value at boundary.\n", + "r2 is 0.9954747557640076\n", + "r2 is 0.9994252920150757\n", + "r2 is 0.9984908103942871\n", + "r2 is 0.9984245896339417\n" + ] + }, + { + "data": { + "text/plain": [ + "
" + ], + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZcAAAHiCAYAAAAkiYF/AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAABRjElEQVR4nO3deZyN5RvH8c8ZlLEzaUWWxlLRr73QSlQiS4Uokvbway/1q1TaFe2bikKIVjKyVCqpRFGJ7EulMcY2w5iZ+/fH1clgDMM553nOeb7v18tLjTnO5cz9PNdzL9d9h5xzDhERkQhK8joAERFJPEouIiIScUouIiIScUouIiIScUouIiIScUouIiIScUouIiIScUouIiIScUouIiIScUouIiIScUouIiIScUouIiIScUouIiIScUouIiIScUouIiIScSW9DkAkHjjnWLNmDRs3bqRcuXKkpKQQCoW8DkvEt9RzESlCZmYmgwYNIjU1lapVq1KrVi2qVq1KamoqgwYNIjMz0+sQRXwppJMoRQqXlpZGhw4dyMrKAqz3EhbutZQpU4YxY8bQsmVLT2IU8SslF5FCpKWl0apVK5xz5Ofn7/L7kpKSCIVCjBs3TglGpAAlF5EdZGZmUq1aNbKzs4tMLGFJSUkkJyezYsUKKlWqFP0AReKA5lxEdjBkyBCysrL2KLEA5Ofnk5WVxdChQ6McmUj8UM9FpADnHKmpqSxatIjiXBqhUIjatWuzYMECrSITQclFZDvp6elUrVq10D+7EVgOTASyi3h9SkpKlKITiR8aFhMpYOPGjdv9f0mgIzAdeAZ4D3geqLmL12/YsCGK0YnEDyUXkQLKlSsHQApwJ7AQGAZsAO4FHNACmAUMB07b4fXly5ePVagivqbkIlJAyqpVvFO+PEuBe4AJwH+whDIOSy4XAX2w3stHwFdAN+DIWrWoUqWKB1GL+I+Si0heHnz0ETRvTujYYzm/RAn6A4cD1wBzd/j2LcBQoDHQGlgGDAS+XreO0IMPwqpVMQxexJ+UXCS41q+HZ56B+vWhbVvIzobhw8mbP59nypZlbdLuL49pQJekJJomJ7P/ZZfByy9Dw4Zw5ZXw/fdR/yeI+JWSiwTP77/Df/8LNWrAbbfBSSfB11/DV19Bx45UqlqVMWPGEAqFSNpNgglX6D/1/vuUHjgQ5s2Dhx6CmTOhWTP79e67sHVrTP5pIn6h5CLB4BxMmQIXXmg9leHDoVcvWLQIhg2Dk0/e7ttbtmzJuHHjSE5OJhQK7VS7Ev5acnIy48ePp0WLFvYH5cvDdddZcnnnHShb1noxjRrBgAGwZk2s/sUinlKdiyS2f4a6eOYZmDvXhqx694bOnSE5ebcvz8zMZOjQoTzzzDNUWLiQ74ATgfV16tC7d2+6detGxYoVi/5Lfv7ZhstGjrT/v+QSuPZaOOqoff7nifiVkoskphUr4MUX4dVXISMDLrjAhsLOOAP2ooLeOce6qVOpeM45rPv0UyqedVbxK/HXrIE337SY/vgDTj/dejktW0KJEsWOScTPNCwmieWbb+DSS6FOHXj+eejaFX77Dd5/H848c68SC9gwWKVKlQiB/b43f09KCtxyC8yZA6+/br2qzp3huOMsEaoAUxKIkovEv5wcGDECTj0VmjSxVVpPPgnLlsFTT1mi8ZNSpaBDB5g0CSZPhhNOgHvusbmgO+6weSCROKfkIvHr77/h4YcteXTtCuXKwYcf2oqtXr2gQgWvI9y9E06AwYOtN3PttTBqlPVkOnWCzz+3hQgicUjJReLPnDlw1VVw+OHQvz+cfz78+CN8+im0agV7UJ/iO4ceCv/7H/zyiy0+WLYM2rSBxo1hyBAbQhOJI3F4FUoghavomzWD//wH0tLg3nth6VJbiXX00V5HGBnJyXD55VZz89FHULMm9OkDRx4J/fqp+l/ihpKL+Nv69TBo0LYq+s2bbWnxwoVw551wwAFeR
xgdoZCtJhsxAmbNgo4d4ZVXbCl1jx7w3XdeRyhSJCUX8aeCVfS3325FjtOn/1tFT6lSXkcYO7VqwaOP2lxS//7www/QvLmq/8XXlFzEP8JV9G3aWE9lxIhtVfRvv23btARZ+fI26b9j9X/DhrY6TtX/4iNKLuK97Gx47TWbSznnHJvMfuUVWLIEHnwQDjvM6wj9pUQJOO88Wxn39dfQogU88YTNy/TqZTsCiHhMyUW8s2IF3H23DX1dey3Urm11H7Nm2bzCHmzPEnhHHWWry375xWpkJk2yFWatW8P48bYQQsQDSi4Se998Y5XptWtbFf1ll1kV/Xvv7VMVfaClpMDNN8NPP+1c/f/CC6r+l5hTcpHY2LGK/ocfrHp++XJ/VtHHq4LV/1OmwIknWv2Mqv8lxpRcJLrCVfS1a1sVffnyNlfw669w4432/xIdxx9vc1nh6v/Ro60n07Gjqv8l6pRcJDp++gl69txWRd+qlVXRT5wYv1X08Spc/f/zzzY/s3y5qv8l6nSFS+Tk5VmvpFkzOPZYSyT33WervxKpij5eFaz+//hjq5/p0wcaNFD1v0Sckovsu3XrYOBAG9dv1w62bLH5lYULbZw/JcXrCKWgUAhOO812Opg1yyb+X3nFkv8VV6j6XyJCyUX23oIF9uRbo4YlkXAV/Zdf2mmLQaqij1e1asEjj9hqvUcegdmzrfr/7LNtjiYnx+sIJU4puUjxOGe1KK1b23DKO+9Yglm8WFX08axcObjmGqv+HznSFlr07Lmt+j893esIJc4oucieycqy43mPOcYqwlessP9fuhQeeMAmjSX+JSXBuefCBx9YL7Rly23V/zfeqOp/2WNKLlK0FSugb19b9XXddXDEEVY/8cMPNj5furTXEUq0HHmkrS779Ve46y7rsTZuDBdcAOPGqfpfiqTkIjtzzp5aO3Wy+pQXX7RVRvPnw9ixcMYZqqIPkipV4KabbHn5G2/Ygo1LL7Wameeft2MRRHag5CLb5OTYCqJTT4WmTW0l0VNP2VLiAQMs0UhwlSoF7dvbiZ/h6v9777VVgrffbqsDRf6h5CJWRd+/vyWPyy6DihXtFERV0cuuhKv/586F66+3c2WOP95WCX72mar/Rckl0H780c4DOfxwW4Z6wQU29JGWZufSq4pedueQQ+Cee2xX5meftTm6Cy+03u+bb6r6P8B09wiavDxbCXT22TZmPmmSVdEvXQovvWRbuIsUV+nS1uv96iub7K9d204SbdAA7r9f1f8BpOQSFOEq+nr1bNw8J8dqVH7/XVX0EjmhkM3XDR9uBZmXXmrDZ+Hq/2+/1ZBZQCi5JLodq+hPPdXOU/nyS7j4YlXRS/TUrGk7Ys+bt636/5xzVP0fEEouicg5G+4qrIr+rbdslY9IrBSs/h81CipU2Fb9/8QTqv5PUEouiaRgFX3LlrBypQ1JqIpe/CApydrlBx9Y7/ncc21rGVX/JyQll0SwfLlVUO9YRT9zJnTvrip68Z8GDWDQoJ2r/1u1suMAVP0f95Rc4pVz8PXXVkVfp46t9FIVvcSbgtX/b75p8zBduth5QKr+j2tKLvEmXEV/yil2Jsfs2fD006qil/hWqpSdBfTppzB1qh3foOr/uKbkEi9Wr4aHHtpWRV+pklXR//IL3HCDquglcRx3nM0dFlb9P3WqljLHCSUXvwtX0desCY8+aivA5sxRFb0kvoLV/889ZwtU2rZV9X+c0J3Jjwqror//fhv6evFFW10jEhSlS0PXrlabNW6czTGGq//vu8+SjviOkoufrFtn8yfhKvqtW+1UwIULbdy5ShWvIxTxTrj6f9iwbdX/gwdbvUz37jBjhobMfETJxQ/mz4fevaF6dVuW2bixXSjTpsFFF0HJkl5HKOIvBav/H33UVpu1aAFnnWWFmqr+95ySi1ecs5UxF1xg3ftRo2xJ5uLFMHQonHCC1xGK+F+5cnD11fD993YNVaoEV11lvZnHH7fjJMQTSi6xl
pUFr7wCjRpZhfKqVfD667BkCfTrZ5OYIlI84er/99+3Xv9559lBd0cdZasp5871OsLAUXKJlXAVfY0a1tjr1t1WRd+tm6roRSKlfn3bAfzXX6FvX7vOmjRR9X+MKblEk3N2vkXHjrbC5eWXbeJx/nwYM0ZV9CLRVLmyrSqbMweGDLEFMuHq/+eeU/V/lCm5RENOjq1oOeUUOP10m2wcONCWEj/5JNSq5XWEIsFRsqTVx0ycaEcwn3KKLe2vVw9uu83ONJKIU3KJpNWr4cEHLXlcfrk9OX38se32ev31NvkoIt459lib85w713ZiHjvWqv8vvtiGz7SUOWKUXCJh9mzo0cN2JX7sMWjTxrriEybYxKKq6EX85eCD4e677cHvhRdsYU27dtareeMNVf9HgO56eysvz1amnH22PflMmWJnpqiKXiR+lC5t8zDh6v/UVCsJqF9f1f/7SMmluDIzrYq+bl3o0AFyc62K/vffbfxWVfQi8Sdc/f/227afX9eu26r/u3VT9f9eUHLZU/PnQ69etpT4rrtsaeOMGfDFF6qiF0kkhx8O/ftvq/6fM2db9f/Ikar+30NKLnvit9+sin70aLj5ZlXRiwRBwer/0aNtgc7VV9viHNmtkHPB6uut/u03spYuLf4LndvrmpQDTjyRcpUr79VrJTpW//YbWcuWFe9FztlcW4kSe9UWDjjhBLUDH/l7wQKyli8v3ovCBZglSuzVe6Yce2xg2kDgxnKWjBtH/Ysvjs2bvf46NGpE+uLFgWlQ8WKv28HePGS8/jocc4zagc8smzCB1HbtYvNmb70FRx/NmqVLA9MGApdcnHNUqF492m9ihxk99BC89BIZhx8e3feTYotpO+jfH158kYwaNaL7flIszjkqVKsW7TexRQKPPw4DB7I22m3ORzTnEmnO2VYTV19t1fg9engdkXgh3A6uuQaeeELtIIics506eve2B4zLL/c6ophScokk56z7e/XVdkPp3Vt7hwVRuB1cc409saodBI9zMGKE/ewfegiuuy5wbUDJJVLC3d+ePa1Kv0+fwDUmYVs7uOoqW8aqdhA8zsE779j2Mv362eqyALYBJZdICHd/r7wSHnnEdmINYGMKPOdg+HB7wHj4YbWDIHLODi274QbbHPPGGwPbBpRc9lW4+9ujh91Qbr45sI0p0Aq2g/791Q6CyDmrh7nuOts6plevQLcBJZd9Ee7+du9uN5Rbbgl0Ywos56xy+4orbFdstYPgcc7OaLr2Wvjf/zTPhpLL3gvfULp1sxvKrbcGvjEFUvhptVs3G1+/7Ta1g6BxDt57zxby3HOPhkP/oeSyN8I3lMsvt52Qb79djSmIwk+rl11mwyB33KF2EDTOwQcf2Dxb3762o7LaAKDkUnzOwbvv2q6p99+vG0pQOWdHLnTpYsMgd92ldhA0zsFHH9k82x13aDh0B0ouxRF+Uu3SBe69VzeUoArfVDp3tqfVu+9WOwga5+CTT2ye7dZbNXpRCCWXPeWcHYl66aX2pKobSjCFbyqXXGJPq/feq3YQNM7Bp5/acGifPnrI3AUllz0RHgK59FJLKvfco8YUROGbSocOttT4/vvVDoLGOfjsM7sX3HCDPWiqDRRKyWV3nIMPP4ROnewJRU+qweQcfP45tG1rhXH9+6sdBI1z8NVX1mu96ipbHag2sEtKLkVxDj7+2BrT7bfbiiA1puBxDr75Blq1slVBjz+udhA0ztmhYe3b23CYHi52S8llV8Jj6xddZBN2DzygxhRUs2fbMbddusDAgWoHQTR3LrRpY/eDJ5+EJN06d0efUGHCY+vt29u69Yce0g0lqH77Dc4+224sL72km0oQLVoE559vDxjPPqs2sIf0Ke0oPLZ+4YU2YffII0osQbViBZx2GjRtamez6KYSPH/8Ac2bw4knwuDBe328cRDpatnRjBn2lNKzp3V/lViCKSMDmjSBBg2stqlk4A5tlcxMSyy1a9segmoDxaLkUtCcO
XDOOVYcN2iQEktQZWXBGWdAxYo277bffl5HJLGWnQ3nnQfJyba9i9pAsSkVhy1ZAqefbg3qlVc0BBJUW7daG9iwAWbNgjJlvI5IYi031ybuMzJg+nQoW9briOKSkgtAeroNgRx/vB32pHHVYMrLg44d4ZdfLLFUrux1RBJr+fm2V9icOVbTUqWK1xHFLSWXjRttwvagg6ymReOqwZSfb1umT5pkNS3VqnkdkcRafr5tlz9xIkyeDNWrex1RXAv22E9WFjRrZkMhU6dC6dJeRyReyM210wNHjrQby5FHeh2RxFpurpUdvPOObfWkNrDPgvmY7pwtMWzf3n7/7jubvJVgcQ7++st6LNOmQVoanHyy11FJLDkHq1fbyZFffmmHfp18shbzREAwey4DBsAJJ1iP5ZtvbEhMgufpp60dLFliN5bGjXVTCZrnnrNapiVLrHBabSBiAtdzCQFbSpWyjeeuv95Wgmzc6HVYEmP/toOePe3c83LlYNMmr8OSGPq3DXTrBldeqTYQYSHnnPM6iFha9s03ZC1dGtOnk4NOP53KBx8cs/eT3VM7kOXffUf2smUxbQNVmzShckBGSgKXXEREJPoCNyy21wrmYI3JBpPagDhnS5aTktQGdiOYE/p7Y9Ysq4GZNcvrSMQragPy009WWPnTT15H4ntKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLiIiEnFKLnvAOcfatWtxYL8753VIEmNqAxJuA/nhtqA2UCQllyJkZmYyaNAgUlNTada8Ofn5+TRr3pzU1FQGDRpEZmam1yFKlKkNSME2cOZZZ7FhwwbOPOsstYHdcVKoCRMmuLJly7pQKORCoZA7FlwuuGPh36+VLVvWTZgwwetQJUrUBmTHNtAIXCa4RmoDu6WeSyHS0tJo1aoV2dnZOOd26v6Gv5adnU2rVq1IS0vzKFKJFrUBURvYNyG34ycWcJmZmVSrVo3s7Gzy8/P//fqxwHfAicCsAt+flJREcnIyK1asoFKlSrENVqJCbUB21QYaAV8ApwM/Ffh+tYGdqeeygyFDhpCVlbVdgypKfn4+WVlZDB06NMqRSayoDYjawL5Tz6UA5xypqaksWrRopy7wScDX7PzUChAKhahduzYLFiwgFArFKFqJhqLawK56LqA2kEiKagPHAJ+zc88F1AZ2pJ5LAWvWrGHhwoU7NajDgdFFvM45x8KFC8nIyIhqfBJ9u2oDAKV2+L0gtYHEUVQbuAQouYvXqQ1sT8mlgI0bN+70tf2BYUDuHrx+w4YNkQ5JYqywNhB2MHbBHFzE69UG4l9RbWARllzqFfF6tQGzqyQcSOXKldvpa48DRwLnATnAvCJeX758+egEJjFTWBsIWwhM/+f3XVEbiH9FtYFhQDWgCfAZ8Hch36M2YNRzKSAlJYU6der8O17aCegJ3ALMwMbZswt5XSgUok6dOlSpUiVmsUp07NgGCtoMbPjn9x2pDSSO3bWBR4EM7N5QcIhUbWB7Si4FhEIhevXqBUB94FnsSeWNPXht7969NYmXAAq2geJSG0gMu2sDW4BXgQOxOZiC1Aa20WqxHWRmZlL/sMOYkJVFLnAGkFXE92t9e+LZVY1DHeB54Aa2HxpTG0g8u2oDBZ0EXI49gM5QG9iJei47qFSxIj+ccgqHA
F1Dod0mllAoxNixY9WgEkilSpUYM2YMoVCIpKSiLxG1gcS0J23gW+ArbPi8GqgN7EDJZUcvv8yh06ez6p57WFGmDKFQaKdubvhrycnJjB8/nhYtWngUrERLy5YtGTduHMnJyWoDAbUnbeBdIL1UKaZ07kyL007zJlCfUnIp6Ntv4Y474IYbOPbBB1mxYgUDBw6kdu3a231b7dq1GThwICtXrtRNJYG1bNlSbSDgdtcGnhw0iFvnzaPOwQfDW2+BZhn+pTmXsDVr4NRT4dBDYeJE2G+/f//IOUdGRgYbNmygfPnyVKlSRZN2AeOcI/P77yl9yy1sHjCASiecoDYQMEXeB376CV5+Gdq1g+bNvQ3UJ1TnApCfD1dcAdnZ8Pbb2yUWsO5vSkoKKSkpHgUoXguFQlSuXBmSk0muXBmUWAKnyPtAo0ZwzjnwwQdQsyYccUTM4/MbDYsBPPooTJ4Mb7wB1ap5HY2IxKM2baB2bXj9dVi/3utoPKfkMnky9O8PffuqOysiey8pCXr0sJGQN96w3wMs2Mll5Uro3h2aNYM77/Q6GhGJdxUrwpVXwoIF8PHHXkfjqeAml5wc6NoVSpe2p4wSJbyOSEQSQWqqDZGlpcHcuV5H45ngJpd77oGZM2HYMNBEvYhE0jnnQMOG8OabthI1gIKZXN57D557zibyTzrJ62hEJNGEQnD55VCmDLz2GuTuyaEdiSV4yWXBArj2WujQAa67zutoRCRRlSkDV10Fq1bBu+96HU3MBSu5ZGVB585wyCHwwguqVRCR6KpeHS6+GKZNsx1AAiQ4RZTOQZ8+sGQJfPEF6EAfEYmFJk1g4UIYMcKSzSGHeB1RTASn5zJkiE3eP/MMHHmk19GISFCEQjZicsAB8OqrsLmw4+YSTzCSy+zZcNNNtv780ku9jkZEgma//aBnT8jMhOHDA7HBZeInl8xMSygNGsATT3gdjYgE1UEHWW3dzJk2NJ/gEnvOxTlbrbF2LYwfbwWTIiJeOe44WLQIxoyBww+3TS4TVGL3XJ5+GsaNg8GDE/qHKCJxpG1bqFHD6l82bvQ6mqhJ3OQybRrcdx/ceiucf77X0YiImJIlbf43J8cq+BN0/iUxk8tff1l1bJMmcO+9XkcjIrK9ypXtDKl582DCBK+jiYrESy65uZZYwJYfl0zsaSURiVMNGtioyrhxlmQSTOIllwcegK+/tvOsDzrI62hERHbt3HOhfn3bmT0z0+toIiqxksv48fDkk9CvHzRt6nU0IiJFS0qyM6VKlrQJ/rw8ryOKmMRJLosX2yTZBRdYwaSISDwoV84KLJcutR3bE0RiJJfNm6FLF5ske/VVbUgpIvGlVi3bqX3qVJg1y+toIiIxZrtvuw1+/RU++8yOGRURiTdnnGEbXL79Nhx2GBx4oNcR7ZP477kMH25Fkk8/Dccc43U0IiJ7JxSyEZiKFW0EJifH64j2SXwnl19+gV69bL+ebt28jkZEZN+ULm3zL3//DSNHxnWBZfwmlw0bbBvr2rVh4EDNs4hIYjj0UNts95tvYPp0r6PZa/E55+IcXH89/PknfPmlHScqIpIoTjrJ5l9GjrQDxqpX9zqiYovPnsuLL9quoi+9BKmpXkcjIhJ5F11kp1a+9hpkZ3sdTbHFX3KZMQPuvBNuvBHatfM6GhGR6ChVyo4M2bQJhg6Nu/mX+Eoua9bY5P0JJ0D//l5HIyISXSkptljpp59g8mSvoymW+EkueXm2i+jmzbZvWKlSXkckIhJ9DRtCixbwwQfw++9eR7PH4ie5PPqoZe4337QCIxGRoGjdGurUsZq+9eu9jmaPxEdymTQJHn4Y7rkHmjXzOhoRkdhKSoIePey/X38d8vO9jWcP+D+5rFhhu4Y2awZ33OF1NCIi3qhQwTbnXbgQPvrI62h2y9/JJSfHJvDLlLHzDpL8Ha6ISFQdcQS0aQMTJ8KcOV5HUyR/363vvtt2CB02zFZNiIgEXfPm0KiRnbS7Zo3X0
eySf5PLmDHw/PPw2GNw4oleRyMi4g+hkB3lXqaMbXC5davXERXKn8ll/ny47jqrUL3mGq+jERHxl+RkK7D84w94912voymU/5LLpk22IeVhh1nPRRtSiojsrHp1uOQS21/x22+9jmYn/tq40jno08eO+5w2DcqX9zoiERH/atwYFi2CESOgWjXbUdkn/NVzeeMNO/zrueegQQOvoxER8bdQCDp2hAMOsA0uN2/2OqJ/+Se5zJoFN99sB+V06uR1NCIi8WG//Wz+Zd06W1nrkw0u/ZFcMjPteM+jj4YnnvA6GhGR+HLggVYT+MMP8PnnXkcD+CG55OdbbyUz07Lu/vt7HZGISPw59lg46ywYOxYWL/Y6Gh8kl6efhvHjbb+cww/3OhoRkfjVti3UqGHzLxs3ehqKt8nliy/gvvvg9tvh3HM9DUVEJO6VLGkjQbm5toO8hxtcepdc/vzTqkxPO812OxYRkX1XqZKdfTVvHkyY4FkYIef2fWnB6vnzyV62rHgvcs62LShVaq8KJVOOP55ylSsX+3UiAJnLl7N59erivSgrC3791ZbJlylT7PesWLcuyardkr2w4a+/yMnIKN6Lli2DEiX2+vyrcocfzv570c7DIlJEueyTT6jboUMk/qrde/NNaNSINUuWKLnIXvtzxgyqn3FG8V6Un2/j2aVLF2+H7gkToHZt1v35p5KL7JU1c+Zw0PHHF+9FBx+8d2/29ddw2GFsTE9n/xo19u7vIELJxTlHhWrVIvFXFfUm8PbbdiLls8+ytnr16L6fJLyyVatG9w2cg08/tVWQffqw4cADo/t+krAckBzth2nnYMYM+OQT6NyZrCpV9umv83612J5wDt55B66/3k6k7N7d64hEiuYcTJliqyGvukoLVsTfnIOZM+1BqG1b21ZmH/k/uTgHo0fD1VdDv37Qq5c2sxR/c84K2Z580iZW27dXmxX/cg5mz7bzYVq3tlN/I9Be/Z1cnIP33rOjPe+9F266SRep+JtztkvtY4/ZasiLL1abFf9yzk60fP11OO88OOeciLVX/yYX5+yc6G7doG9fuPVWXaTib87B9Ok2dNuli+2RpzYrfuWcrX589VVo0cKSSwTbqz+Ti3NWtd+1K9xxB9x5py5S8Tfn7EyNhx6ypNKli9qs+JdzsGABvPSSbRlzwQURb6/+Sy7OQVqaHRh2881w9926SMXfnLMNAx94ADp0sOEwtVnxs8WL7TDGpk2hXbuotFd/JRfnYNIkO5+gd2/bGkYXqfiZc/DjjzYn2KYN9OihNiv+tmwZPPMMnHSSHSUfpfbqn+TiHHz2mf1jr7sOHnxQF6n4m3Pwyy+2fdF559mKRrVZ8bM//oCBA+GYY2x0qDjFwMXkj+TinB1r3K6dXaCPPKKLVPxv/nybD2ze3Gqw1GbFz9LTYcAAqFfPFkpFMbGAH5JLeIXNhRdaceRjj+kiFf9btMh28z7tNBvCjfKFKrJP1q2zuqsaNWzX5Bi0V2+viPAKmwsusNU1Tz2li1T8b/lyuOUWOPFE+11tVvxs0yY74TclxaYcSpSIydt6d1WEV9icf75N4D/zjC5S8b+//rJi3qOPhrvuitmFKrJXtmyxobD997cedqlSMXtr7+7mP/4ILVvaPMvzzyuxiP+tXQt9+kCtWrY6TIlF/Cw31x7ac3Kshx3jI+S9uaPPnWsVoa1awcsvK7GI/23caInlgAOgf/+YPgGKFFt+vt1b//4bbrttr84f2lexv6vPn2+ra5o1s/1s9PQnfrdliz35lSplY9f77ed1RCK75pztbvz779ZuK1b0JIyInOeyx5Yssa0GmjSBt95SYhH/y821uZUNG2yrjORkryMS2TXn4OOP4bvvbG7woIM8CyV2yeWPP+DMM6145513oGRs85pIseXn2yaUS5daYqlQweuIRHYtXC+Ylmb1gjVrehpObO7wGRlwxhn2j33vPY1Xi/85By++aE+Azz4L0T61UmRfOAc//QSjR
tnq24YNPa8XjH5y2bTJ5lcqVIBx42K+YkGk2JyDkSNteOHRRz1/AhTZrcWLYfBgW4HbtKnniQWinVy2brUVYZs2WbFk2bJRfTuRfRY+9/7NN21rl0aNvI5IpGgrV9qS45NPjsrW+XsresklP9+q7hcsgO+/h0qVovZWIhHhHHzzje0UcfXVNpTrkwtVpFB//mnt9aijbCNKH7XX6CQX5+C//7Xt87/6Cg45JCpvIxIxztlD0IMP2mFfUTrjQiQinLMey8CBcMQRdtSDz+oFI59c8vPtHJYhQ+w0yQYNIv4WIhHlHEydattktGtnO8YqsYhfOQc//2x1gg0aWGLxYVlH5JKLc7B+vZ0c+fbbtty4ceOI/fUiEeecVd4PHw4ffGDHavtsaEHkX85BVpYtNf7sMzj9dHsY8mFigUgmlwEDbBI0O9su1NNP10Uq/vbuu/Dhh9bbvuceOPVUtVnxr0mT4PPPLcl07w7HHuvr9hqR5BICtpQsCW3bWhftwANthZiIj+WUKAFnn22nSFasCJs3ex2SSKFCwNYSJWxFWOPGUK6cbUjpYyHnnNvXv2T5t9+StXRpTLPogU2bUvngg2P2fpJYVv/6K1v++iumbbZSw4aUr1IlZu8niSNj8WJy1qyJaXutkJpKmX3YlSIiyUVERKQg79au5efbbrPKbRIv8vNtQjU/3+tIRHZv2TJbruwR75LL6tVQr56dRJmb61kYInts0SJbnbNokdeRiBRt3jx4/HE7lNEj3iWXgw+2bfenTYOHHvIsDBGRhJKZCW+8AfXr22IVj3hb0nnaadCvn2XYTz7xNBQRkbiXmwuvvWZHmnTv7ulSZe/3C7jpJhsa69HDDhMTEZG98/77NtfSs6ctV/aQ98klKckybeXKttHlli1eRyQiEn9mzbJtjNq3h1q1vI7GB8kFbMfk4cPhl1/gttu8jkZEJL6sXm3bbh13nO3m7QP+SC4A//mPbSHz2mswYoTX0YiIxIecHHj1VdtloksX32wJ45/kAnDFFfbh9OoFv/7qdTQiIv7mnG0SnJ4OV10FpUt7HdG//JVcQiE7n6BmTduddsMGryMSEfGvr7+GGTPsfumzc7P8lVzAjkIePhxWrYLrr1cFv4hIYZYvh1GjoGlTOOkkr6PZif+SC0DduvDSSzBmDLz8stfRiIj4S1aWzbMccghcdJHX0RTKn8kFbDndDTfAHXfAt996HY2IiD84Z7ubZGXZPEupUl5HVCj/JheA/v3tQJyuXWHNGq+jERHx3qRJ8NNPdhx3SorX0eySv5PLfvvZ2u3sbFtJpt1oRSTIFiyw01NbtICGDb2Opkj+Ti4A1arZJmyTJ8Njj3kdjYiIN9avh9dfhzp1oHVrr6PZLf8nF4DmzaFvX9s9efJkr6MREYmt/HxLLGD7MCb5/9bt/wjD7rwTmjWznT49PABHRCTmPvoIFi6EK6+EfTh6OJbiJ7mUKGHDY6VL2wT/1q1eRyQiEn1z5sDEiXDhhXDEEV5Hs8fiJ7mArYwYNgxmzoS77/Y6GhGR6EpPhyFDoFEjG7mJI/GVXMAqUR99FJ57Dt57z+toRESiY+tW28i3bFm4/HLfbEi5p+IvuQBcdx106ADXXmtL80REEs2778Iff9jBX8nJXkdTbPGZXEIheOEF2/rg0kutUlVEJFF8+y18+SV07AjVq3sdzV6Jz+QCUL68bXC5eDH06aMNLkUkMaxaZfe2U06BU0/1Opq9Fr/JBeDII+GZZ2ySf8gQr6MREdk3mzfbhpRVq1qvJc7mWQqK7+QCNix25ZVw000we7bX0YiI7B3n7EF5/XrbkHK//byOaJ/Ef3IBeOIJaNDAEk1mptfRiIgU3+efww8/WB3fgQd6Hc0+S4zkUrq0jVGuXWsZX/MvIhJPFi+286vOOst2gk8AiZFcwI5GHjwYxo2Dp5/2OhoRkT2zcaPVs9SsCe3aeR1NxCROcgE4/3y49Va47z6YNs3raEREipafD2++C
bm5NndcooTXEUVMYiUXgHvvhSZNrKL1r7+8jkZEZNcmTIB58+y8qkqVvI4mohIvuZQsuW1Z8uWX2xOBiIjf/PorjB8PrVpB/fpeRxNxiZdcAA46yM6Y/vpreOABr6MREdne2rW2y3v9+nDuuV5HExWJmVwAmjaFfv3gySft6UBExA9yc23x0X772flUcVwoWZTETS5ghZUXXGATZUuWeB2NiAi8/z4sW2YbUpYr53U0UZPYySUUsq0UKle2AsvNm72OSESC7IcfYOpU29W9Zk2vo4mqxE4uABUrwogRNnl2221eRyMiQfXXX/D223D88XD66V5HE3WJn1wAjjnGCisHD7ZKfhGRWMrJsVGUSpVsFCVB51kKCkZyAejWzfbs6dULfvnF62hEJCics9GTNWtse6rSpb2OKCaCk1xCIRg4EGrXhs6dYcMGryMSkSD46is7/OvSS+2Aw4AITnIBKFPGhsX+/BOuv14bXIpIdC1bBqNHw2mnwYkneh1NTAUruQCkpsJLL9kOpC++6HU0IpKosrJsQ8pDD4WLLvI6mpgLXnIB23n0xhvhzjthxgyvoxGRROOcbUOVnW31LCVLeh1RzAUzuQA89JAtCeza1SbaREQi5dNPYe5cW0iUkuJ1NJ4IbnLZbz9bc755s+1ImpfndUQikggWLIAPP4SWLeHoo72OxjPBTS4Ahx1mZylMngyPPup1NCIS79ats3q61FTbeirAgp1cAJo1g7vvhocfhkmTvI5GROJVfj68/jokJUGPHvZ7gAX7Xx92552WZLp3hxUrvI5GROLRhx/CokWWWMqX9zoazym5gD1hvPGG1cF07WpbNYiI7KmffrJJ/AsvhCOO8DoaX1ByCUtJgWHDYNYsGyYrwDlHeno6S5YsIT09Hafiy8BxzpGRkUF2djYZGRlqAwG0y/tAejoMHWp7GDZr5m2QPqLkUtCJJ8Jjj8Hzz8PYsWRmZjJo0CBSU1OpWrUqtWrVomrVqqSmpjJo0CAyMzO9jliirGAbOOnkk/li2jROOvlktYEAKeo+8OxTT5H97LNQtixcdlkgNqTcUyGnR7DtOQfdurHlww85OSeHn/45A6bgxxT6pwGVKVOGMWPG0LJlS09ClehKS0ujQ4cOZGVlAVDbOZ4HbgAWqQ0Ewo5tYMf7QGfnaFKyJEe+8QZndu3qVZi+pJ7LjkIhJrVrxy/r1jE4O5tk53YaAnH/fC07O5tWrVqRlpbmUbASLWlpabRq1Yrs7Ox/f94FqQ0kvt21gZOcozEwPC+P5t27qw3sQD2XHWRmZlKtWjUOz8ric+d4H7iqiO9PSkoiOTmZFStWUKlSpdgEKVEVbgPZ2dnk5+f/+/U68G/PZWGB71cbSDy7agNhhwC3A98Dw1AbKIx6LjsYMmQIWVlZ/OIcNwKXAlcU8f35+flkZWUxdOjQGEUo0RZuA4XdVAqjNpB4imoD+2MPnKuBUf98TW1gZ+q5FOCcIzU1lUWLFv3bBR4EXA6cBczexetCoRC1a9dmwYIF/87HSHwqrA2E7arnAmoDiaSoNgBwJdAAeAz4u8DX1Qa2p55LAWvWrGHhwoXbNajbgV+Ar7BeTGFNxjnHwoULycjIiE2gEjWFtQGAkkBj7Km1MGoDiWNXbQDgHqAV8BbbJxZQG9iRkksBGzdu3OlrW7DEkgy8AcwDHgIaFvL6DTrdMu4V1gYaAncCZwK7O6BWbSD+FdYGwlpgPdhfi3i92oBRcimgXLlyO32tItANe1I5G5gAdAdmALOAu7DGBlBeWz7EvYJt4GDgOqAHNr5+H9AVWF7E69UG4l9h94GwbkCJf37fFbUBo+RSQEpKCnXq1NluvPQerNdyJ9aD6QPUBNoCM4GbgDnAzP33p8pbb8HKlTGOWiIpJSWFo2rVogNwG1AZeOWfXyuwuZbCNgcKhULUqVOHKlWqxC5YiYrC7gNhi4G3sYeOA3b4M7WB7Sm5FBAKhejVq9e//18X6A08CvxR4Ptyg
YlAT6AG9jRb7sgjCd1/v221fc458OqrOoQs3uTnE/rqK96oU4eTgI+xn31RQyAF9e7dWxO5CWDH+8COnseGy28u5M/UBrbRarEdFFzf/n5+PkcDRwKbd/H9261vD4VsZ9RRo2DqVNsKolkzuOQSaN1aO6X62YIFMHYs/Pknm446ijp9+vD35s17tBxZNQ6JZ3d1Lp2wYdKLgbmoDRRGPZcdVKpUybbzwFaF3EbRiSUUCjF27FhrUBUr2v5CH30EixfDE0/A+vVw5ZVQowZceil88IGdfin+sGaNncHx/PNQujTcfDNle/ZkyNixhEIhknZzJsdObUASQvg+sKs2MAqYD/QFkkIhtYFCqOdSmK1b2XjEEcxasYIz8vMhFNrl3mJjx46lRYsWRf99y5bBu+/CyJG2NXeFCtCmjfVozjoLSpaM5r9GCrNlix0ON3WqbTp44YVw7LHbbTy4u32loBhtQOJSUW3gFGwF6d377881H36oNrADJZfCPPss/Pe/bJg6lTdmz+aZZ55h4cJtZXN16tShd+/edOvWjYoVKxbv7/7tNxs2GzUKfv8dDjgAOnSwRHPKKYE/vS7qnIOZM613uWmTDVs2awb77Vfot2dmZjJ06NDItgGJK0W1gTGHHspR+fmUnDTJer7yLyWXHa1ZA3Xr2g3/lVeAbWd5bNiwgfLly1OlSpV9n7RzDmbPtt7M6NGwahVUrw4XXQQdO0KjRtq+O9KWLbN5lSVL7OyNCy+EPVzZE5U2IHGl0DawfDm0agXXXAM33uh1iL6i5LKjXr3grbesh3HQQbF5z/x8+Ppr682MGQMZGVCvHlx8sfVoUlNjE0eiWr8ePv4Yvv0WDj0U2rfXaYESOQMG2D3jk0/gkEO8jsY3lFwK+vln+M9/4JFH4NZbvYlh61aYMsUSzYcfwsaNNhfQsaP1ag47zJu44lFuLnz+OUycaPNa558Pp56qoUeJrE2boEULG9YeMMDraHxDySXMOTj3XFvlNXfuLsfgYyo7GyZMsKGzCRMgJweaNLHeTPv2djSz7Mw5+OUXeO896wU2bWo/2zJlvI5MEtWYMXY8+ogR9jAoSi7/+ugjG4N//31byeU369ZZjKNGWc8GbCK6Y0fV0BT055/2M5w3z4YW27WDgw/2OipJdPn5NrKQlGTXqHrHSi6ALUtt2BBq1bIegt8nav/+2yamR42yuZrSpeG88yzRtGwZzFUrWVmQlgbTptkkfdu2cNRR/v9ZSuKYORO6dLFh9XbtvI7Gc0ouAE8+CXfdZau3jjrK62iKZ/nybTU0P/4YvBqa/HyYPh3Gj7c5lhYt4IwzEv/fLf508822cCQtzeqnAkzJZfVqW3p82WVW3xLPfvvNljWPGmXbmSR6Dc3vv9u8ysqVcNJJcMEFllxFvPLHHza/162bJZoAU3K55hp78p8/P3EmyJ2zXky4hmblSqhWzZY2J0INTUaGraSbPRsOP9wSaI0aXkclYp55Bl57DcaNs9q1gAp2cpk1C044AQYOtPqWRFRYDU3dutabibcampwcmDzZFjSUKWMLGY4/Pr4TpSSe7GzrvTRqFP+jIfsguMnFOZuT+PtvewIuVcrriKIvXEMzerQ9+W/YYHU94RqaatW8jrBwztmDQDjms8+G5s1h/10dOizisY8+gttug6FDbcg2gIKbXN59157cP/nEVlgFTXa2TTq+846tkNuyZfsamgN2PArJI8uX28q4xYvtSfDCCxNn+FISl3PQqZNdV2PGQIkSXkcUc8FMLtnZtirsqKPsCSPodlVDEz6HxotJ8g0bbMx6xgyrU2nXzobzROLFTz/ZNfTAA/Z7wAQzufTvD/36WSW+bljbS0+3J62CNTTnnmtDZ+eeG/0amrw8+OIL61UlJdmWLY0bJ95KNwmGO++0LYjS0gK3kjF4yWXVKqvcvuYaq2+RXduxhqZ8+e1raCI9TxXesiU93Ybozjsv8LUCEudWr7Zh906d4I47vI4mpoKXX
Lp3t4K7+fNBp8btufnzt51Ds2CBzXuEa2j2dTPIv/6yLVt+/dVWr7Vvr91lJXG89JKtGhs3DmrW9DqamAlWcvn2WysmfPFF67lI8e2qhiZ8Ds0xx+z50uDwooIvvoDKlW2yvmFDLS2WxLJ5sw3v1q1riSYggpNcnLOhlqws2wMogKs3Ii689Uq4hiZ80Fr4HJpdzWfl59tE/bhxVrsS3rIlCMvBJZjS0qBPHyuubNrU62hiIjjJZdgw2+JlyhQ480yvo0k8W7faefThc2h2VUOzaJEtLV6xAk480bZs0THBkuicg8svtyLmDz4IxN53wUgumzZB/fpw8sk2QS3RFR7uGjnS6oi2bLHPvlYtOyenXj2bVwnQ+LMIv/xi85R33w1du3odTdQFI7ncfz88+qj9cGvX9jqaYElPh8cft97KsmX2xHbOOd7W0Ih45X//sweviRMTfkFR4hcPLFtmN7dbblFiiaXwli3PP2+1K08/bVX2AwdaT7JnT9tssnNnWymWne11xCLR99//2pxjAPYcS/yeS+fOVsT02286rTFWVq60nsrChXD00XZw147byaxYsa2GZvbs6NfQiPjF66/DgAE293LEEV5HEzWJnVy+/BJOP91+mN27ex1N4tu40WqIpk+HAw+0LVvq19/96+bP33YOTfjog0jV0Ij4zdat0KqVLXIZPDhhl94nbnLJz7dJ5FAIvvlGN6hoysuzRP7JJ/Z5n3eeLfsu7nJv52w/pnANzYoVe19DI+JnU6bA9ddbzd1ZZ3kdTVQkbnJ54w248kq76TVu7HU0iWvePNuyZfVq+5zPPz8yW7YUVkOTmrrtHBrtCSfxzDm7P61YYfVeCTgMnJjJZf16W+569tlW3yKR9/ffNhH/8882btyuHRx2WHTeq7AammOOsd7MxRf79xwakaL8/rvtSnHLLdCjh9fRRFxiJpe77rKjRufNC/Qxo1GxebMto/z8cyt+vPDC2B6bXFgNTePG1pvp0ME/59CI7IkHH7SJ/bS0hDunKPGSy8KFdk7LXXfBffd5HU3icM72Zvv4Y7uhN2/u/aqu9eu3nUMzebJ97eyzLdG0aaMaGvG/zEzb/ujcc+3clwSSeMmlQwf47jvrtZQp43U0iWHxYltavHy5nVnfurX/CsDS0y3GUaPgq6/sCOSC59AkJ3sdoUjh3noLHn7Y2m+DBl5HEzGJlVymTLEn6mHDrL5F9k1mpvUMZs60eY0OHWwLF78rrIamdWvr0Zx9dkJOnkocy8214eUqVWDo0IRZEZk4ySU3156qy5eHadMS5gfkia1b4bPP4NNPbS+w1q3hpJPi8zNdsGDbOTThGpr27S3R6IRL8Ysvv7RdKwYNssPFEkDiJJeXXrJ14zNm2G67UnzhOpMPPoB162wb/BYton+0cSwUVkNz2GHbamj+85/4TJ6SOK691h6Gxo+3Yd04lxjJJTPT6h5atbL6Fim+VausXmXBAjjySFtaXLWq11FFR36+FdaGa2jS062GJnwOTb16XkcoQbR4sR1B0bt3QhxmmBjJ5ZZb4NVXbf8wHY9bPJs22ZLer76yZNKuXUJNKu5Wbu72NTTr19vS6k6drFejpewSS489Bu+8Y0uTDzzQ62j2Sfwnl3nz7GbQr58tP5Y9k5dnCWXCBHuSP/dcOO20YJ/QuXnz9jU0mzerhkZia/16m3M54ww7JiSOxX9yueAC+PVXqxRPhLmBWJg/35Y9/vUXnHKKDSeWK+d1VP6yfr3V9IwaBZMm2dfCNTStW+v0TImekSOtRm/UKHtwjlPxnVwmTLC9rEaPtidLKVp6uk3Wz5ljZ9u0b6+tU/bEmjXbami+/FI1NBJdeXl2bSYnw4gRcbvQJH6Ty9attr/UwQdbdXac/gBiYssWW1Y8daot1b7wQq2O2lsrV26roZk1SzU0Eh0zZkC3bvDkkzY6E4fiN7kMGmQT+TNnWpKRnTkH339vhZDZ2dCsmd0A99vP68gSw4IF286h+
e03K4ILn0OjGhrZV7162fL5CRPisnccn8klPd2WHl9yidW3yM6WLrVltsuWwbHH2l5blSt7HVVics6GGsM1NMuXq4ZG9t3y5Tbsf/XVlmjiTHwmlxtvhLfftonpOF+uF3Hr1tlE9Hff2Q2uQwebX5HYyM+3IY2RI7fV0BxxxLZzaFRDI8Xx1FMwZIj1XuKszCL+ksvcufYk+PjjcPPNXkfjH1u32jb4EyfauP8FF9hJnBqa8c6uamjC59CohkZ2Z9MmW5p88skwYIDX0RRLfCUX52w7kuXLbSxScwf2mcydawd3rV0Lp59ujTEOx2gTWmE1NKeeuq2GJlF3Q5B99957VsM3bJjtnxgn4iu5fPCBVZB/+GHcrqCIqD//tCWy8+dbVX3btnDQQV5HJbuzYcP259A4Z2fjhM+hUQ2NFJSfb23DOZvTi5PRiPhJLlu2wNFHQ5069uQX5AnSrKxtW7akpGzbsiXIn0m8KlhD89VX1htv2dKGzs47Tz1QMT/8AJdeaue+tG/vdTR7JH6SyxNPQN++8OOPtrFiEOXnw9df266peXlWwHf66cHesiWR7FhDU67cthqaZs1UQxN0t9xii0XS0qBsWa+j2a34SC5//WVLj7t3t/qWIFqwwJ5w//zTJvdatbICPklMhdXQtG9vG2o2aeJ1dOKFP/6w3uzll8fFYqaYJ5fVv/1G1tKlxXuRc/ZrL8caDzjxRMr5qMZjw+rVbM3IKN6Lvv/eeiipqXu1D1jZGjXYX8c++8bq+fPJXrZsz745P996qnl5dg3s5UKWlOOP99V1EHRrFi8me+XK4r0oM9Pm7KpV26th8MoNG1I2RnN6JWPyLgUs+fhj6l9ySWze7Ikn4PjjSV+82FcXVcbPP3PQf/5TvBe1aLF3wyJTpkD16mzKyFBy8ZGln3xCveLuhxd+DizuTeWpp+C441izZImvroOgWzl1KrXPO694L9rbB+3XXoOjj2btihWJm1ycc1SIxfr+rVvhxRdh8GCK2UeIidKxuMjD2+p37kxW9N9NisE5R4VYbBq6dSu88gq8/DJro/9uUgwOKBeLwsitW+2MmP79yYz+u/0rPta07Y2+fW0L/q5dvY7EO+PGWW8njtbGS4T97392HXTu7HUk4pWnnrKh1DZtYvq2Me+5xMSWLfD00zB8eNysCY+43Fz47DO47LLgfgZBt2ULPPssDB2qNhBUOTm2fcyAATFvA4nZ4m66ySa9L77Y60i88957du5Iced2JHHcdptdB3FSFyFR8PDDUKaMlS3EWOL1XLKz4eWXrYo/qEWFOTlWD9OzZ3A/g6DLzrZJ3HffVRsIqs2brWbqhRc8aQOJ13O57jrbWv78872OxDujR9vTSlCLTcW2aK9c2eoiJJjuvx8qVIAzz/Tk7ROr57Jxo40vT5oU3Ke1LVusJub664P7GQTdxo22yWHQt0kKsk2bbDPbN9/0rA0kVs+lRw873+Wss7yOxDsjRtg4+xFHeB2JeOXqq+06OOMMryMRr/Tta/sOnnyyZyEkTs9l/XobX/7yy+A+rW3ebHuv9e4d3M8g6Navt8UcU6eqDQTVxo22/9iIEZ62gcTouThnS24PPdTOyAgi5+x0zgoVoGZNr6MRLzgHV1xh14GHT6ziIefg1lut5+rxStHE6LlkZtr5GN9+G9yntexs+PlnW4Yd1M8g6DIzbcfsIPfeg279eqtv88EqwfjvuThn5xxUrx7cSnTn4K23oFIlHZ0bVM5Bt262oeFxx3kdjXjBOduW/5BD4KijvI4mAXoumZk2vvjDD55nas9kZ8O8edYdDupnEHSZmfDpp/DNN2oDQbVunfVa33vPF20gvnsuztmeSTVqwDHHeB2NN5yz5deVK9tYuwSPc3bGR/Xq0KiR19GIF8K9lkMPhfr1vY4GiPeey7p1MHFisHstmzfbYVLqtQTXunVW26VeS3Bt2GA7oPuk1wLx3nO5/HI47LDg9lrAVohVrKheS5D16GE/f
/Vaguv22+Ggg3zTa4F47rls3Agffxzsp7UtW+CXX+C//w3uZxB0mzZZJf4XX6gNBNWmTbZCbNQoX7WB+O25XHcdHHAAnHii15F4Z/RoKFvW5pwkmHr3tkrsE07wOhLxyv3325xrw4ZeR7Kd+Oy55OTYWS1pab7K1DGVmwszZ8K11wb3Mwi6nByrwg7yDuBBl5NjIziDB/uuDcRnz+Wee2zX32bNvI7EO+PG2elydet6HYl45b77dB0E3dNP20mjPtyZJP56Lnl59oG+8YbvMnXM5OfD559Dly7B/QyCLi/PTpl89VW1gaDKy7NTJh97zJdtIP56Lk89BSVKWFV+UH32mR1ZeuyxXkciXnnmGbsOOnb0OhLxyptvWhto1crrSAoVXz2X3FwbEnviieCeCZ6XZ/tHtWkT3M8g6HJzbUjskUfUBoIqN9dGcO64w7dtwJ9RFcY5SywlS8INN3gdjTecs8SSlARNm3odjXjBOUssJUvaikkJHudg4EBrA126eB3NLnnTc5k4Ef76y/47FLKbZVISlCoFyclWFFi1qm0bXa6c/dm331qPZcwY6wrGu3nzrKoW7DMI/ypRwj6H0qXt316+vE3ch0KwbBlMmWLbqvv0aUWKYdIkWL3a/rvgdVCy5Lbr4IAD7FoIXwfffWdPrO+8ozaQCL78Etassf8Ot4FQyNpA6dJ2/VeubMvNy5SxP//pJ1sd9uyzvm4D3iSXIUNgxgzLwGC/5+fbr61b7Vd+vp1NUru2fbgTJtiurxde6EnIEffdd7B06bbPALb/HPLy7P9Ll7aGlZxsCemkk3y3nl320ltvWTso7DrIzd12HZQvD7Vq2XUwcSJ07QqtW3sbu0TGe+9ZsijYBsLtINwGnLOHi2rV7IFj2jRo29b3qwRjnlxCwJann7YPLqzgB7p1K2Rlwd9/255Zc+bYjq9PPmmT+Js2xTrkqMht29b+vWEFG1denq1f37TJnmxXrbLPpE0bO1YgJ8eTmCVyQsCWJ54o/Dpwbtt1kJ4O8+fD3Ll2HTzyCHTqlDDXQZCFgJy+fe16D9sxuWzeDBkZsHix3Q83bLB5lgsusPbhYyHnCj46R9+yb74ha+nSWL4lB51xBpUPPjim71mUtUuWkLN2bUzfs3ydOpSpUCGm7ym7tnzGDLKWL4/pex7YtKmvroOgWzV7NptXrYrpe6aceCIVq1aNyXvFPLmIiEji8+9sUEHhrmKQ5eVZl7hgF1qCJTwfI8GVl2eHA8aB+EgugwbZKqonnwxukvnjD/jf/+x3CaZOnWxr/c8+8zoS8cKqVbagqUULm4vxufhILr17w91325kFN964/SSoSFC89pqtFmzd2s7xkeD4+We45BJLKkOH2ipSn4uP5JKUBA8+CK+8Yr/atbPzXESCpEIFq/O67DK46iro3z+4Pfkg+ewzK5Y89FAYOdKWpceB+EguYT172vbSX3wBZ56pISIJnlKl4Pnn4YEH4KGH4OqrtTQ9kQ0fDtdfD6edZj2WlBSvI9pj8ZVcAFq2tOTy11+2zfTPP3sdkUhshUJw2222ceGoUTYOv26d11FJJOXnw+OP20PEZZfZdi9xMBRWUPwlF4BjjoHp06FSJWjSBCZP9joikdjr2NHO9Zk9G84+27YHkvi3eTPcdJMdK3L33XDXXXG55VV8JhewrRC++MJ6L+edZ1vKiARN06Y2Jp+VBaefDrNmeR2R7IuMDOje3c5reu4567XEqfhNLmATnB9+aD+MK66Afv00wSnBU6+ePWhVrw7Nm9vO2RJ/li615ebLl9u+cz7fO2x34ju5gE1wvvyyrZzp18+SjCY4JWiqVoW0NLshXXyxXRMSP374wYY5S5a0FWEJsDlt/CcXsAnOu+6CYcNsK/Lzz7dN/kSCpEwZGDHCVhf99792Taii3/8++cRGX1JT7f5VrZrXEUVEYiSXsM6dbUvyWbNsLDrGG2SKeK5ECTv36MknbWeLrl3jZruQwHHOCmNvuslWw
Q4ebEP9CSKxkgvYpOZXX9kFdeqpMHOm1xGJxN4NN9jwyoQJtuAlPd3riKSgvDwbxn/ySTtR9PHH7VDABJJ4yQWgfn1bqlyjBpxxhhVeigRN69bWk1+0yIqOf//d64gEbGXf9dfD6NFWCNunjw3tJ5jETC5gRyRPmWKbvLVtCy+84HVEIrF3wgm2kqxECXvQmj7d64iCbfVqG6r8/nvbyuqii7yOKGoSN7mATXCOHm0bX954o218qQlOCZqaNa0W5qijbIhs7FivIwqmBQtsRVhGhm3r0qSJ1xFFVWInF7Antqeesu0TBgywdeSa4JSgqVwZPvrIevFdusDTT6smLJa+/toWHFWsaHNh9ep5HVHUJX5yCevd257Yxo2zQrO///Y6IpHY2n9/21Lkzjuhb18b69fxFdH33nu2i/Wxx1q5xEEHeR1RTAQnuYBt8Dd1KixcaF3SBQu8jkgktkIhuO8+ePFFeP11K7jU8RXR4Rw8+6zVG3XoAC+9BGXLeh1VzAQruYAdtjR9ulXCNm5sy5ZFgqZ7d3j/fWv/zZvr+IpI27rVeojPPw+33GLLjuNw88l9EbzkAnbYzldfwdFH24U1apTXEYnEXvPmtqP4339bfZiOr4iM9evt7Knx422e96qrEnKp8e4EM7mATXBOmGBLATt1siImTXBK0DRsCNOmQZUqtm3/1KleRxTfVq60ift582x+q1UrryPyTHCTC9gE59ChcM891oW94QZNcErwHHooTJoEp5wCbdrYjrxSfHPn2jn3OTm2R9gJJ3gdkaeCnVzAuqsPPACvvmq/2rbVBKcET/ny8O67cPnldnTygw+qJ18cU6dacWS1apZY4uSc+2hScgm78kpbpjxtmlUyr1rldUQisVWqlB1Q9eCD8PDDNm+g4yt2b9gwG/U47TQ7tDCOzrmPJiWXglq0sOTy99+26eXcuV5HJBJboRDceqsNF7/7rg2TrVvndVT+lJ8Pjz1mybhbt7g85z6alFx21KiRLVWuUsW27Z80yeuIRGLv4otttdOPP8JZZ+n4ih1t3mxn5gwZAv/7H9xxR+CWGu+OkkthDjvMNvtr3NgOHnvzTa8jEom9Jk3sLPfNm22p8g8/eB2RP2RkWE/liy9sGLFLF68j8iUll10pXx4+/BB69LBf99+vCU4Jnrp1LcEcfjicc47NSwbZkiW2+eSKFfD227Z8Wwql5FKUkiVtm4yHH7YVZd27a4JTgqdqVasJa97cltq+9JLXEXlj5kxLLPvtZ4XXRx/tdUS+puSyO6GQ1cAMH267mZ53Hqxd63VUIrFVpoxdAzfcYMfy3nlnsI6vGD/eHi7r1YMRI2zoXIqk5LKnOnWCTz+F2bNton/JEq8jEomtEiVsJ4sBA+CZZ2yuIdGPr3DO6t9uvtkeLF97LaHOuY8mJZfiOO00O5dhyxZbqvz9915HJBJ7119vvfi0NDj3XEhP9zqi6MjLs7nWAQPs3/zYYwl3zn00KbkUV716lmBq1rRzyT/6yOuIRGKvdWvryS9ebEXHiXZ8xaZNcO21VuvTv7+dBxXAzSf3hZLL3jjwQNtNtmVLaNfOttUWCZrjj7fluPvtZwnm66+9jigy/vrLtnKZNcuGxDp08DqiuKTksrfKlLEVI336QK9eVtUcpAlOEbAe/NSptnLq/PPtST+ezZ9vK8LWrrUFDI0bex1R3FJy2RclSth47KBBtvVDx46JP8EpsqNKlWx4uH17uOwyuybisSYsfM595cr24Fi3rtcRxTUll0jo1QvGjrXlis2a2d5kIkGy//4weLAtUb7nHpujiKfjK8aMsUO9jj/eiiMPPNDriOKekkuktGkDn31mE5ynnmrda5EgCYXgvvusyPLNN+0gvg0bvI6qaM7ZyMPdd1u8L74YqHPuo0nJJZJOPNE2vdx/fxur/fJLryMSib1u3eD9922YqXlz+OMPryMqXE4O3H67JZRbb7Vlx9p8MmKUXCKtZk1LKo0a2YU1cqTXE
YnEXrNmMGUKrFlj9WF+O74ifM59Who89ZT9t5YaR5SSSzRUrgyffGL7MHXubMVX8TjBKbIvjj7azkdKSbENHidP9jois2KF7bjx2292zv3553sdUUJScomW/fffdtbDXXdZhW88TXCKRMIhh9iZSI0b2xHiQ4Z4G8+cObaqMzfXRhWOP97beBKYkks0hULQr5/tRzR4sE36+32CUyTSype3+pfu3a3qvV8/b3rykyfbUunwOfc1a8Y+hgBRcomFHj3sHIyvv7ZK5pUrvY5IJLZKlrTNLvv3h0cfhSuvtD36YuXtt+HGG+3QsyFD7KRZiSoll1g55xwbf05Pt6XKc+Z4HZFIbIVCtrvwW29ZXUmbNpCZGd33zMuDRx6Bhx6ynpPOuY8ZJZdYatgQvvkGDjjAtu3/9FOvIxKJvYsusgUvc+bAWWfB0qXReZ/wOfdvvbXtnPsk3fJiRZ90rB16qB0b27QptGoFr7/udUQisde4sRUdb9liS5Vnzozs379mjdXbTJtmG8vqnPuYU3LxQvny8MEHNu7csyfce6+WKkvw1K1ruyrXqgUtWsDHH0fm7128ePtz7s86KzJ/rxSLkotXSpaEF16wyc2HHoLLL4/tBKeIHxxwAEyYYHOSl1xi18S++P57Syz7769z7j2m5OKlUMi2nxgxAkaPtmNU167d6ducc2RkZLB+/XoyMjJw6uVIIklOtu3te/WCW26xayIvb6dvc86Rnp7OkiVLSE9P3/k6GDcOrrgCGjTQOfd+4MQfpk1zLiXFuQYNnFu0yDnn3Nq1a93AgQNdnTp13GHgngZ3GLg6deq4gQMHurVr13obs0ikvfCCc2XKOHfJJc5t2uSc2/46AP799e91kJHh3MsvO1evnnO33+5cTo7H/whxzrmQc3oM9o35820rio0bmd63L+f07UtWVhYAhzrHrcCTwKp/9kAqU6YMY8aMoWXLlt7FLBJp48bZMPFRRzHlhhtoc+WV/14HBW9XoVCIEs7xYKlS3HjQQZS7/XarZdEeYb6gYTE/qVsXpk9nbZUqNOrTh3OysnDO7dT9D38tOzubVq1akZaW5lHAIlHQqhV8+inZv/3GwZ06UW0X10Gyc7wItN66lW6rVpFWt64Si48oufhMZqlSpC5dyifAu85xYxHfm5+fj3OODh06kBntYjSRGMqsXZvjNmxgCzDVOXY8bPggYBjwH+Aq4H3QdeAzSi4+M2TIEDKys7kEeAoYBAxg1z+o/Px8srKyGDp0aMxiFIm2IUOG8NvmzZwF/ASMBy7658/qAiOBikBnYDq6DvxIcy4+4pwjNTWVRYsW/TsEcD2WYCYCi4BHgR13JguFQtSuXZsFCxYQ0rCAxLkdr4NSwIvApcBg4FRgKXAtUPBAcV0H/qLk4iPp6elUrVp1p69fALyDJZfz2Dm5FHx9SkpK9AIUiYFdXQePA7cAU4A2QHYRr9d14D0Ni/nIxo0bC/36x9iQwDe7ef0GbecvCWBX18HtwG3A+ew6sYCuA78o6XUAsk25cuV2+WeTgTnA6iJeX758+UiHJBJzRV0HT+3B63Ud+IN6Lj6SkpJCnTp1Ch0v3ooNh20t5HWhUIg6depQRWdUSAIo6jooiq4Df1Fy8ZFQKESvXr326rW9e/fWJKYkBF0HiUET+j6TmZlJtWrVyM7OJj8/f7ffn5SURHJyMitWrKBSpUrRD1AkBnQdxD/1XHymUqVKjBkzhlAoRNJuDjZKSkoiFAoxduxYXVCSUHQdxD8lFx9q2bIl48aNIzk5mVAotFM3P/y15ORkxo8fT4sWLTyKVCR6dB3ENyUXn2rZsiUrVqxg4MCB1K5de7s/q127NgMHDmTlypW6oCSh6TqIX5pziQPun/NcNmzYQPny5alSpYomLSVwdB3EFyUXERGJOA2LiYhIxCm5iIhIxCm5iIhIxCm5iIhIxCm5iIhIxCm5iIhIxCm5iIhIxCm5iIhIxCm5iIhIxCm5iIhIxCm5iIhIx
Cm5iIhIxCm5iIhIxCm5iIhIxP0fP6uDurzeT9kAAAAASUVORK5CYII=" + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "execution_count": 5 + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-09T06:27:41.156097Z", + "start_time": "2024-05-09T06:27:17.055133Z" + } + }, + "cell_type": "code", + "source": "losses = model.train(dataset=dataset, opt='LBFGS', steps=20, device=device)", + "id": "24cde08da2ea047a", + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "train loss: 1.27e-05 | test loss: 1.27e-05 | reg: 2.38e+01 : 100%|██| 20/20 [00:24<00:00, 1.20s/it]\n" + ] + } + ], + "execution_count": 6 + }, + { + "metadata": {}, + "cell_type": "markdown", + "source": "After some simple training, we finally have (quite) machine precision accuracy.", + "id": "cc7190123c5f557e" + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-09T06:27:41.423498Z", + "start_time": "2024-05-09T06:27:41.157083Z" + } + }, + "cell_type": "code", + "source": [ + "plt.plot(losses['train_loss'], label='train loss')\n", + "plt.plot(losses['test_loss'], label='test loss')\n", + "plt.legend()\n", + "plt.yscale('log')\n", + "plt.show()" + ], + "id": "21bc841e6c2b5606", + "outputs": [ + { + "data": { + "text/plain": [ + "
" + ], + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAi8AAAGdCAYAAADaPpOnAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAABHgElEQVR4nO3deXhU9aH/8ffJvk8SQhICCQFkC0uiLBFbq2gqxl4E3K1XAyq/1husNmqF9ioubbkVaxGZqrUX0doqtRX0ilYxglREQDC4BNkMIUAWAmQlZJk5vz8CUYRAhkxyZiaf1/PM85iZM+d8DocxH858v+cYpmmaiIiIiHgJP6sDiIiIiLhC5UVERES8isqLiIiIeBWVFxEREfEqKi8iIiLiVVReRERExKuovIiIiIhXUXkRERERrxJgdQB3czqd7N+/n8jISAzDsDqOiIiIdIBpmtTW1pKUlISf3+nPrfhcedm/fz/JyclWxxAREZGzUFJSQr9+/U67jM+Vl8jISKB156OioixOIyIiIh1RU1NDcnJy2+/x0/G58nL8q6KoqCiVFxERES/TkSEfGrArIiIiXsVnyovdbictLY1x48ZZHUVERES6kGGapml1CHeqqanBZrNRXV2tr41ERES8hCu/v31uzIuIiPg20zRpaWnB4XBYHUVc4O/vT0BAgFsuY6LyIiIiXqOpqYnS0lKOHDlidRQ5C2FhYfTp04egoKBOrUflRUREvILT6aSoqAh/f3+SkpIICgrSxUi9hGmaNDU1ceDAAYqKihg8ePAZL0R3OiovIiLiFZqamnA6nSQnJxMWFmZ1HHFRaGgogYGBFBcX09TUREhIyFmvy2dmG4mISM/QmX+xi7Xcdex85m+ApkqLiIj0DD5TXnJzcyksLGTjxo1WRxEREZEu5DPlRUREpCdITU1lwYIFlq/DShqwKyIi0oUuvvhiMjIy3FYWNm7cSHh4uFvW5a105qWDzOYGdi+8gkMbXgFHi9VxRETEhxy/8F5H9O7du8fPtlJ56aBt7/6Z1ENriX3rJxz8n5FUvLcQmuqtjiUi0mOZpsmRphZLHh29s8706dP54IMPePLJJzEMA8Mw2L17N6tXr8YwDN5++23GjBlDcHAwH374Ibt27WLKlCkkJCQQERHBuHHjeO+9905Y53e/8jEMgz//+c9MmzaNsLAwBg8ezBtvvOHSn+WePXuYMmUKERERREVFcd1111FeXt72+pYtW5g4cSKRkZFERUUxZswYPvnkEwCKi4uZPHkyMTExhIeHM2LECN566y2Xtu8qfW3UQc3nXM4/t25lYu3r9GouhQ8foO6jx6gdlUOfH94NEb2tjigi0qM0NDtIe/AdS7Zd+MgkwoLO/Cv0ySefZPv27YwcOZJHHnkEaD1zsnv3bgBmz57N448/zsCBA4mJiaGkpIQrrriC3/zmNwQHB/Piiy8yefJktm3bRkpKSrvbefjhh3nssceYP38+Tz31FDfddBPFxcXExsaeMaPT6WwrLh988AEtLS3k5uZy/fXXs3r1agBuuukmzj33XJ5++mn8/f0pKCggMDAQaJ0w09TUxJo1awgPD6ewsJCIiIgzbrczVF46aNTQwYwa+ke2FD3AOyue5oIDL9PfWUHElkU0bXmWA4OuIin7Poy4wVZHFRERD2Gz2QgKCiIsLIzExMSTXn/kkUf44Q9/2PZzbGws6enpbT8/+uijLFu2jDfeeINZs2a1u53p06dz4403AvDb3/6WhQsXsmHDBi6//PIzZszPz+fzzz+nqKiI5ORkAF588UVGjBjBxo0bGTduHHv27OG+++5j2LBhAAwe/M3vuj179nD11VczatQoAAYOHHjGbXaWyouL0gf0IX3WI+wqv4eX3lzCqOIXSPfbRd9dS3Eu+juliZcQf/l9BKROsDqqiIhPCw30p/CRSZZt2x3Gjh17ws91dXU89NBDrFixgtLSUlpaWmhoaGDPn
j2nXc/o0aPb/js8PJyoqCgqKio6lGHr1q0kJye3FReAtLQ0oqOj2bp1K+PGjSMvL4/bb7+dv/zlL2RlZXHttdcyaNAgAH72s59xxx138O6775KVlcXVV199Qp6u4DNjXrr7InWDEmz85213kZC3lr8Me5rV5nn4YZJUlk/Akssp/8MPaPz8DXA6uyWPiEhPYxgGYUEBljzcdU+l784auvfee1m2bBm//e1v+fe//01BQQGjRo2iqanptOs5/hXOt/9snG78/fPQQw/x5Zdf8qMf/Yj333+ftLQ0li1bBsDtt9/O119/zc0338znn3/O2LFjeeqpp9y27VPxmfJi1UXqEqNDufmGH3Pu/e/yt7Gvsty4lEYzgITqLQT/82YOP5ZO/drnoLmhW3OJiIhnCAoKwuFwdGjZtWvXMn36dKZNm8aoUaNITExsGx/TVYYPH05JSQklJSVtzxUWFlJVVUVaWlrbc0OGDOHnP/857777LldddRXPP/9822vJycn89Kc/5bXXXuOee+7hueee69LMPlNerGYLC+TH/3EZl//yVd6c+A5/CbiaajOMmKN7CF95L/W/G0bNO7+BI4esjioiIt0oNTWV9evXs3v3biorK097RmTw4MG89tprFBQUsGXLFn784x+79QzKqWRlZTFq1ChuuukmNm/ezIYNG7jlllu46KKLGDt2LA0NDcyaNYvVq1dTXFzM2rVr2bhxI8OHDwfg7rvv5p133qGoqIjNmzezatWqtte6isqLm4UE+nP1xWO5cc6fWTt5Dc+GzmSvGUd4SxVR6x6jcf5wqv5xFxwqsjqqiIh0g3vvvRd/f3/S0tLo3bv3acevPPHEE8TExHDBBRcwefJkJk2axHnnndel+QzD4PXXXycmJoYf/OAHZGVlMXDgQJYuXQqAv78/Bw8e5JZbbmHIkCFcd911ZGdn8/DDDwPgcDjIzc1l+PDhXH755QwZMoQ//vGPXZvZ7OhkdS9RU1ODzWajurqaqKgoq+NgmiYfbiul4F8vMPHQK4z02w2AEz+qUi8n9of3Qt8x1oYUEfECR48epaioiAEDBhASEmJ1HDkLpzuGrvz+1pmXLmYYBhcOS+LOu+fQcvtq/tDncT5wjsYPJ7G734LnLuGw/YeY2/6lwb0iIiIdoPLSjTJSYvj5T2bS7863+cM5z/Oa80KaTX9iDmzAePl6av4wlpaNz0PTEaujioiIeCyVFwsM6h3Bz//zKr537z949txlLDYnU2uGElW7i4AVd3P0sWE0vP0AVO+1OqqIiIjHUXmxUEJUCLOmXsTVs5/nle+/zR/8cihx9iakpZrQ9Qtx/GE0tX/5T9jzMfjW0CQREZGzpivsegBbaCAzf3gujRNHs2LLPbyweimXVi1jgn8hkbv+D3b9HzUxI4m4aBZ+I6+CgGCrI4uIiFhGZ148SHCAP1eN6c+v8u4j4LYV/Dr5Of7uuJhGM5Cow1/gt/ynNDw2nKb3fgt1Hbvss4iIiK/xyPIybdo0YmJiuOaaa6yOYgnDMBiXGst/33YdE37+Mk+f9wYLuYEyM4bQpoMEffg7Wn6fRv0rt8P+AqvjioiIdCuPLC933XUXL774otUxPEJybBh3T7mA2+bYWXnZSh4NvofNznMIMJsJ/+pV+NNF1PzxUswvl4Ojxeq4IiIiXc4jy8vFF19MZGSk1TE8SnhwADd/bzC/uv8Bqn/8NnPjF7LccQHNpj9RFZ9gvJpD/eMjaVnzB92CQEREgNbfp3fffbfVMdzO7eVlzZo1TJ48maSkJAzDYPny5SctY7fbSU1NJSQkhMzMTDZs2ODuGD7Lz89g4rB4Hv6vHNJm/Z35af/kj85pVJpRhDeUEvD+QzQ/PoyG12ZBxVar44qI9HhdUSCmT5/O1KlT3bpOb+L28lJfX096ejp2u/2Ury9dupS8vDzmzp3L5s2bSU9PZ9KkSVRUnN0A1MbGRmpqak549BRDEiL55fUTufH+P/HPH/yLR/1zK
XT2J9DZSOhnf4E/nk/dcz8CXb1XRER8iNvLS3Z2Nr/+9a+ZNm3aKV9/4oknmDlzJjNmzCAtLY1nnnmGsLAwFi9efFbbmzdvHjabre2RnJzcmfheKSY8iJ9cOoLZv/w1O696m19F/463HeNwmAYR+z6El6+n/okMnOv+CA2HrY4rItJjTJ8+nQ8++IAnn3wSwzAwDIPdu3cD8MUXX5CdnU1ERAQJCQncfPPNVFZWtr33H//4B6NGjSI0NJRevXqRlZVFfX09Dz30EC+88AKvv/562zpXr17doTyHDx/mlltuISYmhrCwMLKzs9mxY0fb68XFxUyePJmYmBjCw8MZMWIEb731Vtt7b7rpJnr37k1oaCiDBw/m+eefd9uflSu69TovTU1NbNq0iTlz5rQ95+fnR1ZWFuvWrTurdc6ZM4e8vLy2n2tqanpkgQEI9Pfjyoy+XJnxUzbvuZ5HVq+j346/cp3f+9jqiuGdObSsfIjmYVMIPf82SM4Ew7A6tojI2TFNaLbodiqBYR36/+eTTz7J9u3bGTlyJI888ggAvXv3pqqqiksuuYTbb7+dP/zhDzQ0NHD//fdz3XXX8f7771NaWsqNN97IY489xrRp06itreXf//43pmly7733snXrVmpqatrKQ2xsbIdiT58+nR07dvDGG28QFRXF/fffzxVXXEFhYSGBgYHk5ubS1NTEmjVrCA8Pp7CwkIiICAAeeOABCgsLefvtt4mLi2Pnzp00NDSc5R9g53RreamsrMThcJCQkHDC8wkJCXz11VdtP2dlZbFlyxbq6+vp168fr776KhMmTDjlOoODgwkO1kXbvuu8lBjOu+UKSqsnsvjDrzjyyUtc5XiH4ZQQUPh3KPw79bZzCD3/NvzSb4Cwjv3FFxHxGM1H4LdJ1mz7l/shKPyMi9lsNoKCgggLCyMxMbHt+UWLFnHuuefy29/+tu25xYsXk5yczPbt26mrq6OlpYWrrrqK/v37AzBq1Ki2ZUNDQ2lsbDxhnWdyvLSsXbuWCy64AIC//vWvJCcns3z5cq699lr27NnD1Vdf3batgQMHtr1/z549nHvuuYwdOxaA1NTUDm/b3TzyCrvvvfeey++x2+3Y7XYcDkcXJPJefWyh/PxH59Lww9Gs+OxuXvhoJedWvM5k/3WEV+9sPRvz7lxahk4m5PzboP8FOhsjItLFtmzZwqpVq9rOanzbrl27uOyyy7j00ksZNWoUkyZN4rLLLuOaa64hJibmrLe5detWAgICyMzMbHuuV69eDB06lK1bWyd4/OxnP+OOO+7g3XffJSsri6uvvprRo0cDcMcdd3D11VezefNmLrvsMqZOndpWgrpbt5aXuLg4/P39KS8vP+H58vJyl9rjqeTm5pKbm0tNTQ02m61T6/JFoUH+XDM2mWvG3spXZdfw5LqttGz5O1c5VzLCr5iAr/4JX/2TI1EDCcm8Fb+MH0N4L6tji4i0LzCs9QyIVdvuhLq6OiZPnszvfve7k17r06cP/v7+rFy5ko8++oh3332Xp556il/96lesX7+eAQMGdGrbp3P77bczadIkVqxYwbvvvsu8efP4/e9/z5133kl2djbFxcW89dZbrFy5kksvvZTc3Fwef/zxLsvTnm69zktQUBBjxowhPz+/7Tmn00l+fn67XwuJ+w1LjGLOtEzu/eVjbL1yBb+IfZK/tUyk3gwmrOZr/Fb+N47Hh3L05RwoWqObQoqIZzKM1q9urHi4cIY6KCjopG8FzjvvPL788ktSU1M555xzTniEh4cf2z2D733vezz88MN8+umnBAUFsWzZsnbXeSbDhw+npaWF9evXtz138OBBtm3bRlpaWttzycnJ/PSnP+W1117jnnvu4bnnnmt7rXfv3uTk5PDSSy+xYMEC/vSnP7mUwV3cfualrq6OnTt3tv1cVFREQUEBsbGxpKSkkJeXR05ODmPHjmX8+PEsWLCA+vp6ZsyY0ant6msj131zNmY628qu5sl1W
2ne8irTnCsZ7VeE/7blsG05RyJTvzkbE9Hb6tgiIl4lNTWV9evXs3v3biIiIoiNjSU3N5fnnnuOG2+8kV/84hfExsayc+dOXnnlFf785z/zySefkJ+fz2WXXUZ8fDzr16/nwIEDDB8+vG2d77zzDtu2baNXr17YbDYCAwNPm2Pw4MFMmTKFmTNn8uyzzxIZGcns2bPp27cvU6ZMAeDuu+8mOzubIUOGcPjwYVatWtW2zQcffJAxY8YwYsQIGhsbefPNN9te63amm61atcoETnrk5OS0LfPUU0+ZKSkpZlBQkDl+/Hjz448/dtv2q6urTcCsrq522zp7kiONLeY/Pikx731yifnSr6aZtQ/Gm+bcKNOcG2W2PBRrNvz1P01z1yrTdDisjioiPUxDQ4NZWFhoNjQ0WB3FJdu2bTPPP/98MzQ01ATMoqIi0zRNc/v27ea0adPM6OhoMzQ01Bw2bJh59913m06n0ywsLDQnTZpk9u7d2wwODjaHDBliPvXUU23rrKioMH/4wx+aERERJmCuWrXqlNu+6KKLzLvuuqvt50OHDpk333yzabPZzNDQUHPSpEnm9u3b216fNWuWOWjQIDM4ONjs3bu3efPNN5uVlZWmaZrmo48+ag4fPtwMDQ01Y2NjzSlTpphff/21S38WpzuGrvz+NkzTt74TOD7mpbq6mqioKKvjeLVtZbX8c91XNG75B9OcK8nw29X2WkNEMsHjb8Xv3JsgMuE0axERcY+jR49SVFTEgAEDCAkJsTqOnIXTHUNXfn/7THn59tdG27dvV3lxo6PNDlZ8Vsq6tasYXfE6U/0/JMpondvvMPxpPiebkMxbYeBE8PPI22WJiA9QefF+Ki/t0JmXrrW9vJZXP9rG0WNnY87z+2Z805HooYT9+EWIH2ZhQhHxVSov3s9d5UX/TBaXDEmI5FfTxvKrX/2G3VNfJy/2jzzfMokaM4ywqm00P/0Dmjf9xeqYIiLiw3ymvNjtdtLS0hg3bpzVUXqEkEB/rjqvH0/87Ca+d+f/smD43/i3YySBZiOB/zeLmr/dBo11VscUEREf5DPlJTc3l8LCQjZu3Gh1lB5nSEIkD94wkaYbXsVu3IDDNIja/g+qF34fs+wLq+OJiIiP8ZnyIta7dEQS1/78Sf4nfj5lZgy2+iKan51I/br/1YXuRMRtfGyoZo/irmOn8iJuFR8Vwpw7bue9H/yDNc50gswmwt/Jo+KFW6Cx1up4IuLFjl+E7cgRi+4kLZ12/Nid6YJ6Z6LZRtJlvtx3mHUvPsj0oy8RYDg5GJJC5M0vEdQ33epoIuKlSktLqaqqIj4+nrCwMAzdSNYrmKbJkSNHqKioIDo6mj59+py0TI+cKq3rvHimhiYHf1n6Mv+x8wGSjEM0EUjVDx4hfuIdunu1iLjMNE3KysqoqqqyOoqchejoaBITE09ZOntkeTlOZ14806rNW/F/47/4AZsBKE6cRMr05zBCdAdwEXGdw+GgubnZ6hjigsDAQPz9/dt9XeVF5cUjlVcfIX/xg1xbtZhAw0FFQBIhP36RqIGa3i4i0tPpInXikRJsYdxw13xWjP1f9plxxLfsJ+TFy/l6xROajSQiIh2m8iLdys/PYOrkadTc8j5rA8YTRAsDNz7MVwun0VR32Op4IiLiBXymvOgKu95l+KD+nHvfW7zZ52c0mf4MO7yKQ09ksu+LD62OJiIiHk5jXsRyH615l/7v/xd9OUCT6c8XI+7l3GtmY+gO1SIiPYbGvIhXueAHlxFwx1o2hHyPIMPBeYW/Y8vv/4PqQxVWRxMREQ+k8iIeISEhgbH3vcm/B/+CJjOAjPq1HFl4AZ9//J7V0URExMOovIjH8PP348KbfkXxtNfZZyTShwMMe/s6Vi3+b5pbWqyOJyIiHkLlRTzO4IzvE/3zdWyJuoRAw8HEPU9R8LvLKdlbYnU0ERHxAD5TXjTbyLeER8WS/vPX+OLch2gkkHHNGzH/nMXnXxRYHU1ER
Cym2Ubi8Sq2b8R85cckOCs4YNr46tLnufAHl1odS0RE3EizjcSnxA8ZR1Tu+5QEDaK3UU1G/k2sWP43fKx3i4hIB6m8iFcI7ZVM0t3vUxRxHpFGAz/8dBavvvAkDqcKjIhIT6PyIl7DPyyaAXf/i6/jf0iQ4eC63XN5ddEcjjRpJpKISE+i8iLeJSCYgT/9O7sH/ScANxx6mrf/8P+oqDlicTAREekuKi/iffz8SP3PRewbcx8AVzf8k00LbmRn6SGLg4mISHdQeRHvZBj0nfzfVF76Bxz4ke1cTfmzU1n/1R6rk4mISBdTeRGvFnfhrRy5+iWOEsz32ELI36by1sefWx1LRES6kMqLeL3IUT/CmP5/1PlFke63i+FvXc2LK1ZpKrWIiI/ymfKiK+z2bMGpmYT9NJ+qoEQG+JWTvSGHp/76T5odTqujiYiIm+kKu+Jbako59Kcria3bTq0Zij3hIXJvvY3IkECrk4mIyGnoCrvSc0X1IXbWexzqnUmk0UBe+S9ZtPB3lFY3WJ1MRETcROVFfE+Ijdif/B9VA35EkOFgzpH5vLzwl2wtrbE6mYiIuIHKi/imgGCib/4LtaNvBSDPsZiPnsllzbYKi4OJiEhnqbyI7/LzJ3LaExz9wX8DcJvxBpUv3cqr67+2OJiIiHSGyov4NsMg5JL7aJ68CAd+XOX/b3q/OZ2Fb3+qqdQiIl5K5UV6hMAxN+N348s0+wVzsf8WfrDuVh742wc0tWgqtYiIt1F5kR7DGHo5gbe+RWNgNBl+X3Prtp9w75/eoLqh2epoIiLiApUX6Vn6jSX4J+9xNLwvA/3K+O/yu7h/0V/Ye1h3pRYR8RYeWV7efPNNhg4dyuDBg/nzn/9sdRzxNXGDCflJPkdjhxNvVDG/7pf8ZtGzlBxSgRER8QYeV15aWlrIy8vj/fff59NPP2X+/PkcPHjQ6ljia6L6EPL/3qGx3wVEGg082fIoa1b9y+pUIiLSAR5XXjZs2MCIESPo27cvERERZGdn8+6771odS3xRiI3g6cvZF5tJkOEgfs9bVicSEZEOcHt5WbNmDZMnTyYpKQnDMFi+fPlJy9jtdlJTUwkJCSEzM5MNGza0vbZ//3769u3b9nPfvn3Zt2+fu2OKtAoI5sg5/wFAVJ2u/yIi4g3cXl7q6+tJT0/Hbref8vWlS5eSl5fH3Llz2bx5M+np6UyaNImKCl35VKwR2380AH1bijV1WkTEC7i9vGRnZ/PrX/+aadOmnfL1J554gpkzZzJjxgzS0tJ45plnCAsLY/HixQAkJSWdcKZl3759JCUltbu9xsZGampqTniIuCI2dRQA/YxKistUokVEPF23jnlpampi06ZNZGVlfRPAz4+srCzWrVsHwPjx4/niiy/Yt28fdXV1vP3220yaNKnddc6bNw+bzdb2SE5O7vL9EN9ihPfisF8MAOU7P7M4jYiInEm3lpfKykocDgcJCQknPJ+QkEBZWRkAAQEB/P73v2fixIlkZGRwzz330KtXr3bXOWfOHKqrq9seJSUlXboP4psOhqYCUL/vC2uDiIjIGQVYHeBUrrzySq688soOLRscHExwcDB2ux273Y7D4ejidOKLGqOHQP2nGJXbrI4iIiJn0K1nXuLi4vD396e8vPyE58vLy0lMTOzUunNzcyksLGTjxo2dWo/0TAF9hgMQVbvT4iQiInIm3VpegoKCGDNmDPn5+W3POZ1O8vPzmTBhQndGETlBTP/WQbt9m/fQ7NCMIxERT+b2r43q6urYufObf70WFRVRUFBAbGwsKSkp5OXlkZOTw9ixYxk/fjwLFiygvr6eGTNmdGq7+tpIOiMuNR2AvhygqKySQX3jLU4kIiLtMUzTNN25wtWrVzNx4sSTns/JyWHJkiUALFq0iPnz51NWVkZGRgYLFy4kMzPTLduvqanBZrNRXV1NVFSUW9YpPUPVwylEm9V8dOk/ueDCrDO/QURE3MaV399uP/Ny8cUXc6Y+N
GvWLGbNmuXuTYt0SmXoAKKPFFBX8gWg8iIi4qk87t5GZ8tut5OWlsa4ceOsjiJe6mj0YADNOBIR8XA+U14020g6KyBhGAARtbssTiIiIqfjM+VFpLOiU1vvcdSnqRiH061DwURExI1UXkSO6T2gdcZRCuXsrThocRoREWmPz5QXjXmRzvKPjKfGiMTPMNm/63Or44iISDt8prxozIt0mmFwICQVgLq9useRiIin8pnyIuIODcdmHFHxlbVBRESkXSovIt/iH996j6OIGs04EhHxVD5TXjTmRdzB1n8kAIlNu3FqxpGIiEdy++0BrKbbA0hntFTtJ2DBcBymwf7cIpLjY6yOJCLSI7jy+9tnzryIuEOArQ+1hONvmJR+rRlHIiKeSOVF5NsMg4pjM45qSzTjSETEE6m8iHxHg611xpFTM45ERDySz5QXDdgVd/FLaJ1xFF690+IkIiJyKj5TXnSROnEXW8qxGUeNu/Gx8ewiIj7BZ8qLiLv0Hnj8HkellB2usTiNiIh8l8qLyHcExfSjnjACDCf7dmrQroiIp1F5Efkuw6A8OBWAmhJNlxYR8TQqLyKncMQ2CABnuWYciYh4GpUXkVMwjt3jKEwzjkREPI7PlBdNlRZ3ikxunXEUrxlHIiIeR/c2EjmFo5XFhCwaTZPpT9XdxcTHRFodSUTEp+neRiKdFNIrhSOEEGQ42Pv1l1bHERGRb1F5ETkVw6A8uD8A1cWacSQi4klUXkTaURd1DgCO8q0WJxERkW9TeRFph9F7GAChmnEkIuJRVF5E2hFxfMbR0SKLk4iIyLepvIi0I3HQuQCkmPs5WFNvcRoRETlO5UWkHSFx/WkgmGCjhZJdhVbHERGRY1ReRNrj50d5UOuMo6o9mnEkIuIpfKa86Aq70hVqI1vvceQo04wjERFP4TPlJTc3l8LCQjZu3Gh1FPEh5rEZRyFVOyxOIiIix/lMeRHpCsdnHPVu0IwjERFPofIichoJg9IB6G/uo6quweI0IiICKi8ipxUeP5BGggg2min5WuNeREQ8gcqLyOn4+VMamALA4eLPLA4jIiKg8iJyRsdnHDWX6syLiIgnUHkROQOz91AAgjXjSETEI6i8iJxBWN/WGUdxmnEkIuIRVF5EzqBtxpFzL7UNjRanERERjywv06ZNIyYmhmuuucbqKCJEJp5DI4GEGk3s2fWV1XFERHo8jywvd911Fy+++KLVMURa+QdQFpgMwCHNOBIRsZxHlpeLL76YyMhIq2OItKmJODbjSPc4EhGxnMvlZc2aNUyePJmkpCQMw2D58uUnLWO320lNTSUkJITMzEw2bNjgjqwilnHGHZtxdGi7xUlERMTl8lJfX096ejp2u/2Ury9dupS8vDzmzp3L5s2bSU9PZ9KkSVRUVLQtk5GRwciRI0967N+//+z3RKQLhfUdAUDsEc04EhGxWoCrb8jOziY7O7vd15944glmzpzJjBkzAHjmmWdYsWIFixcvZvbs2QAUFBScXdpTaGxspLHxmxkgNTU1blu3yHHxA9NhNaQ4SzjS2ERYcJDVkUREeiy3jnlpampi06ZNZGVlfbMBPz+ysrJYt26dOzfVZt68edhstrZHcnJyl2xHejZb36E0E0C40Ujx19usjiMi0qO5tbxUVlbicDhISEg44fmEhATKyso6vJ6srCyuvfZa3nrrLfr163fa4jNnzhyqq6vbHiUlJWedX6Rd/gGUBvQD4FDR5xaHERHp2Vz+2qg7vPfeex1eNjg4mODgYOx2O3a7HYfD0YXJpCerjhgIVbtpLCu0OoqISI/m1jMvcXFx+Pv7U15efsLz5eXlJCYmunNTJ8nNzaWwsJCNGzd26Xak53L0ap1xFKQZRyIilnJreQkKCmLMmDHk5+e3Ped0OsnPz2fChAnu3JRItwttm3H0tcVJRER6Npe/Nqqrq2Pnzp1tPxcVFVFQUEBsbCwpKSnk5eWRk5PD2LFjGT9+PAsWLKC+vr5t9lFX0
ddG0tV6D0yHNZDiKOFoUwshQR75rauIiM8zTNM0XXnD6tWrmThx4knP5+TksGTJEgAWLVrE/PnzKSsrIyMjg4ULF5KZmemWwGdSU1ODzWajurqaqKiobtmm9AxmSyMtv+5DIA62/fhjhg4ZbnUkERGf4crvb5fLi6dTeZGuVPLrUSS37GHdhGeZMOkGq+OIiPgMV35/e+S9jc6G3W4nLS2NcePGWR1FfFhV+EAAju7XjCMREav4THnRbCPpDi2xQwAI1IwjERHL+Ex5EekOIcdmHMXUa8aRiIhVVF5EXNB7QDoAKY49NDa3WJxGRKRn8pnyojEv0h16pQynxfQj0mhgb7HOvoiIWMFnyovGvEh3MAJDKAtIAuBA0RaL04iI9Ew+U15EusthzTgSEbGUyouIi5qPzTgKOLjN4iQiIj2Tz5QXjXmR7hLSJw2AaM04EhGxhM+UF415ke7Sa2DrjKN+LXtobtG9tEREupvPlBeR7hKfOgKHaRBt1LO3ZLfVcUREehyVFxEXGYGhlPn3AeDA15pxJCLS3VReRM7C8RlHDfu+tDiJiEjP4zPlRQN2pTs1HZtx5H9Q9zgSEeluPlNeNGBXulNQYuuMI1v9LouTiIj0PD5TXkS6U68BowHo11yMw+G0OI2ISM+i8iJyFuIHjMRpGsQYdezbV2J1HBGRHkXlReQs+AeHU+afCECFZhyJiHQrlReRs3QobAAA9Xu/sDiJiEjPovIicpYaYwYD4K97HImIdCufKS+aKi3d7fiMo6hazTgSEelOPlNeNFVaultMauuMo77NxTidpsVpRER6Dp8pLyLdLXHgKAB6GTWU7teMIxGR7qLyInKWAkIjKfOLB6D8688sTiMi0nOovIh0QmVo6z2O6vbqHkciIt1F5UWkE45Gt8448qv8yuIkIiI9h8qLSCcEJg4HIFIzjkREuo3Ki0gnxPRvHbSb1FSMaWrGkYhId1B5EemExEHpAPQ2qigrL7U4jYhIz+Az5UUXqRMrBIXbKDd6A1C2U/c4EhHpDj5TXnSROrFKZWjrPY7qdI8jEZFu4TPlRcQqDcdmHBkHdI8jEZHuoPIi0kn+CcdnHO20OImISM+g8iLSSTGprTOOEpt2a8aRiEg3UHkR6aQ+x2YcJXCYygMVFqcREfF9Ki8inRQcEUOF0QuA0l2acSQi0tVUXkTc4EBI64yj2j2fW5xERMT3qbyIuEFD9DkAmJpxJCLS5VReRNzAP751xlGEZhyJiHQ5jysvJSUlXHzxxaSlpTF69GheffVVqyOJnFHUsXscJTbutjaIiEgPEGB1gO8KCAhgwYIFZGRkUFZWxpgxY7jiiisIDw+3OppIu5IGZQCQyEEOHawktlectYFERHyYx5156dOnDxkZGQAkJiYSFxfHoUOHrA0lcgahtl5UGjEA7Nc9jkREupTL5WXNmjVMnjyZpKQkDMNg+fLlJy1jt9tJTU0lJCSEzMxMNmzYcFbhNm3ahMPhIDk5+azeL9KdyoNbZxzVlOgeRyIiXcnl8lJfX096ejp2u/2Ury9dupS8vDzmzp3L5s2bSU9PZ9KkSVRUfHPxroyMDEaOHHnSY//+/W3LHDp0iFtuuYU//elPZ7FbIt3viO3YjKOKrRYnERHxbS6PecnOziY7O7vd15944glmzpzJjBkzAHjmmWdYsWIFixcvZvbs2QAUFBScdhuNjY1MnTqV2bNnc8EFF5xx2cbGxrafa2pqOrgnIu7lFz8cyiGsWjOORES6klvHvDQ1NbFp0yaysrK+2YCfH1lZWaxbt65D6zBNk+nTp3PJJZdw8803n3H5efPmYbPZ2h76ikmsYksZCUCCZhyJiHQpt5aXyspKHA4HCQkJJzyfkJBAWVlZh9axdu1ali5dyvLly8nIyCAjI4PPP2//qqVz5syhurq67VFSUtKpfRA5W4nnZACQxAEOH9YgcxGRruJxU6W///3v43Q6O7x8cHAwwcHBXZhIpGMiYuI5SDS9q
KJ01xZixk60OpKIiE9y65mXuLg4/P39KS8vP+H58vJyEhMT3bmpk9jtdtLS0hg3blyXbkfkdMqDUwGoLtaMIxGRruLW8hIUFMSYMWPIz89ve87pdJKfn8+ECRPcuamT5ObmUlhYyMaNG7t0OyKnUx81CACnZhyJiHQZl782qqurY+fOb2ZTFBUVUVBQQGxsLCkpKeTl5ZGTk8PYsWMZP348CxYsoL6+vm32UVex2+3Y7XYcDkeXbkfkdIyE4XBAM45ERLqSYZqm6cobVq9ezcSJJ3+Xn5OTw5IlSwBYtGgR8+fPp6ysjIyMDBYuXEhmZqZbAp9JTU0NNpuN6upqoqKiumWbIsdtW/82Q9++gX0k0Peh7VbHERHxGq78/na5vHg6lRexUs3BUqKeGobTNKi7ZzdRUdFWRxIR8Qqu/P72uHsbiXizqF59OEQUfobJvp3tT/EXEZGz5zPlRbONxFOUB6UCUFP8mbVBRER8lM+UF802Ek9Rd2zGUUu5ZhyJiHQFnykvIh4jfhgAoZpxJCLSJXymvOhrI/EUEf1GAdD76G5rg4iI+CjNNhJxs6qKvUT/cQRO06DhvhLCIyKtjiQi4vE020jEQtG9+1JFROuMo10atCsi4m4qLyLuZhiUHptxVLVb06VFRNzNZ8qLxryIJ6mL1IwjEZGu4jPlRVOlxaP0HgpASNUOi4OIiPgenykvIp4kvN9IAOIaiixOIiLie1ReRLpAwjnnAtDXWcrRhiMWpxER8S0qLyJdIDa+HzWE42+Y7N2pGUciIu6k8iLSBQw/P/YH9gegqlgzjkRE3MlnyotmG4mnqT0246i5VDOORETcyWfKi2YbiadxxrXOOAqu2m5xEhER3+Iz5UXE0xyfcdRLM45ERNxK5UWkiyQMSgegr2M/jY0NFqcREfEdKi8iXSSuTyq1hBJgONm36wur44iI+AyVF5EuYvj5sT+gdcbRoSJNlxYRcRefKS+abSSeqCbi2IyjMs04EhFxF58pL5ptJJ7IGTcEgKDDmnEkIuIuPlNeRDxRaN/WGUexRzTjSETEXVReRLpQfNuMo300NzVanEZExDeovIh0oYR+g6g3QwgyHOzXjCMREbdQeRHpQoafHyWBAwA4sPMTi9OIiPgGlReRLlYdPRyAln1bLE4iIuIbVF5EupjRp3XcS8ThLy1OIiLiG1ReRLpY7Dmt1x5KbtyB6XRanEZExPupvIh0sZRhY2gy/bFRT+meHVbHERHxej5TXnSFXfFUQcEh7AlIBaBs2wZrw4iI+ACfKS+6wq54skORQwFoLNlscRIREe/nM+VFxJOZxwbthh3UoF0Rkc5SeRHpBrYBYwBIatA9jkREOkvlRaQbpKSNx2ka9OYwlWV7rI4jIuLVVF5EukFYhI0S/74A7Nu63uI0IiLeTeVFpJsciBgGwJHiTRYnERHxbiovIt2kJX4UAMEHNGhXRKQzVF5EuklE6nkAJB7ZZnESERHvpvIi0k2S0yYAkGSWU33ogMVpRES8l8eVl6qqKsaOHUtGRgYjR47kueeeszqSiFvYYnuz34gHoGTrxxanERHxXgFWB/iuyMhI1qxZQ1hYGPX19YwcOZKrrrqKXr16WR1NpNPKwoaSVF9B3e7N8L3JVscREfFKHnfmxd/fn7CwMAAaGxsxTRPTNC1OJeIejXEjAAgo/8ziJCIi3svl8rJmzRomT55MUlIShmGwfPnyk5ax2+2kpqYSEhJCZmYmGza4djO6qqoq0tPT6devH/fddx9xcXGuxhTxSGH9W6+0G1enQbsiImfL5fJSX19Peno6drv9lK8vXbqUvLw85s6dy+bNm0lPT2fSpElUVFS0LXN8PMt3H/v37wcgOjqaLVu2UFRUxN/+9jfKy8vPcvdEPEvfYZkAJDv20lBfa3EaERHvZJid+E7GMAyWLVvG1KlT257LzMxk3LhxLFq0CACn00lycjJ33nkns2fPdnkb//Vf/8Ull1zCNddcc8rXGxsba
WxsbPu5pqaG5ORkqquriYqKcnl7Il2t8qH+xFHFV//xGsPGXmp1HBERj1BTU4PNZuvQ72+3jnlpampi06ZNZGVlfbMBPz+ysrJYt25dh9ZRXl5ObW3rv0irq6tZs2YNQ4cObXf5efPmYbPZ2h7Jycmd2wmRLrYvZDAA1bs+sTiJiIh3cmt5qaysxOFwkJCQcMLzCQkJlJWVdWgdxcXFXHjhhaSnp3PhhRdy5513MmrUqHaXnzNnDtXV1W2PkpKSTu2DSFc70mskAEaZBu2KiJwNj5sqPX78eAoKCjq8fHBwMMHBwdjtdux2Ow6Ho+vCibhBcHIG7IPYmq1WRxER8UpuPfMSFxeHv7//SQNsy8vLSUxMdOemTpKbm0thYSEbN27s0u2IdFbi0PMBSGkppqnxqMVpRES8j1vLS1BQEGPGjCE/P7/tOafTSX5+PhMmTHDnpkS8Vp/+Q6ghnCCjhZJtm62OIyLidVwuL3V1dRQUFLR9tVNUVERBQQF79uwBIC8vj+eee44XXniBrVu3cscdd1BfX8+MGTPcGvy77HY7aWlpjBs3rku3I9JZhp8fJUHnAHBwpwbtioi4yuUxL5988gkTJ05s+zkvLw+AnJwclixZwvXXX8+BAwd48MEHKSsrIyMjg3/9618nDeJ1t9zcXHJzc9umWol4stqYNCjfgrm/wOooIiJex+XycvHFF5/xcv2zZs1i1qxZZx1KxNcF9M2A8peJqtagXRERV3ncvY3Olr42Em/Se0jr39P+TbtwtLRYnEZExLt06gq7nsiVK/SJWMXR0kLTo30INZoovvED+g/NsDqSiIilLLvCroh0jH9AAMWBgwA4sMO1G5eKiPR0Ki8iFqmOHg5Ay94Ca4OIiHgZnykvGvMi3sbokw5AxOEvLU4iIuJdfKa86Aq74m1izxkLQHLjDkyn0+I0IiLew2fKi4i3SR56Hk2mPzbqKSvZYXUcERGvofIiYpHgkDBKAvoDUPqVBu2KiHSUz5QXjXkRb3QwchgATSWfWpxERMR7+Ex50ZgX8UZm4mgAQg9+YXESERHv4TPlRcQb2Qa2Dtrt06AxLyIiHaXyImKhlLRxOE2DeA5xoGyP1XFERLyCyouIhcIioinx7wvA/q0atCsi0hE+U140YFe8VWXEUAAaijdbnERExDv4THnRgF3xVs3xowAIPPC5xUlERLyDz5QXEW8V2f88ABKPbLM4iYiId1B5EbFYv7TzAehrllN9uNLiNCIink/lRcRitl4JlBq9AdhbuN7iNCIink/lRcQDlIW1Dtqt3b3J4iQiIp5P5UXEAxyNGwlAQPlnFicREfF8PlNeNFVavFn4sUG7veo0aFdE5EwM0zRNq0O4U01NDTabjerqaqKioqyOI9IhlfuLifvTaBymQdMvSggNj7Q6kohIt3Ll97fPnHkR8WZxSf05SDT+hknxVl2rSETkdFReRDzE3pDBAFR//YnFSUREPJvKi4iHaOg1ovU/SjVoV0TkdFReRDxEUPK5AMTWbLU4iYiIZ1N5EfEQiUMzAejfspumxkaL04iIeC6VFxEP0af/UGoII8hoYc923WFaRKQ9Ki8iHsLw86Mk6BwADu3UjCMRkfb4THnRRerEF9TGtA7ade7fYnESERHP5TPlJTc3l8LCQjZu1L9YxXsF9E0HwFZVaHESERHP5TPlRcQX9B4yHoD+TbtwOBwWpxER8UwqLyIepN856TSYQYQZjezb9bnVcUREPJLKi4gH8Q8IYE/gQAAqtusrUBGRU1F5EfEwVbbhALTs/dTiJCIinknlRcTD+CW1DtoNP6xBuyIip6LyIuJhYgaNBSC5cQem02lxGhERz6PyIuJhkoeNodn0J5o6ykp2Wh1HRMTjqLyIeJjgkDD2BPQHoGzbBovTiIh4HpUXEQ90KHIYAEf3aNCuiMh3eWx5OXLkCP379+fee++1OopIt3MmjgYg9OAXFicREfE8HltefvOb33D++edbHUPEEraBY
wBIathucRIREc/jkeVlx44dfPXVV2RnZ1sdRcQSycPH4zQN4jlEZfleq+OIiHgUl8vLmjVrmDx5MklJSRiGwfLly09axm63k5qaSkhICJmZmWzY4Nqgw3vvvZd58+a5Gk3EZ4RHRrPXPwmAfVvXW5xGRMSzuFxe6uvrSU9Px263n/L1pUuXkpeXx9y5c9m8eTPp6elMmjSJioqKtmUyMjIYOXLkSY/9+/fz+uuvM2TIEIYMGdKhPI2NjdTU1JzwEPEFByJaB+0eKd5kcRIREc8S4OobsrOzT/t1zhNPPMHMmTOZMWMGAM888wwrVqxg8eLFzJ49G4CCgoJ23//xxx/zyiuv8Oqrr1JXV0dzczNRUVE8+OCDp1x+3rx5PPzww67uhojHa+49EmryCTqgQbsiIt/m1jEvTU1NbNq0iaysrG824OdHVlYW69at69A65s2bR0lJCbt37+bxxx9n5syZ7RYXgDlz5lBdXd32KCkp6fR+iHiCyNTWQbsJ9dssTiIi4lncWl4qKytxOBwkJCSc8HxCQgJlZWXu3FSb4OBgoqKiTniI+IJ+aa2z7fqZZVRXHbQ4jYiI53D5a6PuNH369A4va7fbsdvtOByOrgsk0o1svRIoNXrTxzzA3sL12C64wupIIiIewa1nXuLi4vD396e8vPyE58vLy0lMTHTnpk6Sm5tLYWEhGzdu7NLtiHSnsrDWges1RRq0KyJynFvLS1BQEGPGjCE/P7/tOafTSX5+PhMmTHDnpkR6hMa4UQAEVHxmcRIREc/h8tdGdXV17Nz5zZ1ui4qKKCgoIDY2lpSUFPLy8sjJyWHs2LGMHz+eBQsWUF9f3zb7qKvoayPxRaEp50IxxNVq0K6IyHGGaZqmK29YvXo1EydOPOn5nJwclixZAsCiRYuYP38+ZWVlZGRksHDhQjIzM90S+Exqamqw2WxUV1dr8K54vcr9u4n7UzoO06DpFyWEhkdaHUlEpEu48vvb5fLi6VRexKeYJgcf7k8vqvnqP5YxbOwlVicSEekSrvz+9sh7G50Nu91OWloa48aNszqKiPsYBvtCWgftVn2tQbsiIuBD5UWzjcRXHek1AgCjdIvFSUREPIPPlBcRXxXcLwOA2JqvrA0iIuIhfKa86Gsj8VUJw1qvtNu/pYjmpkaL04iIWM9nyou+NhJf1af/UGoII8hoYc+2AqvjiIhYzmfKi4ivMvz8KAk6B4CDOzdYnEZExHoqLyJeoDYmDQDnfg3aFRFReRHxAgF9MwCwVRVaG0RExAP4THnRgF3xZb0Ht/69TmnahVO3wBCRHk5X2BXxAo6WZpoeTSLUaGLPTWtIGZxudSQREbfqkVfYFfFl/gGB7AkcAEDFds2oE5GeTeVFxEtU2VoH7Tbv/dTiJCIi1lJ5EfESRp/RAEQc0qBdEenZfKa8aMCu+LrYc1r/bvdr3IHpdFqcRkTEOhqwK+IlGo8ewW9ePwINB6W3fkKflMFWRxIRcRsN2BXxQcEhYewJSAGg9Kv1FqcREbGOyouIFzkUOQyAxj0atCsiPZfKi4gXcSa2DtoNPfilxUlERKyj8iLiRWwDxwDQp2G7xUlERKzjM+VFs42kJ0geNh6naZDAQSrL91kdR0TEEj5TXnJzcyksLGTjRl19VHxXeFQMe/2TANi39WOL04iIWMNnyotIT3EgfCgAR4o3W5xERMQaKi8iXqY5fhQAQQc+tziJiIg1VF5EvExEauug3YR6DdoVkZ5J5UXEyySnZQLQzyylpuqgxWlERLqfyouIl7H1SqTM6A1AydYNFqcREel+Ki8iXqg0bAgANV9/YnESEZHup/Ii4oUae40EIKBCg3ZFpOdReRHxQqH9zwMgrvYri5OIiHQ/nykvusKu9CR9h7UO2k12lNBQX2dxGhGR7uUz5UVX2JWeJC4plYPYCDCc7PlKf+dFpGfxmfIi0qMYBvtDBgNweNcmi8OIiHQvlRcRL1XfawQARtkWi5OIiHQvlRcRL
xXc71wAYqo1aFdEehaVFxEvlTC0ddBu/5YimpsaLU4jItJ9VF5EvFSf1KHUEkqw0cye7QVWxxER6TYqLyJeyvDzpyToHAAO7tCMIxHpOVReRLxYTUzroF3nfg3aFZGeI8DqACJy9gL7ZkD5K6QcWM3aRbdbHUdEegij91AuuP4+y7bvkeUlNTWVqKgo/Pz8iImJYdWqVVZHEvFICcMvgM2QRAVJla9aHUdEeojP6sYBKi8n+eijj4iIiLA6hohH6zc4nS3jH6ep9Euro4hID+Ifd46l2/fY8iIiHZN+xUyrI4iIdCuXB+yuWbOGyZMnk5SUhGEYLF++/KRl7HY7qamphISEkJmZyYYNG1zahmEYXHTRRYwbN46//vWvrkYUERERH+bymZf6+nrS09O59dZbueqqq056fenSpeTl5fHMM8+QmZnJggULmDRpEtu2bSM+Ph6AjIwMWlpaTnrvu+++S1JSEh9++CF9+/altLSUrKwsRo0axejRo89i90RERMTXGKZpmmf9ZsNg2bJlTJ06te25zMxMxo0bx6JFiwBwOp0kJydz5513Mnv2bJe3cd999zFixAimT59+ytcbGxtpbPzm6qI1NTUkJydTXV1NVFSUy9sTERGR7ldTU4PNZuvQ72+3XuelqamJTZs2kZWV9c0G/PzIyspi3bp1HVpHfX09tbW1ANTV1fH+++8zYsSIdpefN28eNput7ZGcnNy5nRARERGP5tbyUllZicPhICEh4YTnExISKCsr69A6ysvL+f73v096ejrnn38+t9xyC+PGjWt3+Tlz5lBdXd32KCkp6dQ+iIiIiGfzuNlGAwcOZMuWjl8tNDg4mODgYOx2O3a7HYfD0YXpRERExGpuPfMSFxeHv78/5eXlJzxfXl5OYmKiOzd1ktzcXAoLC9m4Ufd4ERER8WVuLS9BQUGMGTOG/Pz8tuecTif5+flMmDDBnZsSERGRHsrlr43q6urYuXNn289FRUUUFBQQGxtLSkoKeXl55OTkMHbsWMaPH8+CBQuor69nxowZbg3+XfraSEREpGdwear06tWrmThx4knP5+TksGTJEgAWLVrE/PnzKSsrIyMjg4ULF5KZmemWwGfiylQrERER8Qyu/P7u1HVePJHKi4iIiPex7DovVrLb7aSlpZ12WrWIiIh4P515EREREcu58vvb467z0lnHu1hNTY3FSURERKSjjv/e7sg5FZ8rL8dvLaDbBIiIiHif2tpabDbbaZfxua+NnE4n+/fvJzIyEsMw3Lru4zd9LCkp8fmvpLSvvqsn7a/21Xf1pP3tKftqmia1tbUkJSXh53f6Ibk+d+bFz8+Pfv36dek2oqKifPov0LdpX31XT9pf7avv6kn72xP29UxnXI7zmdlGIiIi0jOovIiIiIhXUXlxQXBwMHPnziU4ONjqKF1O++q7etL+al99V0/a3560rx3lcwN2RURExLfpzIuIiIh4FZUXERER8SoqLyIiIuJVVF5ERETEq6i8fIfdbic1NZWQkBAyMzPZsGHDaZd/9dVXGTZsGCEhIYwaNYq33nqrm5KevXnz5jFu3DgiIyOJj49n6tSpbNu27bTvWbJkCYZhnPAICQnppsSd89BDD52UfdiwYad9jzceV4DU1NST9tUwDHJzc0+5vDcd1zVr1jB58mSSkpIwDIPly5ef8Lppmjz44IP06dOH0NBQsrKy2LFjxxnX6+pnvrucbn+bm5u5//77GTVqFOHh4SQlJXHLLbewf//+067zbD4L3eFMx3b69Okn5b788svPuF5PPLZn2tdTfX4Nw2D+/PntrtNTj2tXUnn5lqVLl5KXl8fcuXPZvHkz6enpTJo0iYqKilMu/9FHH3HjjTdy22238emnnzJ16lSmTp3KF1980c3JXfPBBx+Qm5vLxx9/zMqVK2lubuayyy6jvr7+tO+LioqitLS07VFcXNxNiTtvxIgRJ2T/8MMP213WW48rwMaNG0/Yz5UrVwJw7bXXt
vsebzmu9fX1pKenY7fbT/n6Y489xsKFC3nmmWdYv3494eHhTJo0iaNHj7a7Tlc/893pdPt75MgRNm/ezAMPPMDmzZt57bXX2LZtG1deeeUZ1+vKZ6G7nOnYAlx++eUn5H755ZdPu05PPbZn2tdv72NpaSmLFy/GMAyuvvrq067XE49rlzKlzfjx483c3Ny2nx0Oh5mUlGTOmzfvlMtfd9115o9+9KMTnsvMzDR/8pOfdGlOd6uoqDAB84MPPmh3meeff9602WzdF8qN5s6da6anp3d4eV85rqZpmnfddZc5aNAg0+l0nvJ1bz2ugLls2bK2n51Op5mYmGjOnz+/7bmqqiozODjYfPnll9tdj6ufeat8d39PZcOGDSZgFhcXt7uMq58FK5xqX3NycswpU6a4tB5vOLYdOa5TpkwxL7nkktMu4w3H1d105uWYpqYmNm3aRFZWVttzfn5+ZGVlsW7dulO+Z926dScsDzBp0qR2l/dU1dXVAMTGxp52ubq6Ovr3709ycjJTpkzhyy+/7I54brFjxw6SkpIYOHAgN910E3v27Gl3WV85rk1NTbz00kvceuutp71JqTcf1+OKioooKys74bjZbDYyMzPbPW5n85n3ZNXV1RiGQXR09GmXc+Wz4ElWr15NfHw8Q4cO5Y477uDgwYPtLusrx7a8vJwVK1Zw2223nXFZbz2uZ0vl5ZjKykocDgcJCQknPJ+QkEBZWdkp31NWVubS8p7I6XRy9913873vfY+RI0e2u9zQoUNZvHgxr7/+Oi+99BJOp5MLLriAvXv3dmPas5OZmcmSJUv417/+xdNPP01RUREXXnghtbW1p1zeF44rwPLly6mqqmL69OntLuPNx/Xbjh8bV47b2XzmPdXRo0e5//77ufHGG0974z5XPwue4vLLL+fFF18kPz+f3/3ud3zwwQdkZ2fjcDhOubyvHNsXXniByMhIrrrqqtMu563HtTN87q7S4prc3Fy++OKLM34/OmHCBCZMmND28wUXXMDw4cN59tlnefTRR7s6ZqdkZ2e3/ffo0aPJzMykf//+/P3vf+/Qv2i81f/+7/+SnZ1NUlJSu8t483GVVs3NzVx33XWYpsnTTz992mW99bNwww03tP33qFGjGD16NIMGDWL16tVceumlFibrWosXL+amm2464yB6bz2unaEzL8fExcXh7+9PeXn5Cc+Xl5eTmJh4yvckJia6tLynmTVrFm+++SarVq2iX79+Lr03MDCQc889l507d3ZRuq4THR3NkCFD2s3u7ccVoLi4mPfee4/bb7/dpfd563E9fmxcOW5n85n3NMeLS3FxMStXrjztWZdTOdNnwVMNHDiQuLi4dnP7wrH997//zbZt21z+DIP3HldXqLwcExQUxJgxY8jPz297zul0kp+ff8K/TL9twoQJJywPsHLlynaX9xSmaTJr1iyWLVvG+++/z4ABA1xeh8Ph4PPPP6dPnz5dkLBr1dXVsWvXrnaze+tx/bbnn3+e+Ph4fvSjH7n0Pm89rgMGDCAxMfGE41ZTU8P69evbPW5n85n3JMeLy44dO3jvvffo1auXy+s402fBU+3du5eDBw+2m9vbjy20njkdM2YM6enpLr/XW4+rS6weMexJXnnlFTM4ONhcsmSJWVhYaP6///f/zOjoaLOsrMw0TdO8+eabzdmzZ7ctv3btWjMgIMB8/PHHza1bt5pz5841AwMDzc8//9yqXeiQO+64w7TZbObq1avN0tLStseRI0falvnuvj788MPmO++8Y+7atcvctGmTecMNN5ghISHml19+acUuuOSee+4xV69ebRYVFZlr1641s7KyzLi4OLOiosI0Td85rsc5HA4zJSXFvP/++096zZuPa21trfnpp5+an376qQmYTzzxhPnpp5+2za75n//5HzM6Otp8/fXXzc8++8ycMmWKOWDAALOhoaFtHZdccon51FNPtf18ps+8lU63v01NTeaVV15p9uvXzywoKDjhc
9zY2Ni2ju/u75k+C1Y53b7W1taa9957r7lu3TqzqKjIfO+998zzzjvPHDx4sHn06NG2dXjLsT3T32PTNM3q6mozLCzMfPrpp0+5Dm85rl1J5eU7nnrqKTMlJcUMCgoyx48fb3788cdtr1100UVmTk7OCcv//e9/N4cMGWIGBQWZI0aMMFesWNHNiV0HnPLx/PPPty3z3X29++672/5cEhISzCuuuMLcvHlz94c/C9dff73Zp08fMygoyOzbt695/fXXmzt37mx73VeO63HvvPOOCZjbtm076TVvPq6rVq065d/b4/vjdDrNBx54wExISDCDg4PNSy+99KQ/g/79+5tz58494bnTfeatdLr9LSoqavdzvGrVqrZ1fHd/z/RZsMrp9vXIkSPmZZddZvbu3dsMDAw0+/fvb86cOfOkEuItx/ZMf49N0zSfffZZMzQ01KyqqjrlOrzluHYlwzRNs0tP7YiIiIi4kca8iIiIiFdReRERERGvovIiIiIiXkXlRURERLyKyouIiIh4FZUXERER8SoqLyIiIuJVVF5ERETEq6i8iIiIiFdReRERERGvovIiIiIiXkXlRURERLzK/wd367yGdsPYJAAAAABJRU5ErkJggg==" + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "execution_count": 7 + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-09T06:27:43.659719Z", + "start_time": "2024-05-09T06:27:41.424482Z" + } + }, + "cell_type": "code", + "source": "model.plot()", + "id": "1eafb70cc5f42e62", + "outputs": [ + { + "data": { + "text/plain": [ + "
" + ], + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZcAAAHiCAYAAAAkiYF/AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAABKjklEQVR4nO3dd3xUVfrH8c+EGiBIEVFBpRgbNuwF1FWKu6xtaVZUEEWafbGuooIioEREUUQpIoqChbKgYseyCPhbuyxFBRUEEgmkQJj7++NxTAhJSJmZc+/M9/16+WI3JOQh3JnvPfec55yQ53keIiIiUZTiugAREUk8ChcREYk6hYuIiESdwkVERKJO4SIiIlGncBERkahTuIiISNQpXEREJOoULiIiEnUKFxERiTqFi4iIRJ3CRUREok7hIiIiUadwERGRqFO4iIhI1FV3XYBIEHiex8aNG9myZQv16tWjcePGhEIh12WJ+JZGLiJlyMrKIiMjg/T0dJo0aULLli1p0qQJ6enpZGRkkJWV5bpEEV8K6SRKkZItWLCArl27kpOTA9joJSIyaqlTpw4zZ86kc+fOTmoU8SuFi0gJFixYQJcuXfA8j3A4XOrnpaSkEAqFmDt3rgJGpAiFi0gxWVlZNG/enNzc3DKDJSIlJYXU1FTWrFlDgwYNYl+gSABozkWkmMmTJ5OTk1OuYAEIh8Pk5OQwZcqUGFcmEhwauYgU4Xke6enprFy5koq8NEKhEK1atWL58uVaRSaCwkVkJxs2bKBJkyYl/t5fgCXA5t18fePGjWNRmkig6LGYSBFbtmzZ5WOpwH7AJOC/wP3AAaV8fXZ2dqxKEwkUhYtIEfXq1QMgBNQD9gYaAbnAacAEoBvwH2AK0K7Y16elpcWrVBFf02MxkSK8bds49uCDWb96NR6QA2wBCv74/RBQEwuYq4FDgW+w0FnSsiVfrVihORcRNHIRMbm58NtvhNav59orr2Qz8AuQRWGwROQD04DTgX8APwKjgUVbtxIaPhx++SWOhYv4k0YukrzCYcjJgS1boKAAataEevXIys+n+X77ldjnEgKKv2BSUlI4pFYtllx7LbVnzYK8PDj3XLj6ajj22Lj9dUT8ROEiyaegwAJl61bwPKhTB+rVs3D5Q2kd+sXDJdKhP2/ePDp16gTZ2TB9OkyYAKtXW7hcfTWccw7UqBG3v6KIawoXSR75+fbmn5cHKSkWKHXrQrVqJX56SXuLRcKl6N5is2bNsmApascOeOsteOopeP992Gcf6N0bevWCRo1i+JcU8QeFiyQ2zyt89LV9u40e6tWz0Uo5Jt6zsrKYMmUKjz76KCtWrPgzXFq3bs3gwYO5/PLL2WOPPcr+Q775xkLmpZfs/3frBtdcA4ceWuW/nohfKVwkMe3YUfjoKxyG1FQLlVq1KvXHeZ7Hpk2byP79d9L22INGjRpVfFXYpk0wZQpMnAi//grt29sjs44dSx09iQSVwkUSy7ZtFio5OTYyqVvXQqV6lM7FC4ftkVpVbN8Oc+bYaOazz+CAAyxkLroI1CcjCULhIsHnebaUeMsWC5fq1QsffVU1CIqLRrgUtWSJhczrr0Pt2hYwfftCy5bR+x4iDihcJLjC4cJHXzt22COvtDR7k47l94x2YIH1xjz7LEyeDJmZ0KmTjWbaty/X3JCI3yhcJHi2by989AWFS4njsdQ3VuESkZcHM2fCk0/aQoBDD7WQ6dYttqEpEmUKFwmOyKOv/HybAI8sJY7lm31xsQ6XCM+DDz+0R2YLFkDDhraMuXdvW9Ys4nMKF/G3UrroSU1187goXuFS1OrV1pT5/PM2sjnnHBvNHHdcfOsQqQCFi/hTObronXARLhHZ2fDCCzaaUfe/+JzCRfylgl30cecyXCKKd//vvbc9Lrv8cnX/i28oXMS9KnbRx5UfwqUodf+LTylcxJ0od9HHhd/CJSLS/f/MM7asWd3/4pjCReIv1
l30seTXcIkoqfu/b1+4+GJ1/0tcKVwkPkrroq9b13+Pvsri93ApaulSC5nXXlP3v8SdwkViy0UXfSwFKVwiinf/d+xo8zLq/pcYUrhIbLjsoo+lIIZLREnd/337QvfuwQ178S2Fi0SXH7roYynI4RLhebBokT0ymz8fGjSwZcxXXgn77uu6OkkQChepOr910cdSIoRLUatXw9NPw7RpdmNwzjn2yEzd/1JFChepPL920cdSooVLxJYthd3/q1bBMccUdv8n8r+nxIzCRSrO7130sZSo4RIRDhd2/7/3HjRtCn362KaZjRu7rk4CROEi5ROkLvpYSvRwKeqbb2zDzBkz7N8/0v1/2GGuK5MAULhI2YLYRR9LyRQuEZs2wdSpMHGiLWtu184emXXqlByjVakUhYuUbNs2e/SVm2tvppH5lCB00cdSMoZLxPbtMHeuPTJbvNi6/6+6yrr/69d3XZ34jMJFCiVKF30sJXO4FFW0+79WrcLu/1atXFcmPqFwkV276GvXtlBRY92uFC47+/VX6/6fNMm6/zt0sHmZ007TDUmSU7gks0Ttoo8lhUvJ8vMLu/+//hoOOaSw+z811XV14oDCJRklehd9LClcyuZ58NFHFjKR7v9evewwM3X/JxWFS7IoqYs+soGkHl+Un8Kl/H74obD7PyfHGjKvvtq6/3XNJTyFS6JLxi76WFK4VFzx7v+2bW1eRt3/CU3hkqjy8uxFnYxd9LGkcKm8cBgWLrRHZpHu/969bdNMdf8nHIVLIlEXfewpXKLj22+t+//FF9X9n6AULolAXfTxo3CJrkj3/zPPwM8/w6mn2rxM584aZQecwiXI1EUffwqX2Ni+HebNs0dmixfD/vvbUmZ1/weWwiVo1EXvlsIl9pYts8n/V19V93+AKVyCQl30/qBwiZ+Suv+vvhpOP103UgGgcPE7ddH7i8Il/vLzYdYse2T21Vfq/g8IhYtfqYvenxQu7kS6/596Cv79b+v+v+wyW87crJnr6qQYhYufhMP22Gvr1p276HV35h8KF38o3v3/97/bUmZ1//uGwsUPinbRQ+FSYnUv+4/CxV8i3f8TJsDKlXD00RYy556r149jCheX1EUfPAoXf4p0/z/1FLz7rnX/X3mldf/vuafr6pKSwiXe1EUfbAoX//vuu8Lu/3AYuna10UybNq4rSyoKl3hRF31iULgER2amdf9PnGjd/6ecYiGj7v+4ULjEWvEu+rp17T910QeTwiV4Cgqs+3/8+MLu/6uugksuUfd/DClcYkFd9IlL4RJsn39e2P1fo4ZtL3PVVdC6tevKEo7CJZrURZ/4FC6JYd26wu7/jRsLu//POEM3gFGicIkGddEnD4VLYsnPh1desUdmX30FBx9s3f89eqi/rIoULlWhLvrko3BJTJ4HH39sj8zmzYM99oBevdT9XwUKl4pSF31yU7gkvh9/tO7/556zpxFdukC/fur+ryCFS3mpi15A4ZJMSur+v/pqOO88ve7LQeFSHgUFtv23uuhF4ZJ8wmF4+217ZPbOO9aUOX6866p8L+nCZcf27Xg7dlT8C/PyrOGxEsPilJo1SdEbkq9U+rKvYriE9FjFN3773//I/emnin1R5L2jkjeXjdq2pV6DBpX62qBJuk6+HXl5VK/M/Ei9ehX/mpwcqFGDcEEBKRpGJ4bKBIvn6Vm9D/30xhukn3defL7ZtGnQpg2bfvxR4ZLIUmLdHR/ZPyw7Gxo0IKxHaL4U81FEsdFRUj0iCADP80iL9Uowz4Pnn4fRo2H0aDL32y+2389H9Kwm2iLBkpVlyxnr1HFdkbgQCRaNWpKX59mI5cYb4b77bLuZJKJwiabiwaLtXpJTScGi6yC5RILlppssWPr2TbprQOESLQoWAQWLKFj+oHCJBgWLgIJFFCxFKFyqSsEioGAR+7d/7jkLlvvvT+pgAYVL1ShYBBQsUhgsN99swXLVVUl/DShcKkvBIqBgEQVLKRQulaFgEVCwi
IKlDEnZRFklChYBBYvsHCzDhkGfProGitDIpSIULAIKFlGwlIPCpbwULAIKFrF/+6lTFSy7oXApDwWLgIJFCoPlllsULLuhcNkdBYuAgkUULBWkcCmLgkVAwSIKlkpQuJRGwSKgYBH7t58yxYJl+HAFSzkpXEqiYBFQsEhhsPzznxYsvXvrGignhUtxChYBBYsoWKpI4VKUgkVAwSIKlihQh36E58HWrfD77wqWZKZgEc+DyZNhyBB44AG48kpdA5WgkQsoWMQoWETBEjUKFwWLgIJFFCxRltzhomARULCIgiUGkjdcFCwCChaxf/tJkyxYHnxQwRIlyRkuChYBBYsUjlhuvdWC5YordA1ESXKGS06OgkWMgiW5TZ1aOGJRsERVUi5F9gDq14c6dezNJXIHK0nFg53fTHQdJJUQkO95cNdd0L27Pc2QqAl5XnK9ogq2bcMrKIjr96xWqxYp1arF9XtK2Vxd9iHdGfvGms8+I3fNmrh+zyYnn0yDpk3j+j1dSbpwERGR2EvOOZfK8DzYvl2PTkSSmedBQYHeB8pB4VJeBQWwbp39KskrHHZdgbj0xRewzz72q5RJ4SIiIlGncBERkahTuIiISNQpXEREJOoULiIiEnUKFxERiTqFi4iIRJ3CRUREok7hIiIiUadwERGRqFO4iIhI1ClcREQk6hQuIiISdQoXERGJOoWLiIhEncJFRESiTuEiIiJRp3AREZGoU7iIiEjUKVxERCTqFC4iIhJ1ChcREYk6hYuIiESdwkVERKJO4SIiIlGncBERkahTuIiISNQpXEREJOoULiIiEnUKFxERiTqFi4iIRJ3CRUREok7hIiIiUadwERGRqFO4iIhI1ClcREQk6hQuIiISdQoXERGJOoWLiIhEncJFRESiTuEiIiJRp3AREZGoU7iIiEjUKVxERCTqFC4iIhJ1ChcREYk6hYuIiESdwqUcPM9jw4YN/PTTT2zYsAHP81yXJHEWuQZWr16tayBJeZ5HZmYmO8JhMjMzdQ3shsKlDFlZWWRkZJCens4+++7Lyaecwj777kt6ejoZGRlkZWW5LlFirOg10KRJE1q1bk2TJk10DSSRotfA6WecwaZNmzj9jDN0DeyOJyWaP3++V7duXS8UCnmhUMirDl4z8KrDnx+rW7euN3/+fNelSowUvwYALwT2q66BpFD8GjgCvPXgHaFrYLc0cinBggUL6NKlC7m5uXiet8vwN/Kx3NxcunTpwoIFCxxVKrGia0B0DVRNyCv+E0tyWVlZNG/enNzcXMLh8J8frw40BdYBBUU+PyUlhdTUVNasWUODBg3iW6zERGnXAEAIG7oUpWsg8ZR2DRwBLATOAr4o8vm6BnalkUsxkydPJicnZ5c3ldKEw2FycnKYMmVKjCuTeNE1ILoGqk4jlyI8zyM9PZ2VK1fuMgSuAzRk15ELQCgUolWrVixfvpxQKBSnaiUWyroGoOSRC+gaSCRlXQNHAW+y68gFdA0Up5FLERs3bmTFihW7XFAtgG+AU0r5Os/zWLFiBZs2bYpxhRJrpV0DAKlA2z9+LU7XQOIo6xoYA9Qt5et0DexM4VLEli1bdvlYLWAGsB34ajdfn52dHYOqJJ5KugYiDgGW/PFraXQNBF9Z18BH2M3FmWV8va4Bo3Apol69ert87GHgcKA78D27PhIrKi0tLTaFSdyUdA1UhK6B4CvrGrgP+DdwB9C6lM/RNWAULkU0btyY1q1b//m89BKgH3AdsIzSgyUUCtG6dWsaNWoUn0IlZopfA+WlayBxlHUN5AEXAr8C09n5EamugZ0pXIoIhUIMGjQIgMOA8cAUYEI5vnbw4MGaxEsARa+BitI1kBh2dw1kAxcBB2JzMEXpGiik1WLFZGVlcUizZryTk8N24GQgp4zP1/r2xFNaj0NbYClwDDaSjdA1kHjK6nWKuBiYiD3dmKprYBcauRTTYI89WHb88ewL9AyFdhssoVCIWbNm6YJKIA0aNGDmz
JmEQiFSUsp+iegaSEzluQaeB54BMrAlyroGdqZwKW7cOPZ5/31W3X47P9WpQygU2mWYG/lYamoq8+bNo1OnTo6KlVjp3Lkzc+fOJTU1VddAkirPNXAT8F1KCu83b06nE05wU6hPKVyK+uQTuOkmuO46jh42jDVr1jBmzBhatWq106e1atWKMWPGsHbtWr2pJLDOnTvrGkhyu7sGRmRkcMjSpdTLy4OrrwbNMvxJcy4RGzbAscdCs2bw7rtQs+afv+V5Hps2bSI7O5u0tDQaNWqkSbsk43kev7/zDg3OOoushQvZ4y9/0TWQZMp8H5gzB7p1gwcegBtucFuoTyhcAMJh6NIFliyx//bbz3VF4kdLl9oNyJIlcMwxrqsRv7njDhgzBt54A0491XU1zumxGMD999sF8dxzChYRqZyhQ+Hkk+GSS2DdOtfVOKdwefNNuyj+9S/Qs3MRqazq1e0GNRyGXr2goKz9PBJfcofLmjV2l9GxI9x5p+tqRCTo9t7bAuaDD+C++1xX41Tyhsu2bdCzJ9SubRdDtWquKxKRRHDaaXDvvTBiBMyb57oaZ5I3XG69FRYvhhkzYM89XVcjIonkxhttkVDv3rB6tetqnEjOcHn5ZVvVMWoUnHSS62pEJNGkpMDTT0ODBnDxxZCf77qiuEu+cPn+e+jTB3r0gEpuUCgislsNG8L06fDVV3Dzza6ribvkCpecHGt02ndfmDAB1AQnIrHUti08/LC930yf7rqauKruuoC48Tzo3x9WrrRtXnSgj4jEQ+/esGgRDBgARx0Fhx3muqK4SJ6Ry8SJMGUKPPEEHH6462pEJFmEQjB2LLRsCRdeCElyDHJyhMvSpTa/cs01cNllrqsRkWRTt649Fvv5Z3uCkgS7biV+uGRmQvfu0KYNPPKI62pEJFkddBA8+SS89BKMH++6mphL7DkXz4Mrr7SAeesta5gUEXGla1f4+GP45z/huOPg+ONdVxQziT1yGTkSXn8dJk+2550iIq4NH26ryC66CDZudF1NzCRuuLz3nm2BfeutcM45rqsRETE1a8Lzz1trxBVX2EaXCSgxw+XXX+2uoH172+NHRMRPmje31atvvQUPPui6mphIvHApKLBgAbs7qJ7Y00oiElAdOtjTlfvug4ULXVcTdYkXLv/6F3z4Ibzwgm1/LSLiV7fdBmedZee/rF3rupqoSqxwmT3bhpjDhtm21yIiflatmi04ql3bNrjcvt11RVGTOOGyciVcfjmcdx7ccovrakREyqdxY2uwXLIEbr/ddTVRkxjhkpdnuxw3bAjPPqsNKUUkWE44wQ4XGzsWZs1yXU1UJMZs9w032LbWH31k5yeIiARN//7WYHnNNXDEEZCe7rqiKgn+yGXqVNtSYexYa0wSEQmiUMg21t1nH1vxmpPjuqIqCXa4fPkl9Otncy19+riuRkSkatLSbP5lxQoYPDjQG1wGN1yys+3grwMPhHHjNM8iIomhTRt7T3vuOZg0yXU1lRbMORfPg7594ZdfYPFiqFPHdUUiItFz8cU2h3z99fa4/+ijXVdUYcEcuYwdCzNm2AFgBx3kuhoRkegbNcpOrbzoIsjKcl1NhQUvXD7+GG6+2RK9WzfX1YiIxEbt2jb/kpkJV10VuPmXYIXLhg3Qs2fhmnARkUTWooU9oZkzJ3CHHQYnXHbsgEsvtYbJF16AGjVcVyQiEntdutiuI3fdZfsmBkRwwuX+++HNN2HaNNuuWkQkWdx9N5x6KlxyCaxb57qacglGuLzxhp3Lcs890LGj62pEROKrenVrGAe47DI7WsTn/B8uP/1kad2pk519ICKSjJo2tSc3ixbB0KGuq9ktf4fLtm02gV+njqV2ir/LFRGJqXbt7CnOyJEwd67rasrk73frIUNsG+oZM2DPPV1XIyLi3o03wt//blterV7tuppS+TdcZsyAjAwYPRpOPNF1NSIi/hAKwdNP2w7wF11kK2h9yJ/h8t131jTUsycMGOC6G
hERf2nQwBosv/7amsp9yH/hsnWrdd43bw5PPaUNKUVEStK2rTVWPv00PP+862p24a+NKz3PDsxZtQo+/dS2nxYRkZJdeaVtcDlwIBx1lO2o7BP+GrlMmFB4+JePfkgiIr4UCsGjj0KrVjb/kp3tuqI/+Sdcliyxw3H69bO+FhER2b06dWz+5Zdf4NprfbPBpT/CJTMTevSAI48M3OZsIiLOpafbE5+XX7ajkn3AfbiEw3DFFXZewYwZUKuW64pERILnH/+AQYOsP/A//3FdjQ/CZeRImD0bpkyx7aVFRKRyhg2DY46x+ZeNG52W4jZc3n3X9gu7/XbbVlpERCqvZk1blpyXB5dfbkeVOOIuXH75xdL19NNtt2MREam6Zs3sSdDChfDAA87KCHle1ZcWrP/uO3J++KHiX+h5lW6S3PP446nXsGGlvlak0tdsOFzpDVR1zUplrf/+e3J//LFiXxTZlr965doZGx97bJWu16g0Ua6eM4dDevSIxh+1e0OHwgknsGHPPfVClUqr9DVbmRuiiRPhqKPYsGqVrlmplB/+/W8O7to1Pt9s0iQ44gg2rl7tPlw8z6P+fvtF44/a3Tey4d6JJ7Ip9t9NElhcrlnPg2eesVNUx49n0wEHxPb7ScLyPI/6sT6B1/Ng8mR7lPbYY2Tuv3+V/jj3q8UqIjvbhnq9ermuRKRsnmcjlmuugYcftu3RRfzK82zEMmAAPPSQtYdUUbDC5aGHrBu1Zk3XlYiULhIs/fpZsAwapA1Yxb8iwTJwoL3H9u8fles1OOHieTBmDFx/vV6o4l8KFgmSGAULBClccnMhJ8e6T0X8SMEiQRLDYIEghcvIkVC7NtSr57oSkV15np2r0a+f7Y+nYBE/8zx49lkLlpEjox4sEJRw8Tz7Adx0k16w4j+RYLn2WguWgQN1nYp/RYJl0CB7X7322phcr8EIl6wseyR2662uKxHZmYJFgiROwQJBCZdbboGGDW2lmIhfKFgkSOIYLOC3Y45LEmnsmThRL1zxj6LBMmaM9Qfo+hS/Khoso0bZ3GCMr1f/j1w++8x29tTplOIXChYJEgfBAn4PF8+Dyy6Ddu2gWjXX1YjYNTlhgoJFgiGyBVGcgwX8/lhs61b4/ntYsMB1JSKFwdK/v4JF/C8SLIMHxz1YIJojl3DYDqiJpuuus4n8Km6gJlJlChYJEsfBAtEcuVx3nS0Znjo1On9eQYF1j86apRexuKVgkSDxQbBANEcu559vx2vm50fnz3voIdug8txzo/PniVSG58FTT1mwZGQoWMTfIlsQDR4Mo0c7CxaIZriceSakpcENN1T9zyoosKOPR4/WC1nciQTLgAEWLDHYIkMkaiLBct119t55zTVOr9fohUsoBM89B08+aRPxVXH//XY0Z79+0alNpKIULBIkPgsWiPZS5C5dYK+94NJL7S9bGVu3Wrg8+WSlzyoXqRIFiwSJD4MFoh0uoRDMmwevvQbffFPxr/c86NkTmja1gBKJN8+zG5sBA+DRRxUs4m+Rht7rrrNjHnwSLBCLJsqjj7YRTIcONndSEe++a+H0xhu++QFJEokEy8CBFiwx3ntJpEoiwXL99RYsV1/tq+s1+uESCsGLL8KWLfbiLO/jscxMWxnWvz8cdljUyxIpk4JFgsTnwQKx2v6lTh2YP9/2s3nqqd0HTF4enHGGNUtmZPjuhyQJTsEiQRKAYIFYbv9y8skwdqw9u05JgauuKvkHkJUF550Hv/0GX3yhPcQkvjwPxo+3vZfGjnXaFyCyW5GG3htusGMe+vb17fUau3AJheyFum2b3REuXgy33w777Wdhk5cH779vzT7hMHzyCTRuHLNyRHYRGbEoWCQIIiOWAAQLxHrjylDIwuPQQ+0HcuyxcMwx0KABLF8OK1bABRfYlhqNGsW0FJFdTJhgNz4KFgmCZ56xR2EBCBaIUriEgPwtW0r/hFNOgYUL4c034cMPYfNmW1H217/CkUfaSKasrxeJs
hCQ73nwwAO27L2qjb8iMfTn9Xr//XDRRYG4XkOeV9lux0I/fvIJOT/8EI16yq3p6afTcO+94/o9JXHompUg+enTT8n56ae4fs+92rWr0vUalXAREREpyt3+Kp5nE/nKNgkKzyv8T8Tvtm+veCN7FLkLl19/hWbNKtfJL+LCsmU2P7hsmetKRMq2cCHUrw8PPuisBHfhss8+8MIL8N57tr2+iIhU3dq10KuX3bjffruzMtxuO3z66TBsGAwfDnPmOC1FRCTwtm2Diy+G2rXtJF+HO8u739P+llvgnHMsaVetcl2NiEhw3XEHLF0K06c7b0p3Hy4pKZawDRtCjx7ROyZZRCSZzJplDcEjRsAJJ7iuxgfhAhYsL70EX34ZnWOSRUSSyfLldpZLt2628aoP+CNcwLaFyciwTQSfe851NSIiwZCTAxdeaIuknnjCN9vCxHZvsYrq2xc++sj2eWrbFtq0cV2RiIh/eZ5tvLpqlW2tlZbmuqI/+WfkApa448ZBq1Y2vMvOdl2RiIh/PfssTJsGjz3mu0MW/RUuAHXr2vzL2rU2klE3tIjIrpYtsznqq66y5cc+479wATj4YJg4EWbMsJGMiIgUysy03ZEPOwxGjXJdTYn8NedSVPfuNv9y001w3HFw0kmuKxIRcS8ctqc6WVl2nHzt2q4rKpE/Ry4RI0bYAWM9e8KGDa6rERFx75FHbEeTiROhRQvX1ZTK3+FSsya8+CLk5sJll1lii4gkqw8+gH/9y3Y26dLFdTVl8ne4AOy3n/W9vPGG7UMmIpKM1q2zU1NPPRXuvtt1Nbvl/3AB6NTJ0vqee+yoZBGRZFJQYMECMHUqVPfvdHlEMMIF4M47oWNHuOQSWLPGdTUiIvEzdKgtcJo2DZo2dV1NuQQnXKpVs8djtWvbBP/27a4rEhGJvblzYeRIuO8+aNfOdTXlFpxwAdhzT+t9WbwYhgxxXY2ISGytWgV9+tixJAHb1DdY4QLW7zJqFIwZAy+/7LoaEZHYyMuzzvuGDWHCBN9sSFle/p8VKsmgQfb8sU8fOPJIOOgg1xWJiETXzTfD11/bUfANGriupsKCN3IBS/AJE2Dffa2TPyfHdUUiItHz/PPw9NP2hOboo11XUynBDBewraVfeglWrID+/bXBpYgkhi+/hAEDbOnxFVe4rqbSghsuAIcfbofjTJliWyGIiARZdrZtSNm6NTz6aODmWYoK5pxLUZddZvMvgwbZaZbHHOO6IhGRivM8Oyjx11/tPa1OHdcVVUmwRy4Rjzxip1Z2725bUYuIBM3jj8PMmfDkk5Ce7rqaKkuMcKld2+ZfMjPhyis1/yIiwfLpp9a7N2gQ/OMfrquJisQIF4CWLWHyZHj9detmFREJgg0brJ/luONg+HDX1URN4oQLWBfrrbfCHXfY2nARET/bscNWhOXl2b5hNWq4rihqEitcAO69F9q3txUXv/7quhoRkdI98AAsXGgrXps1c11NVCVeuFSvbg1IYAFTUOC2HhGRkrz5pp1RddddcNZZrquJusQLF4C994YXXoAPP7RzYERE/GTNGrj8cujQwR7lJ6DEDBeA006zu4IHH4TZs11XIyJitm2zCfw6dWDSJEhJzLfhxPxbRdxyC5x3nt0hrFrluhoREbj9dli2DKZPh8aNXVcTM4kdLqEQPPusbVndvbutyBARceXll+Gxx+Chh+D4411XE1OJHS5gW1W//DJ89VXgDtsRkQTy/fe2vUuPHvZrgkv8cAFo2xbGjrVtFaZOdV2NiCSbrVvhwgttufG4cYHekLK8gr9xZXn16QOLFtkdQ9u2tqOyiEiseZ5t67J6ta1gTUtzXVFcJMfIBexOYdw4OPBA6NbNtrYWEYm1Z56x3rtx4+Cww1xXEzfJEy5gS/9eegl++QX69tUGlyISW0uX2lxv377W1J1EkitcAA46yA4WmzHD5mFERGIhM9MC5YgjYNQo19XEXfKFC9hjseuvh5tvho8/dl2NiCSac
NjmeTdvtkditWq5rijukjNcwDr3jz8eeva0La9FRKLl4Ydh3jybbzngANfVOJG84VKzJrz4ojVWXnqpbX0tIlJV779vexoOGQJ//avrapxJ3nABaN7czlB48024/37X1YhI0P36q92stm9vux0nseQOF4COHeHuu+0cmDfecF2NiARVQYEFS0qKNWtXT542wpIoXADuvBM6dYJLLoGffnJdjYgE0d132wKhadNgr71cV+OcwgUK7zTq1LEJ/m3bXFckIkEyezaMHm2P10891XU1vqBwidhzT+t9WbLEJuKK8DyPDRs2sHr1ajZs2ICn5suk43kemZmZAGRmZuoaSEKlvg+sWgVXXQXnnmstDgIoXHZ24ol295GRAS+9RFZWFhkZGaSnp9OkSRNatmxJkyZNSE9PJyMjg6ysLNcVS4wVvQbO6tABgLM6dNA1kETKeh94bNQoCnr0sHNZnnoqKTakLDdPdhYOe96FF3rb69Txjk5N9UKhkBcKhTzgz/8iH6tbt643f/581xVLjMyfP9+rW7fun//ebW3DIK+troGkUfwaKP4+8Dh4WeAtevxx16X6jkYuxYVCvNWjB9/n5DAlN5dUz9vlEYj3x8dyc3Pp0qULCxYscFSsxMqCBQvo0qULubm5f/57F6VrIPHt7hq42PO4ErghFOK0QYN0DRQT8or/xJJcVlYWzZs3p2VODh97HjOBK8r4/JSUFFJTU1mzZg0NGjSIT5ESU5FrIDc3l3A4/OfH2wJLgWOAZUU+X9dA4intGohoA3wAvARcg66BkmjkUszkyZPJycnhS8/jGuAyoG8Znx8Oh8nJyWHKlClxqlBiLXINlPSmUhJdA4mnrGsgDXgB+B9w/R8f0zWwK41civA8j/T0dFauXPnnEHgc0Bs4FbtrLUkoFKJVq1YsX76ckCb0Aq2kayCitJEL6BpIJGVdAwDTgI7AycCKIh/XNbAzjVyK2LhxIytWrNjpgroR+BJYBFwKlHTJeJ7HihUr2LRpU3wKlZgp6RoAqAVcWcbX6RpIHKVdAwAZQFfsacaKYr+na2BnCpcitmzZssvH8oG3sTeXqcAq4EHgqBK+PlunWwZeSdfA+cDXQL9yfL2ugeAr6RqI6Ah8C8wr4+t1DRiFSxH16tXb5WN7YPMuk4HTsIuqD/A59oZzF3DgH5+bliRnYyeyotdAG+BN4BXgO+B47JHYt2V8va6B4CvpfSCiE3aDeW4ZX69rwChcimjcuDGtW7fe6XnpHUAqcDu2OqQ/sA/wV2AxcAuwHPhvrVo0evZZWLMm7nVL9DRu3JhjWrRgLHYDsT/QBfgb8H/YXEtuCV8XCoVo3bo1jRo1ilutEhslvQ9ErAHmAD2ABsV+T9fAzhQuRYRCIQYNGvTn/08HBgIjgF+KfF4BMB+4HNgLu9BSDz6Y0J13wv77w+mnw/jxOoQsaAoKCD3+OB+uX08v4FbgcMp+BFLU4MGDNZGbAIq/DxT3ArAd6FXC7+kaKKTVYsUUXd8+KxymDXAEkFfK5++0vj0UgldegenTYeFC2wqiY0c7R/v880HDZf96+2247jr46ivyL7mEQ2fN4oe8vHItR1aPQ+LZXZ/L2cC1wE3YkmRdA7vSyKWYBg0aMHPmTDphj0KGUHawhEIhZs2aZRfUHnvAFVfAggXw888wZoydod2rl23B3b07zJplp1+KP6xaBV27wllnQf368J//UGvqVJ6YNYtQKERKStkvkV2uAUkIkfeB0q6BN4AfsFVjKaGQroGSxGmbmWDZts3b3KKF925Kihf6Yw8hStlbbMGCBbv/81av9rwRIzzv6KM9Dzyvfn3Pu/xyz5s/3/O2b4/5X0dKkJ3tebff7nm1anles2ae9/zztq9cEbvbV6pC14AEUlnXwJHgvQZep9q1dQ2UQOFSksce87waNbzN77/vZWRkeK1bt97pomrdurWXkZHhZWVlVfzP/uYbz/vXvzwvPd2CpkkTz+vf3/M++MDzduyI/
t9FdrZjh+dNnep5++5rwXLXXZ63ZUupn56ZmRn9a0ACpaxrYFGXLl7+JZd4Xl6e6zJ9R3MuxW3cCIceChdcAE8+CVhz1KZNm8jOziYtLY1GjRpVfdLO82DpUpufeeEFWLvWFgP07GlzNEcfre27o23xYhg8GD75BLp1g5EjoUWLcn1pTK4BCZQSr4F166B/f3vkfdFFrkv0FYVLcYMH2zGlX38NTZvG53uGw/DhhxY0L71kAXfIIXDhhXbBHnRQfOpIVL/+CrfdBpMmwZFH2nk9Z5zhuipJFJMn20mU48fboYMCKFx29vXXcMwxMHw43Hijmxq2b4e33rKgeeUV2LIFjj3WQqZnT2je3E1dQZSfb0Fy331Qq5YdQXvVVVC9uuvKJJHk5sI119iNy803u67GNxQuEZ4Hf/ubrR7673+hZk3XFdlFO3euBc3cubBtG7Rvb0HTrZvukkrjeTBnjt0grFoFAwbAPfdAw4auK5NE9dZb8Oij8NBD9tRBtBT5T3Pn2gUycqQ/ggUgNdVCZOZMWLcOnn3WPjZwIOy9N/z1rzB1Kmgvo0Jffw1nn23nmbdsaTcKGRkKFomts86C1q3tqGPdrwMauZj8fDjqKHszmjfP/xPp69fDyy/biObDD6F2bfj7321E87e/2f9PNpmZMHQoPPaYTdI//DCcc47//y0lcXz9Ndx6qzXjnnWW62qcU7iAvRHdfrut3jrsMNfVVMyPP8KLL1rQLFtmjYAXXGBBc9ZZiT+/sGMHTJgAd95pNwl33WUv7lq1XFcmyWjkSPjyS5vcT011XY1TCpf1623p8SWX2DPTIPv2W1vWPH06fP89NGlSuETylFNgN93mgfPeexYk//d/tjPC8OGwzz6uq5Jk9ttvcO219li2V0m7jyUPhUu/frYlyzffQOPGrquJDs+zUUykh2bNGthvv8KlzUHvofnhB7jlFlu2feKJdlNwwgmuqxIx06bZe8q4cTY3mqSSO1w+/9zelB55xFYUJaKSemgOPthCJmg9NFu3wogR9uihUSP73xdfnHgjMgm2vDwbvRx0kPVXJankDRfPszmJ336zuZYaNVxXFHuRHpoXXrAemuxs6+uJ9NDst5/rCkvmeVbzP/9pjzFvvtletGUc6iTi1Lvv2lzusGFwxBGuq3EiecNl5kx7TDR3LnTq5Lqa+MvNtZVxzz9vP4P8/J17aJo0cV2hWbLE5lUWLbKFCqNGQatWrqsSKZvn2c3Qtm32ZCQJR9fJ9zcGe2MdMsSW7SZjsICtZOnatbCHZtIkqFMHBg2ySfG//hWmTLEjA1xYt8666Y8/Hn7/3UZcs2YpWCQYQiHo29eaeN9803U1TiRnuIwZYxtFjhrluhJ/2GMPuPxymD8ffvnFJsi3bLGPNW1a2MgZj3Notm2D0aPtefWsWTB2rC1OUN+ABM1BB8GZZ1qj89atrquJu+R7LPbzz9bL0revTQxL6Yr30KSl7dxDE+15qnnz4IYb4H//swnRoUMTZwWfJKdNm2xF6tlnQ+/erquJq+QLl9694d//tqXHOjWu/L77zkIm0kOz556FPTSnnlq1Z8rffmv7gP3733anN2ZM0k6CSgKaMcNeN489Bs2aua4mbpIrXBYvtmbCxx+3kYtUXGk9NJFzaNq2LX8PTVaW7Vj86KP2Z4weDeefH+weHJHitm2zM18OOMB2kEgSyRMunmeroXJyLGSqVXNdUfCFw7aKK9JDs2GD9dBEmjUPPrjkr9uxA555Bu64w/497rjDHocl455okhwWLbK+rHvuseX/SSB5wuX5522C+q234PTTXVeTeLZvh4ULC8+hKa2H5oMPbGnxsmVw2WXwwANJ9ahAkpTn2f6FmzfbSD0Jbm6TI1y2boU2bawbf8YM19UkvkgPzfTpdq5Kfr797HfssL6V44+3bfBPPtl1pSLxs3KljdD79rVdzBNccixFHjXKOvFHjHBdSXKI9NC8/DKsXm3zKJ99ZsGSkmJbtyxf7
q6HRsSFVq2gY0d7ipIEZzAlfrj8+KOFyw032HktEh+eZ6PEE0+0Ucwtt9id22OP2TzL5ZfDXnsV9tDk5rquWCT2Lr3U5iqff951JTGX+I/FLrkE3n/fDvJJS3NdTXL4/HObV3n/fdt6fPRoOPDAnT/np58Ke2iWLo19D42IX7zyCkyebHMv++/vupqYSeyRy6JFdvc8bJiCJR5++w2uucYm8n/7DRYsgNde2zVYwCb4b77ZHpV9+y3cdBN8+qltO7PvvrZ084MP7C5PJJGcc47tfPH00wl9JHLijlzCYZswDoXgo4+ScuO4uNm+3c6uuOce+3kPHWod9hUdfXiejXoiPTQ//VT5HhoRP/vPf+D+++0E1QQ9iyhxw2XyZNv48P33tSoplhYsgOuvt679q6+2psg996z6n1tSD81BBxWeQ1NaD41IEHge3H23bdA6blxCHkeemOGyebPtH/aXv9imcRJ9y5fbli1z5ljfUEYGHHVUbL5XST00bdtayFx4oX/PoREpy48/wuDBtrjlggtcVxN1ifms6MEHLWCGD3ddSeLZvNnOqWjTBr74wkYV77wTu2ABe7x29tk2Gl23zpY4t2plW2nsv7/tvPD44zbPIxIU++9vc4wvvmjHSiSYxBu5rFxpmx4OGQL/+pfrahJHOGxv7rfdZiOH226zSfjUVHc1bd4Mr75qI5rImRkdOtiI5oILoH59d7WJlEd2ti2COfXUhDtqPfHCpXt3a9j76is7/Eqq7qOPbPi+ZImdWT9iBDRv7rqqnf32m41opk+3VWa1akGXLhY0Xbq4DUGRssyebSvHHnkkoQ7DS6zHYu+8Y3eyDzygYImGNWusT+jUU20C8sMPYdo0/wUL2LHM115rCzh+/NFW4qxebTcbTZtCr162pf/27a4rFdnZ3/5mr6kEW5qcOCOXggLbsyotDd57T0tWqyI3Fx5+2Oas6tWzsL7iimAu5/7++8JzaL77zlaydetmI5p27YL5d5LEs3SpLeUfMsRu5hJA4oTLU0/ZM8uPP4bjjnNdTTB5nh0tfPPNdgz0ddfZpHkizF2U1EPTvHlhD80xx+iGRNy6914bdT/+ONSs6bqaKkuMcMnKgkMPteHlxImuqwmm//7X+lXeecfmKB5+2PpKElE4bPNIkR6a336zv2vkHJpDDnFdoSSjtWth4ECb1+ze3XU1VZYYzwTuvx/y8uxXqZiNG23E17Yt/PyzbTI5Z07iBgvYo7B27ax57eefYf58a7QdM8ZuUtq2hZEj7S5SJF6aNbOtYV56CTZtcl1NlQV/5PLdd3D00YXPK6V8tm+H8eOtSzgctl8HDkzuDSPz8nY+hyYvz0LooovsTrJJE9cVSqLbutWWJh93nD1JCLDgh8u558I331hDn47JLZ+33rL5lG++sS1y7r/ftr+XQps326ab06fDG2/YxyI9NOefD3vs4bQ8SWDz59u8y6hRgX6CEOzHYgsW2PLSESMULOWxYoW9MXbsCI0bW9/KU08pWEpSv74dwzxvHvz6q51Dk5trq+aaNi08DE3n0Ei0deoELVoEfmlycEcu27fbs/G997bubK30KV12ti0rfvhhe2McORJ69NDPrDLWrCk8h2bJElv6fv75NqLp0CG5HytK9HzxBdxxh+2CcfrprquplOCGy9ixtmR28WI48kjX1fhTOAzPPQe33mor6oYMsRMh1WAaHd9/b8uap0+3M2kaN7a5GfXQSDQ88IBdY088EcgnM8EMlw0bbFVPt272g5ddffqpbdnyn/9YL8dDDyX0qXdOeR783/8V9tD8+KN6aKTqfv3VDs3r1s2WJwdMMG+thg61u/KhQ11X4j8//2xbeJ90EmzbZtuhvPCCgiWWQiFbsThiBKxaZdvknHsuTJliq34OPthW4337retKJUj23tseuc6aFcgdv4M3cvnqK7sTHDEi8Ev1oiovzza+GzbMNmkcPhx694Zq1VxXlrwKCnY+h2bzZguhyDk0CnzZn
dxc6NcPDj/cHmkHSLDCxfPsXI+ffrKtPBJgi4Qq8zxbMnvTTfY4ZtAgO2qgQQPXlUlRJfXQnHpqYQ+NVuxJaRYutMP4HnzQDkEMiGA9Fps9G95+21Y7KVhsFNepk51dctBBtsLk4YcVLH5Uuzb84x/Wfb1+vT0yq1/fRt/77gudO8OkSQl5aJRU0ZlnwoEHBm5pcnBGLvn5tiqsdWuYOze5J0g3bbJn+E88Yec/PPKI7auWzD+ToNqwYedzaGrWtH/Liy6Cv/9d59CI+eYbW+05eLAteQ+A4IxcHnsMfvgBRo9O3jfRggLr3E1Pt1MhR4yAL7+0jSaT9WcSdHvuac/U33vPHmsOG2a/9uhhj8oijZw6hya5HXoonHYaTJ0amMbdYIxc1q2zH26vXra5YDJ65x3bsuXLL22iftgwa4iUxFRSD023bnZ4W/v2rqsTFzZssBuRc8+190Kfi3u4rP/uO3IquttsOAw7dlS6+3nP446jXsOGlfraWFj/3Xfk/PBDXL/nnscf76ufQbKr8DXgefZfKFTpUaquAX/JWruW/PXrK/ZFa9fanN1RR1WqSbd+ejqp9epV+Osqo3pcvksRP8ybx8HdusXnmz37LBx5JBtWrfLVi2r1nDkc0qOH/R/Pg19+gc8+szvULVugUSNbenjMMfa/o8BvP4Nkt9M1EGsTJ8JRR+ka8Jn1ixfTvKxTJ8NhWLnSdn7//Xdo2NB6po49FqpX7q1787p1iRsunudRf7/9Yv1NLFiGDYPHH2eTz/oJPM+jfvPmttprxAjbBTUlxbq669a1Pauefdb+d8+ecMMNtqKoCvMqmwLYhJXI4vY6eOYZ2/V6/Hg2HXBAbL+fVIjnedQp6RgHz7Nm3GeegeXLbV6ufn27+Zw7F9q0sUfjzZpV+D0he/PmKFW/e3EPl5iLvKD697fJ/969Ydky11Xtavhw2zvo6KPt0Kr27W2UUq2aTd6uW2cTuWPH2rLVhx6y56zar0rKw/NsxNKvny1P79PHn68D2ZnnWV/Lk0/CEUfY0cctWthIZds2G8k8/7ztq3jddbYTh08X8yTWO1XxYBkwwLc/eI44wlZ+vf22rQzaZx+oVcsuotRUu6CuvdY25hwwwA4Q6tdPq4Zk94oHy6BB/n0dSCHPsxvKceNsL7E77rDHYLVq2U1naqqNWoYOtRWiDz1kWw35dE1W4oxcghQsYMeZQtk1hkJQr5513J92mjVLrl0Lr76qrd2lZAqW4PrsM5gwwd7DOnYs/d+tenW49FILm4cfthNSDzkkvrWWQ2KMXIIWLFCxVT+hEJxxBnzyCXz8sXV679gR0/IkgBQswbVpk23v0rVr2cESEQrZ5555pjVU5+TEp84KCH64RF5Q/fvbCyoIwVIZoZDdnXz0kT2THTjQt8NhccDzbHuQfv1sxwYFS3B4nj0Ca9HC+pgqctPZv79t9zR0qO/eD4IdLpFgGTDAgqV//8R/QR1yiB3v/OST1q0rEgmWa6+1YBk4MPFfB4lkzhxbwDN0aMUX7FSrZqtiv/vOnmr4SHDDJRmDJaJdO9u8s3dv2ypEkpeCJdjy8uyR/tVX2/xqZTRubI/IHnnEtojyiWCGSzIHC9jf9cYb4eST7XztcNh1ReKCgiX4nnzSelg6d678nxEK2eqylBTbc9AnghcuRYPlkUeSL1giQiFbtvjLL7YWXpJL0WAZM0bBElQ9etiEfFX/7apVsxvO2bNtB3kfCFa4FA+Wa69N7hdUWprNu9x7r+03JMmheLAk6iKWZLDPPtCyZXT+rBNOsF09nngiOn9eFQUnXCIvKAXLzrp1s8aqs8/23WoRiQHPs14IBUviiNa/XyhkRyG/+64vmq2DES6RYBk4UMFSXChkq8eWLbNuXUlckWDp31/BIiU76ijr6PfBSlL/h4uCZff22cc6ds8/X6OXRKVgkfIIhazXafZs5wt9/B0uCpbyCYXsjScryw6YksSiYJGKOP10u2Y++shpGf4Nl
6LBMmaMgmV3ateGf/7Tdr/V0uTE4Xnw1FMWLBkZChbZvZQU6NDBNsZ1+CTDn+FSPFj69dMLqjzuvde25fbB81aJgkiwDBhgwZKsy+6l4vr0ga1bITPTWQn+CxcFS+XVqAG33mqjPM29BJuCRaqidm07ZGzcOGcl+CtcFCxVd/fdtqXE66+7rkQqS8EiVRUK2ealS5Y4e0zun3CJTFoOHGgvKAVL5dSoYUPiK67Q6CWIPM+2BBkwAB59VMEilXfUUfbrl186+fb+CJdIsAwaZMFyzTV6QVVFRoatHPviC9eVSEVEgmXgQAsWLWKRqgiF7MRbR4/G3IeLgiX66tSBU0+F7t01egkKBYvEwoAB8OuvTg4XdBsuCpbYmT4dvv8eNm92XYnsjoJFYqVpU1ua7GD3DnfhEpm0HDTIXlAKluhq3txWiwwc6LoSKYvnwfjx9u80dqyCRaIrFIKTToJnn437t3YTLpERy+DBFixXX60XVLSFQrbybto0NVX6VWTEMmiQBYsWsUgs9Olj/S5xfjTmJlyefrpwxKJgiZ1zzrFfZ892W4eULLI6UsEisdS4sV1bn3wS129bPa7fDQgB+QDDh9vpaVu3xrsE50JA/pYt8flml11mZ73st198vp+USwjI9zx44AHbdDQJXwfJLgRsz82Nzzc77TTIzoa9947P9wNCnhff5UQ/fvopOT/8EM9vSdPTTqNhHH+ou/PjJ5/E72cQDkNKCk1PP91XP4NkF9dr4A+6Bvzlt+++Iz/Oh/w1OPxw6jVsGJfvFfdwERGRxOe+z6U8PE+T0iKep76lZFdQAL//7rqKcglGuIwda6erPfxw8r64li61SbmlS11XIq5ccAE0bAhvv+26EnFh/Xq47jpbZZuf77qa3QpGuAwcCLfdBkOG2CqzggLXFYnE35Qp1rPQuTNMnuy6Gomn//0PbrnFjtQYPtxutn0uGOGSkmJnlTz5pC1j7toV4rXaSsQv6te3ZeVXXGH/DR2avCP5ZLJ4sd1cN2kCo0ZBs2auKyqXuC9FrpLeva3z/MIL4cwz4bXX7Px4kWRRo4btbNGqFdx+O6xaZf+/Zk3XlUkszJtnN9UnnQQ33hiIEUtEMEYuRXXqBO++a88f27WDr792XZFIfIVCdic7bZrtIffXv9ou2JI4PM+2bBk/3pqhhwwJVLBAEMMF4MgjbSO2PfaA9u01wSnJ6eKL4c03Ydkyu9GKc9+MxMi2bfDQQ/Dqq9C3L1x1lU0NBEzwKo5o3txGMCedBF262GSnSLI57TT46CPIybHXwpIlriuSqvj9d7jzTptnuf32wi2cAii44QI2wfnqq9Crl23Odu+9muCU5HPIIbZv1P77W9jMmeO6IqmMn3+2FWG//GLbAp14ouuKqiTY4QI2wTl+PNx/P9x3n4XMtm2uqxKJr732gnfesTnJ886Dxx93XZFUxDffWLBUr24rwtLTXVdUZcEPF7AJziFDYOpUePFF+PvfNcEpyadOHXj5ZWuyGzDA3qy0s4X/ffihPQo74ACba2na1HVFURGspci7c+GFtga8a1d7PDB7tv2DiSSLatXgkUegZUu4/npYvdrmI1NTXVcmxXkevPIKTJoEZ5xhDeI1ariuKmoSY+RSVPv28MEHkJtrK2i0XYoko8GDYdYsmDsXzjoLfvvNdUVS1I4d9jh/0iTo2RNuuCGhggUSMVwADj4YFi2yM0z+8hd7gYkkm/PPtxWVK1bAKafA8uWuKxKAvDwYNgwWLLDRyiWXJORBcYkZLmATnG+9BR07wj/+YXcJIsnmhBNsJVm1anDyyXbTJe5s2gS33gpffQV3323vTwkqccMFbILzxRdt48tBg2zSXxOckmxatrRemMMPt0dkL73kuqLk9MMPcPPNsHkzjBgBbdu6riimEjtcwO7YRo+2Sc5HHrGu5ngdLSriF40a2WOYrl2hRw8YOVI9YfH0f/9nN7dpafazb9HCdUUxl1irxcoycKA1mV16qfUCz
Jplu4yKJItateC552zTy3/+E1autLOSqifP24ATCxfCY4/BUUdZwCTJyr3EH7kUde659g+9cqWtKvvf/1xXJBJfoZA1Gz/9NEyYYA2XOr4iNjwPnn8eMjKgQwe4666kCRZItnABOP54m9SsXt2WKn/0keuKROKvTx/bzv2DD6wn7OefXVeUWAoKYMwYeOEFuPxy6N/fHtEnkeQLF7DnnR98AG3a2CMyTXBKMurUybrD16+3TS+//NJ1RYlhyxZbCfbBB7ZLQteuCbnUeHeSM1zAziKfN8/+4S++2Pbz0QSnJJsjj4RPP7UJ/1NPtcfGUnnr19u8yurV9vixfXvXFTmTvOECNsE5aZJtbX3bbTbpX1DguiqR+GrWzO6yTzkFzj7bXhNScf/7ny013r7d9ghr08Z1RU4ld7iADVeHDrWjYidOtIZLTXBKsklLg9dfhyuvtP/uvlsj+Yr4z3/sBnWvvWypcUDOuY8lrUGMuPJKO4CsZ0/bMua112DffV1XJRI/NWrYee2tWtkb5apVtqqsZk3Xlfnb3Ll2c3rSSXDTTfp5/UEjl6I6doT33oMNG2wl2Vdfua5IJL5CIdueZPp0293i7LN1fEVpPA+eecYC+bzzbK5FwfInhUtxRxxhK2gaNrQlmprglGR04YW2N9/nn9tE/+rVrivyl23bbAuX116Da66B3r0Dec59LOmnUZJmzWw32ZNPtoPHpkxxXZFI/LVvDx9/bLv4nnQSfPaZ64r84fff4Y47YMkS+7VLF9cV+ZLCpTRpafDqq3DFFdZwNnSoJjgl+Rx8sAVMixZw+ul2AF8yW7vWelfWrbNz7k84wXVFvqVwKUv16nYW+bBhcP/9NvTdts11VSLxtdde8Pbb0LmznREzbpzritz4+mvbk61GDeuLO/BA1xX5msJld0Ihu6Ceew5mzLAhcGam66pE4qtOHdvJ4rrrrB/s5puT6/iKDz6wc+5btLC5lr32cl2R72kpcnn17GlzMf/4h030z5kDBxzguiqR+KlWDR5+2M6Hue46m+SfOjWxN2P0PNtBffLkhDznPpY0cqmIdu1sJVl+vq2gWbLEdUUi8TdoELzyim2fdOaZ8NtvriuKjR074IknLFgS9Jz7WFK4VNRBB1nAHHCAvbDmzHFdkUj8nXee9YStXGmrKr//3nVF0ZWba3uDvfkmDB6csOfcx5LCpTL22ssuuk6dbOPLJ55wXZFI/B1/PHzyiTUOnnyy3XQlgo0bbYeCb7+1bXA6dHBdUSApXCqrTh07q2HQILuzueWW5JrgFAGbf1m0yJqPO3Swrv4g++EHey1Hzrk/+mjXFQWWwqUqqlWzJYljxsCjj8JFF9lwWiSZNGwICxZA9+7W2T9iRDB7wj7/3FaG1q9vr2st2KkSrRaLhgEDYP/97blsx4422dmkieuqROKnVi3byaJFC9ubbNUqOze+ekDeYt56y/p3jj7aAiaRV8DFiUYu0XLOOdZotnq1rSRbvtx1RSLxFQrZJPjEifbfuedCdrbrqsrmeTBtmj156NjRelkULFGhcImm446z58+1atmy5UWLXFckEn+9e9sy5Q8/tJ6wn392XVHJtm+HRx6xeaIrroBrr026c+5jSeESbQccAO+/bxOcnTpZV79IsunY0W6uNmyAE0+EL75wXdHOtmyBe+6xGm+5xZqjtdQ4qhQusdCwoR0g1L27zcOMHBnMCU6RqjjiCPj0U9hzT3tU/Oabrisy69bZvIrOuY8phUus1KoFzz5rW3LffrtN+hcUuK5KJL723ddG8u3awd/+ZodrubR8ue2LVlBg59wfdpjbehKYwiWWQiEbek+YYEFz/vn+n+AUiba0NHj9dTu6ok8fuOsuNyP5Tz+15si999Y593EQkHWCAXfFFdC8ue1P9Je/2Ol1urAlmVSvbjtZtGplxwGvXg1PP20j/HiYM8du8k4+GW68UccRx4FGLvHSoYPtxbRxoz0i+PJL1xWJxFfk+IoXXrCFL
mefHfvjK8JhC7GnntI593GmcImnww+31SmNG9sSzbfecl2RSPz17AkLF8J//2sT/atXx+b75OfbbgGzZxeec68VYXGjcIm3ffeFd96xF9U558CkSa4rEom/du3go48sAE48ERYvju6fHznnfulSnXPviMLFhbQ02yLmyiuhb1/beVVLlSXZHHyw7arcqpUdxPX669H5c9eutRVh69frnHuHFC6uVK9uexk98AAMH26T/vn5rqsSia8mTWzbpLPPttWUY8dW7c/76itriqxZU+fcO6ZwcSkUsjusadPg5Zdt6F7CBKfneWT+8fHMzEw8jXIkkaSmwksv2UmPgwfbaq4dO3b5NM/z2LBhA6tXr2bDhg27vg7ef9+WObdsaT0sOufeKYWLH/ToAW+8YVtktG//5wRnVlYWGRkZpKenc9YfBxad1aED6enpZGRkkJWV5a5mkWhKSYHRo23kkpFhu1vk5AA7vw6aNGlCy5YtadKkSeHrIDPTbs5GjbLXz9ChULeu47+QhDzdBvvH8uU2etm6lY9vvZWOt91Gzh8vsKM9j6XAMcDnf6x4qVOnDjNnzqRz587uahaJttmz7VyYI47gnRtu4Jw+ff58HRR9uwqFQqR4HoNr1uTWtm3Za/BgO1NJK8J8QSMXP0lPh0WLyGzQgDaDB9MhJwfP83YZ/kc+lpubS5cuXViwYIGjgkVi4Jxz4L33yP/+e/a/8EL2K+V1UMvzuBM4bds2Ll28mAWNGytYfETh4jNZNWqQvno184EZnkf/Mj43HA7jeR5du3bVIzJJKFkHHkjbvDzygUWeR7tiv98YeBA4BLgbWAh6HfiMwsVnJk+ezKbcXC4Exvzx3yhK/4cKh8Pk5OQwZcqUOFUoEnuTJ0/m27w8TgX+D3gL6PnH7x0AjATSgCHAf9HrwI805+IjnueRnp7OypUr/3wE0A8LmPeBjticy7JiXxcKhWjVqhXLly8npMcCEnDFXwc1gKeBXsCjQFPgF+BeoOjaSr0O/EXh4iMbNmygSZMmu3y8CzAdu1MrKVyKfn3jxo1jV6BIHJT2OhgJ3Ay8DFwKlNYVpteBP+ixmI9s2bKlxI/PBa4px9dnazt/SQClvQ5uAQYCF1N6sIBeB36hLfd9pF69eqX+3qvYqOXbMr4+LS0tyhWJxF9Zr4Nx5fh6vQ78QSMXH2ncuDGtW7cu8XlxLvY4LLeErwuFQrRu3ZpGjRrFukSRmCvrdVAWvQ78ReHiI6FQiEGDBlXqawcPHqxJTEkIeh0kBk3o+0xWVhbNmzcnNzeXcDi8289PSUkhNTWVNWvW0KBBg9gXKBIHeh0En0YuPtOgQQNmzpxpW1uklP3Pk5KSQigUYtasWXpBSULR6yD4FC4+1LlzZ+bOnUtqaiqhUGiXYX7kY6mpqcybN49OnTo5qlQkdvQ6CDaFi0917tyZNWvWMGbMGFq1arXT77Vq1YoxY8awdu1avaAkoel1EFyacwkAz/PYtGkT2dnZpKWl0ahRI01aStLR6yBYFC4iIhJ1eiwmIiJRp3AREZGoU7iIiEjUKVxERCTqFC4iIhJ1ChcREYk6hYuIiESdwkVERKJO4SIiIlGncBERkahTuIiISNQpXEREJOoULiIiEnUKFxERibr/B7KVoyQY3EgwAAAAAElFTkSuQmCC" + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "execution_count": 8 + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-09T06:27:43.693595Z", + "start_time": "2024-05-09T06:27:43.660702Z" + } + }, + "cell_type": "code", + "source": "model.symbolic_formula()", + "id": "e0e6a6c82ea4697a", + 
"outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "make sure all activations need to be converted to symbolic formulas first!\n" + ] + } + ], + "execution_count": 9 + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2024-05-09T06:27:43.696569Z", + "start_time": "2024-05-09T06:27:43.694578Z" + } + }, + "cell_type": "code", + "source": "", + "id": "d4cf25a93d7d3f35", + "outputs": [], + "execution_count": 9 + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/dl/kan/kan/feynman.py b/dl/kan/kan/feynman.py new file mode 100644 index 000000000..6cc55e96f --- /dev/null +++ b/dl/kan/kan/feynman.py @@ -0,0 +1,739 @@ +from sympy import * +import torch + + +def get_feynman_dataset(name): + + global symbols + + tpi = torch.tensor(torch.pi) + + if name == 'test': + symbol = x, y = symbols('x, y') + expr = (x+y) * sin(exp(2*y)) + f = lambda x: (x[:,[0]] + x[:,[1]])*torch.sin(torch.exp(2*x[:,[1]])) + ranges = [-1,1] + + if name == 'I.6.20a' or name == 1: + symbol = theta = symbols('theta') + symbol = [symbol] + expr = exp(-theta**2/2)/sqrt(2*pi) + f = lambda x: torch.exp(-x[:,[0]]**2/2)/torch.sqrt(2*tpi) + ranges = [[-3,3]] + + if name == 'I.6.20' or name == 2: + symbol = theta, sigma = symbols('theta sigma') + expr = exp(-theta**2/(2*sigma**2))/sqrt(2*pi*sigma**2) + f = lambda x: torch.exp(-x[:,[0]]**2/(2*x[:,[1]]**2))/torch.sqrt(2*tpi*x[:,[1]]**2) + ranges = [[-1,1],[0.5,2]] + + if name == 'I.6.20b' or name == 3: + symbol = theta, theta1, sigma = symbols('theta theta1 sigma') + expr = exp(-(theta-theta1)**2/(2*sigma**2))/sqrt(2*pi*sigma**2) + f = lambda x: 
torch.exp(-(x[:,[0]]-x[:,[1]])**2/(2*x[:,[2]]**2))/torch.sqrt(2*tpi*x[:,[2]]**2) + ranges = [[-1.5,1.5],[-1.5,1.5],[0.5,2]] + + if name == 'I.8.4' or name == 4: + symbol = x1, x2, y1, y2 = symbols('x1 x2 y1 y2') + expr = sqrt((x2-x1)**2+(y2-y1)**2) + f = lambda x: torch.sqrt((x[:,[1]]-x[:,[0]])**2+(x[:,[3]]-x[:,[2]])**2) + ranges = [[-1,1],[-1,1],[-1,1],[-1,1]] + + if name == 'I.9.18' or name == 5: + symbol = G, m1, m2, x1, x2, y1, y2, z1, z2 = symbols('G m1 m2 x1 x2 y1 y2 z1 z2') + expr = G*m1*m2/((x2-x1)**2+(y2-y1)**2+(z2-z1)**2) + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]/((x[:,[3]]-x[:,[4]])**2+(x[:,[5]]-x[:,[6]])**2+(x[:,[7]]-x[:,[8]])**2) + ranges = [[-1,1],[-1,1],[-1,1],[-1,-0.5],[0.5,1],[-1,-0.5],[0.5,1],[-1,-0.5],[0.5,1]] + + if name == 'I.10.7' or name == 6: + symbol = m0, v, c = symbols('m0 v c') + expr = m0/sqrt(1-v**2/c**2) + f = lambda x: x[:,[0]]/torch.sqrt(1-x[:,[1]]**2/x[:,[2]]**2) + ranges = [[0,1],[0,1],[1,2]] + + if name == 'I.11.19' or name == 7: + symbol = x1, y1, x2, y2, x3, y3 = symbols('x1 y1 x2 y2 x3 y3') + expr = x1*y1 + x2*y2 + x3*y3 + f = lambda x: x[:,[0]]*x[:,[1]] + x[:,[2]]*x[:,[3]] + x[:,[4]]*x[:,[5]] + ranges = [-1,1] + + if name == 'I.12.1' or name == 8: + symbol = mu, Nn = symbols('mu N_n') + expr = mu * Nn + f = lambda x: x[:,[0]]*x[:,[1]] + ranges = [-1,1] + + if name == 'I.12.2' or name == 9: + symbol = q1, q2, eps, r = symbols('q1 q2 epsilon r') + expr = q1*q2/(4*pi*eps*r**2) + f = lambda x: x[:,[0]]*x[:,[1]]/(4*tpi*x[:,[2]]*x[:,[3]]**2) + ranges = [[-1,1],[-1,1],[0.5,2],[0.5,2]] + + if name == 'I.12.4' or name == 10: + symbol = q1, eps, r = symbols('q1 epsilon r') + expr = q1/(4*pi*eps*r**2) + f = lambda x: x[:,[0]]/(4*tpi*x[:,[1]]*x[:,[2]]**2) + ranges = [[-1,1],[0.5,2],[0.5,2]] + + if name == 'I.12.5' or name == 11: + symbol = q2, Ef = symbols('q2, E_f') + expr = q2*Ef + f = lambda x: x[:,[0]]*x[:,[1]] + ranges = [-1,1] + + if name == 'I.12.11' or name == 12: + symbol = q, Ef, B, v, theta = symbols('q E_f B v theta') + expr 
= q*(Ef + B*v*sin(theta)) + f = lambda x: x[:,[0]]*(x[:,[1]]+x[:,[2]]*x[:,[3]]*torch.sin(x[:,[4]])) + ranges = [[-1,1],[-1,1],[-1,1],[-1,1],[0,2*tpi]] + + if name == 'I.13.4' or name == 13: + symbol = m, v, u, w = symbols('m u v w') + expr = 1/2*m*(v**2+u**2+w**2) + f = lambda x: 1/2*x[:,[0]]*(x[:,[1]]**2+x[:,[2]]**2+x[:,[3]]**2) + ranges = [[-1,1],[-1,1],[-1,1],[-1,1]] + + if name == 'I.13.12' or name == 14: + symbol = G, m1, m2, r1, r2 = symbols('G m1 m2 r1 r2') + expr = G*m1*m2*(1/r2-1/r1) + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]*(1/x[:,[4]]-1/x[:,[3]]) + ranges = [[0,1],[0,1],[0,1],[0.5,2],[0.5,2]] + + if name == 'I.14.3' or name == 15: + symbol = m, g, z = symbols('m g z') + expr = m*g*z + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]] + ranges = [[0,1],[0,1],[-1,1]] + + if name == 'I.14.4' or name == 16: + symbol = ks, x = symbols('k_s x') + expr = 1/2*ks*x**2 + f = lambda x: 1/2*x[:,[0]]*x[:,[1]]**2 + ranges = [[0,1],[-1,1]] + + if name == 'I.15.3x' or name == 17: + symbol = x, u, t, c = symbols('x u t c') + expr = (x-u*t)/sqrt(1-u**2/c**2) + f = lambda x: (x[:,[0]] - x[:,[1]]*x[:,[2]])/torch.sqrt(1-x[:,[1]]**2/x[:,[3]]**2) + ranges = [[-1,1],[-1,1],[-1,1],[1,2]] + + if name == 'I.15.3t' or name == 18: + symbol = t, u, x, c = symbols('t u x c') + expr = (t-u*x/c**2)/sqrt(1-u**2/c**2) + f = lambda x: (x[:,[0]] - x[:,[1]]*x[:,[2]]/x[:,[3]]**2)/torch.sqrt(1-x[:,[1]]**2/x[:,[3]]**2) + ranges = [[-1,1],[-1,1],[-1,1],[1,2]] + + if name == 'I.15.10' or name == 19: + symbol = m0, v, c = symbols('m0 v c') + expr = m0*v/sqrt(1-v**2/c**2) + f = lambda x: x[:,[0]]*x[:,[1]]/torch.sqrt(1-x[:,[1]]**2/x[:,[2]]**2) + ranges = [[-1,1],[-0.9,0.9],[1.1,2]] + + if name == 'I.16.6' or name == 20: + symbol = u, v, c = symbols('u v c') + expr = (u+v)/(1+u*v/c**2) + f = lambda x: (x[:,[0]]+x[:,[1]])/(1+x[:,[0]]*x[:,[1]]/x[:,[2]]**2) + ranges = [[-0.8,0.8],[-0.8,0.8],[1,2]] + + if name == 'I.18.4' or name == 21: + symbol = m1, r1, m2, r2 = symbols('m1 r1 m2 r2') + expr =
(m1*r1+m2*r2)/(m1+m2) + f = lambda x: (x[:,[0]]*x[:,[1]]+x[:,[2]]*x[:,[3]])/(x[:,[0]]+x[:,[2]]) + ranges = [[0.5,1],[-1,1],[0.5,1],[-1,1]] + + if name == 'I.18.12' or name == 22: + symbol = r, F, theta = symbols('r F theta') + expr = r*F*sin(theta) + f = lambda x: x[:,[0]]*x[:,[1]]*torch.sin(x[:,[2]]) + ranges = [[-1,1],[-1,1],[0,2*tpi]] + + if name == 'I.18.16' or name == 23: + symbol = m, r, v, theta = symbols('m r v theta') + expr = m*r*v*sin(theta) + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]*torch.sin(x[:,[3]]) + ranges = [[-1,1],[-1,1],[-1,1],[0,2*tpi]] + + if name == 'I.24.6' or name == 24: + symbol = m, omega, omega0, x = symbols('m omega omega_0 x') + expr = 1/4*m*(omega**2+omega0**2)*x**2 + f = lambda x: 1/4*x[:,[0]]*(x[:,[1]]**2+x[:,[2]]**2)*x[:,[3]]**2 + ranges = [[0,1],[-1,1],[-1,1],[-1,1]] + + if name == 'I.25.13' or name == 25: + symbol = q, C = symbols('q C') + expr = q/C + f = lambda x: x[:,[0]]/x[:,[1]] + ranges = [[-1,1],[0.5,2]] + + if name == 'I.26.2' or name == 26: + symbol = n, theta2 = symbols('n theta2') + expr = asin(n*sin(theta2)) + f = lambda x: torch.arcsin(x[:,[0]]*torch.sin(x[:,[1]])) + ranges = [[0,0.99],[0,2*tpi]] + + if name == 'I.27.6' or name == 27: + symbol = d1, d2, n = symbols('d1 d2 n') + expr = 1/(1/d1+n/d2) + f = lambda x: 1/(1/x[:,[0]]+x[:,[2]]/x[:,[1]]) + ranges = [[0.5,2],[1,2],[0.5,2]] + + if name == 'I.29.4' or name == 28: + symbol = omega, c = symbols('omega c') + expr = omega/c + f = lambda x: x[:,[0]]/x[:,[1]] + ranges = [[0,1],[0.5,2]] + + if name == 'I.29.16' or name == 29: + symbol = x1, x2, theta1, theta2 = symbols('x1 x2 theta1 theta2') + expr = sqrt(x1**2+x2**2-2*x1*x2*cos(theta1-theta2)) + f = lambda x: torch.sqrt(x[:,[0]]**2+x[:,[1]]**2-2*x[:,[0]]*x[:,[1]]*torch.cos(x[:,[2]]-x[:,[3]])) + ranges = [[-1,1],[-1,1],[0,2*tpi],[0,2*tpi]] + + if name == 'I.30.3' or name == 30: + symbol = I0, n, theta = symbols('I_0 n theta') + expr = I0 * sin(n*theta/2)**2 / sin(theta/2) ** 2 + f = lambda x: x[:,[0]] * 
torch.sin(x[:,[1]]*x[:,[2]]/2)**2 / torch.sin(x[:,[2]]/2)**2 + ranges = [[0,1],[0,4],[0.4*tpi,1.6*tpi]] + + if name == 'I.30.5' or name == 31: + symbol = lamb, n, d = symbols('lambda n d') + expr = asin(lamb/(n*d)) + f = lambda x: torch.arcsin(x[:,[0]]/(x[:,[1]]*x[:,[2]])) + ranges = [[-1,1],[1,1.5],[1,1.5]] + + if name == 'I.32.5' or name == 32: + symbol = q, a, eps, c = symbols('q a epsilon c') + expr = q**2*a**2/(eps*c**3) + f = lambda x: x[:,[0]]**2*x[:,[1]]**2/(x[:,[2]]*x[:,[3]]**3) + ranges = [[-1,1],[-1,1],[0.5,2],[0.5,2]] + + if name == 'I.32.17' or name == 33: + symbol = eps, c, Ef, r, omega, omega0 = symbols('epsilon c E_f r omega omega_0') + expr = nsimplify((1/2*eps*c*Ef**2)*(8*pi*r**2/3)*(omega**4/(omega**2-omega0**2)**2)) + f = lambda x: (1/2*x[:,[0]]*x[:,[1]]*x[:,[2]]**2)*(8*tpi*x[:,[3]]**2/3)*(x[:,[4]]**4/(x[:,[4]]**2-x[:,[5]]**2)**2) + ranges = [[0,1],[0,1],[-1,1],[0,1],[0,1],[1,2]] + + if name == 'I.34.8' or name == 34: + symbol = q, V, B, p = symbols('q V B p') + expr = q*V*B/p + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]/x[:,[3]] + ranges = [[-1,1],[-1,1],[-1,1],[0.5,2]] + + if name == 'I.34.10' or name == 35: + symbol = omega0, v, c = symbols('omega_0 v c') + expr = omega0/(1-v/c) + f = lambda x: x[:,[0]]/(1-x[:,[1]]/x[:,[2]]) + ranges = [[0,1],[0,0.9],[1.1,2]] + + if name == 'I.34.14' or name == 36: + symbol = omega0, v, c = symbols('omega_0 v c') + expr = omega0 * (1+v/c)/sqrt(1-v**2/c**2) + f = lambda x: x[:,[0]]*(1+x[:,[1]]/x[:,[2]])/torch.sqrt(1-x[:,[1]]**2/x[:,[2]]**2) + ranges = [[0,1],[-0.9,0.9],[1.1,2]] + + if name == 'I.34.27' or name == 37: + symbol = hbar, omega = symbols('hbar omega') + expr = hbar * omega + f = lambda x: x[:,[0]]*x[:,[1]] + ranges = [[-1,1],[-1,1]] + + if name == 'I.37.4' or name == 38: + symbol = I1, I2, delta = symbols('I_1 I_2 delta') + expr = I1 + I2 + 2*sqrt(I1*I2)*cos(delta) + f = lambda x: x[:,[0]] + x[:,[1]] + 2*torch.sqrt(x[:,[0]]*x[:,[1]])*torch.cos(x[:,[2]]) + ranges = [[0.1,1],[0.1,1],[0,2*tpi]] + + if 
name == 'I.38.12' or name == 39: + symbol = eps, hbar, m, q = symbols('epsilon hbar m q') + expr = 4*pi*eps*hbar**2/(m*q**2) + f = lambda x: 4*tpi*x[:,[0]]*x[:,[1]]**2/(x[:,[2]]*x[:,[3]]**2) + ranges = [[0,1],[0,1],[0.5,2],[0.5,2]] + + if name == 'I.39.10' or name == 40: + symbol = pF, V = symbols('p_F V') + expr = 3/2 * pF * V + f = lambda x: 3/2 * x[:,[0]] * x[:,[1]] + ranges = [[0,1],[0,1]] + + if name == 'I.39.11' or name == 41: + symbol = gamma, pF, V = symbols('gamma p_F V') + expr = pF * V/(gamma - 1) + f = lambda x: 1/(x[:,[0]]-1) * x[:,[1]] * x[:,[2]] + ranges = [[1.5,3],[0,1],[0,1]] + + if name == 'I.39.22' or name == 42: + symbol = n, kb, T, V = symbols('n k_b T V') + expr = n*kb*T/V + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]/x[:,[3]] + ranges = [[0,1],[0,1],[0,1],[0.5,2]] + + if name == 'I.40.1' or name == 43: + symbol = n0, m, g, x, kb, T = symbols('n_0 m g x k_b T') + expr = n0 * exp(-m*g*x/(kb*T)) + f = lambda x: x[:,[0]] * torch.exp(-x[:,[1]]*x[:,[2]]*x[:,[3]]/(x[:,[4]]*x[:,[5]])) + ranges = [[0,1],[-1,1],[-1,1],[-1,1],[1,2],[1,2]] + + if name == 'I.41.16' or name == 44: + symbol = hbar, omega, c, kb, T = symbols('hbar omega c k_b T') + expr = hbar * omega**3/(pi**2*c**2*(exp(hbar*omega/(kb*T))-1)) + f = lambda x: x[:,[0]]*x[:,[1]]**3/(tpi**2*x[:,[2]]**2*(torch.exp(x[:,[0]]*x[:,[1]]/(x[:,[3]]*x[:,[4]]))-1)) + ranges = [[0.5,1],[0.5,1],[0.5,2],[0.5,2],[0.5,2]] + + if name == 'I.43.16' or name == 45: + symbol = mu, q, Ve, d = symbols('mu q V_e d') + expr = mu*q*Ve/d + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]/x[:,[3]] + ranges = [[0,1],[0,1],[0,1],[0.5,2]] + + if name == 'I.43.31' or name == 46: + symbol = mu, kb, T = symbols('mu k_b T') + expr = mu*kb*T + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]] + ranges = [[0,1],[0,1],[0,1]] + + if name == 'I.43.43' or name == 47: + symbol = gamma, kb, v, A = symbols('gamma k_b v A') + expr = kb*v/A/(gamma-1) + f = lambda x: 1/(x[:,[0]]-1)*x[:,[1]]*x[:,[2]]/x[:,[3]] + ranges = [[1.5,3],[0,1],[0,1],[0.5,2]] + + if 
name == 'I.44.4' or name == 48: + symbol = n, kb, T, V1, V2 = symbols('n k_b T V_1 V_2') + expr = n*kb*T*log(V2/V1) + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]*torch.log(x[:,[4]]/x[:,[3]]) + ranges = [[0,1],[0,1],[0,1],[0.5,2],[0.5,2]] + + if name == 'I.47.23' or name == 49: + symbol = gamma, p, rho = symbols('gamma p rho') + expr = sqrt(gamma*p/rho) + f = lambda x: torch.sqrt(x[:,[0]]*x[:,[1]]/x[:,[2]]) + ranges = [[0.1,1],[0.1,1],[0.5,2]] + + if name == 'I.48.20' or name == 50: + symbol = m, v, c = symbols('m v c') + expr = m*c**2/sqrt(1-v**2/c**2) + f = lambda x: x[:,[0]]*x[:,[2]]**2/torch.sqrt(1-x[:,[1]]**2/x[:,[2]]**2) + ranges = [[0,1],[-0.9,0.9],[1.1,2]] + + if name == 'I.50.26' or name == 51: + symbol = x1, alpha, omega, t = symbols('x_1 alpha omega t') + expr = x1*(cos(omega*t)+alpha*cos(omega*t)**2) + f = lambda x: x[:,[0]]*(torch.cos(x[:,[2]]*x[:,[3]])+x[:,[1]]*torch.cos(x[:,[2]]*x[:,[3]])**2) + ranges = [[0,1],[0,1],[0,2*tpi],[0,1]] + + if name == 'II.2.42' or name == 52: + symbol = kappa, T1, T2, A, d = symbols('kappa T_1 T_2 A d') + expr = kappa*(T2-T1)*A/d + f = lambda x: x[:,[0]]*(x[:,[2]]-x[:,[1]])*x[:,[3]]/x[:,[4]] + ranges = [[0,1],[0,1],[0,1],[0,1],[0.5,2]] + + if name == 'II.3.24' or name == 53: + symbol = P, r = symbols('P r') + expr = P/(4*pi*r**2) + f = lambda x: x[:,[0]]/(4*tpi*x[:,[1]]**2) + ranges = [[0,1],[0.5,2]] + + if name == 'II.4.23' or name == 54: + symbol = q, eps, r = symbols('q epsilon r') + expr = q/(4*pi*eps*r) + f = lambda x: x[:,[0]]/(4*tpi*x[:,[1]]*x[:,[2]]) + ranges = [[0,1],[0.5,2],[0.5,2]] + + if name == 'II.6.11' or name == 55: + symbol = eps, pd, theta, r = symbols('epsilon p_d theta r') + expr = 1/(4*pi*eps)*pd*cos(theta)/r**2 + f = lambda x: 1/(4*tpi*x[:,[0]])*x[:,[1]]*torch.cos(x[:,[2]])/x[:,[3]]**2 + ranges = [[0.5,2],[0,1],[0,2*tpi],[0.5,2]] + + if name == 'II.6.15a' or name == 56: + symbol = eps, pd, z, x, y, r = symbols('epsilon p_d z x y r') + expr = 3/(4*pi*eps)*pd*z/r**5*sqrt(x**2+y**2) + f = lambda x: 
3/(4*tpi*x[:,[0]])*x[:,[1]]*x[:,[2]]/x[:,[5]]**5*torch.sqrt(x[:,[3]]**2+x[:,[4]]**2) + ranges = [[0.5,2],[0,1],[0,1],[0,1],[0,1],[0.5,2]] + + if name == 'II.6.15b' or name == 57: + symbol = eps, pd, r, theta = symbols('epsilon p_d r theta') + expr = 3/(4*pi*eps)*pd/r**3*cos(theta)*sin(theta) + f = lambda x: 3/(4*tpi*x[:,[0]])*x[:,[1]]/x[:,[2]]**3*torch.cos(x[:,[3]])*torch.sin(x[:,[3]]) + ranges = [[0.5,2],[0,1],[0.5,2],[0,2*tpi]] + + if name == 'II.8.7' or name == 58: + symbol = q, eps, d = symbols('q epsilon d') + expr = 3/5*q**2/(4*pi*eps*d) + f = lambda x: 3/5*x[:,[0]]**2/(4*tpi*x[:,[1]]*x[:,[2]]) + ranges = [[0,1],[0.5,2],[0.5,2]] + + if name == 'II.8.31' or name == 59: + symbol = eps, Ef = symbols('epsilon E_f') + expr = 1/2*eps*Ef**2 + f = lambda x: 1/2*x[:,[0]]*x[:,[1]]**2 + ranges = [[0,1],[0,1]] + + if name == 'I.10.9' or name == 60: + symbol = sigma, eps, chi = symbols('sigma epsilon chi') + expr = sigma/eps/(1+chi) + f = lambda x: x[:,[0]]/x[:,[1]]/(1+x[:,[2]]) + ranges = [[0,1],[0.5,2],[0,1]] + + if name == 'II.11.3' or name == 61: + symbol = q, Ef, m, omega0, omega = symbols('q E_f m omega_o omega') + expr = q*Ef/(m*(omega0**2-omega**2)) + f = lambda x: x[:,[0]]*x[:,[1]]/(x[:,[2]]*(x[:,[3]]**2-x[:,[4]]**2)) + ranges = [[0,1],[0,1],[0.5,2],[1.5,3],[0,1]] + + if name == 'II.11.17' or name == 62: + symbol = n0, pd, Ef, theta, kb, T = symbols('n_0 p_d E_f theta k_b T') + expr = n0*(1+pd*Ef*cos(theta)/(kb*T)) + f = lambda x: x[:,[0]]*(1+x[:,[1]]*x[:,[2]]*torch.cos(x[:,[3]])/(x[:,[4]]*x[:,[5]])) + ranges = [[0,1],[-1,1],[-1,1],[0,2*tpi],[0.5,2],[0.5,2]] + + + if name == 'II.11.20' or name == 63: + symbol = n, pd, Ef, kb, T = symbols('n p_d E_f k_b T') + expr = n*pd**2*Ef/(3*kb*T) + f = lambda x: x[:,[0]]*x[:,[1]]**2*x[:,[2]]/(3*x[:,[3]]*x[:,[4]]) + ranges = [[0,1],[0,1],[0,1],[0.5,2],[0.5,2]] + + if name == 'II.11.27' or name == 64: + symbol = n, alpha, eps, Ef = symbols('n alpha epsilon E_f') + expr = n*alpha/(1-n*alpha/3)*eps*Ef + f = lambda x: 
x[:,[0]]*x[:,[1]]/(1-x[:,[0]]*x[:,[1]]/3)*x[:,[2]]*x[:,[3]] + ranges = [[0,1],[0,2],[0,1],[0,1]] + + if name == 'II.11.28' or name == 65: + symbol = n, alpha = symbols('n alpha') + expr = 1 + n*alpha/(1-n*alpha/3) + f = lambda x: 1 + x[:,[0]]*x[:,[1]]/(1-x[:,[0]]*x[:,[1]]/3) + ranges = [[0,1],[0,2]] + + if name == 'II.13.17' or name == 66: + symbol = eps, c, l, r = symbols('epsilon c l r') + expr = 1/(4*pi*eps*c**2)*(2*l/r) + f = lambda x: 1/(4*tpi*x[:,[0]]*x[:,[1]]**2)*(2*x[:,[2]]/x[:,[3]]) + ranges = [[0.5,2],[0.5,2],[0,1],[0.5,2]] + + if name == 'II.13.23' or name == 67: + symbol = rho, v, c = symbols('rho v c') + expr = rho/sqrt(1-v**2/c**2) + f = lambda x: x[:,[0]]/torch.sqrt(1-x[:,[1]]**2/x[:,[2]]**2) + ranges = [[0,1],[0,1],[1,2]] + + if name == 'II.13.34' or name == 68: + symbol = rho, v, c = symbols('rho v c') + expr = rho*v/sqrt(1-v**2/c**2) + f = lambda x: x[:,[0]]*x[:,[1]]/torch.sqrt(1-x[:,[1]]**2/x[:,[2]]**2) + ranges = [[0,1],[0,1],[1,2]] + + if name == 'II.15.4' or name == 69: + symbol = muM, B, theta = symbols('mu_M B theta') + expr = - muM * B * cos(theta) + f = lambda x: - x[:,[0]]*x[:,[1]]*torch.cos(x[:,[2]]) + ranges = [[0,1],[0,1],[0,2*tpi]] + + if name == 'II.15.5' or name == 70: + symbol = pd, Ef, theta = symbols('p_d E_f theta') + expr = - pd * Ef * cos(theta) + f = lambda x: - x[:,[0]]*x[:,[1]]*torch.cos(x[:,[2]]) + ranges = [[0,1],[0,1],[0,2*tpi]] + + if name == 'II.21.32' or name == 71: + symbol = q, eps, r, v, c = symbols('q epsilon r v c') + expr = q/(4*pi*eps*r*(1-v/c)) + f = lambda x: x[:,[0]]/(4*tpi*x[:,[1]]*x[:,[2]]*(1-x[:,[3]]/x[:,[4]])) + ranges = [[0,1],[0.5,2],[0.5,2],[0,1],[1,2]] + + if name == 'II.24.17' or name == 72: + symbol = omega, c, d = symbols('omega c d') + expr = sqrt(omega**2/c**2-pi**2/d**2) + f = lambda x: torch.sqrt(x[:,[0]]**2/x[:,[1]]**2-tpi**2/x[:,[2]]**2) + ranges = [[1,1.5],[0.75,1],[1*tpi,1.5*tpi]] + + if name == 'II.27.16' or name == 73: + symbol = eps, c, Ef = symbols('epsilon c E_f') + expr = eps * c * 
Ef**2 + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]**2 + ranges = [[0,1],[0,1],[-1,1]] + + if name == 'II.27.18' or name == 74: + symbol = eps, Ef = symbols('epsilon E_f') + expr = eps * Ef**2 + f = lambda x: x[:,[0]]*x[:,[1]]**2 + ranges = [[0,1],[-1,1]] + + if name == 'II.34.2a' or name == 75: + symbol = q, v, r = symbols('q v r') + expr = q*v/(2*pi*r) + f = lambda x: x[:,[0]]*x[:,[1]]/(2*tpi*x[:,[2]]) + ranges = [[0,1],[0,1],[0.5,2]] + + if name == 'II.34.2' or name == 76: + symbol = q, v, r = symbols('q v r') + expr = q*v*r/2 + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]/2 + ranges = [[0,1],[0,1],[0,1]] + + if name == 'II.34.11' or name == 77: + symbol = g, q, B, m = symbols('g q B m') + expr = g*q*B/(2*m) + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]/(2*x[:,[3]]) + ranges = [[0,1],[0,1],[0,1],[0.5,2]] + + if name == 'II.34.29a' or name == 78: + symbol = q, h, m = symbols('q h m') + expr = q*h/(4*pi*m) + f = lambda x: x[:,[0]]*x[:,[1]]/(4*tpi*x[:,[2]]) + ranges = [[0,1],[0,1],[0.5,2]] + + if name == 'II.34.29b' or name == 79: + symbol = g, mu, B, J, hbar = symbols('g mu B J hbar') + expr = g*mu*B*J/hbar + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]*x[:,[3]]/x[:,[4]] + ranges = [[0,1],[0,1],[0,1],[0,1],[0.5,2]] + + if name == 'II.35.18' or name == 80: + symbol = n0, mu, B, kb, T = symbols('n0 mu B k_b T') + expr = n0/(exp(mu*B/(kb*T))+exp(-mu*B/(kb*T))) + f = lambda x: x[:,[0]]/(torch.exp(x[:,[1]]*x[:,[2]]/(x[:,[3]]*x[:,[4]]))+torch.exp(-x[:,[1]]*x[:,[2]]/(x[:,[3]]*x[:,[4]]))) + ranges = [[0,1],[0,1],[0,1],[0.5,2],[0.5,2]] + + if name == 'II.35.21' or name == 81: + symbol = n, mu, B, kb, T = symbols('n mu B k_b T') + expr = n*mu*tanh(mu*B/(kb*T)) + f = lambda x: x[:,[0]]*x[:,[1]]*torch.tanh(x[:,[1]]*x[:,[2]]/(x[:,[3]]*x[:,[4]])) + ranges = [[0,1],[0,1],[0,1],[0.5,2],[0.5,2]] + + if name == 'II.36.38' or name == 82: + symbol = mu, B, kb, T, alpha, M, eps, c = symbols('mu B k_b T alpha M epsilon c') + expr = mu*B/(kb*T) + mu*alpha*M/(eps*c**2*kb*T) + f = lambda x: 
x[:,[0]]*x[:,[1]]/(x[:,[2]]*x[:,[3]]) + x[:,[0]]*x[:,[4]]*x[:,[5]]/(x[:,[6]]*x[:,[7]]**2*x[:,[2]]*x[:,[3]]) + ranges = [[0,1],[0,1],[0.5,2],[0.5,2],[0,1],[0,1],[0.5,2],[0.5,2]] + + if name == 'II.37.1' or name == 83: + symbol = mu, chi, B = symbols('mu chi B') + expr = mu*(1+chi)*B + f = lambda x: x[:,[0]]*(1+x[:,[1]])*x[:,[2]] + ranges = [[0,1],[0,1],[0,1]] + + if name == 'II.38.3' or name == 84: + symbol = Y, A, x, d = symbols('Y A x d') + expr = Y*A*x/d + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]/x[:,[3]] + ranges = [[0,1],[0,1],[0,1],[0.5,2]] + + if name == 'II.38.14' or name == 85: + symbol = Y, sigma = symbols('Y sigma') + expr = Y/(2*(1+sigma)) + f = lambda x: x[:,[0]]/(2*(1+x[:,[1]])) + ranges = [[0,1],[0,1]] + + if name == 'III.4.32' or name == 86: + symbol = hbar, omega, kb, T = symbols('hbar omega k_b T') + expr = 1/(exp(hbar*omega/(kb*T))-1) + f = lambda x: 1/(torch.exp(x[:,[0]]*x[:,[1]]/(x[:,[2]]*x[:,[3]]))-1) + ranges = [[0.5,1],[0.5,1],[0.5,2],[0.5,2]] + + if name == 'III.4.33' or name == 87: + symbol = hbar, omega, kb, T = symbols('hbar omega k_b T') + expr = hbar*omega/(exp(hbar*omega/(kb*T))-1) + f = lambda x: x[:,[0]]*x[:,[1]]/(torch.exp(x[:,[0]]*x[:,[1]]/(x[:,[2]]*x[:,[3]]))-1) + ranges = [[0,1],[0,1],[0.5,2],[0.5,2]] + + if name == 'III.7.38' or name == 88: + symbol = mu, B, hbar = symbols('mu B hbar') + expr = 2*mu*B/hbar + f = lambda x: 2*x[:,[0]]*x[:,[1]]/x[:,[2]] + ranges = [[0,1],[0,1],[0.5,2]] + + if name == 'III.8.54' or name == 89: + symbol = E, t, hbar = symbols('E t hbar') + expr = sin(E*t/hbar)**2 + f = lambda x: torch.sin(x[:,[0]]*x[:,[1]]/x[:,[2]])**2 + ranges = [[0,2*tpi],[0,1],[0.5,2]] + + if name == 'III.9.52' or name == 90: + symbol = pd, Ef, t, hbar, omega, omega0 = symbols('p_d E_f t hbar omega omega_0') + expr = pd*Ef*t/hbar*sin((omega-omega0)*t/2)**2/((omega-omega0)*t/2)**2 + f = lambda x: x[:,[0]]*x[:,[1]]*x[:,[2]]/x[:,[3]]*torch.sin((x[:,[4]]-x[:,[5]])*x[:,[2]]/2)**2/((x[:,[4]]-x[:,[5]])*x[:,[2]]/2)**2 + ranges = 
[[0,1],[0,1],[0,1],[0.5,2],[0,tpi],[0,tpi]] + + if name == 'III.10.19' or name == 91: + symbol = mu, Bx, By, Bz = symbols('mu B_x B_y B_z') + expr = mu*sqrt(Bx**2+By**2+Bz**2) + f = lambda x: x[:,[0]]*torch.sqrt(x[:,[1]]**2+x[:,[2]]**2+x[:,[3]]**2) + ranges = [[0,1],[0,1],[0,1],[0,1]] + + if name == 'III.12.43' or name == 92: + symbol = n, hbar = symbols('n hbar') + expr = n * hbar + f = lambda x: x[:,[0]]*x[:,[1]] + ranges = [[0,1],[0,1]] + + if name == 'III.13.18' or name == 93: + symbol = E, d, k, hbar = symbols('E d k hbar') + expr = 2*E*d**2*k/hbar + f = lambda x: 2*x[:,[0]]*x[:,[1]]**2*x[:,[2]]/x[:,[3]] + ranges = [[0,1],[0,1],[0,1],[0.5,2]] + + if name == 'III.14.14' or name == 94: + symbol = I0, q, Ve, kb, T = symbols('I_0 q V_e k_b T') + expr = I0 * (exp(q*Ve/(kb*T))-1) + f = lambda x: x[:,[0]]*(torch.exp(x[:,[1]]*x[:,[2]]/(x[:,[3]]*x[:,[4]]))-1) + ranges = [[0,1],[0,1],[0,1],[0.5,2],[0.5,2]] + + if name == 'III.15.12' or name == 95: + symbol = U, k, d = symbols('U k d') + expr = 2*U*(1-cos(k*d)) + f = lambda x: 2*x[:,[0]]*(1-torch.cos(x[:,[1]]*x[:,[2]])) + ranges = [[0,1],[0,2*tpi],[0,1]] + + if name == 'III.15.14' or name == 96: + symbol = hbar, E, d = symbols('hbar E d') + expr = hbar**2/(2*E*d**2) + f = lambda x: x[:,[0]]**2/(2*x[:,[1]]*x[:,[2]]**2) + ranges = [[0,1],[0.5,2],[0.5,2]] + + if name == 'III.15.27' or name == 97: + symbol = alpha, n, d = symbols('alpha n d') + expr = 2*pi*alpha/(n*d) + f = lambda x: 2*tpi*x[:,[0]]/(x[:,[1]]*x[:,[2]]) + ranges = [[0,1],[0.5,2],[0.5,2]] + + if name == 'III.17.37' or name == 98: + symbol = beta, alpha, theta = symbols('beta alpha theta') + expr = beta * (1+alpha*cos(theta)) + f = lambda x: x[:,[0]]*(1+x[:,[1]]*torch.cos(x[:,[2]])) + ranges = [[0,1],[0,1],[0,2*tpi]] + + if name == 'III.19.51' or name == 99: + symbol = m, q, eps, hbar, n = symbols('m q epsilon hbar n') + expr = - m * q**4/(2*(4*pi*eps)**2*hbar**2)*1/n**2 + f = lambda x: - x[:,[0]]*x[:,[1]]**4/(2*(4*tpi*x[:,[2]])**2*x[:,[3]]**2)*1/x[:,[4]]**2 + 
ranges = [[0,1],[0,1],[0.5,2],[0.5,2],[0.5,2]] + + if name == 'III.21.20' or name == 100: + symbol = rho, q, A, m = symbols('rho q A m') + expr = - rho*q*A/m + f = lambda x: - x[:,[0]]*x[:,[1]]*x[:,[2]]/x[:,[3]] + ranges = [[0,1],[0,1],[0,1],[0.5,2]] + + if name == 'Rutherforld scattering' or name == 101: + symbol = Z1, Z2, alpha, hbar, c, E, theta = symbols('Z_1 Z_2 alpha hbar c E theta') + expr = (Z1*Z2*alpha*hbar*c/(4*E*sin(theta/2)**2))**2 + f = lambda x: (x[:,[0]]*x[:,[1]]*x[:,[2]]*x[:,[3]]*x[:,[4]]/(4*x[:,[5]]*torch.sin(x[:,[6]]/2)**2))**2 + ranges = [[0,1],[0,1],[0,1],[0,1],[0,1],[0.5,2],[0.1*tpi,0.9*tpi]] + + if name == 'Friedman equation' or name == 102: + symbol = G, rho, kf, c, af = symbols('G rho k_f c a_f') + expr = sqrt(8*pi*G/3*rho-kf*c**2/af**2) + f = lambda x: torch.sqrt(8*tpi*x[:,[0]]/3*x[:,[1]] - x[:,[2]]*x[:,[3]]**2/x[:,[4]]**2) + ranges = [[1,2],[1,2],[0,1],[0,1],[1,2]] + + if name == 'Compton scattering' or name == 103: + symbol = E, m, c, theta = symbols('E m c theta') + expr = E/(1+E/(m*c**2)*(1-cos(theta))) + f = lambda x: x[:,[0]]/(1+x[:,[0]]/(x[:,[1]]*x[:,[2]]**2)*(1-torch.cos(x[:,[3]]))) + ranges = [[0,1],[0.5,2],[0.5,2],[0,2*tpi]] + + if name == 'Radiated gravitational wave power' or name == 104: + symbol = G, c, m1, m2, r = symbols('G c m_1 m_2 r') + expr = -32/5*G**4/c**5*(m1*m2)**2*(m1+m2)/r**5 + f = lambda x: -32/5*x[:,[0]]**4/x[:,[1]]**5*(x[:,[2]]*x[:,[3]])**2*(x[:,[2]]+x[:,[3]])/x[:,[4]]**5 + ranges = [[0,1],[0.5,2],[0,1],[0,1],[0.5,2]] + + if name == 'Relativistic aberration' or name == 105: + symbol = theta2, v, c = symbols('theta_2 v c') + expr = acos((cos(theta2)-v/c)/(1-v/c*cos(theta2))) + f = lambda x: torch.arccos((torch.cos(x[:,[0]])-x[:,[1]]/x[:,[2]])/(1-x[:,[1]]/x[:,[2]]*torch.cos(x[:,[0]]))) + ranges = [[0,tpi],[0,1],[1,2]] + + if name == 'N-slit diffraction' or name == 106: + symbol = I0, alpha, delta, N = symbols('I_0 alpha delta N') + expr = I0 * (sin(alpha/2)/(alpha/2)*sin(N*delta/2)/sin(delta/2))**2 + f = lambda x: 
x[:,[0]] * (torch.sin(x[:,[1]]/2)/(x[:,[1]]/2)*torch.sin(x[:,[3]]*x[:,[2]]/2)/torch.sin(x[:,[2]]/2))**2 + ranges = [[0,1],[0.1*tpi,0.9*tpi],[0.1*tpi,0.9*tpi],[0.5,1]] + + if name == 'Goldstein 3.16' or name == 107: + symbol = m, E, U, L, r = symbols('m E U L r') + expr = sqrt(2/m*(E-U-L**2/(2*m*r**2))) + f = lambda x: torch.sqrt(2/x[:,[0]]*(x[:,[1]]-x[:,[2]]-x[:,[3]]**2/(2*x[:,[0]]*x[:,[4]]**2))) + ranges = [[1,2],[2,3],[0,1],[0,1],[1,2]] + + if name == 'Goldstein 3.55' or name == 108: + symbol = m, kG, L, E, theta1, theta2 = symbols('m k_G L E theta_1 theta_2') + expr = m*kG/L**2*(1+sqrt(1+2*E*L**2/(m*kG**2))*cos(theta1-theta2)) + f = lambda x: x[:,[0]]*x[:,[1]]/x[:,[2]]**2*(1+torch.sqrt(1+2*x[:,[3]]*x[:,[2]]**2/(x[:,[0]]*x[:,[1]]**2))*torch.cos(x[:,[4]]-x[:,[5]])) + ranges = [[0.5,2],[0.5,2],[0.5,2],[0,1],[0,2*tpi],[0,2*tpi]] + + if name == 'Goldstein 3.64 (ellipse)' or name == 109: + symbol = d, alpha, theta1, theta2 = symbols('d alpha theta_1 theta_2') + expr = d*(1-alpha**2)/(1+alpha*cos(theta2-theta1)) + f = lambda x: x[:,[0]]*(1-x[:,[1]]**2)/(1+x[:,[1]]*torch.cos(x[:,[2]]-x[:,[3]])) + ranges = [[0,1],[0,0.9],[0,2*tpi],[0,2*tpi]] + + if name == 'Goldstein 3.74 (Kepler)' or name == 110: + symbol = d, G, m1, m2 = symbols('d G m_1 m_2') + expr = 2*pi*d**(3/2)/sqrt(G*(m1+m2)) + f = lambda x: 2*tpi*x[:,[0]]**(3/2)/torch.sqrt(x[:,[1]]*(x[:,[2]]+x[:,[3]])) + ranges = [[0,1],[0.5,2],[0.5,2],[0.5,2]] + + if name == 'Goldstein 3.99' or name == 111: + symbol = eps, E, L, m, Z1, Z2, q = symbols('epsilon E L m Z_1 Z_2 q') + expr = sqrt(1+2*eps**2*E*L**2/(m*(Z1*Z2*q**2)**2)) + f = lambda x: torch.sqrt(1+2*x[:,[0]]**2*x[:,[1]]*x[:,[2]]**2/(x[:,[3]]*(x[:,[4]]*x[:,[5]]*x[:,[6]]**2)**2)) + ranges = [[0,1],[0,1],[0,1],[0.5,2],[0.5,2],[0.5,2],[0.5,2]] + + if name == 'Goldstein 8.56' or name == 112: + symbol = p, q, A, c, m, Ve = symbols('p q A c m V_e') + expr = sqrt((p-q*A)**2*c**2+m**2*c**4) + q*Ve + f = lambda x: 
torch.sqrt((x[:,[0]]-x[:,[1]]*x[:,[2]])**2*x[:,[3]]**2+x[:,[4]]**2*x[:,[3]]**4) + x[:,[1]]*x[:,[5]] + ranges = [0,1] + + if name == 'Goldstein 12.80' or name == 113: + symbol = m, p, omega, x, alpha, y = symbols('m p omega x alpha y') + expr = 1/(2*m)*(p**2+m**2*omega**2*x**2*(1+alpha*y/x)) + f = lambda x: 1/(2*x[:,[0]]) * (x[:,[1]]**2+x[:,[0]]**2*x[:,[2]]**2*x[:,[3]]**2*(1+x[:,[4]]*x[:,[3]]/x[:,[5]])) + ranges = [[0.5,2],[0,1],[0,1],[0,1],[0,1],[0.5,2]] + + if name == 'Jackson 2.11' or name == 114: + symbol = q, eps, y, Ve, d = symbols('q epsilon y V_e d') + expr = q/(4*pi*eps*y**2)*(4*pi*eps*Ve*d-q*d*y**3/(y**2-d**2)**2) + f = lambda x: x[:,[0]]/(4*tpi*x[:,[1]]*x[:,[2]]**2)*(4*tpi*x[:,[1]]*x[:,[3]]*x[:,[4]]-x[:,[0]]*x[:,[4]]*x[:,[2]]**3/(x[:,[2]]**2-x[:,[4]]**2)**2) + ranges = [[0,1],[0.5,2],[1,2],[0,1],[0,1]] + + if name == 'Jackson 3.45' or name == 115: + symbol = q, r, d, alpha = symbols('q r d alpha') + expr = q/sqrt(r**2+d**2-2*d*r*cos(alpha)) + f = lambda x: x[:,[0]]/torch.sqrt(x[:,[1]]**2+x[:,[2]]**2-2*x[:,[1]]*x[:,[2]]*torch.cos(x[:,[3]])) + ranges = [[0,1],[0,1],[0,1],[0,2*tpi]] + + if name == 'Jackson 4.60' or name == 116: + symbol = Ef, theta, alpha, d, r = symbols('E_f theta alpha d r') + expr = Ef * cos(theta) * ((alpha-1)/(alpha+2) * d**3/r**2 - r) + f = lambda x: x[:,[0]] * torch.cos(x[:,[1]]) * ((x[:,[2]]-1)/(x[:,[2]]+2) * x[:,[3]]**3/x[:,[4]]**2 - x[:,[4]]) + ranges = [[0,1],[0,2*tpi],[0,2],[0,1],[0.5,2]] + + if name == 'Jackson 11.38 (Doppler)' or name == 117: + symbol = omega, v, c, theta = symbols('omega v c theta') + expr = sqrt(1-v**2/c**2)/(1+v/c*cos(theta))*omega + f = lambda x: torch.sqrt(1-x[:,[1]]**2/x[:,[2]]**2)/(1+x[:,[1]]/x[:,[2]]*torch.cos(x[:,[3]]))*x[:,[0]] + ranges = [[0,1],[0,1],[1,2],[0,2*tpi]] + + if name == 'Weinberg 15.2.1' or name == 118: + symbol = G, c, kf, af, H = symbols('G c k_f a_f H') + expr = 3/(8*pi*G)*(c**2*kf/af**2+H**2) + f = lambda x: 3/(8*tpi*x[:,[0]])*(x[:,[1]]**2*x[:,[2]]/x[:,[3]]**2+x[:,[4]]**2) + 
ranges = [[0.5,2],[0,1],[0,1],[0.5,2],[0,1]] + + if name == 'Weinberg 15.2.2' or name == 119: + symbol = G, c, kf, af, H, alpha = symbols('G c k_f a_f H alpha') + expr = -1/(8*pi*G)*(c**4*kf/af**2+c**2*H**2*(1-2*alpha)) + f = lambda x: -1/(8*tpi*x[:,[0]])*(x[:,[1]]**4*x[:,[2]]/x[:,[3]]**2 + x[:,[1]]**2*x[:,[4]]**2*(1-2*x[:,[5]])) + ranges = [[0.5,2],[0,1],[0,1],[0.5,2],[0,1],[0,1]] + + if name == 'Schwarz 13.132 (Klein-Nishina)' or name == 120: + symbol = alpha, hbar, m, c, omega0, omega, theta = symbols('alpha hbar m c omega_0 omega theta') + expr = pi*alpha**2*hbar**2/m**2/c**2*(omega0/omega)**2*(omega0/omega+omega/omega0-sin(theta)**2) + f = lambda x: tpi*x[:,[0]]**2*x[:,[1]]**2/x[:,[2]]**2/x[:,[3]]**2*(x[:,[4]]/x[:,[5]])**2*(x[:,[4]]/x[:,[5]]+x[:,[5]]/x[:,[4]]-torch.sin(x[:,[6]])**2) + ranges = [[0,1],[0,1],[0.5,2],[0.5,2],[0.5,2],[0.5,2],[0,2*tpi]] + + return symbol, expr, f, ranges \ No newline at end of file diff --git a/dl/kan/kan/hypothesis.py b/dl/kan/kan/hypothesis.py new file mode 100644 index 000000000..4850f5098 --- /dev/null +++ b/dl/kan/kan/hypothesis.py @@ -0,0 +1,695 @@ +import numpy as np +import torch +from sklearn.linear_model import LinearRegression +from sympy.utilities.lambdify import lambdify +from sklearn.cluster import AgglomerativeClustering +from .utils import batch_jacobian, batch_hessian +from functools import reduce +from kan.utils import batch_jacobian, batch_hessian +import copy +import matplotlib.pyplot as plt +import sympy +from sympy.printing import latex + + +def detect_separability(model, x, mode='add', score_th=1e-2, res_th=1e-2, n_clusters=None, bias=0., verbose=False): + ''' + detect function separability + + Args: + ----- + model : MultKAN, MLP or python function + x : 2D torch.float + inputs + mode : str + mode = 'add' or mode = 'mul' + score_th : float + threshold of score + res_th : float + threshold of residue + n_clusters : None or int + the number of clusters + bias : float + bias (for multiplicative separability) + 
verbose : bool + + Returns: + -------- + results (dictionary) + + Example1 + -------- + >>> from kan.hypothesis import * + >>> model = lambda x: x[:,[0]] ** 2 + torch.exp(x[:,[1]]+x[:,[2]]) + >>> x = torch.normal(0,1,size=(100,3)) + >>> detect_separability(model, x, mode='add') + + Example2 + -------- + >>> from kan.hypothesis import * + >>> model = lambda x: x[:,[0]] ** 2 * (x[:,[1]]+x[:,[2]]) + >>> x = torch.normal(0,1,size=(100,3)) + >>> detect_separability(model, x, mode='mul') + ''' + results = {} + + if mode == 'add': + hessian = batch_hessian(model, x) + elif mode == 'mul': + compose = lambda *F: reduce(lambda f, g: lambda x: f(g(x)), F) + hessian = batch_hessian(compose(torch.log, torch.abs, lambda x: x+bias, model), x) + + std = torch.std(x, dim=0) + hessian_normalized = hessian * std[None,:] * std[:,None] + score_mat = torch.median(torch.abs(hessian_normalized), dim=0)[0] + results['hessian'] = score_mat + + dist_hard = (score_mat < score_th).float() + + if isinstance(n_clusters, int): + n_cluster_try = [n_clusters, n_clusters] + elif isinstance(n_clusters, list): + n_cluster_try = n_clusters + else: + n_cluster_try = [1,x.shape[1]] + + n_cluster_try = list(range(n_cluster_try[0], n_cluster_try[1]+1)) + + for n_cluster in n_cluster_try: + + clustering = AgglomerativeClustering( + metric='precomputed', + n_clusters=n_cluster, + linkage='complete', + ).fit(dist_hard) + + labels = clustering.labels_ + + groups = [list(np.where(labels == i)[0]) for i in range(n_cluster)] + blocks = [torch.sum(score_mat[groups[i]][:,groups[i]]) for i in range(n_cluster)] + block_sum = torch.sum(torch.stack(blocks)) + total_sum = torch.sum(score_mat) + residual_sum = total_sum - block_sum + residual_ratio = residual_sum / total_sum + + if verbose == True: + print(f'n_group={n_cluster}, residual_ratio={residual_ratio}') + + if residual_ratio < res_th: + results['n_groups'] = n_cluster + results['labels'] = list(labels) + results['groups'] = groups + + if results['n_groups'] > 1: 
+ print(f'{mode} separability detected') + else: + print(f'{mode} separability not detected') + + return results + + +def batch_grad_normgrad(model, x, group, create_graph=False): + # x in shape (Batch, Length) + group_A = group + group_B = list(set(range(x.shape[1])) - set(group)) + + def jac(x): + input_grad = batch_jacobian(model, x, create_graph=True) + input_grad_A = input_grad[:,group_A] + norm = torch.norm(input_grad_A, dim=1, keepdim=True) + 1e-6 + input_grad_A_normalized = input_grad_A/norm + return input_grad_A_normalized + + def _jac_sum(x): + return jac(x).sum(dim=0) + + return torch.autograd.functional.jacobian(_jac_sum, x, create_graph=create_graph).permute(1,0,2)[:,:,group_B] + + +def get_dependence(model, x, group): + group_A = group + group_B = list(set(range(x.shape[1])) - set(group)) + grad_normgrad = batch_grad_normgrad(model, x, group=group) + std = torch.std(x, dim=0) + dependence = grad_normgrad * std[None,group_A,None] * std[None,None,group_B] + dependence = torch.median(torch.abs(dependence), dim=0)[0] + return dependence + +def test_symmetry(model, x, group, dependence_th=1e-3): + ''' + detect function separability + + Args: + ----- + model : MultKAN, MLP or python function + x : 2D torch.float + inputs + group : a list of indices + dependence_th : float + threshold of dependence + + Returns: + -------- + bool + + Example + ------- + >>> from kan.hypothesis import * + >>> model = lambda x: x[:,[0]] ** 2 * (x[:,[1]]+x[:,[2]]) + >>> x = torch.normal(0,1,size=(100,3)) + >>> print(test_symmetry(model, x, [1,2])) # True + >>> print(test_symmetry(model, x, [0,2])) # False + ''' + if len(group) == x.shape[1] or len(group) == 0: + return True + + dependence = get_dependence(model, x, group) + max_dependence = torch.max(dependence) + return max_dependence < dependence_th + + +def test_separability(model, x, groups, mode='add', threshold=1e-2, bias=0): + ''' + test function separability + + Args: + ----- + model : MultKAN, MLP or python function + x 
def test_separability(model, x, groups, mode='add', threshold=1e-2, bias=0):
    '''
    test function separability

    Args:
    -----
        model : MultKAN, MLP or python function
        x : 2D torch.float
            inputs
        groups : list of lists of indices
            the candidate variable groups
        mode : str
            mode = 'add' or mode = 'mul'
        threshold : float
            threshold on the normalized cross-Hessian score
        bias : float
            bias (for multiplicative separability)

    Returns:
    --------
        bool

    Example
    -------
    >>> from kan.hypothesis import *
    >>> model = lambda x: x[:,[0]] ** 2 * (x[:,[1]]+x[:,[2]])
    >>> x = torch.normal(0,1,size=(100,3))
    >>> print(test_separability(model, x, [[0],[1,2]], mode='mul')) # True
    >>> print(test_separability(model, x, [[0],[1,2]], mode='add')) # False
    '''
    # additive separability <=> vanishing cross-Hessian; the multiplicative case
    # is reduced to it by testing log|model(x) + bias| instead
    if mode == 'add':
        hess = batch_hessian(model, x)
    elif mode == 'mul':
        compose = lambda *F: reduce(lambda f, g: lambda y: f(g(y)), F)
        log_abs_model = compose(torch.log, torch.abs, lambda y: y + bias, model)
        hess = batch_hessian(log_abs_model, x)

    sigma = torch.std(x, dim=0)
    # scale-free score: batch median of |H_ij| * std_i * std_j
    score_mat = torch.median(torch.abs(hess * sigma[None, :] * sigma[:, None]), dim=0)[0]

    sep_bool = True

    # internal test: every pair of distinct groups must decouple
    n_groups = len(groups)
    for i in range(n_groups):
        for j in range(i + 1, n_groups):
            sep_bool *= torch.max(score_mat[groups[i]][:, groups[j]]) < threshold

    # external test: grouped variables must decouple from ungrouped ones
    grouped = [v for g in groups for v in g]
    ungrouped = list(set(range(x.shape[1])) - set(grouped))
    if len(ungrouped) > 0 and len(grouped) > 0:
        sep_bool *= torch.max(score_mat[grouped][:, ungrouped]) < threshold

    return sep_bool
def test_general_separability(model, x, groups, threshold=1e-2):
    '''
    test generalized separability: f(x) = g(h_1(group_1), ..., h_k(group_k))

    The test checks, for every cross-group pair of variables, whether the ratio
    of partial derivatives d_B f / d_A f is multiplicatively separable.

    Args:
    -----
        model : MultKAN, MLP or python function
        x : 2D torch.float
            inputs
        groups : list of lists of indices
            the candidate variable groups
        threshold : float
            threshold passed to the underlying separability test

    Returns:
    --------
        bool

    Example
    -------
    >>> from kan.hypothesis import *
    >>> model = lambda x: x[:,[0]] ** 2 * (x[:,[1]]**2+x[:,[2]]**2)**2
    >>> x = torch.normal(0,1,size=(100,3))
    >>> print(test_general_separability(model, x, [[1],[0,2]])) # False
    >>> print(test_general_separability(model, x, [[0],[1,2]])) # True
    '''
    # NOTE: the original computed `grad = batch_jacobian(model, x)` here and
    # never used it — a full Jacobian evaluation of dead work; removed.
    gensep_bool = True

    n_groups = len(groups)
    for i in range(n_groups):
        for j in range(i + 1, n_groups):
            for member_A in groups[i]:
                for member_B in groups[j]:
                    # bind the indices as defaults so each closure is self-contained
                    def grad_ratio(inp, a=member_A, b=member_B):
                        g = batch_jacobian(model, inp, create_graph=True)
                        return g[:, [b]] / g[:, [a]]
                    # the ratio must be multiplicatively separable across groups
                    gensep_bool *= test_separability(grad_ratio, x, groups, mode='mul', threshold=threshold)
    return gensep_bool
def get_molecule(model, x, sym_th=1e-3, verbose=True):
    '''
    how variables are combined hierarchically

    Greedily merges "atoms" (initially single variables) into "molecules" layer
    by layer: an atom joins the current molecule when the union still passes
    test_symmetry. Repeats until a single molecule remains.

    Args:
    -----
        model : MultKAN, MLP or python function
        x : 2D torch.float
            inputs
        sym_th : float
            threshold of symmetry
        verbose : bool

    Returns:
    --------
        list
            moleculess[l] is the list of variable groups at hierarchy level l

    Example
    -------
    >>> from kan.hypothesis import *
    >>> model = lambda x: ((x[:,[0]] ** 2 + x[:,[1]] ** 2) ** 2 + (x[:,[2]] ** 2 + x[:,[3]] ** 2) ** 2) ** 2 + ((x[:,[4]] ** 2 + x[:,[5]] ** 2) ** 2 + (x[:,[6]] ** 2 + x[:,[7]] ** 2) ** 2) ** 2
    >>> x = torch.normal(0,1,size=(100,8))
    >>> get_molecule(model, x, verbose=False)
    [[[0], [1], [2], [3], [4], [5], [6], [7]],
     [[0, 1], [2, 3], [4, 5], [6, 7]],
     [[0, 1, 2, 3], [4, 5, 6, 7]],
     [[0, 1, 2, 3, 4, 5, 6, 7]]]
    '''
    n = x.shape[1]
    atoms = [[i] for i in range(n)]
    molecules = []
    moleculess = [copy.deepcopy(atoms)]
    already_full = False
    n_layer = 0
    last_n_molecule = n

    while True:

        # one pass: assemble this layer's molecules from the current atoms
        pointer = 0
        current_molecule = []
        remove_atoms = []
        n_atom = 0

        while len(atoms) > 0:

            # assemble molecule
            atom = atoms[pointer]
            if verbose:
                print(current_molecule)
                print(atom)

            if len(current_molecule) == 0:
                # seed a fresh molecule with this atom
                full = False
                current_molecule += atom
                remove_atoms.append(atom)
                n_atom += 1
            else:
                # try assemble the atom to the molecule
                # special case: merging everything at once (only allowed once,
                # past the first layer, and only for a genuine multi-atom merge)
                if len(current_molecule+atom) == x.shape[1] and already_full == False and n_atom > 1 and n_layer > 0:
                    full = True
                    already_full = True
                else:
                    full = False
                    if test_symmetry(model, x, current_molecule+atom, dependence_th=sym_th):
                        current_molecule += atom
                        remove_atoms.append(atom)
                        n_atom += 1

            pointer += 1

            if pointer == len(atoms) or full:
                # close the current molecule and restart the scan
                molecules.append(current_molecule)
                if full:
                    molecules.append(atom)
                    remove_atoms.append(atom)
                # remove molecules from atoms
                for atom in remove_atoms:
                    atoms.remove(atom)
                current_molecule = []
                remove_atoms = []
                pointer = 0

        # if not making progress, terminate
        if len(molecules) == last_n_molecule:
            def flatten(xss):
                return [x for xs in xss for x in xs]
            moleculess.append([flatten(molecules)])
            break
        else:
            moleculess.append(copy.deepcopy(molecules))

        last_n_molecule = len(molecules)

        if len(molecules) == 1:
            break

        # this layer's molecules become the next layer's atoms
        atoms = molecules
        molecules = []

        n_layer += 1

        #print(n_layer, atoms)


    # sort: re-order each layer so that children appear as contiguous runs
    # of their parent molecule one level up
    depth = len(moleculess) - 1

    for l in list(range(depth,0,-1)):

        molecules_sorted = []
        molecules_l = moleculess[l]
        molecules_lm1 = moleculess[l-1]


        for molecule_l in molecules_l:
            start = 0
            for i in range(1,len(molecule_l)+1):
                if molecule_l[start:i] in molecules_lm1:

                    molecules_sorted.append(molecule_l[start:i])
                    start = i

        moleculess[l-1] = molecules_sorted

    return moleculess
def get_tree_node(model, x, moleculess, sep_th=1e-2, skip_test=True):
    '''
    get tree nodes: arity and property label for each internal node of the
    modular hierarchy produced by get_molecule

    Args:
    -----
        model : MultKAN, MLP or python function
        x : 2D torch.float
            inputs
        moleculess : list
            output of get_molecule
        sep_th : float
            threshold of separability
        skip_test : bool
            if True, don't test the property of each module (to save time)

    Returns:
    --------
        arities : list of numbers
        properties : list of strings ('Id', 'GS', 'Add', 'Mul' or '')

    Example
    -------
    >>> from kan.hypothesis import *
    >>> model = lambda x: ((x[:,[0]] ** 2 + x[:,[1]] ** 2) ** 2 + (x[:,[2]] ** 2 + x[:,[3]] ** 2) ** 2) ** 2 + ((x[:,[4]] ** 2 + x[:,[5]] ** 2) ** 2 + (x[:,[6]] ** 2 + x[:,[7]] ** 2) ** 2) ** 2
    >>> x = torch.normal(0,1,size=(100,8))
    >>> moleculess = get_molecule(model, x, verbose=False)
    >>> get_tree_node(model, x, moleculess, skip_test=False)
    '''
    arities = []
    properties = []

    depth = len(moleculess) - 1

    for l in range(depth):
        layer = copy.deepcopy(moleculess[l])
        next_layer = copy.deepcopy(moleculess[l + 1])
        arity_l = []
        property_l = []

        for molecule in next_layer:
            # split the molecule back into its children from the layer below
            # (children are contiguous runs thanks to get_molecule's sorting)
            children = []
            start = 0
            for i in range(1, len(molecule) + 1):
                if molecule[start:i] in layer:
                    children.append(molecule[start:i])
                    start = i
            arity = len(children)
            arity_l.append(arity)

            if arity == 1:
                prop = 'Id'
            else:
                prop = ''
                # optionally test which structural property this node has
                if skip_test:
                    gensep_bool = False
                else:
                    gensep_bool = test_general_separability(model, x, children, threshold=sep_th)
                if gensep_bool:
                    prop = 'GS'
                # at the root, plain additive/multiplicative separability
                # overrides the generic 'GS' label
                if l == depth - 1:
                    if skip_test:
                        add_bool = False
                        mul_bool = False
                    else:
                        add_bool = test_separability(model, x, children, mode='add', threshold=sep_th)
                        mul_bool = test_separability(model, x, children, mode='mul', threshold=sep_th)
                    if add_bool:
                        prop = 'Add'
                    if mul_bool:
                        prop = 'Mul'

            property_l.append(prop)

        arities.append(arity_l)
        properties.append(property_l)

    return arities, properties
def plot_tree(model, x, in_var=None, style='tree', sym_th=1e-3, sep_th=1e-1, skip_sep_test=False, verbose=False):
    '''
    plot the hierarchical modular tree of a function/model

    Args:
    -----
        model : MultKAN, MLP or python function
        x : 2D torch.float
            inputs
        in_var : list of symbols
            input variables (sympy Symbols or strings); default x_1..x_n
        style : str
            'tree' or 'box'
        sym_th : float
            threshold of symmetry
        sep_th : float
            threshold of separability
        skip_sep_test : bool
            if True, don't test the property of each module (to save time)
        verbose : bool

    Returns:
    --------
        a tree graph (drawn with matplotlib)

    Example
    -------
    >>> from kan.hypothesis import *
    >>> model = lambda x: ((x[:,[0]] ** 2 + x[:,[1]] ** 2) ** 2 + (x[:,[2]] ** 2 + x[:,[3]] ** 2) ** 2) ** 2 + ((x[:,[4]] ** 2 + x[:,[5]] ** 2) ** 2 + (x[:,[6]] ** 2 + x[:,[7]] ** 2) ** 2) ** 2
    >>> x = torch.normal(0,1,size=(100,8))
    >>> plot_tree(model, x)
    '''
    moleculess = get_molecule(model, x, sym_th=sym_th, verbose=verbose)
    arities, properties = get_tree_node(model, x, moleculess, sep_th=sep_th, skip_test=skip_sep_test)

    n = x.shape[1]

    # Build the list of sympy input symbols.
    # BUG FIX: the original referenced `var` (initialized to None and never set)
    # instead of `in_var` in the elif/else branches, so passing in_var crashed
    # with a TypeError. It also built default symbols via exec; direct
    # construction is equivalent and safer.
    if in_var is None:
        in_vars = [sympy.Symbol(f'x_{ii}') for ii in range(1, n + 1)]
    elif isinstance(in_var[0], Symbol):
        in_vars = in_var
    else:
        in_vars = [sympy.symbols(var_) for var_ in in_var]

    def flatten(xss):
        return [x for xs in xss for x in xs]

    def myrectangle(center_x, center_y, width_x, width_y):
        # draw the four edges of an axis-aligned rectangle
        plt.plot([center_x - width_x/2, center_x + width_x/2], [center_y + width_y/2, center_y + width_y/2], color='k')  # up
        plt.plot([center_x - width_x/2, center_x + width_x/2], [center_y - width_y/2, center_y - width_y/2], color='k')  # down
        plt.plot([center_x - width_x/2, center_x - width_x/2], [center_y - width_y/2, center_y + width_y/2], color='k')  # left
        plt.plot([center_x + width_x/2, center_x + width_x/2], [center_y - width_y/2, center_y + width_y/2], color='k')  # right

    depth = len(moleculess)

    # layout constants: delta = horizontal slot per variable, y0 = layer height
    delta = 1/n
    a = 0.3
    b = 0.15
    y0 = 0.5

    # draw one row of nodes per hierarchy level
    for l in range(depth - 1):
        molecules = moleculess[l + 1]
        n_molecule = len(molecules)

        centers = []
        acc_arity = 0

        for i in range(n_molecule):
            start_id = len(flatten(molecules[:i]))
            end_id = len(flatten(molecules[:i + 1]))

            center_x = (start_id + (end_id - 1 - start_id)/2) * delta + delta/2
            center_y = (l + 1/2) * y0
            width_x = (end_id - start_id - 1 + 2*a) * delta
            width_y = 2*b

            if style == 'box':
                # box style: every node is a labelled rectangle
                myrectangle(center_x, center_y, width_x, width_y)
                plt.text(center_x, center_y, properties[l][i], fontsize=15,
                         horizontalalignment='center', verticalalignment='center')
            elif style == 'tree':
                # tree style:
                #   'GS'/'Add'/'Mul' -> arity tilted lines (+ red '+'/'*' symbol)
                #   'Id'             -> a single vertical line
                #   ''               -> a plain rectangle
                property = properties[l][i]
                if property == 'GS' or property == 'Add' or property == 'Mul':
                    color = 'blue'
                    arity = arities[l][i]
                    for j in range(arity):
                        if l == 0:
                            # children are raw input slots
                            plt.plot([(start_id + j) * delta + delta/2, center_x], [center_y - b, center_y + b], color=color)
                        else:
                            # children are the previous layer's node centers
                            plt.plot([last_centers[acc_arity + j], center_x], [center_y - b, center_y + b], color=color)
                    acc_arity += arity

                    if property == 'Add' or property == 'Mul':
                        symbol = '+' if property == 'Add' else '*'
                        plt.text(center_x, center_y + b, symbol, horizontalalignment='center',
                                 verticalalignment='center', color='red', fontsize=40)
                if property == 'Id':
                    plt.plot([center_x, center_x], [center_y - width_y/2, center_y + width_y/2], color='black')
                if property == '':
                    myrectangle(center_x, center_y, width_x, width_y)

            # connection to the next layer
            plt.plot([center_x, center_x], [center_y + width_y/2, center_y + y0 - width_y/2], color='k')
            centers.append(center_x)
        last_centers = copy.deepcopy(centers)

    # connections from input variables to the first layer
    for i in range(n):
        x_ = (i + 1/2) * delta
        plt.plot([x_, x_], [0, y0/2 - width_y/2], color='k')
        plt.text(x_, -0.05*(depth-1), f'${latex(in_vars[moleculess[0][i][0]])}$', fontsize=20, horizontalalignment='center')

    plt.xlim(0, 1)
    #plt.ylim(0,1);
    plt.axis('off')
    plt.show()
def test_symmetry_var(model, x, input_vars, symmetry_var):
    '''
    test whether `symmetry_var` (a sympy expression of some input variables)
    is a symmetry of the model, by comparing gradient directions

    Args:
    -----
        model : MultKAN, MLP or python function
        x : 2D torch.float
            inputs
        input_vars : list of sympy symbols
        symmetry_var : sympy expression

    Returns:
    --------
        cosine similarity (per-sample torch tensor)

    Example
    -------
    >>> from kan.hypothesis import *
    >>> from sympy import *
    >>> model = lambda x: x[:,[0]] * (x[:,[1]] + x[:,[2]])
    >>> x = torch.normal(0,1,size=(100,8))
    >>> input_vars = a, b, c = symbols('a b c')
    >>> symmetry_var = b + c
    >>> test_symmetry_var(model, x, input_vars, symmetry_var);
    >>> symmetry_var = b * c
    >>> test_symmetry_var(model, x, input_vars, symmetry_var);
    '''
    orig_vars = input_vars
    sym_var = symmetry_var

    # gradient of the model w.r.t. the inputs
    model_grad = batch_jacobian(model, x)

    # gradient of the candidate symmetry variable w.r.t. the inputs
    np_fun = lambdify(orig_vars, sym_var, 'numpy')  # returns a numpy-ready function
    wrapped = lambda inp: np_fun(*[inp[:, [i]] for i in range(len(orig_vars))])
    sym_grad = batch_jacobian(wrapped, x)

    # indices of the input variables that actually appear in sym_var
    idx = [j for s in sym_var.free_symbols
           for j in range(len(orig_vars)) if s == orig_vars[j]]

    g_model = model_grad[:, idx]
    g_sym = sym_grad[:, idx]

    # per-sample |cos| between the two restricted gradients
    cossim = torch.abs(torch.sum(g_model * g_sym, dim=1) /
                       (torch.norm(g_model, dim=1) * torch.norm(g_sym, dim=1)))

    ratio = torch.sum(cossim > 0.9) / len(cossim)

    print(f'{100*ratio}% data have more than 0.9 cosine similarity')
    if ratio > 0.9:
        print('suggesting symmetry')
    else:
        print('not suggesting symmetry')

    return cossim
def B_batch(x, grid, k=0, extend=True, device='cpu'):
    '''
    evaluate x on B-spline bases (Cox-de Boor recursion)

    Args:
    -----
        x : 2D torch.tensor
            inputs, shape (batch, in_dim)
        grid : 2D torch.tensor
            grids, shape (in_dim, number of grid points)
        k : int
            the piecewise polynomial order of splines
        extend : bool
            unused here; kept for interface compatibility
        device : str
            device

    Returns:
    --------
        spline values : 3D torch.tensor
            shape (batch, in_dim, G+k). G: number of grid intervals, k: order.

    Example
    -------
    >>> from kan.spline import B_batch
    >>> x = torch.rand(100,2)
    >>> grid = torch.linspace(-1,1,steps=11)[None, :].expand(2, 11)
    >>> B_batch(x, grid, k=3).shape
    '''
    # broadcastable shapes: x -> (batch, in_dim, 1), grid -> (1, in_dim, G)
    x = x.unsqueeze(dim=2)
    grid = grid.unsqueeze(dim=0)

    if k == 0:
        # order-0 basis: indicator of the half-open interval containing x
        value = (x >= grid[:, :, :-1]) * (x < grid[:, :, 1:])
    else:
        # recurse on the order-(k-1) bases
        B_km1 = B_batch(x[:, :, 0], grid=grid[0], k=k - 1)
        left = (x - grid[:, :, :-(k + 1)]) / (grid[:, :, k:-1] - grid[:, :, :-(k + 1)])
        right = (grid[:, :, k + 1:] - x) / (grid[:, :, k + 1:] - grid[:, :, 1:(-k)])
        value = left * B_km1[:, :, :-1] + right * B_km1[:, :, 1:]

    # degenerate (repeated) grid points produce 0/0 -> NaN; replace with 0
    return torch.nan_to_num(value)


def coef2curve(x_eval, grid, coef, k, device="cpu"):
    '''
    evaluate B-spline curves at x_eval: sum of bases weighted by coefficients

    Args:
    -----
        x_eval : 2D torch.tensor
            shape (batch, in_dim)
        grid : 2D torch.tensor
            shape (in_dim, G+2k)
        coef : 3D torch.tensor
            shape (in_dim, out_dim, G+k)
        k : int
            spline order
        device : str
            device

    Returns:
    --------
        y_eval : 3D torch.tensor
            shape (batch, in_dim, out_dim)
    '''
    bases = B_batch(x_eval, grid, k=k)
    return torch.einsum('ijk,jlk->ijl', bases, coef.to(bases.device))


def curve2coef(x_eval, y_eval, grid, k):
    '''
    fit B-spline coefficients to curves via batched least squares

    Args:
    -----
        x_eval : 2D torch.tensor
            shape (batch, in_dim)
        y_eval : 3D torch.tensor
            shape (batch, in_dim, out_dim)
        grid : 2D torch.tensor
            shape (in_dim, grid+2*k)
        k : int
            spline order

    Returns:
    --------
        coef : 3D torch.tensor
            shape (in_dim, out_dim, G+k)
    '''
    batch, in_dim = x_eval.shape
    out_dim = y_eval.shape[2]
    n_coef = grid.shape[1] - k - 1

    # design matrix replicated per output: (in_dim, out_dim, batch, n_coef)
    mat = B_batch(x_eval, grid, k).permute(1, 0, 2)[:, None, :, :].expand(in_dim, out_dim, batch, n_coef)
    target = y_eval.permute(1, 2, 0).unsqueeze(dim=3)
    device = mat.device

    # solve on CPU ('gels' on accelerators can fail); move the solution back
    solution = torch.linalg.lstsq(mat.cpu(), target.cpu(),
                                  driver='gelsy' if device == 'cpu' else 'gels').solution
    return solution[:, :, :, 0].to(device)


def extend_grid(grid, k_extend=0):
    '''
    pad grid with k_extend uniformly spaced extra points on each side
    '''
    # uniform spacing inferred from the endpoints
    h = (grid[:, [-1]] - grid[:, [0]]) / (grid.shape[1] - 1)

    for _ in range(k_extend):
        grid = torch.cat([grid[:, [0]] - h, grid], dim=1)
        grid = torch.cat([grid, grid[:, [-1]] + h], dim=1)

    return grid
# sigmoid = sympy.Function('sigmoid')
# name: (torch implementation, sympy implementation)

# Singularity-protection functions.
# Each takes (x, y_th) and returns a tuple (threshold_info, value) where
# `value` equals the raw function outside a small region around its
# singularity and a bounded linear/constant surrogate inside it, so that
# outputs never exceed ~y_th in magnitude and NaN/Inf are suppressed.
# `threshold_info` carries the x-threshold(s) via walrus bindings (empty
# tuple when the function has no singularity).
f_inv = lambda x, y_th: ((x_th := 1/y_th), y_th/x_th*x * (torch.abs(x) < x_th) + torch.nan_to_num(1/x) * (torch.abs(x) >= x_th))
f_inv2 = lambda x, y_th: ((x_th := 1/y_th**(1/2)), y_th * (torch.abs(x) < x_th) + torch.nan_to_num(1/x**2) * (torch.abs(x) >= x_th))
f_inv3 = lambda x, y_th: ((x_th := 1/y_th**(1/3)), y_th/x_th*x * (torch.abs(x) < x_th) + torch.nan_to_num(1/x**3) * (torch.abs(x) >= x_th))
f_inv4 = lambda x, y_th: ((x_th := 1/y_th**(1/4)), y_th * (torch.abs(x) < x_th) + torch.nan_to_num(1/x**4) * (torch.abs(x) >= x_th))
f_inv5 = lambda x, y_th: ((x_th := 1/y_th**(1/5)), y_th/x_th*x * (torch.abs(x) < x_th) + torch.nan_to_num(1/x**5) * (torch.abs(x) >= x_th))
f_sqrt = lambda x, y_th: ((x_th := 1/y_th**2), x_th/y_th*x * (torch.abs(x) < x_th) + torch.nan_to_num(torch.sqrt(torch.abs(x))*torch.sign(x)) * (torch.abs(x) >= x_th))
f_power1d5 = lambda x, y_th: torch.abs(x)**1.5
f_invsqrt = lambda x, y_th: ((x_th := 1/y_th**2), y_th * (torch.abs(x) < x_th) + torch.nan_to_num(1/torch.sqrt(torch.abs(x))) * (torch.abs(x) >= x_th))
f_log = lambda x, y_th: ((x_th := torch.e**(-y_th)), - y_th * (torch.abs(x) < x_th) + torch.nan_to_num(torch.log(torch.abs(x))) * (torch.abs(x) >= x_th))
# tan: protect the pole at pi/2 within each period (clip = x mod pi)
f_tan = lambda x, y_th: ((clip := x % torch.pi), (delta := torch.pi/2-torch.arctan(y_th)), - y_th/delta * (clip - torch.pi/2) * (torch.abs(clip - torch.pi/2) < delta) + torch.nan_to_num(torch.tan(clip)) * (torch.abs(clip - torch.pi/2) >= delta))
f_arctanh = lambda x, y_th: ((delta := 1-torch.tanh(y_th) + 1e-4), y_th * torch.sign(x) * (torch.abs(x) > 1 - delta) + torch.nan_to_num(torch.arctanh(x)) * (torch.abs(x) <= 1 - delta))
# arcsin/arccos: clamp out-of-domain |x| > 1 to the boundary values
f_arcsin = lambda x, y_th: ((), torch.pi/2 * torch.sign(x) * (torch.abs(x) > 1) + torch.nan_to_num(torch.arcsin(x)) * (torch.abs(x) <= 1))
f_arccos = lambda x, y_th: ((), torch.pi/2 * (1-torch.sign(x)) * (torch.abs(x) > 1) + torch.nan_to_num(torch.arccos(x)) * (torch.abs(x) <= 1))
# NOTE(review): torch.log(y_th) requires y_th to be a tensor — presumably
# callers pass a tensor threshold here; verify against call sites.
f_exp = lambda x, y_th: ((x_th := torch.log(y_th)), y_th * (x > x_th) + torch.exp(x) * (x <= x_th))

# Symbolic function library.
# Each entry: name -> (torch fn, sympy fn, complexity score,
#                      singularity-protected fn with signature (x, y_th)).
SYMBOLIC_LIB = {'x': (lambda x: x, lambda x: x, 1, lambda x, y_th: ((), x)),
                'x^2': (lambda x: x**2, lambda x: x**2, 2, lambda x, y_th: ((), x**2)),
                'x^3': (lambda x: x**3, lambda x: x**3, 3, lambda x, y_th: ((), x**3)),
                'x^4': (lambda x: x**4, lambda x: x**4, 3, lambda x, y_th: ((), x**4)),
                'x^5': (lambda x: x**5, lambda x: x**5, 3, lambda x, y_th: ((), x**5)),
                '1/x': (lambda x: 1/x, lambda x: 1/x, 2, f_inv),
                '1/x^2': (lambda x: 1/x**2, lambda x: 1/x**2, 2, f_inv2),
                '1/x^3': (lambda x: 1/x**3, lambda x: 1/x**3, 3, f_inv3),
                '1/x^4': (lambda x: 1/x**4, lambda x: 1/x**4, 4, f_inv4),
                '1/x^5': (lambda x: 1/x**5, lambda x: 1/x**5, 5, f_inv5),
                'sqrt': (lambda x: torch.sqrt(x), lambda x: sympy.sqrt(x), 2, f_sqrt),
                'x^0.5': (lambda x: torch.sqrt(x), lambda x: sympy.sqrt(x), 2, f_sqrt),
                'x^1.5': (lambda x: torch.sqrt(x)**3, lambda x: sympy.sqrt(x)**3, 4, f_power1d5),
                '1/sqrt(x)': (lambda x: 1/torch.sqrt(x), lambda x: 1/sympy.sqrt(x), 2, f_invsqrt),
                '1/x^0.5': (lambda x: 1/torch.sqrt(x), lambda x: 1/sympy.sqrt(x), 2, f_invsqrt),
                'exp': (lambda x: torch.exp(x), lambda x: sympy.exp(x), 2, f_exp),
                'log': (lambda x: torch.log(x), lambda x: sympy.log(x), 2, f_log),
                'abs': (lambda x: torch.abs(x), lambda x: sympy.Abs(x), 3, lambda x, y_th: ((), torch.abs(x))),
                'sin': (lambda x: torch.sin(x), lambda x: sympy.sin(x), 2, lambda x, y_th: ((), torch.sin(x))),
                'cos': (lambda x: torch.cos(x), lambda x: sympy.cos(x), 2, lambda x, y_th: ((), torch.cos(x))),
                'tan': (lambda x: torch.tan(x), lambda x: sympy.tan(x), 3, f_tan),
                'tanh': (lambda x: torch.tanh(x), lambda x: sympy.tanh(x), 3, lambda x, y_th: ((), torch.tanh(x))),
                'sgn': (lambda x: torch.sign(x), lambda x: sympy.sign(x), 3, lambda x, y_th: ((), torch.sign(x))),
                'arcsin': (lambda x: torch.arcsin(x), lambda x: sympy.asin(x), 4, f_arcsin),
                'arccos': (lambda x: torch.arccos(x), lambda x: sympy.acos(x), 4, f_arccos),
                'arctan': (lambda x: torch.arctan(x), lambda x: sympy.atan(x), 4, lambda x, y_th: ((), torch.arctan(x))),
                'arctanh': (lambda x: torch.arctanh(x), lambda x: sympy.atanh(x), 4, f_arctanh),
                '0': (lambda x: x*0, lambda x: x*0, 0, lambda x, y_th: ((), x*0)),
                'gaussian': (lambda x: torch.exp(-x**2), lambda x: sympy.exp(-x**2), 3, lambda x, y_th: ((), torch.exp(-x**2))),
                #'cosh': (lambda x: torch.cosh(x), lambda x: sympy.cosh(x), 5),
                #'sigmoid': (lambda x: torch.sigmoid(x), sympy.Function('sigmoid'), 4),
                #'relu': (lambda x: torch.relu(x), relu),
}
def create_dataset(f,
                   n_var=2,
                   f_mode='col',
                   ranges=[-1, 1],
                   train_num=1000,
                   test_num=1000,
                   normalize_input=False,
                   normalize_label=False,
                   device='cpu',
                   seed=0):
    '''
    create a synthetic dataset from a symbolic function

    Args:
    -----
        f : function
            the symbolic formula used to create the synthetic dataset
        n_var : int
            number of input variables
        f_mode : str
            'col' if f consumes inputs of shape (num, n_var);
            'row' if f consumes the transpose
        ranges : list or np.array; shape (2,) or (n_var, 2)
            the range of input variables. Default: [-1,1].
        train_num : int
            the number of training samples. Default: 1000.
        test_num : int
            the number of test samples. Default: 1000.
        normalize_input : bool
            If True, standardize inputs with train statistics. Default: False.
        normalize_label : bool
            If True, standardize labels with train statistics. Default: False.
        device : str
            device. Default: 'cpu'.
        seed : int
            random seed. Default: 0.

    Returns:
    --------
        dataset : dict
            Train/test inputs/labels are dataset['train_input'],
            dataset['train_label'], dataset['test_input'], dataset['test_label']

    Raises:
    -------
        ValueError : if f_mode is neither 'col' nor 'row'

    Example
    -------
    >>> f = lambda x: torch.exp(torch.sin(torch.pi*x[:,[0]]) + x[:,[1]]**2)
    >>> dataset = create_dataset(f, n_var=2, train_num=100)
    >>> dataset['train_input'].shape
    torch.Size([100, 2])
    '''
    np.random.seed(seed)
    torch.manual_seed(seed)

    # broadcast a single (low, high) pair to all variables
    if len(np.array(ranges).shape) == 1:
        ranges = np.array(ranges * n_var).reshape(n_var, 2)
    else:
        ranges = np.array(ranges)

    # sample inputs uniformly within each variable's range
    train_input = torch.zeros(train_num, n_var)
    test_input = torch.zeros(test_num, n_var)
    for i in range(n_var):
        train_input[:, i] = torch.rand(train_num,) * (ranges[i, 1] - ranges[i, 0]) + ranges[i, 0]
        test_input[:, i] = torch.rand(test_num,) * (ranges[i, 1] - ranges[i, 0]) + ranges[i, 0]

    if f_mode == 'col':
        train_label = f(train_input)
        test_label = f(test_input)
    elif f_mode == 'row':
        train_label = f(train_input.T)
        test_label = f(test_input.T)
    else:
        # BUG FIX: the original only printed a warning here and then crashed
        # below with a NameError (train_label unbound); fail fast instead.
        raise ValueError(f'f_mode {f_mode} not recognized')

    # promote 1D labels to a column vector
    if len(train_label.shape) == 1:
        train_label = train_label.unsqueeze(dim=1)
        test_label = test_label.unsqueeze(dim=1)

    def normalize(data, mean, std):
        return (data - mean) / std

    # optional standardization (statistics computed on the training split)
    if normalize_input:
        mean_input = torch.mean(train_input, dim=0, keepdim=True)
        std_input = torch.std(train_input, dim=0, keepdim=True)
        train_input = normalize(train_input, mean_input, std_input)
        test_input = normalize(test_input, mean_input, std_input)

    if normalize_label:
        mean_label = torch.mean(train_label, dim=0, keepdim=True)
        std_label = torch.std(train_label, dim=0, keepdim=True)
        train_label = normalize(train_label, mean_label, std_label)
        test_label = normalize(test_label, mean_label, std_label)

    dataset = {}
    dataset['train_input'] = train_input.to(device)
    dataset['test_input'] = test_input.to(device)
    dataset['train_label'] = train_label.to(device)
    dataset['test_label'] = test_label.to(device)

    return dataset
def fit_params(x, y, fun, a_range=(-10,10), b_range=(-10,10), grid_number=101, iteration=3, verbose=True, device='cpu'):
    '''
    fit a, b, c, d such that

    .. math::
        |y-(cf(ax+b)+d)|^2

    is minimized: grid-search (a, b) with `iteration` rounds of zooming,
    then obtain (c, d) by linear regression on fun(a*x+b).

    Args:
    -----
        x : 1D array
            x values
        y : 1D array
            y values
        fun : function
            symbolic function
        a_range : tuple
            sweeping range of a
        b_range : tuple
            sweeping range of b
        grid_number : int
            number of steps along a and b
        iteration : int
            number of zooming in
        verbose : bool
            print extra information if True
        device : str
            device

    Returns:
    --------
        torch.stack([a_best, b_best, c_best, d_best]), r2_best

    Example
    -------
    >>> num = 100
    >>> x = torch.linspace(-1,1,steps=num)
    >>> noises = torch.normal(0,1,(num,)) * 0.02
    >>> y = 5.0*torch.sin(3.0*x + 2.0) + 0.7 + noises
    >>> fit_params(x, y, torch.sin)
    r2 is 0.9999727010726929
    (tensor([2.9982, 1.9996, 5.0053, 0.7011]), tensor(1.0000))
    '''
    for sweep in range(iteration):
        # evaluate fun(a*x+b) on a (grid_number x grid_number) grid of (a, b)
        a_ = torch.linspace(a_range[0], a_range[1], steps=grid_number, device=device)
        b_ = torch.linspace(b_range[0], b_range[1], steps=grid_number, device=device)
        a_grid, b_grid = torch.meshgrid(a_, b_, indexing='ij')
        post_fun = fun(a_grid[None, :, :] * x[:, None, None] + b_grid[None, :, :])

        # R^2 of the best affine fit y ~ c*post_fun + d, per grid cell
        x_mean = torch.mean(post_fun, dim=[0], keepdim=True)
        y_mean = torch.mean(y, dim=[0], keepdim=True)
        numerator = torch.sum((post_fun - x_mean) * (y - y_mean)[:, None, None], dim=0) ** 2
        denominator = torch.sum((post_fun - x_mean) ** 2, dim=0) * torch.sum((y - y_mean)[:, None, None] ** 2, dim=0)
        r2 = torch.nan_to_num(numerator / (denominator + 1e-4))

        best_id = torch.argmax(r2)
        a_id = torch.div(best_id, grid_number, rounding_mode='floor')
        b_id = best_id % grid_number

        # zoom: shrink the sweep window around the best (a, b) cell
        if a_id == 0 or a_id == grid_number - 1 or b_id == 0 or b_id == grid_number - 1:
            if sweep == 0 and verbose == True:
                print('Best value at boundary.')
            if a_id == 0:
                a_range = [a_[0], a_[1]]
            if a_id == grid_number - 1:
                a_range = [a_[-2], a_[-1]]
            if b_id == 0:
                b_range = [b_[0], b_[1]]
            if b_id == grid_number - 1:
                b_range = [b_[-2], b_[-1]]
        else:
            a_range = [a_[a_id - 1], a_[a_id + 1]]
            b_range = [b_[b_id - 1], b_[b_id + 1]]

    a_best = a_[a_id]
    b_best = b_[b_id]
    post_fun = fun(a_best * x + b_best)
    r2_best = r2[a_id, b_id]

    if verbose == True:
        print(f"r2 is {r2_best}")
        if r2_best < 0.9:
            print(f'r2 is not very high, please double check if you are choosing the correct symbolic function.')

    # c, d via ordinary least squares on the transformed feature
    post_fun = torch.nan_to_num(post_fun)
    reg = LinearRegression().fit(post_fun[:, None].detach().cpu().numpy(), y.detach().cpu().numpy())
    c_best = torch.from_numpy(reg.coef_)[0].to(device)
    d_best = torch.from_numpy(np.array(reg.intercept_)).to(device)
    return torch.stack([a_best, b_best, c_best, d_best]), r2_best
def sparse_mask(in_dim, out_dim):
    '''
    Build a sparse 0/1 connectivity mask of shape (in_dim, out_dim):
    inputs and outputs are placed evenly on [0, 1], then each input connects
    to its nearest output and each output to its nearest input, so every row
    and every column has at least one connection.
    '''
    in_coord = torch.arange(in_dim) * 1/in_dim + 1/(2*in_dim)
    out_coord = torch.arange(out_dim) * 1/out_dim + 1/(2*out_dim)

    # pairwise distances between output and input positions: (out_dim, in_dim)
    dist_mat = torch.abs(out_coord[:, None] - in_coord[None, :])
    nearest_out = torch.argmin(dist_mat, dim=0)  # per input, closest output
    nearest_in = torch.argmin(dist_mat, dim=1)   # per output, closest input

    edges = torch.cat([
        torch.stack([torch.arange(in_dim), nearest_out]).permute(1, 0),
        torch.stack([nearest_in, torch.arange(out_dim)]).permute(1, 0),
    ], dim=0)

    mask = torch.zeros(in_dim, out_dim)
    mask[edges[:, 0], edges[:, 1]] = 1.
    return mask


def add_symbolic(name, fun, c=1, fun_singularity=None):
    '''
    register a new symbolic function in SYMBOLIC_LIB

    Args:
    -----
        name : str
            name of the function
        fun : fun
            torch function or lambda function
        c : int
            complexity score
        fun_singularity : fun or None
            singularity-protected variant; defaults to `fun` itself

    Returns:
    --------
        None

    Example
    -------
    >>> print(SYMBOLIC_LIB['Bessel'])
    KeyError: 'Bessel'
    >>> add_symbolic('Bessel', torch.special.bessel_j0)
    >>> print(SYMBOLIC_LIB['Bessel'])
    (, Bessel)
    '''
    # expose a same-named sympy Function at module level so that generated
    # expressions can reference it (the original used exec; direct assignment
    # to globals() is equivalent)
    globals()[name] = sympy.Function(name)
    if fun_singularity is None:
        fun_singularity = fun
    SYMBOLIC_LIB[name] = (fun, globals()[name], c, fun_singularity)


def ex_round(ex1, n_digit):
    '''
    round every float literal inside a sympy expression to n_digit digits

    Args:
    -----
        ex1 : sympy expression
        n_digit : int

    Returns:
    --------
        ex2 : sympy expression

    Example
    -------
    >>> from kan.utils import *
    >>> from sympy import *
    >>> input_vars = a, b = symbols('a b')
    >>> expression = 3.14534242 * exp(sin(pi*a) + b**2) - 2.32345402
    >>> ex_round(expression, 2)
    '''
    rounded = ex1
    for node in sympy.preorder_traversal(ex1):
        if isinstance(node, sympy.Float):
            rounded = rounded.subs(node, round(node, n_digit))
    return rounded
def augment_input(orig_vars, aux_vars, x):
    '''
    prepend auxiliary features (sympy expressions of the original variables)
    to the inputs

    Args:
    -----
        orig_vars : list of sympy symbols
        aux_vars : list of auxiliary symbols (sympy expressions)
        x : inputs (torch.Tensor) or dataset dict

    Returns:
    --------
        augmented inputs (same type as x)

    Example
    -------
    >>> from kan.utils import *
    >>> from sympy import *
    >>> orig_vars = a, b = symbols('a b')
    >>> aux_vars = [a + b, a * b]
    >>> x = torch.rand(100, 2)
    >>> augment_input(orig_vars, aux_vars, x).shape
    '''
    if isinstance(x, torch.Tensor):
        aux_values = torch.tensor([]).to(x.device)

        for aux_var in aux_vars:
            fn = lambdify(orig_vars, aux_var, 'numpy')  # returns a numpy-ready function
            col = torch.from_numpy(fn(*[x[:, [i]].numpy() for i in range(len(orig_vars))]))
            aux_values = torch.cat([aux_values, col], dim=1)

        # auxiliary columns come first, original variables after
        x = torch.cat([aux_values, x], dim=1)

    elif isinstance(x, dict):
        # dataset dict: augment both splits in place
        x['train_input'] = augment_input(orig_vars, aux_vars, x['train_input'])
        x['test_input'] = augment_input(orig_vars, aux_vars, x['test_input'])

    return x


def batch_jacobian(func, x, create_graph=False, mode='scalar'):
    '''
    batched Jacobian of func w.r.t. x (x in shape (batch, length))

    Args:
    -----
        func : function or model
        x : inputs
        create_graph : bool
        mode : str
            'scalar' -> returns shape (batch, length);
            'vector' -> returns shape (batch, out, length)

    Returns:
    --------
        jacobian

    Example
    -------
    >>> from kan.utils import batch_jacobian
    >>> x = torch.normal(0,1,size=(100,2))
    >>> model = lambda x: x[:,[0]] + x[:,[1]]
    >>> batch_jacobian(model, x)
    '''
    # summing over the batch decouples per-sample gradients, letting one
    # jacobian call produce all rows at once
    def _sum_over_batch(inp):
        return func(inp).sum(dim=0)

    jac = torch.autograd.functional.jacobian(_sum_over_batch, x, create_graph=create_graph)
    if mode == 'scalar':
        return jac[0]
    elif mode == 'vector':
        return jac.permute(1, 0, 2)


def batch_hessian(model, x, create_graph=False):
    '''
    batched Hessian of a scalar-output model w.r.t. x (x in shape (batch, length))

    Args:
    -----
        model : function or model
        x : inputs
        create_graph : bool

    Returns:
    --------
        hessian, shape (batch, length, length)

    Example
    -------
    >>> from kan.utils import batch_hessian
    >>> x = torch.normal(0,1,size=(100,2))
    >>> model = lambda x: x[:,[0]]**2 + x[:,[1]]**2
    >>> batch_hessian(model, x)
    '''
    # Hessian = Jacobian of the (graph-retaining) batched gradient
    grad_fn = lambda inp: batch_jacobian(model, inp, create_graph=True)

    def _sum_over_batch(inp):
        return grad_fn(inp).sum(dim=0)

    return torch.autograd.functional.jacobian(_sum_over_batch, x, create_graph=create_graph).permute(1, 0, 2)
def create_dataset_from_data(inputs, labels, train_ratio=0.8, device='cpu'):
    '''
    Build a train/test dataset dictionary from raw input/label tensors.

    Args:
    -----
        inputs : 2D torch.float
        labels : 2D torch.float
        train_ratio : float
            fraction of the samples assigned to the training split
        device : str

    Returns:
    --------
        dict with keys 'train_input', 'test_input', 'train_label', 'test_label'

    Example
    -------
    >>> from kan.utils import create_dataset_from_data
    >>> x = torch.normal(0,1,size=(100,2))
    >>> y = torch.normal(0,1,size=(100,1))
    >>> dataset = create_dataset_from_data(x, y)
    >>> dataset['train_input'].shape
    '''
    n_samples = inputs.shape[0]
    # sample the training indices without replacement; everything left over
    # becomes the test split
    train_id = np.random.choice(n_samples, int(n_samples * train_ratio), replace=False)
    test_id = list(set(np.arange(n_samples)) - set(train_id))

    return {
        'train_input': inputs[train_id].detach().to(device),
        'test_input': inputs[test_id].detach().to(device),
        'train_label': labels[train_id].detach().to(device),
        'test_label': labels[test_id].detach().to(device),
    }
def get_derivative(model, inputs, labels, derivative='hessian', loss_mode='pred', reg_metric='w', lamb=0., lamb_l1=1., lamb_entropy=0.):
    '''
    compute the jacobian/hessian of the loss with respect to model parameters

    Args:
    -----
        model : KAN-like model
            must provide state_dict(), parameters(), copy(), get_reg(), __call__
        inputs : 2D torch.float
        labels : 2D torch.float
        derivative : str
            'jacobian' or 'hessian'
        loss_mode : str
            'pred' (prediction MSE), 'reg' (regularization), or 'all' (pred + lamb*reg)
        reg_metric : str
            forwarded to model.get_reg
        lamb, lamb_l1, lamb_entropy : float
            loss weighting coefficients, forwarded to model.get_reg

    Returns:
    --------
        jacobian or hessian of the scalar loss w.r.t. the flattened parameter vector
    '''
    # Build a map from state_dict keys to attribute paths on `model1`, e.g.
    # a key like "act_fun.0.coef" becomes the string "model1.act_fun[0].coef",
    # so tensors can later be assigned via exec() without load_state_dict.
    # NOTE(review): the regexes use "." unescaped, so ".[0-9]+" matches any
    # char followed by digits — presumably fine for this model's key layout,
    # but verify against the actual state_dict keys.
    def get_mapping(model):

        mapping = {}
        name = 'model1'

        keys = list(model.state_dict().keys())
        for key in keys:

            # keys of the form "<attr>.<idx><rest>" -> "<attr>[<idx>]<rest>"
            y = re.findall(".[0-9]+", key)
            if len(y) > 0:
                y = y[0][1:]
                x = re.split(".[0-9]+", key)
                mapping[key] = name + '.' + x[0] + '[' + y + ']' + x[1]

            # keys of the form "<attr>_<idx>" -> "<attr>[<idx>]"
            y = re.findall("_[0-9]+", key)
            if len(y) > 0:
                y = y[0][1:]
                x = re.split(".[0-9]+", key)
                mapping[key] = name + '.' + x[0] + '[' + y + ']'

        return mapping


    #model1 = copy.deepcopy(model)
    model1 = model.copy()
    mapping = get_mapping(model)

    # collect keys and shapes (order matches model.parameters())
    keys = list(model.state_dict().keys())
    shapes = []

    for params in model.parameters():
        shapes.append(params.shape)


    # turn a flattened vector back into a state dict of correctly-shaped views
    def param2statedict(p, keys, shapes):

        new_state_dict = {}

        start = 0
        n_group = len(keys)
        for i in range(n_group):
            shape = shapes[i]
            # n_params is a 0-d tensor; valid as a slice bound and for +=
            n_params = torch.prod(torch.tensor(shape))
            new_state_dict[keys[i]] = p[start:start+n_params].reshape(shape)
            start += n_params

        return new_state_dict

    # Assign tensors as plain attributes via exec() instead of
    # load_state_dict: load_state_dict copies data in place, which breaks the
    # autograd graph, whereas attribute assignment keeps gradients flowing
    # from `p` through the model's forward pass.
    # NOTE(review): exec() on mapping strings — inputs come from
    # model.state_dict() keys, i.e. trusted, but keep it that way.
    def differentiable_load_state_dict(mapping, state_dict, model1):

        for key in keys:
            # registered Parameters (paths not ending in ']') must be deleted
            # first, otherwise assigning a plain tensor over them raises
            if mapping[key][-1] != ']':
                exec(f"del {mapping[key]}")
            exec(f"{mapping[key]} = state_dict[key]")


    # build the closure mapping a flattened parameter vector p -> scalar loss
    def get_param2loss_fun(inputs, labels):

        def param2loss_fun(p):

            p = p[0]
            state_dict = param2statedict(p, keys, shapes)
            # this step is non-differentiable
            #model.load_state_dict(state_dict)
            differentiable_load_state_dict(mapping, state_dict, model1)
            if loss_mode == 'pred':
                pred_loss = torch.mean((model1(inputs) - labels)**2, dim=(0,1), keepdim=True)
                loss = pred_loss
            elif loss_mode == 'reg':
                reg_loss = model1.get_reg(reg_metric=reg_metric, lamb_l1=lamb_l1, lamb_entropy=lamb_entropy) * torch.ones(1,1)
                loss = reg_loss
            elif loss_mode == 'all':
                pred_loss = torch.mean((model1(inputs) - labels)**2, dim=(0,1), keepdim=True)
                reg_loss = model1.get_reg(reg_metric=reg_metric, lamb_l1=lamb_l1, lamb_entropy=lamb_entropy) * torch.ones(1,1)
                loss = pred_loss + lamb * reg_loss
            return loss

        return param2loss_fun

    fun = get_param2loss_fun(inputs, labels)
    # batch dimension of 1: batch_jacobian/batch_hessian expect (Batch, Length)
    p = model2param(model)[None,:]
    if derivative == 'hessian':
        result = batch_hessian(fun, p)
    elif derivative == 'jacobian':
        result = batch_jacobian(fun, p)
    return result
def model2param(model):
    '''
    turn model parameters into a flattened vector

    Args:
    -----
        model : torch model
            must expose .device and .parameters()

    Returns:
    --------
        p : 1D torch.float
            all parameters concatenated in parameters() order
    '''
    p = torch.tensor([]).to(model.device)
    for params in model.parameters():
        p = torch.cat([p, params.reshape(-1,)], dim=0)
    return p


def main():
    """Train a small KAN on f(x, y) = exp(sin(pi*x) + y^2) and report throughput."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--steps", type=int, default=100, help="training epoch")
    parser.add_argument("--batch_size", type=int, default=1000, help="training batch size")
    args = parser.parse_args()

    torch.set_default_dtype(torch.float)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(device)

    # create a KAN: 2D inputs, 1D output, 5 hidden neurons,
    # cubic splines (k=3), 3 grid intervals (grid=3)
    model = KAN(width=[2, 5, 1], grid=3, k=3, seed=42, device=device)

    # create dataset for the target f(x, y) = exp(sin(pi*x) + y^2)
    f = lambda x: torch.exp(torch.sin(torch.pi * x[:, [0]]) + x[:, [1]] ** 2)
    dataset = create_dataset(f, n_var=2, device=device)

    # warm-up forward pass so lazy initialization is excluded from the timing
    model(dataset['train_input'])

    t_start = time.time()
    model.fit(dataset, opt="LBFGS", steps=args.steps, lamb=0.001, batch=args.batch_size)
    t_elapse = time.time() - t_start

    # a batch larger than the training set is clamped to the set size;
    # mirror that here so the throughput accounting is honest
    # (also fixes the `real_batchz_size` typo from the original)
    real_batch_size = min(args.batch_size, dataset['train_input'].shape[0])
    total_samples = args.steps * real_batch_size
    print(f"finished {total_samples} samples, total time: {t_elapse:5.3} s, samples_per_second:{total_samples/t_elapse:7.3}")


if __name__ == "__main__":
    main()