# -*- coding: utf-8 -*-
"""
Created on Tue Jun  1 15:03:34 2021

@author: assafr
"""

## Cleaning the environment
# %reset -f
# %clear

import numpy as np
from scipy.optimize import minimize
import scipy.linalg as spla

## Parameters
n = 300        # sample size
sigsq = 1      # nugget: the kernel is signal*exp(-sqrt(h'Ah)) + sigsq*I,
               # where A = R(eta)' diag(1/lambda1**2, 1/lambda2**2) R(eta)
signal = 1
eta_true = 0   # eta=0 -> canonical axes (0, pi/2)
lambda1 = 1
lambda2 = 20   # lambda2 = lambda1 is the isotropic setting, lambda2 != lambda1 is anisotropy

## Functions
def Kernel(s, signal, lambda1, lambda2, sigsq, eta):
    """Anisotropic exponential covariance matrix with a nugget term."""
    rotation = np.array([[np.cos(eta), -np.sin(eta)], [np.sin(eta), np.cos(eta)]])
    scaling = np.diag(np.array([1/lambda1**2, 1/lambda2**2]))
    A = rotation.T @ scaling @ rotation
    cov = []
    for col in range(len(s)):
        # lag vectors from every point to point 'col'
        h = np.column_stack((s[:, 0] - s[col, 0], s[:, 1] - s[col, 1]))
        cov.append(signal * np.exp(-np.sqrt(np.diag(h @ A @ h.T))))
    cov = np.array(cov) + sigsq * np.eye(len(s))
    return cov

def loss(par, version, z, s):
    """Negative log-likelihood; under 'h0' the model is isotropic (eta=0, lambda2=lambda1)."""
    sigsq = par[0]
    signal = par[1]
    lambda1 = par[2]
    if version == 'h0':
        eta = 0
        lambda2 = lambda1
    else:
        eta = par[4]
        lambda2 = par[3]
    cov = Kernel(s, signal, lambda1, lambda2, sigsq, eta)
    cov_root = spla.cholesky(cov, lower=True)  # may fail when cov is numerically singular
    return (np.sum(np.log(np.diagonal(cov_root)))
            + 0.5 * z.T.dot(spla.cho_solve((cov_root, True), z))
            + 0.5 * len(z) * np.log(2*np.pi))

## Sampling
s = np.column_stack((np.random.uniform(0, 1, n), np.random.uniform(0, 1, n)))
var_true = Kernel(s, signal, lambda1, lambda2, sigsq, eta_true)
z = np.random.multivariate_normal(np.zeros(len(s)), var_true, 1).ravel()

## Sample statistic
Opt_h0 = minimize(loss, [1, 1, 1], args=('h0', z, s),  # optimization over sigsq, signal, lambda
                  method='L-BFGS-B', bounds=((0.1, 100), (0.1, 100), (0.1, 100)),
                  options={'maxiter': 50})  # you can decrease 'maxiter'
Opt_h1 = minimize(loss, [1, 1, 1, 1, 1], args=('h1', z, s),  # optimization over sigsq, signal, lambdas and eta
                  method='L-BFGS-B',
                  bounds=((0.1, 100), (0.1, 100), (0.1, 100), (0.1, 100), (-np.pi/10, np.pi/2 + np.pi/10)),
                  options={'maxiter': 50})
## For numerical reasons the bounds on eta should extend slightly beyond [0, pi/2],
## i.e. include 0-epsilon and pi/2+epsilon.
statistic = Opt_h0['fun'] - Opt_h1['fun']

## Bootstrap
eta_h0 = 0
# Opt_h0['x'] = [sigsq, signal, lambda1]; under H0, lambda2 = lambda1
var_bootstrap = Kernel(s, Opt_h0['x'][1], Opt_h0['x'][2], Opt_h0['x'][2], Opt_h0['x'][0], eta_h0)
B = 100  # number of bootstrap iterations
statistic_dist_h0 = []
for i in range(B):
    z_b = np.random.multivariate_normal(np.zeros(len(s)), var_bootstrap, 1).ravel()
    try:
        Opt_h0_b = minimize(loss, [1, 1, 1], args=('h0', z_b, s), method='L-BFGS-B',
                            bounds=((0.01, 100), (0.01, 100), (0.01, 100)),
                            options={'maxiter': 50})['fun']
        Opt_h1_b = minimize(loss, [1, 1, 1, 1, 1], args=('h1', z_b, s), method='L-BFGS-B',
                            bounds=((0.01, 100), (0.01, 100), (0.01, 100), (0.01, 100),
                                    (-np.pi/10, np.pi/2 + np.pi/10)),
                            options={'maxiter': 50})['fun']
        statistic_dist_h0.append(Opt_h0_b - Opt_h1_b)
    except Exception:
        print(f'Iteration {i}: a numerical issue (e.g. a failed Cholesky factorization) '
              '- the loop continues with the next bootstrap sample.')

## Summarizing the results
# Bootstrap p-value (assumed definition): the share of bootstrap statistics
# at least as large as the observed one.
PV = sum(np.array(statistic_dist_h0) >= statistic) / len(statistic_dist_h0)
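
## Reporting (illustrative addition): a minimal sketch of how the test outcome
## might be summarized, assuming a nominal significance level alpha = 0.05.
## 'alpha' and the printed wording are assumptions, not part of the original analysis.
alpha = 0.05  # assumed nominal level (hypothetical choice)
print('observed statistic:', statistic)
print('bootstrap p-value:', PV)
print('reject H0 (isotropy) at level', alpha, ':', PV < alpha)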