agd.AutomaticDifferentiation.Dense2

  1# Copyright 2020 Jean-Marie Mirebeau, University Paris-Sud, CNRS, University Paris-Saclay
  2# Distributed WITHOUT ANY WARRANTY. Licensed under the Apache License, Version 2.0, see http://www.apache.org/licenses/LICENSE-2.0
  3
  4import numpy as np
  5import functools
  6from . import ad_generic
  7from . import misc
  8from . import Dense
  9from . import Base
 10
 11_add_dim = misc._add_dim; _add_dim2 = misc._add_dim2; _add_coef=misc._add_coef;
 12
 13class denseAD2(Base.baseAD):
 14	"""
 15	A class for dense forward second order automatic differentiation
 16	"""
 17
 18	def __init__(self,value,coef1=None,coef2=None,broadcast_ad=False):
 19		if self.is_ad(value): # Case where one should just reproduce value
 20			assert coef1 is None and coef2 is None
 21			self.value,self.coef1,self.coef2=value.value,value.coef1,value.coef2
 22			return
 23		if ad_generic.is_ad(value):
 24			raise Base.ADCastError(f"Attempting to cast {type(value)} to incompatible type denseAD2")
 25		self.value = ad_generic.asarray(value)
 26		self.coef1 = (np.zeros_like(value,shape=self.shape+(0,)) if coef1 is None 
 27			else misc._test_or_broadcast_ad(coef1,self.shape,broadcast_ad) )
 28		self.coef2 = (np.zeros_like(value,shape=self.shape+(0,0)) if coef2 is None 
 29			else misc._test_or_broadcast_ad(coef2,self.shape,broadcast_ad,2) )
 30
 31	@classmethod
 32	def order(cls): return 2
 33	def copy(self,order='C'):
 34		return self.new(self.value.copy(order=order),
 35			self.coef1.copy(order=order),self.coef2.copy(order=order))
 36	def as_tuple(self): return self.value,self.coef1,self.coef2
 37
 38	# Representation 
 39	def __iter__(self):
 40		for value,coef1,coef2 in zip(self.value,self.coef1,self.coef2):
 41			yield self.new(value,coef1,coef2)
 42
 43	def __str__(self):
 44		return "denseAD2("+str(self.value)+","+misc._prep_nl(str(self.coef1))+","+misc._prep_nl(str(self.coef2)) +")"
 45	def __repr__(self):
 46		return "denseAD2("+repr(self.value)+","+misc._prep_nl(repr(self.coef1))+","+misc._prep_nl(repr(self.coef2)) +")"
 47
 48	# Operators
 49	def as_func(self,h):
 50		"""Replaces the symbolic perturbation with h"""
 51		value,coef1,coef2 = (misc.add_ndim(e,h.ndim-1) for e in (self.value,self.coef1,self.coef2))
 52		hh = np.expand_dims(h,axis=0) * np.expand_dims(h,axis=1)
 53		return value+(coef1*h).sum(axis=self.ndim) + 0.5*(hh*coef2).sum(axis=(self.ndim,self.ndim+1))
 54
 55	def __add__(self,other):
 56		if self.is_ad(other):
 57			return self.new(self.value+other.value,_add_coef(self.coef1,other.coef1),_add_coef(self.coef2,other.coef2))
 58		else:
 59			return self.new(self.value+other, self.coef1, self.coef2, broadcast_ad=True)
 60	def __sub__(self,other):
 61		if self.is_ad(other):
 62			return self.new(self.value-other.value,_add_coef(self.coef1,-other.coef1),_add_coef(self.coef2,-other.coef2))
 63		else:
 64			return self.new(self.value-other, self.coef1, self.coef2, broadcast_ad=True)
 65
 66	def __mul__(self,other):
 67		if self.is_ad(other):
 68			mixed = np.expand_dims(self.coef1,axis=-1)*np.expand_dims(other.coef1,axis=-2)
 69			return self.new(self.value*other.value, _add_coef(_add_dim(self.value)*other.coef1,_add_dim(other.value)*self.coef1),
 70				_add_coef(_add_coef(_add_dim2(self.value)*other.coef2,_add_dim2(other.value)*self.coef2),_add_coef(mixed,np.moveaxis(mixed,-2,-1))))
 71		elif self.isndarray(other):
 72			return self.new(self.value*other,_add_dim(other)*self.coef1,_add_dim2(other)*self.coef2)
 73		else:
 74			return self.new(self.value*other,other*self.coef1,other*self.coef2)
 75
 76	def __truediv__(self,other):
 77		if self.is_ad(other):
 78			return self.__mul__(other.__pow__(-1))
 79		elif self.isndarray(other):
 80			inv = 1./other
 81			return self.new(self.value*inv,_add_dim(inv)*self.coef1,_add_dim2(inv)*self.coef2)
 82		else:
 83			inv = 1./other
 84			return self.new(self.value*inv,self.coef1*inv,self.coef2*inv)
 85
 86	__rmul__ = __mul__
 87	__radd__ = __add__
 88	def __rsub__(self,other): 		return -(self-other)
 89	def __rtruediv__(self,other):	return self.__pow__(-1).__mul__(other)
 90
 91
 92	def __neg__(self):		return self.new(-self.value,-self.coef1,-self.coef2)
 93
 94	# Math functions
 95	def _math_helper(self,deriv): # Inputs : a=f(x), b=f'(x), c=f''(x), where x=self.value
 96		a,b,c=deriv
 97		mixed = np.expand_dims(self.coef1,axis=-1)*np.expand_dims(self.coef1,axis=-2)
 98		return self.new(a,_add_dim(b)*self.coef1,_add_dim2(b)*self.coef2+_add_dim2(c)*mixed)
 99
100	@classmethod
101	def compose(cls,a,t):
102		assert cls.is_ad(a) and all(cls.is_ad(b) for b in t)
103		b = np.moveaxis(cls.concatenate(t,axis=0),0,-1)
104		coef1 = (_add_dim(a.coef1)*b.coef1).sum(axis=-2)
105		coef2_pure = (_add_dim2(a.coef1)*b.coef2).sum(axis=-3)
106		shape_factor = b.shape[:-1]
107		mixed = ( b.coef1.reshape(shape_factor+(a.size_ad,1,b.size_ad,1))
108			     *b.coef1.reshape(shape_factor+(1,a.size_ad,1,b.size_ad)) )
109		coef2_mixed = (_add_dim2(a.coef2)*mixed).sum(axis=-3).sum(axis=-3)
110		return cls.new(a.value,coef1,coef2_pure+coef2_mixed)
111
112	#Indexing
113	@property
114	def size_ad(self):  return self.coef1.shape[-1]
115
116	def to_first(self): return Dense.denseAD(self.value,self.coef1)
117	def gradient(self,i=None): 
118		"""Returns the gradient, or the i-th component of the gradient if specified."""
119		grad = np.moveaxis(self.coef1,-1,0)
120		return grad if i is None else grad[i]
121	def hessian(self,i=None,j=None): 
122		"""Returns the hessian, or component (i,j) of the hessian if specified."""
123		assert (i is None) == (j is None)
124		hess = np.moveaxis(self.coef2,(-2,-1),(0,1))
125		return hess if i is None else hess[i,j]
126
127	def __getitem__(self,key):
128		ekey1,ekey2 = misc.key_expand(key,1),misc.key_expand(key,2)
129		return self.new(self.value[key], self.coef1[ekey1], self.coef2[ekey2])
130
131	def __setitem__(self,key,other):
132		ekey1,ekey2 = misc.key_expand(key,1),misc.key_expand(key,2)
133		if self.is_ad(other):
134			osad = other.size_ad
135			if osad==0: return self.__setitem__(key,other.value)
136			elif self.size_ad==0: 
137				self.coef1=np.zeros_like(self.value,shape=self.coef1.shape[:-1]+(osad,))
138				self.coef2=np.zeros_like(self.value,shape=self.coef2.shape[:-2]+(osad,osad))
139			self.value[key] = other.value
140			self.coef1[ekey1] = other.coef1
141			self.coef2[ekey2] = other.coef2
142		else:
143			self.value[key] = other
144			self.coef1[ekey1] = 0.
145			self.coef2[ekey2] = 0.
146
147
148	def reshape(self,shape,order='C'):
149		value = self.value.reshape(shape,order=order)
150		return self.new(value,self.coef1.reshape(value.shape+(self.size_ad,),order=order),
151			self.coef2.reshape(value.shape+(self.size_ad,self.size_ad),order=order))
152
153	def broadcast_to(self,shape):
154		shape1 = shape+(self.size_ad,)
155		shape2 = shape+(self.size_ad,self.size_ad)
156		return self.new(np.broadcast_to(self.value,shape), 
157			np.broadcast_to(self.coef1,shape1), np.broadcast_to(self.coef2,shape2))
158
159	def pad(self,pad_width,*args,constant_values=0,**kwargs):
160		return self.new(
161			np.pad(self.value,pad_width,*args,constant_values=constant_values,**kwargs),
162			np.pad(self.coef1,pad_width+((0,0),),*args,constant_values=0,**kwargs),
163			np.pad(self.coef2,pad_width+((0,0),(0,0),),*args,constant_values=0,**kwargs),)
164	
165	def transpose(self,axes=None):
166		if axes is None: axes = tuple(reversed(range(self.ndim)))
167		axes1 = tuple(axes) +(self.ndim,)
168		axes2 = tuple(axes) +(self.ndim,self.ndim+1)
169		return self.new(self.value.transpose(axes),
170			self.coef1.transpose(axes1),self.coef2.transpose(axes2))
171
172	def allclose(self,other,*args,**kwargs): 
173		return np.allclose(self.value,other.value,*args,**kwargs) and \
174		       np.allclose(self.coef, other.coef, *args,**kwargs) and \
175		       np.allclose(self.coef2,other.coef2,*args,**kwargs)
176
177	# Reductions
178	def sum(self,axis=None,out=None,**kwargs):
179		if axis is None: return self.flatten().sum(axis=0,out=out,**kwargs)
180		axis=misc.normalize_axis(axis,self.ndim)
181		out = self.new(self.value.sum(axis,**kwargs), 
182			self.coef1.sum(axis,**kwargs), self.coef2.sum(axis,**kwargs))
183		return out
184
185	@classmethod
186	def concatenate(cls,elems,axis=0):
187		axis1,axis2 = (axis,axis) if axis>=0 else (axis-1,axis-2)
188		elems2 = tuple(cls(e) for e in elems)
189		size_ad = max(e.size_ad for e in elems2)
190		assert all((e.size_ad==size_ad or e.size_ad==0) for e in elems2)
191		return cls( 
192		np.concatenate(tuple(e.value for e in elems2), axis=axis), 
193		np.concatenate(tuple(e.coef1 if e.size_ad==size_ad else 
194			np.zeros_like(e.coef1,shape=e.shape+(size_ad,)) for e in elems2),axis=axis1),
195		np.concatenate(tuple(e.coef2 if e.size_ad==size_ad else 
196			np.zeros_like(e.coef2,shape=e.shape+(size_ad,size_ad)) for e in elems2),axis=axis2))
197
198	def apply_linear_operator(self,op):
199		return self.new(op(self.value),
200			misc.apply_linear_operator(op,self.coef1,flatten_ndim=1),
201			misc.apply_linear_operator(op,self.coef2,flatten_ndim=2))
202# -------- End of class denseAD2 -------
203
204# -------- Factory methods -----
205
def identity(*args,**kwargs):
	"""Creates a second order identity AD variable, with vanishing second order coefficients."""
	first = Dense.identity(*args,**kwargs)
	zeros2 = np.zeros_like(first.value,shape=first.shape+(first.size_ad,first.size_ad))
	return denseAD2(first.value,first.coef,zeros2)
210
def register(*args,**kwargs):
	"""Registers AD variables, delegating to the first order module with this module's identity factory."""
	return Dense.register(*args,**kwargs,ident=identity)
class denseAD2(agd.AutomaticDifferentiation.Base.baseAD):
 14class denseAD2(Base.baseAD):
 15	"""
 16	A class for dense forward second order automatic differentiation
 17	"""
 18
 19	def __init__(self,value,coef1=None,coef2=None,broadcast_ad=False):
 20		if self.is_ad(value): # Case where one should just reproduce value
 21			assert coef1 is None and coef2 is None
 22			self.value,self.coef1,self.coef2=value.value,value.coef1,value.coef2
 23			return
 24		if ad_generic.is_ad(value):
 25			raise Base.ADCastError(f"Attempting to cast {type(value)} to incompatible type denseAD2")
 26		self.value = ad_generic.asarray(value)
 27		self.coef1 = (np.zeros_like(value,shape=self.shape+(0,)) if coef1 is None 
 28			else misc._test_or_broadcast_ad(coef1,self.shape,broadcast_ad) )
 29		self.coef2 = (np.zeros_like(value,shape=self.shape+(0,0)) if coef2 is None 
 30			else misc._test_or_broadcast_ad(coef2,self.shape,broadcast_ad,2) )
 31
 32	@classmethod
 33	def order(cls): return 2
 34	def copy(self,order='C'):
 35		return self.new(self.value.copy(order=order),
 36			self.coef1.copy(order=order),self.coef2.copy(order=order))
 37	def as_tuple(self): return self.value,self.coef1,self.coef2
 38
 39	# Representation 
 40	def __iter__(self):
 41		for value,coef1,coef2 in zip(self.value,self.coef1,self.coef2):
 42			yield self.new(value,coef1,coef2)
 43
 44	def __str__(self):
 45		return "denseAD2("+str(self.value)+","+misc._prep_nl(str(self.coef1))+","+misc._prep_nl(str(self.coef2)) +")"
 46	def __repr__(self):
 47		return "denseAD2("+repr(self.value)+","+misc._prep_nl(repr(self.coef1))+","+misc._prep_nl(repr(self.coef2)) +")"
 48
 49	# Operators
 50	def as_func(self,h):
 51		"""Replaces the symbolic perturbation with h"""
 52		value,coef1,coef2 = (misc.add_ndim(e,h.ndim-1) for e in (self.value,self.coef1,self.coef2))
 53		hh = np.expand_dims(h,axis=0) * np.expand_dims(h,axis=1)
 54		return value+(coef1*h).sum(axis=self.ndim) + 0.5*(hh*coef2).sum(axis=(self.ndim,self.ndim+1))
 55
 56	def __add__(self,other):
 57		if self.is_ad(other):
 58			return self.new(self.value+other.value,_add_coef(self.coef1,other.coef1),_add_coef(self.coef2,other.coef2))
 59		else:
 60			return self.new(self.value+other, self.coef1, self.coef2, broadcast_ad=True)
 61	def __sub__(self,other):
 62		if self.is_ad(other):
 63			return self.new(self.value-other.value,_add_coef(self.coef1,-other.coef1),_add_coef(self.coef2,-other.coef2))
 64		else:
 65			return self.new(self.value-other, self.coef1, self.coef2, broadcast_ad=True)
 66
 67	def __mul__(self,other):
 68		if self.is_ad(other):
 69			mixed = np.expand_dims(self.coef1,axis=-1)*np.expand_dims(other.coef1,axis=-2)
 70			return self.new(self.value*other.value, _add_coef(_add_dim(self.value)*other.coef1,_add_dim(other.value)*self.coef1),
 71				_add_coef(_add_coef(_add_dim2(self.value)*other.coef2,_add_dim2(other.value)*self.coef2),_add_coef(mixed,np.moveaxis(mixed,-2,-1))))
 72		elif self.isndarray(other):
 73			return self.new(self.value*other,_add_dim(other)*self.coef1,_add_dim2(other)*self.coef2)
 74		else:
 75			return self.new(self.value*other,other*self.coef1,other*self.coef2)
 76
 77	def __truediv__(self,other):
 78		if self.is_ad(other):
 79			return self.__mul__(other.__pow__(-1))
 80		elif self.isndarray(other):
 81			inv = 1./other
 82			return self.new(self.value*inv,_add_dim(inv)*self.coef1,_add_dim2(inv)*self.coef2)
 83		else:
 84			inv = 1./other
 85			return self.new(self.value*inv,self.coef1*inv,self.coef2*inv)
 86
 87	__rmul__ = __mul__
 88	__radd__ = __add__
 89	def __rsub__(self,other): 		return -(self-other)
 90	def __rtruediv__(self,other):	return self.__pow__(-1).__mul__(other)
 91
 92
 93	def __neg__(self):		return self.new(-self.value,-self.coef1,-self.coef2)
 94
 95	# Math functions
 96	def _math_helper(self,deriv): # Inputs : a=f(x), b=f'(x), c=f''(x), where x=self.value
 97		a,b,c=deriv
 98		mixed = np.expand_dims(self.coef1,axis=-1)*np.expand_dims(self.coef1,axis=-2)
 99		return self.new(a,_add_dim(b)*self.coef1,_add_dim2(b)*self.coef2+_add_dim2(c)*mixed)
100
101	@classmethod
102	def compose(cls,a,t):
103		assert cls.is_ad(a) and all(cls.is_ad(b) for b in t)
104		b = np.moveaxis(cls.concatenate(t,axis=0),0,-1)
105		coef1 = (_add_dim(a.coef1)*b.coef1).sum(axis=-2)
106		coef2_pure = (_add_dim2(a.coef1)*b.coef2).sum(axis=-3)
107		shape_factor = b.shape[:-1]
108		mixed = ( b.coef1.reshape(shape_factor+(a.size_ad,1,b.size_ad,1))
109			     *b.coef1.reshape(shape_factor+(1,a.size_ad,1,b.size_ad)) )
110		coef2_mixed = (_add_dim2(a.coef2)*mixed).sum(axis=-3).sum(axis=-3)
111		return cls.new(a.value,coef1,coef2_pure+coef2_mixed)
112
113	#Indexing
114	@property
115	def size_ad(self):  return self.coef1.shape[-1]
116
117	def to_first(self): return Dense.denseAD(self.value,self.coef1)
118	def gradient(self,i=None): 
119		"""Returns the gradient, or the i-th component of the gradient if specified."""
120		grad = np.moveaxis(self.coef1,-1,0)
121		return grad if i is None else grad[i]
122	def hessian(self,i=None,j=None): 
123		"""Returns the hessian, or component (i,j) of the hessian if specified."""
124		assert (i is None) == (j is None)
125		hess = np.moveaxis(self.coef2,(-2,-1),(0,1))
126		return hess if i is None else hess[i,j]
127
128	def __getitem__(self,key):
129		ekey1,ekey2 = misc.key_expand(key,1),misc.key_expand(key,2)
130		return self.new(self.value[key], self.coef1[ekey1], self.coef2[ekey2])
131
132	def __setitem__(self,key,other):
133		ekey1,ekey2 = misc.key_expand(key,1),misc.key_expand(key,2)
134		if self.is_ad(other):
135			osad = other.size_ad
136			if osad==0: return self.__setitem__(key,other.value)
137			elif self.size_ad==0: 
138				self.coef1=np.zeros_like(self.value,shape=self.coef1.shape[:-1]+(osad,))
139				self.coef2=np.zeros_like(self.value,shape=self.coef2.shape[:-2]+(osad,osad))
140			self.value[key] = other.value
141			self.coef1[ekey1] = other.coef1
142			self.coef2[ekey2] = other.coef2
143		else:
144			self.value[key] = other
145			self.coef1[ekey1] = 0.
146			self.coef2[ekey2] = 0.
147
148
149	def reshape(self,shape,order='C'):
150		value = self.value.reshape(shape,order=order)
151		return self.new(value,self.coef1.reshape(value.shape+(self.size_ad,),order=order),
152			self.coef2.reshape(value.shape+(self.size_ad,self.size_ad),order=order))
153
154	def broadcast_to(self,shape):
155		shape1 = shape+(self.size_ad,)
156		shape2 = shape+(self.size_ad,self.size_ad)
157		return self.new(np.broadcast_to(self.value,shape), 
158			np.broadcast_to(self.coef1,shape1), np.broadcast_to(self.coef2,shape2))
159
160	def pad(self,pad_width,*args,constant_values=0,**kwargs):
161		return self.new(
162			np.pad(self.value,pad_width,*args,constant_values=constant_values,**kwargs),
163			np.pad(self.coef1,pad_width+((0,0),),*args,constant_values=0,**kwargs),
164			np.pad(self.coef2,pad_width+((0,0),(0,0),),*args,constant_values=0,**kwargs),)
165	
166	def transpose(self,axes=None):
167		if axes is None: axes = tuple(reversed(range(self.ndim)))
168		axes1 = tuple(axes) +(self.ndim,)
169		axes2 = tuple(axes) +(self.ndim,self.ndim+1)
170		return self.new(self.value.transpose(axes),
171			self.coef1.transpose(axes1),self.coef2.transpose(axes2))
172
173	def allclose(self,other,*args,**kwargs): 
174		return np.allclose(self.value,other.value,*args,**kwargs) and \
175		       np.allclose(self.coef1,other.coef1,*args,**kwargs) and \
176		       np.allclose(self.coef2,other.coef2,*args,**kwargs)
177
178	# Reductions
179	def sum(self,axis=None,out=None,**kwargs):
180		if axis is None: return self.flatten().sum(axis=0,out=out,**kwargs)
181		axis=misc.normalize_axis(axis,self.ndim)
182		out = self.new(self.value.sum(axis,**kwargs), 
183			self.coef1.sum(axis,**kwargs), self.coef2.sum(axis,**kwargs))
184		return out
185
186	@classmethod
187	def concatenate(cls,elems,axis=0):
188		axis1,axis2 = (axis,axis) if axis>=0 else (axis-1,axis-2)
189		elems2 = tuple(cls(e) for e in elems)
190		size_ad = max(e.size_ad for e in elems2)
191		assert all((e.size_ad==size_ad or e.size_ad==0) for e in elems2)
192		return cls( 
193		np.concatenate(tuple(e.value for e in elems2), axis=axis), 
194		np.concatenate(tuple(e.coef1 if e.size_ad==size_ad else 
195			np.zeros_like(e.coef1,shape=e.shape+(size_ad,)) for e in elems2),axis=axis1),
196		np.concatenate(tuple(e.coef2 if e.size_ad==size_ad else 
197			np.zeros_like(e.coef2,shape=e.shape+(size_ad,size_ad)) for e in elems2),axis=axis2))
198
199	def apply_linear_operator(self,op):
200		return self.new(op(self.value),
201			misc.apply_linear_operator(op,self.coef1,flatten_ndim=1),
202			misc.apply_linear_operator(op,self.coef2,flatten_ndim=2))

A class for dense forward second order automatic differentiation

denseAD2(value, coef1=None, coef2=None, broadcast_ad=False)
19	def __init__(self,value,coef1=None,coef2=None,broadcast_ad=False):
20		if self.is_ad(value): # Case where one should just reproduce value
21			assert coef1 is None and coef2 is None
22			self.value,self.coef1,self.coef2=value.value,value.coef1,value.coef2
23			return
24		if ad_generic.is_ad(value):
25			raise Base.ADCastError(f"Attempting to cast {type(value)} to incompatible type denseAD2")
26		self.value = ad_generic.asarray(value)
27		self.coef1 = (np.zeros_like(value,shape=self.shape+(0,)) if coef1 is None 
28			else misc._test_or_broadcast_ad(coef1,self.shape,broadcast_ad) )
29		self.coef2 = (np.zeros_like(value,shape=self.shape+(0,0)) if coef2 is None 
30			else misc._test_or_broadcast_ad(coef2,self.shape,broadcast_ad,2) )
value
coef1
coef2
@classmethod
def order(cls):
32	@classmethod
33	def order(cls): return 2
def copy(self, order='C'):
34	def copy(self,order='C'):
35		return self.new(self.value.copy(order=order),
36			self.coef1.copy(order=order),self.coef2.copy(order=order))
def as_tuple(self):
37	def as_tuple(self): return self.value,self.coef1,self.coef2
def as_func(self, h):
50	def as_func(self,h):
51		"""Replaces the symbolic perturbation with h"""
52		value,coef1,coef2 = (misc.add_ndim(e,h.ndim-1) for e in (self.value,self.coef1,self.coef2))
53		hh = np.expand_dims(h,axis=0) * np.expand_dims(h,axis=1)
54		return value+(coef1*h).sum(axis=self.ndim) + 0.5*(hh*coef2).sum(axis=(self.ndim,self.ndim+1))

Replaces the symbolic perturbation with h

@classmethod
def compose(cls, a, t):
101	@classmethod
102	def compose(cls,a,t):
103		assert cls.is_ad(a) and all(cls.is_ad(b) for b in t)
104		b = np.moveaxis(cls.concatenate(t,axis=0),0,-1)
105		coef1 = (_add_dim(a.coef1)*b.coef1).sum(axis=-2)
106		coef2_pure = (_add_dim2(a.coef1)*b.coef2).sum(axis=-3)
107		shape_factor = b.shape[:-1]
108		mixed = ( b.coef1.reshape(shape_factor+(a.size_ad,1,b.size_ad,1))
109			     *b.coef1.reshape(shape_factor+(1,a.size_ad,1,b.size_ad)) )
110		coef2_mixed = (_add_dim2(a.coef2)*mixed).sum(axis=-3).sum(axis=-3)
111		return cls.new(a.value,coef1,coef2_pure+coef2_mixed)
size_ad
114	@property
115	def size_ad(self):  return self.coef1.shape[-1]
def to_first(self):
117	def to_first(self): return Dense.denseAD(self.value,self.coef1)
def gradient(self, i=None):
118	def gradient(self,i=None): 
119		"""Returns the gradient, or the i-th component of the gradient if specified."""
120		grad = np.moveaxis(self.coef1,-1,0)
121		return grad if i is None else grad[i]

Returns the gradient, or the i-th component of the gradient if specified.

def hessian(self, i=None, j=None):
122	def hessian(self,i=None,j=None): 
123		"""Returns the hessian, or component (i,j) of the hessian if specified."""
124		assert (i is None) == (j is None)
125		hess = np.moveaxis(self.coef2,(-2,-1),(0,1))
126		return hess if i is None else hess[i,j]

Returns the hessian, or component (i,j) of the hessian if specified.

def reshape(self, shape, order='C'):
149	def reshape(self,shape,order='C'):
150		value = self.value.reshape(shape,order=order)
151		return self.new(value,self.coef1.reshape(value.shape+(self.size_ad,),order=order),
152			self.coef2.reshape(value.shape+(self.size_ad,self.size_ad),order=order))
def broadcast_to(self, shape):
154	def broadcast_to(self,shape):
155		shape1 = shape+(self.size_ad,)
156		shape2 = shape+(self.size_ad,self.size_ad)
157		return self.new(np.broadcast_to(self.value,shape), 
158			np.broadcast_to(self.coef1,shape1), np.broadcast_to(self.coef2,shape2))
def pad(self, pad_width, *args, constant_values=0, **kwargs):
160	def pad(self,pad_width,*args,constant_values=0,**kwargs):
161		return self.new(
162			np.pad(self.value,pad_width,*args,constant_values=constant_values,**kwargs),
163			np.pad(self.coef1,pad_width+((0,0),),*args,constant_values=0,**kwargs),
164			np.pad(self.coef2,pad_width+((0,0),(0,0),),*args,constant_values=0,**kwargs),)
def transpose(self, axes=None):
166	def transpose(self,axes=None):
167		if axes is None: axes = tuple(reversed(range(self.ndim)))
168		axes1 = tuple(axes) +(self.ndim,)
169		axes2 = tuple(axes) +(self.ndim,self.ndim+1)
170		return self.new(self.value.transpose(axes),
171			self.coef1.transpose(axes1),self.coef2.transpose(axes2))
def allclose(self, other, *args, **kwargs):
173	def allclose(self,other,*args,**kwargs): 
174		return np.allclose(self.value,other.value,*args,**kwargs) and \
175		       np.allclose(self.coef1,other.coef1,*args,**kwargs) and \
176		       np.allclose(self.coef2,other.coef2,*args,**kwargs)
def sum(self, axis=None, out=None, **kwargs):
179	def sum(self,axis=None,out=None,**kwargs):
180		if axis is None: return self.flatten().sum(axis=0,out=out,**kwargs)
181		axis=misc.normalize_axis(axis,self.ndim)
182		out = self.new(self.value.sum(axis,**kwargs), 
183			self.coef1.sum(axis,**kwargs), self.coef2.sum(axis,**kwargs))
184		return out
@classmethod
def concatenate(cls, elems, axis=0):
186	@classmethod
187	def concatenate(cls,elems,axis=0):
188		axis1,axis2 = (axis,axis) if axis>=0 else (axis-1,axis-2)
189		elems2 = tuple(cls(e) for e in elems)
190		size_ad = max(e.size_ad for e in elems2)
191		assert all((e.size_ad==size_ad or e.size_ad==0) for e in elems2)
192		return cls( 
193		np.concatenate(tuple(e.value for e in elems2), axis=axis), 
194		np.concatenate(tuple(e.coef1 if e.size_ad==size_ad else 
195			np.zeros_like(e.coef1,shape=e.shape+(size_ad,)) for e in elems2),axis=axis1),
196		np.concatenate(tuple(e.coef2 if e.size_ad==size_ad else 
197			np.zeros_like(e.coef2,shape=e.shape+(size_ad,size_ad)) for e in elems2),axis=axis2))
def apply_linear_operator(self, op):
199	def apply_linear_operator(self,op):
200		return self.new(op(self.value),
201			misc.apply_linear_operator(op,self.coef1,flatten_ndim=1),
202			misc.apply_linear_operator(op,self.coef2,flatten_ndim=2))
def identity(*args, **kwargs):
207def identity(*args,**kwargs):
208	arr = Dense.identity(*args,**kwargs)
209	return denseAD2(arr.value,arr.coef,
210		np.zeros_like(arr.value,shape=arr.shape+(arr.size_ad,arr.size_ad)))
def register(*args, **kwargs):
212def register(*args,**kwargs):
213	return Dense.register(*args,**kwargs,ident=identity)