Coverage for /builds/kinetik161/ase/ase/optimize/sciopt.py: 68.22%
107 statements
coverage.py v7.2.7, created at 2023-12-10 11:04 +0000
from typing import IO, Optional, Union

import numpy as np
import scipy.optimize as opt

from ase import Atoms
from ase.optimize.optimize import Optimizer


class Converged(Exception):
    pass


class OptimizerConvergenceError(Exception):
    pass


class SciPyOptimizer(Optimizer):
    """General interface for SciPy optimizers

    Only the call to the optimizer is still needed.
    """

    def __init__(
        self,
        atoms: Atoms,
        logfile: Union[IO, str] = '-',
        trajectory: Optional[str] = None,
        callback_always: bool = False,
        alpha: float = 70.0,
        master: Optional[bool] = None,
        force_consistent=Optimizer._deprecated,
    ):
        """Initialize object

        Parameters:

        atoms: Atoms object
            The Atoms object to relax.

        trajectory: string
            Trajectory file used to store the atomic positions during the
            optimization.

        logfile: file object or str
            If *logfile* is a string, a file with that name will be opened.
            Use '-' for stdout.

        callback_always: bool
            Whether the callback should be run after each force call (also
            within the line search).

        alpha: float
            Initial guess for the Hessian (curvature of the energy surface).
            A conservative value of 70.0 is the default, but fewer steps may
            be needed to converge if a lower value is used. However, a lower
            value also increases the risk of instability.

        master: boolean
            Defaults to None, which causes only rank 0 to save files. If
            set to true, this rank will save files.
        """
        restart = None
        Optimizer.__init__(self, atoms, restart, logfile, trajectory,
                           master, force_consistent=force_consistent)
        self.force_calls = 0
        self.callback_always = callback_always
        self.H0 = alpha
        self.max_steps = 0
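
    # A note on the ``alpha`` scaling (a reading of the code, not a separate
    # algorithm): SciPy's quasi-Newton minimizers start from the identity as
    # the initial Hessian, so ``f`` and ``fprime`` below divide the energy
    # and gradient by ``self.H0``.  Minimizing E(x)/alpha with an initial
    # Hessian of I is equivalent to minimizing E(x) with an initial Hessian
    # of alpha*I, a reasonable curvature guess for typical interatomic
    # potentials.  Consistently, ``run`` passes ``fmax / H0`` to
    # ``call_fmin``.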

    def x0(self):
        """Return x0 in a way SciPy can use

        This class is mostly usable for subclasses wanting to redefine the
        parameters (and the objective function)."""
        return self.optimizable.get_positions().reshape(-1)

    def f(self, x):
        """Objective function for use of the optimizers"""
        self.optimizable.set_positions(x.reshape(-1, 3))
        # Scale the problem as SciPy uses I as initial Hessian.
        return self.optimizable.get_potential_energy() / self.H0

    def fprime(self, x):
        """Gradient of the objective function for use of the optimizers"""
        self.optimizable.set_positions(x.reshape(-1, 3))
        self.force_calls += 1

        if self.callback_always:
            self.callback(x)

        # Remember that forces are minus the gradient!
        # Scale the problem as SciPy uses I as initial Hessian.
        return -self.optimizable.get_forces().reshape(-1) / self.H0

    def callback(self, x):
        """Callback function to be run after each iteration by SciPy

        This should also be called once before optimization starts, as SciPy
        optimizers only call it after each iteration, while ASE optimizers
        call something similar before as well.

        :meth:`callback` can raise a :exc:`Converged` exception to signal
        that the optimization is complete.  This will be silently ignored by
        :meth:`run`.
        """
        if self.nsteps < self.max_steps:
            self.nsteps += 1
            f = self.optimizable.get_forces()
            self.log(f)
            self.call_observers()
            if self.converged(f):
                raise Converged

    def run(self, fmax=0.05, steps=100000000):
        self.fmax = fmax

        try:
            # As SciPy does not log the zeroth iteration, we do that manually
            if self.nsteps == 0:
                self.log()
                self.call_observers()

            self.max_steps = steps + self.nsteps

            # Scale the problem as SciPy uses I as initial Hessian.
            self.call_fmin(fmax / self.H0, steps)
        except Converged:
            pass
        return self.converged()

    def dump(self, data):
        pass

    def load(self):
        pass

    def call_fmin(self, fmax, steps):
        raise NotImplementedError
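

# A subclass only has to translate ``fmax`` and ``steps`` into the keyword
# arguments of a single SciPy routine, as the concrete classes below do.
# As a minimal sketch (a hypothetical wrapper, not part of this module),
# an L-BFGS-B based optimizer could look roughly like this, assuming
# scipy.optimize.minimize:
#
#     class SciPyFminLBFGSB(SciPyOptimizer):
#         def call_fmin(self, fmax, steps):
#             opt.minimize(self.f, self.x0(), jac=self.fprime,
#                          method='L-BFGS-B', callback=self.callback,
#                          options={'maxiter': steps, 'gtol': fmax * 0.1})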


class SciPyFminCG(SciPyOptimizer):
    """Non-linear (Polak-Ribiere) conjugate gradient algorithm"""

    def call_fmin(self, fmax, steps):
        output = opt.fmin_cg(self.f,
                             self.x0(),
                             fprime=self.fprime,
                             # args=(),
                             gtol=fmax * 0.1,  # Should never be reached
                             norm=np.inf,
                             # epsilon=
                             maxiter=steps,
                             full_output=1,
                             disp=0,
                             # retall=0,
                             callback=self.callback)
        warnflag = output[-1]
        if warnflag == 2:
            raise OptimizerConvergenceError(
                'Warning: Desired error not necessarily achieved '
                'due to precision loss')


class SciPyFminBFGS(SciPyOptimizer):
    """Quasi-Newton method (Broyden-Fletcher-Goldfarb-Shanno)"""

    def call_fmin(self, fmax, steps):
        output = opt.fmin_bfgs(self.f,
                               self.x0(),
                               fprime=self.fprime,
                               # args=(),
                               gtol=fmax * 0.1,  # Should never be reached
                               norm=np.inf,
                               # epsilon=1.4901161193847656e-08,
                               maxiter=steps,
                               full_output=1,
                               disp=0,
                               # retall=0,
                               callback=self.callback)
        warnflag = output[-1]
        if warnflag == 2:
            raise OptimizerConvergenceError(
                'Warning: Desired error not necessarily achieved '
                'due to precision loss')
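

# A minimal usage sketch for the gradient-based classes above (assuming the
# EMT calculator and ase.build, which are not imported by this module):
#
#     from ase.build import bulk
#     from ase.calculators.emt import EMT
#
#     atoms = bulk('Cu', cubic=True)
#     atoms.rattle(0.1)
#     atoms.calc = EMT()
#     SciPyFminBFGS(atoms, trajectory='opt.traj').run(fmax=0.05)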


class SciPyGradientlessOptimizer(Optimizer):
    """General interface for gradient-less SciPy optimizers

    Only the call to the optimizer is still needed.

    Note: If you redefine x0() and f(), you don't even need an atoms object.
    Redefining these also allows you to specify an arbitrary objective
    function.

    XXX: This is still a work in progress
    """

    def __init__(self, atoms, logfile='-', trajectory=None,
                 callback_always=False, master=None,
                 force_consistent=Optimizer._deprecated):
        """Initialize object

        Parameters:

        atoms: Atoms object
            The Atoms object to relax.

        trajectory: string
            Trajectory file used to store the atomic positions during the
            optimization.

        logfile: file object or str
            If *logfile* is a string, a file with that name will be opened.
            Use '-' for stdout.

        callback_always: bool
            Whether the callback should be run after each function call.

        master: boolean
            Defaults to None, which causes only rank 0 to save files. If
            set to true, this rank will save files.
        """
        restart = None
        Optimizer.__init__(self, atoms, restart, logfile, trajectory,
                           master, force_consistent=force_consistent)
        self.function_calls = 0
        self.callback_always = callback_always

    def x0(self):
        """Return x0 in a way SciPy can use

        This class is mostly usable for subclasses wanting to redefine the
        parameters (and the objective function)."""
        return self.optimizable.get_positions().reshape(-1)

    def f(self, x):
        """Objective function for use of the optimizers"""
        self.optimizable.set_positions(x.reshape(-1, 3))
        self.function_calls += 1
        return self.optimizable.get_potential_energy()

    def callback(self, x):
        """Callback function to be run after each iteration by SciPy

        This should also be called once before optimization starts, as SciPy
        optimizers only call it after each iteration, while ASE optimizers
        call something similar before as well.
        """
        # We can't assume that forces are available!
        # f = self.optimizable.get_forces()
        # self.log(f)
        self.call_observers()
        # if self.converged(f):
        #     raise Converged
        self.nsteps += 1

    def run(self, ftol=0.01, xtol=0.01, steps=100000000):
        self.xtol = xtol
        self.ftol = ftol
        # As SciPy does not log the zeroth iteration, we do that manually
        self.callback(None)
        try:
            self.call_fmin(xtol, ftol, steps)
        except Converged:
            pass
        return self.converged()

    def dump(self, data):
        pass

    def load(self):
        pass

    def call_fmin(self, xtol, ftol, steps):
        raise NotImplementedError


class SciPyFmin(SciPyGradientlessOptimizer):
    """Nelder-Mead Simplex algorithm

    Uses only function calls.

    XXX: This is still a work in progress
    """

    def call_fmin(self, xtol, ftol, steps):
        opt.fmin(self.f,
                 self.x0(),
                 # args=(),
                 xtol=xtol,
                 ftol=ftol,
                 maxiter=steps,
                 # maxfun=None,
                 # full_output=1,
                 disp=0,
                 # retall=0,
                 callback=self.callback)
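

# A minimal usage sketch (assuming the same EMT setup as above); note that
# the gradient-less classes converge on ``xtol``/``ftol`` rather than on a
# force criterion:
#
#     opt = SciPyFmin(atoms, logfile='-')
#     opt.run(ftol=1e-4, xtol=1e-3, steps=2000)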


class SciPyFminPowell(SciPyGradientlessOptimizer):
    """Powell's (modified) direction set method

    Uses only function calls.

    XXX: This is still a work in progress
    """

    def __init__(self, *args, **kwargs):
        """Parameters:

        direc: float
            How much to change x initially. Defaults to 0.04.
        """
        direc = kwargs.pop('direc', None)
        SciPyGradientlessOptimizer.__init__(self, *args, **kwargs)

        if direc is None:
            self.direc = np.eye(len(self.x0()), dtype=float) * 0.04
        else:
            self.direc = np.eye(len(self.x0()), dtype=float) * direc

    def call_fmin(self, xtol, ftol, steps):
        opt.fmin_powell(self.f,
                        self.x0(),
                        # args=(),
                        xtol=xtol,
                        ftol=ftol,
                        maxiter=steps,
                        # maxfun=None,
                        # full_output=1,
                        disp=0,
                        # retall=0,
                        callback=self.callback,
                        direc=self.direc)
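

# A minimal usage sketch (assuming the same EMT setup as above); ``direc``
# scales the initial search directions, which are the coordinate axes:
#
#     opt = SciPyFminPowell(atoms, direc=0.01)
#     opt.run(ftol=1e-4, xtol=1e-3, steps=2000)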