From 98aad5ea6886677a560300b82b73d7188c743613 Mon Sep 17 00:00:00 2001 From: Joachim van der Herten Date: Tue, 22 Aug 2017 00:46:16 +0200 Subject: [PATCH 1/8] Added support to configure a callback in BayesianOptimizer, allowing control over the models between iterations. Slightly reworked MCMCAcquisition to support this scenario --- GPflowOpt/acquisition/acquisition.py | 24 +++++++++--- GPflowOpt/bo.py | 24 +++++++++--- testing/test_acquisition.py | 13 ++++++- testing/test_optimizers.py | 57 ++++++++++++++++++++++++++++ 4 files changed, 105 insertions(+), 13 deletions(-) diff --git a/GPflowOpt/acquisition/acquisition.py b/GPflowOpt/acquisition/acquisition.py index 390111e..3c536a3 100644 --- a/GPflowOpt/acquisition/acquisition.py +++ b/GPflowOpt/acquisition/acquisition.py @@ -383,19 +383,23 @@ class MCMCAcquistion(AcquisitionSum): def __init__(self, acquisition, n_slices, **kwargs): assert isinstance(acquisition, Acquisition) assert n_slices > 0 - - copies = [copy.deepcopy(acquisition) for _ in range(n_slices - 1)] - for c in copies: - c.optimize_restarts = 0 - # the call to the constructor of the parent classes, will optimize acquisition, so it obtains the MLE solution. - super(MCMCAcquistion, self).__init__([acquisition] + copies) + super(MCMCAcquistion, self).__init__([acquisition]*n_slices) + self._needs_new_copies = True self._sample_opt = kwargs def _optimize_models(self): # Optimize model #1 self.operands[0]._optimize_models() + # Copy it again if needed due to changed free state + if self._needs_new_copies: + new_copies = [copy.deepcopy(self.operands[0]) for _ in range(len(self.operands) - 1)] + for c in new_copies: + c.optimize_restarts = 0 + self.operands = ParamList([self.operands[0]] + new_copies) + self._needs_new_copies = False + # Draw samples using HMC # Sample each model of the acquisition function - results in a list of 2D ndarrays. hypers = np.hstack([model.sample(len(self.operands), **self._sample_opt) for model in self.models]) @@ -419,3 +423,11 @@ def set_data(self, X, Y): def build_acquisition(self, Xcand): # Average the predictions of the copies. return 1. / len(self.operands) * super(MCMCAcquistion, self).build_acquisition(Xcand) + + def _kill_autoflow(self): + """ + Following the recompilation of models, the free state might have changed. This means updating the samples can + cause inconsistencies and errors. Flag for recreation on next optimize + """ + super(MCMCAcquistion, self)._kill_autoflow() + self._needs_new_copies = True diff --git a/GPflowOpt/bo.py b/GPflowOpt/bo.py index d153d90..df4c9d3 100644 --- a/GPflowOpt/bo.py +++ b/GPflowOpt/bo.py @@ -18,9 +18,9 @@ from scipy.optimize import OptimizeResult from .acquisition import Acquisition, MCMCAcquistion -from .optim import Optimizer, SciPyOptimizer -from .objective import ObjectiveWrapper from .design import Design, EmptyDesign +from .objective import ObjectiveWrapper +from .optim import Optimizer, SciPyOptimizer from .pareto import non_dominated_sort @@ -32,7 +32,8 @@ class BayesianOptimizer(Optimizer): Additionally, it is configured with a separate optimizer for the acquisition function. """ - def __init__(self, domain, acquisition, optimizer=None, initial=None, scaling=True, hyper_draws=None): + def __init__(self, domain, acquisition, optimizer=None, initial=None, scaling=True, hyper_draws=None, + iter_callback=None): """ :param Domain domain: The optimization space. :param Acquisition acquisition: The acquisition function to optimize over the domain. 
@@ -51,6 +52,12 @@ def __init__(self, domain, acquisition, optimizer=None, initial=None, scaling=Tr are obtained using Hamiltonian MC. (see `GPflow documentation `_ for details) for each model. The acquisition score is computed for each draw, and averaged. + :param callable iter_callback: (optional) this function or object will be called after each evaluate, after the + data of all models has been updated with all models as retrieved by acquisition.models as argument without + the wrapping model handling any scaling . This allows custom model optimization strategies to be implemented. + All manipulations of GPflow models are permitted. Combined with the optimize_restarts parameter of + :class:`~.Acquisition` this allows several scenarios: do the optimization manually from the callback + (optimize_restarts equals zero), orchoose the starting point + some random restarts (optimize_restarts > 0). """ assert isinstance(acquisition, Acquisition) assert hyper_draws is None or hyper_draws > 0 @@ -69,6 +76,8 @@ def __init__(self, domain, acquisition, optimizer=None, initial=None, scaling=Tr initial = initial or EmptyDesign(domain) self.set_initial(initial.generate()) + self._iter_callback = iter_callback + @Optimizer.domain.setter def domain(self, dom): assert (self.domain.size == dom.size) @@ -86,6 +95,8 @@ def _update_model_data(self, newX, newY): assert self.acquisition.data[0].shape[1] == newX.shape[-1] assert self.acquisition.data[1].shape[1] == newY.shape[-1] assert newX.shape[0] == newY.shape[0] + if newX.size == 0: + return X = np.vstack((self.acquisition.data[0], newX)) Y = np.vstack((self.acquisition.data[1], newY)) self.acquisition.set_data(X, Y) @@ -175,8 +186,7 @@ def _optimize(self, fx, n_iter): :return: OptimizeResult object """ - assert(isinstance(fx, ObjectiveWrapper)) - + assert (isinstance(fx, ObjectiveWrapper)) # Evaluate and add the initial design (if any) initial = self.get_initial() values = fx(initial) @@ -190,6 +200,10 @@ def inverse_acquisition(x): # Optimization loop for i in range(n_iter): + # If callback specified, and acquisition has the setup flag enabled (indicating an upcoming compilation, + # run the callback. 
+ if self._iter_callback and self.acquisition._needs_setup: + self._iter_callback([m.wrapped for m in self.acquisition.models]) result = self.optimizer.optimize(inverse_acquisition) self._update_model_data(result.x, fx(result.x)) diff --git a/testing/test_acquisition.py b/testing/test_acquisition.py index 7b37319..a3fa300 100644 --- a/testing/test_acquisition.py +++ b/testing/test_acquisition.py @@ -218,9 +218,18 @@ def test_marginalized_score(self, acquisition): ei_mcmc = acquisition.evaluate(Xt) np.testing.assert_almost_equal(ei_mle, ei_mcmc, decimal=5) - @parameterized.expand(list(zip([aggregations[2]]))) - def test_mcmc_acq_models(self, acquisition): + def test_mcmc_acq(self): + acquisition = GPflowOpt.acquisition.MCMCAcquistion( + GPflowOpt.acquisition.ExpectedImprovement(create_parabola_model(domain)), 5) + for oper in acquisition.operands: + self.assertListEqual(acquisition.models, oper.models) + self.assertEqual(acquisition.operands[0], oper) + self.assertTrue(acquisition._needs_new_copies) + acquisition._optimize_models() self.assertListEqual(acquisition.models, acquisition.operands[0].models) + for oper in acquisition.operands[1:]: + self.assertNotEqual(acquisition.operands[0], oper) + self.assertFalse(acquisition._needs_new_copies) class TestJointAcquisition(unittest.TestCase): diff --git a/testing/test_optimizers.py b/testing/test_optimizers.py index 87ce3db..37b1470 100644 --- a/testing/test_optimizers.py +++ b/testing/test_optimizers.py @@ -288,6 +288,62 @@ def test_mcmc(self): self.assertTrue(np.allclose(result.x, 0), msg="Optimizer failed to find optimum") self.assertTrue(np.allclose(result.fun, 0), msg="Incorrect function value returned") + def test_callback(self): + class DummyCallback(object): + def __init__(self): + self.counter = 0 + + def __call__(self, models): + self.counter += 1 + + c = DummyCallback() + optimizer = GPflowOpt.BayesianOptimizer(self.domain, self.acquisition, iter_callback=c) + result = optimizer.optimize(lambda X: parabola2d(X)[0], n_iter=2) + self.assertEqual(c.counter, 2) + + def test_callback_recompile(self): + class DummyCallback(object): + def __init__(self): + self.recompile = False + + def __call__(self, models): + c = np.random.randint(2, 10) + models[0].kern.variance.prior = GPflow.priors.Gamma(c, 1./c) + self.recompile = models[0]._needs_recompile + + c = DummyCallback() + optimizer = GPflowOpt.BayesianOptimizer(self.domain, self.acquisition, iter_callback=c) + self.acquisition.evaluate(np.zeros((1,2))) # Make sure its run and setup to skip + result = optimizer.optimize(lambda X: parabola2d(X)[0], n_iter=1) + self.assertFalse(c.recompile) + result = optimizer.optimize(lambda X: parabola2d(X)[0], n_iter=1) + self.assertTrue(c.recompile) + self.assertFalse(self.acquisition.models[0]._needs_recompile) + + def test_callback_recompile_mcmc(self): + class DummyCallback(object): + def __init__(self): + self.no_models = 0 + + def __call__(self, models): + c = np.random.randint(2, 10) + models[0].kern.variance.prior = GPflow.priors.Gamma(c, 1. 
/ c) + self.no_models = len(models) + + c = DummyCallback() + optimizer = GPflowOpt.BayesianOptimizer(self.domain, self.acquisition, hyper_draws=5, iter_callback=c) + opers = optimizer.acquisition.operands + result = optimizer.optimize(lambda X: parabola2d(X)[0], n_iter=1) + self.assertEqual(c.no_models, 1) + self.assertEqual(id(opers[0]), id(optimizer.acquisition.operands[0])) + for op1, op2 in zip(opers[1:], optimizer.acquisition.operands[1:]): + self.assertNotEqual(id(op1), id(op2)) + opers = optimizer.acquisition.operands + result = optimizer.optimize(lambda X: parabola2d(X)[0], n_iter=1) + self.assertEqual(id(opers[0]), id(optimizer.acquisition.operands[0])) + for op1, op2 in zip(opers[1:], optimizer.acquisition.operands[1:]): + self.assertNotEqual(id(op1), id(op2)) + class TestSilentOptimization(unittest.TestCase): @contextmanager @@ -323,3 +379,4 @@ def _optimize(self, objective): opt.optimize(None) output = out.getvalue().strip() self.assertEqual(output, '') + From 107b5c5983ecb5072f081bb62185f79d068c904f Mon Sep 17 00:00:00 2001 From: Joachim van der Herten Date: Tue, 22 Aug 2017 09:36:41 +0200 Subject: [PATCH 2/8] Adjusted documentation, parameter renaming and making tests more strict --- GPflowOpt/acquisition/acquisition.py | 4 ++-- GPflowOpt/bo.py | 6 +++--- testing/test_acquisition.py | 8 ++++++-- testing/test_optimizers.py | 10 +++++----- 4 files changed, 16 insertions(+), 12 deletions(-) diff --git a/GPflowOpt/acquisition/acquisition.py b/GPflowOpt/acquisition/acquisition.py index 3c536a3..596f53d 100644 --- a/GPflowOpt/acquisition/acquisition.py +++ b/GPflowOpt/acquisition/acquisition.py @@ -375,8 +375,8 @@ class MCMCAcquistion(AcquisitionSum): """ Apply MCMC over the hyperparameters of an acquisition function (= over the hyperparameters of the contained models). - The models passed into an object of this class are optimized with MLE, and then further sampled with HMC. - These hyperparameter samples are then set in copies of the acquisition. + The models passed into an object of this class are optimized with MLE (fast burn-in), and then further sampled with + HMC. These hyperparameter samples are then set in copies of the acquisition. For evaluating the underlying acquisition function, the predictions of the acquisition copies are averaged. """ diff --git a/GPflowOpt/bo.py b/GPflowOpt/bo.py index df4c9d3..8aefcc8 100644 --- a/GPflowOpt/bo.py +++ b/GPflowOpt/bo.py @@ -33,7 +33,7 @@ class BayesianOptimizer(Optimizer): """ def __init__(self, domain, acquisition, optimizer=None, initial=None, scaling=True, hyper_draws=None, - iter_callback=None): + callback=None): """ :param Domain domain: The optimization space. :param Acquisition acquisition: The acquisition function to optimize over the domain. @@ -52,7 +52,7 @@ def __init__(self, domain, acquisition, optimizer=None, initial=None, scaling=Tr are obtained using Hamiltonian MC. (see `GPflow documentation `_ for details) for each model. The acquisition score is computed for each draw, and averaged. - :param callable iter_callback: (optional) this function or object will be called after each evaluate, after the + :param callable callback: (optional) this function or object will be called after each evaluate, after the data of all models has been updated with all models as retrieved by acquisition.models as argument without the wrapping model handling any scaling . This allows custom model optimization strategies to be implemented. All manipulations of GPflow models are permitted. 
Combined with the optimize_restarts parameter of @@ -76,7 +76,7 @@ def __init__(self, domain, acquisition, optimizer=None, initial=None, scaling=Tr initial = initial or EmptyDesign(domain) self.set_initial(initial.generate()) - self._iter_callback = iter_callback + self._iter_callback = callback @Optimizer.domain.setter def domain(self, dom): diff --git a/testing/test_acquisition.py b/testing/test_acquisition.py index a3fa300..d9b0461 100644 --- a/testing/test_acquisition.py +++ b/testing/test_acquisition.py @@ -220,7 +220,7 @@ def test_marginalized_score(self, acquisition): def test_mcmc_acq(self): acquisition = GPflowOpt.acquisition.MCMCAcquistion( - GPflowOpt.acquisition.ExpectedImprovement(create_parabola_model(domain)), 5) + GPflowOpt.acquisition.ExpectedImprovement(create_parabola_model(domain)), 10) for oper in acquisition.operands: self.assertListEqual(acquisition.models, oper.models) self.assertEqual(acquisition.operands[0], oper) @@ -230,7 +230,11 @@ def test_mcmc_acq(self): for oper in acquisition.operands[1:]: self.assertNotEqual(acquisition.operands[0], oper) self.assertFalse(acquisition._needs_new_copies) - + acquisition.setup() + Xt = np.random.rand(20, 2) * 2 - 1 + ei_mle = acquisition.operands[0].evaluate(Xt) + ei_mcmc = acquisition.evaluate(Xt) + np.testing.assert_almost_equal(ei_mle, ei_mcmc, decimal=5) class TestJointAcquisition(unittest.TestCase): diff --git a/testing/test_optimizers.py b/testing/test_optimizers.py index 37b1470..7783dc7 100644 --- a/testing/test_optimizers.py +++ b/testing/test_optimizers.py @@ -214,8 +214,8 @@ def test_optimize_multi_objective(self): result = optimizer.optimize(vlmop2, n_iter=2) self.assertTrue(result.success) self.assertEqual(result.nfev, 2, "Only 2 evaluations permitted") - self.assertTupleEqual(result.x.shape, (9, 2)) - self.assertTupleEqual(result.fun.shape, (9, 2)) + self.assertTupleEqual(result.x.shape, (7, 2)) + self.assertTupleEqual(result.fun.shape, (7, 2)) _, dom = GPflowOpt.pareto.non_dominated_sort(result.fun) self.assertTrue(np.all(dom==0)) @@ -297,7 +297,7 @@ def __call__(self, models): self.counter += 1 c = DummyCallback() - optimizer = GPflowOpt.BayesianOptimizer(self.domain, self.acquisition, iter_callback=c) + optimizer = GPflowOpt.BayesianOptimizer(self.domain, self.acquisition, callback=c) result = optimizer.optimize(lambda X: parabola2d(X)[0], n_iter=2) self.assertEqual(c.counter, 2) @@ -312,7 +312,7 @@ def __call__(self, models): self.recompile = models[0]._needs_recompile c = DummyCallback() - optimizer = GPflowOpt.BayesianOptimizer(self.domain, self.acquisition, iter_callback=c) + optimizer = GPflowOpt.BayesianOptimizer(self.domain, self.acquisition, callback=c) self.acquisition.evaluate(np.zeros((1,2))) # Make sure its run and setup to skip result = optimizer.optimize(lambda X: parabola2d(X)[0], n_iter=1) self.assertFalse(c.recompile) @@ -331,7 +331,7 @@ def __call__(self, models): self.no_models = len(models) c = DummyCallback() - optimizer = GPflowOpt.BayesianOptimizer(self.domain, self.acquisition, hyper_draws=5, iter_callback=c) + optimizer = GPflowOpt.BayesianOptimizer(self.domain, self.acquisition, hyper_draws=5, callback=c) opers = optimizer.acquisition.operands result = optimizer.optimize(lambda X: parabola2d(X)[0], n_iter=1) self.assertEqual(c.no_models, 1) From 58b948295f4de7c61fd1f1969ef71ffc8c1432b5 Mon Sep 17 00:00:00 2001 From: Joachim van der Herten Date: Tue, 22 Aug 2017 23:49:15 +0200 Subject: [PATCH 3/8] Fixes in tests --- testing/test_acquisition.py | 2 +- testing/test_modelwrapper.py | 4 
++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/test_acquisition.py b/testing/test_acquisition.py index 28d2501..f610938 100644 --- a/testing/test_acquisition.py +++ b/testing/test_acquisition.py @@ -230,7 +230,7 @@ def test_mcmc_acq(self): for oper in acquisition.operands[1:]: self.assertNotEqual(acquisition.operands[0], oper) self.assertFalse(acquisition._needs_new_copies) - acquisition.setup() + acquisition._setup() Xt = np.random.rand(20, 2) * 2 - 1 ei_mle = acquisition.operands[0].evaluate(Xt) ei_mcmc = acquisition.evaluate(Xt) diff --git a/testing/test_modelwrapper.py b/testing/test_modelwrapper.py index 0de5971..6266b74 100644 --- a/testing/test_modelwrapper.py +++ b/testing/test_modelwrapper.py @@ -138,8 +138,8 @@ def test_parent_hook(self): self.assertTrue(self.m._needs_recompile) self.assertFalse(hasattr(p.predictor, '_predict_f_AF_storage')) - self.assertEqual(m.highest_parent.get_free_state, p.get_free_state) - m.highest_parent._needs_setup = True + self.assertEqual(self.m.highest_parent.get_free_state, p.get_free_state) + self.m.highest_parent._needs_setup = True self.assertTrue(hasattr(p, '_needs_setup')) self.assertTrue(p._needs_setup) From 86fedb19a80d88353b702aca2c1b25d91694243e Mon Sep 17 00:00:00 2001 From: Joachim van der Herten Date: Wed, 23 Aug 2017 10:35:52 +0200 Subject: [PATCH 4/8] Added a jitchol-like callback for raising the likelihood variance --- GPflowOpt/bo.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/GPflowOpt/bo.py b/GPflowOpt/bo.py index 8aefcc8..d80fb50 100644 --- a/GPflowOpt/bo.py +++ b/GPflowOpt/bo.py @@ -16,6 +16,7 @@ import numpy as np from scipy.optimize import OptimizeResult +import tensorflow as tf from .acquisition import Acquisition, MCMCAcquistion from .design import Design, EmptyDesign @@ -24,6 +25,23 @@ from .pareto import non_dominated_sort +def jitchol_callback(models): + for m in models: + try: + m.likelihood.variance + except AttributeError: + continue + s = m.get_free_state() + eKdiag = np.mean(np.diag(m.kern.compute_K_symm(m.X.value))) + for e in [0] + [10**ex for ex in range(-6,-1)]: + try: + m.likelihood.variance = m.likelihood.variance.value + e * eKdiag + m.optimize(maxiter=5) + break + except tf.errors.InvalidArgumentError: + m.set_state(s) + + class BayesianOptimizer(Optimizer): """ A traditional Bayesian optimization framework implementation. @@ -33,7 +51,7 @@ class BayesianOptimizer(Optimizer): """ def __init__(self, domain, acquisition, optimizer=None, initial=None, scaling=True, hyper_draws=None, - callback=None): + callback=jitchol_callback): """ :param Domain domain: The optimization space. :param Acquisition acquisition: The acquisition function to optimize over the domain. From 12825692b93dc92637bff26df0b08c62a513eb29 Mon Sep 17 00:00:00 2001 From: Joachim van der Herten Date: Wed, 23 Aug 2017 23:22:49 +0200 Subject: [PATCH 5/8] Improved the jitchol callback --- GPflowOpt/bo.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/GPflowOpt/bo.py b/GPflowOpt/bo.py index 80b6193..6db336c 100644 --- a/GPflowOpt/bo.py +++ b/GPflowOpt/bo.py @@ -17,6 +17,7 @@ import numpy as np from scipy.optimize import OptimizeResult import tensorflow as tf +from GPflow.gpr import GPR from .acquisition import Acquisition, MCMCAcquistion from .design import Design, EmptyDesign @@ -26,10 +27,13 @@ def jitchol_callback(models): + """ + Default callback for BayesianOptimizer. 
For all GPR models, increase the likelihood variance in case of cholesky + faillures. This is similar to the use of jitchol in GPy + :return: + """ for m in models: - try: - m.likelihood.variance - except AttributeError: + if not isinstance(m, GPR): continue s = m.get_free_state() eKdiag = np.mean(np.diag(m.kern.compute_K_symm(m.X.value))) @@ -38,7 +42,7 @@ def jitchol_callback(models): m.likelihood.variance = m.likelihood.variance.value + e * eKdiag m.optimize(maxiter=5) break - except tf.errors.InvalidArgumentError: + except tf.errors.InvalidArgumentError: # pragma: no cover m.set_state(s) From 3331fa0054d39d0b17717bda59c0eb4863b615cc Mon Sep 17 00:00:00 2001 From: Joachim van der Herten Date: Wed, 30 Aug 2017 23:21:34 +0200 Subject: [PATCH 6/8] Addressing code review comments --- gpflowopt/acquisition/acquisition.py | 2 ++ gpflowopt/bo.py | 24 +++++++++++++++--------- testing/test_optimizers.py | 9 +++++++++ 3 files changed, 26 insertions(+), 9 deletions(-) diff --git a/gpflowopt/acquisition/acquisition.py b/gpflowopt/acquisition/acquisition.py index 39e8422..d6d6909 100644 --- a/gpflowopt/acquisition/acquisition.py +++ b/gpflowopt/acquisition/acquisition.py @@ -447,6 +447,8 @@ def build_acquisition(self, Xcand): def _kill_autoflow(self): """ + Flag for recreation on next optimize. + Following the recompilation of models, the free state might have changed. This means updating the samples can cause inconsistencies and errors. Flag for recreation on next optimize """ diff --git a/gpflowopt/bo.py b/gpflowopt/bo.py index af4cb59..39cb02d 100644 --- a/gpflowopt/bo.py +++ b/gpflowopt/bo.py @@ -24,17 +24,23 @@ from .objective import ObjectiveWrapper from .optim import Optimizer, SciPyOptimizer from .pareto import non_dominated_sort +from .models import ModelWrapper def jitchol_callback(models): """ - Default callback for BayesianOptimizer. For all GPR models, increase the likelihood variance in case of cholesky - faillures. This is similar to the use of jitchol in GPy - :return: + Increase the likelihood in case of cholesky faillures. + + This is similar to the use of jitchol in GPy. Default callback for BayesianOptimizer. + Only usable on GPR models, other types are ignored. """ - for m in models: + for m in np.atleast_1d(models): + if isinstance(m, ModelWrapper): + jitchol_callback(m.wrapped) # pragma: no cover + if not isinstance(m, GPR): continue + s = m.get_free_state() eKdiag = np.mean(np.diag(m.kern.compute_K_symm(m.X.value))) for e in [0] + [10**ex for ex in range(-6,-1)]: @@ -74,12 +80,12 @@ def __init__(self, domain, acquisition, optimizer=None, initial=None, scaling=Tr are obtained using Hamiltonian MC. (see `GPflow documentation `_ for details) for each model. The acquisition score is computed for each draw, and averaged. - :param callable callback: (optional) this function or object will be called after each evaluate, after the + :param callable callback: (optional) this function or object will be called, after the data of all models has been updated with all models as retrieved by acquisition.models as argument without the wrapping model handling any scaling . This allows custom model optimization strategies to be implemented. All manipulations of GPflow models are permitted. Combined with the optimize_restarts parameter of :class:`~.Acquisition` this allows several scenarios: do the optimization manually from the callback - (optimize_restarts equals zero), orchoose the starting point + some random restarts (optimize_restarts > 0). 
+ (optimize_restarts equals 0), or choose the starting point + some random restarts (optimize_restarts > 0). """ assert isinstance(acquisition, Acquisition) assert hyper_draws is None or hyper_draws > 0 @@ -98,7 +104,7 @@ def __init__(self, domain, acquisition, optimizer=None, initial=None, scaling=Tr initial = initial or EmptyDesign(domain) self.set_initial(initial.generate()) - self._iter_callback = callback + self._model_callback = callback @Optimizer.domain.setter def domain(self, dom): @@ -224,8 +230,8 @@ def inverse_acquisition(x): for i in range(n_iter): # If callback specified, and acquisition has the setup flag enabled (indicating an upcoming compilation, # run the callback. - if self._iter_callback and self.acquisition._needs_setup: - self._iter_callback([m.wrapped for m in self.acquisition.models]) + if self._model_callback and self.acquisition._needs_setup: + self._model_callback([m.wrapped for m in self.acquisition.models]) result = self.optimizer.optimize(inverse_acquisition) self._update_model_data(result.x, fx(result.x)) diff --git a/testing/test_optimizers.py b/testing/test_optimizers.py index 7980dd3..2a896f0 100644 --- a/testing/test_optimizers.py +++ b/testing/test_optimizers.py @@ -344,6 +344,15 @@ def __call__(self, models): for op1, op2 in zip(opers[1:], optimizer.acquisition.operands[1:]): self.assertNotEqual(id(op1), id(op2)) + def test_nongpr_model(self): + design = gpflowopt.design.LatinHyperCube(16, self.domain) + X, Y = design.generate(), parabola2d(design.generate())[0] + m = gpflow.vgp.VGP(X, Y, gpflow.kernels.RBF(2, ARD=True), likelihood=gpflow.likelihoods.Gaussian()) + acq = gpflowopt.acquisition.ExpectedImprovement(m) + optimizer = gpflowopt.BayesianOptimizer(self.domain, self.acquisition) + result = optimizer.optimize(lambda X: parabola2d(X)[0], n_iter=1) + self.assertTrue(result.success) + class TestSilentOptimization(unittest.TestCase): @contextmanager From 0d5d4bb8d0ec9d15aba0854f01d5bac9b30e7caa Mon Sep 17 00:00:00 2001 From: Joachim van der Herten Date: Wed, 30 Aug 2017 23:47:31 +0200 Subject: [PATCH 7/8] Coverage fix --- testing/test_optimizers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/test_optimizers.py b/testing/test_optimizers.py index 2a896f0..5b13a18 100644 --- a/testing/test_optimizers.py +++ b/testing/test_optimizers.py @@ -349,7 +349,7 @@ def test_nongpr_model(self): X, Y = design.generate(), parabola2d(design.generate())[0] m = gpflow.vgp.VGP(X, Y, gpflow.kernels.RBF(2, ARD=True), likelihood=gpflow.likelihoods.Gaussian()) acq = gpflowopt.acquisition.ExpectedImprovement(m) - optimizer = gpflowopt.BayesianOptimizer(self.domain, self.acquisition) + optimizer = gpflowopt.BayesianOptimizer(self.domain, acq) result = optimizer.optimize(lambda X: parabola2d(X)[0], n_iter=1) self.assertTrue(result.success) From 339b699e89c3e852ffcad3af56be5b573486cdf7 Mon Sep 17 00:00:00 2001 From: Joachim van der Herten Date: Thu, 31 Aug 2017 21:04:02 +0200 Subject: [PATCH 8/8] Fix comments --- gpflowopt/acquisition/acquisition.py | 2 +- gpflowopt/bo.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/gpflowopt/acquisition/acquisition.py b/gpflowopt/acquisition/acquisition.py index d6d6909..2caefcf 100644 --- a/gpflowopt/acquisition/acquisition.py +++ b/gpflowopt/acquisition/acquisition.py @@ -450,7 +450,7 @@ def _kill_autoflow(self): Flag for recreation on next optimize. Following the recompilation of models, the free state might have changed. 
This means updating the samples can - cause inconsistencies and errors. Flag for recreation on next optimize + cause inconsistencies and errors. """ super(MCMCAcquistion, self)._kill_autoflow() self._needs_new_copies = True diff --git a/gpflowopt/bo.py b/gpflowopt/bo.py index 39cb02d..749a7c8 100644 --- a/gpflowopt/bo.py +++ b/gpflowopt/bo.py @@ -29,7 +29,7 @@ def jitchol_callback(models): """ - Increase the likelihood in case of cholesky faillures. + Increase the likelihood in case of Cholesky failures. This is similar to the use of jitchol in GPy. Default callback for BayesianOptimizer. Only usable on GPR models, other types are ignored. @@ -228,8 +228,8 @@ def inverse_acquisition(x): # Optimization loop for i in range(n_iter): - # If callback specified, and acquisition has the setup flag enabled (indicating an upcoming compilation, - # run the callback. + # If a callback is specified, and acquisition has the setup flag enabled (indicating an upcoming + # compilation), run the callback. if self._model_callback and self.acquisition._needs_setup: self._model_callback([m.wrapped for m in self.acquisition.models]) result = self.optimizer.optimize(inverse_acquisition)
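To illustrate the callback hook this series adds, here is a minimal usage sketch. It is assembled from the API visible in the patches above (BayesianOptimizer(..., callback=...), the callback receiving the unwrapped GPflow models, the optimize_restarts attribute on acquisitions, gpflow.priors.Gamma) and assumes the GPflow 0.x-era interface used in the tests; the objective fx, the domain construction, the design size and the prior/optimizer settings are illustrative assumptions rather than part of the patches.

    import numpy as np
    import gpflow
    import gpflowopt

    # Illustrative objective: a 2D parabola with its minimum at the origin,
    # returning an (n, 1) array as the optimizer expects.
    def fx(X):
        X = np.atleast_2d(X)
        return np.sum(X ** 2, axis=1, keepdims=True)

    # Assumed domain construction (two continuous inputs on [-1, 1]).
    domain = gpflowopt.domain.ContinuousParameter('x1', -1, 1) + \
             gpflowopt.domain.ContinuousParameter('x2', -1, 1)

    # Initial design and model, following the style of the tests above.
    X = gpflowopt.design.LatinHyperCube(16, domain).generate()
    Y = fx(X)
    model = gpflow.gpr.GPR(X, Y, gpflow.kernels.RBF(2, ARD=True))

    acquisition = gpflowopt.acquisition.ExpectedImprovement(model)
    acquisition.optimize_restarts = 0  # the callback handles model optimization itself

    def my_callback(models):
        # Called right before the acquisition recompiles, with the unwrapped
        # GPflow models (no scaling wrapper). Any model manipulation is allowed.
        for m in models:
            m.kern.variance.prior = gpflow.priors.Gamma(3., 1.)
            m.optimize(maxiter=100)

    optimizer = gpflowopt.BayesianOptimizer(domain, acquisition, callback=my_callback)
    result = optimizer.optimize(fx, n_iter=10)

If no callback is passed, the jitchol_callback introduced in patches 4-6 is used by default; passing callback=None disables the hook entirely, since the optimization loop only invokes it when it is set and the acquisition flags an upcoming setup.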
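The hyper_draws path accepts the same hook. Continuing from the sketch above (same assumed names), a BayesianOptimizer built with hyper_draws wraps the acquisition in MCMCAcquistion; the tests above show that the callback then sees a single model, and that the MCMC copies are recreated on the next iteration whenever the callback changed the models' free state (for instance by assigning a prior):

    optimizer = gpflowopt.BayesianOptimizer(domain, acquisition, hyper_draws=10,
                                            callback=my_callback)
    result = optimizer.optimize(fx, n_iter=5)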