diff --git a/optimism/WarmStart.py b/optimism/WarmStart.py
index 47642bf1..9c675f97 100644
--- a/optimism/WarmStart.py
+++ b/optimism/WarmStart.py
@@ -1,8 +1,7 @@
-from scipy.sparse.linalg import cg, LinearOperator
-
 from optimism.JaxConfig import *
-import optimism.Objective as Objective
+from scipy.sparse.linalg import cg, LinearOperator
+
 
 
 def warm_start_increment(objective, x, pNew, index=0):
     dp = objective.p[index] - pNew[index]
@@ -14,7 +13,7 @@ def warm_start_increment(objective, x, pNew, index=0):
         raise('invalid warm start parameter gradient direction')
 
     sz = b.size
-    op = lambda v: objective.hessian_vec(x, v)
+    op = lambda v: objective.hessian_vec(x, np.array(v, dtype=np.float64))
     Lop = LinearOperator((sz,sz), matvec = op)
 
 
diff --git a/optimism/contact/test/test_NewtonGlobalization.py b/optimism/contact/test/test_NewtonGlobalization.py
index b8512988..f60e3101 100644
--- a/optimism/contact/test/test_NewtonGlobalization.py
+++ b/optimism/contact/test/test_NewtonGlobalization.py
@@ -133,7 +133,7 @@ def test_newton_step(self):
         xl = np.hstack( (self.x, self.lam) )
 
         r0 = np.linalg.norm( residual(xl) )
-        xl += newton_step(residual, lambda v: linear_op(xl,v), xl)[0]
+        xl += newton_step(residual, lambda v: linear_op(xl, np.array(v, dtype=np.float64)), xl)[0]
         r1 = np.linalg.norm( residual(xl) )
         self.assertTrue( r1 < 10*r0 )
 
@@ -144,7 +144,7 @@ def test_globalized_newton_step_with_cubic(self):
         linear_op = create_linear_op(residual)
         x = np.array([0.1])
         r0 = np.linalg.norm( residual(x) )
-        x += globalized_newton_step(residual, lambda v: linear_op(x,v), x, self.etak, self.t)
+        x += globalized_newton_step(residual, lambda v: linear_op(x, np.array(v, dtype=np.float64)), x, self.etak, self.t)
         r1 = np.linalg.norm( residual(x) )
         self.assertTrue( r1 < r0 )
 
@@ -154,7 +154,7 @@ def test_globalized_newton_step_nonconvex(self):
         xl = np.hstack( (self.x, self.lam) )
 
         r0 = np.linalg.norm( residual(xl) )
-        xl += globalized_newton_step(residual, lambda v: linear_op(xl,v), xl, self.etak, self.t)
+        xl += globalized_newton_step(residual, lambda v: linear_op(xl, np.array(v, dtype=np.float64)), xl, self.etak, self.t)
         r1 = np.linalg.norm( residual(xl) )
         self.assertTrue( r1 < r0 )
 
@@ -171,7 +171,7 @@ def test_al_solver(self):
         randRhs = np.array(rand(self.x.size))
         randRhs *= 0.25 / np.linalg.norm(randRhs)
 
-        penalty0 = unconstrainedObjective.hessian_vec(self.x, randRhs) @ randRhs
+        penalty0 = unconstrainedObjective.hessian_vec(self.x, np.array(randRhs, dtype=np.float64)) @ randRhs
 
         alObjective = ConstrainedObjective(lambda x,p: objective(x),
                                            lambda x,p: constraint(x),
diff --git a/optimism/inverse/test/test_Hyperelastic_gradient_checks.py b/optimism/inverse/test/test_Hyperelastic_gradient_checks.py
index 83b52cd6..96a1e0d5 100644
--- a/optimism/inverse/test/test_Hyperelastic_gradient_checks.py
+++ b/optimism/inverse/test/test_Hyperelastic_gradient_checks.py
@@ -164,7 +164,7 @@ def total_work_gradient_with_adjoint(self, storedState, parameters):
             n = self.dofManager.get_unknown_size()
             self.objective.p = p # have to update parameters to get precond to work
             self.objective.update_precond(Uu) # update preconditioner for use in cg (will converge in 1 iteration as long as the preconditioner is not approximate)
-            dRdu = linalg.LinearOperator((n, n), lambda V: onp.asarray(self.objective.hessian_vec(Uu, V)))
+            dRdu = linalg.LinearOperator((n, n), lambda V: onp.asarray(self.objective.hessian_vec(Uu, np.array(V, dtype=np.float64))))
             dRdu_decomp = linalg.LinearOperator((n, n), lambda V: onp.asarray(self.objective.apply_precond(V)))
 
             adjointVector = linalg.cg(dRdu, onp.array(adjointLoad, copy=False), rtol=1e-10, atol=0.0, M=dRdu_decomp)[0]
diff --git a/optimism/inverse/test/test_J2Plastic_gradient_checks.py b/optimism/inverse/test/test_J2Plastic_gradient_checks.py
index 53c3eea2..32b1112f 100644
--- a/optimism/inverse/test/test_J2Plastic_gradient_checks.py
+++ b/optimism/inverse/test/test_J2Plastic_gradient_checks.py
@@ -171,7 +171,7 @@ def total_work_gradient(self, storedState, parameters):
             p_objective = Objective.Params(bc_data=p.bc_data, state_data=p_prev.state_data) # remember R is a function of ivs_prev
             self.objective.p = p_objective
             self.objective.update_precond(Uu) # update preconditioner for use in cg (will converge in 1 iteration as long as the preconditioner is not approximate)
-            dRdu = linalg.LinearOperator((n, n), lambda V: onp.asarray(self.objective.hessian_vec(Uu, V)))
+            dRdu = linalg.LinearOperator((n, n), lambda V: onp.asarray(self.objective.hessian_vec(Uu, np.array(V, dtype=np.float64))))
             dRdu_decomp = linalg.LinearOperator((n, n), lambda V: onp.asarray(self.objective.apply_precond(V)))
 
             adjointVector = linalg.cg(dRdu, onp.array(adjointLoad, copy=False), rtol=1e-10, atol=0.0, M=dRdu_decomp)[0]
@@ -253,7 +253,7 @@ def target_curve_gradient(self, storedState, parameters):
             p_objective = Objective.Params(bc_data=p.bc_data, state_data=p_prev.state_data) # remember R is a function of ivs_prev
             self.objective.p = p_objective
             self.objective.update_precond(Uu) # update preconditioner for use in cg (will converge in 1 iteration as long as the preconditioner is not approximate)
-            dRdu = linalg.LinearOperator((n, n), lambda V: onp.asarray(self.objective.hessian_vec(Uu, V)))
+            dRdu = linalg.LinearOperator((n, n), lambda V: onp.asarray(self.objective.hessian_vec(Uu, np.array(V, np.float64))))
             dRdu_decomp = linalg.LinearOperator((n, n), lambda V: onp.asarray(self.objective.apply_precond(V)))
 
             adjointVector = linalg.cg(dRdu, onp.array(adjointLoad, copy=False), rtol=1e-10, atol=0.0, M=dRdu_decomp)[0]
diff --git a/setup.py b/setup.py
index c4edecda..8972933b 100644
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@
                       'matplotlib', # this is not strictly necessary
                       'metis',
                       'netcdf4',
-                      'scipy',],
+                      'scipy<1.15.0',],
     #tests_require=[], # could put chex and pytest here
     extras_require={'sparse': ['scikit-sparse'],
                     'test': ['pytest', 'pytest-cov', 'pytest-xdist'],
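
Note (not part of the diff): every change above applies the same pattern. The vector that SciPy's cg/LinearOperator machinery hands to a matvec is re-wrapped as a float64 array before it reaches the JAX-backed hessian_vec, and setup.py additionally pins scipy below 1.15.0, apparently to guard against dtype handling differences in newer SciPy releases. The sketch below is a minimal, self-contained illustration of that wrapping only; the quadratic-plus-sine objective, the hessian_vec helper, and the problem size are illustrative assumptions, not optimism APIs.

# Minimal sketch of the matvec-wrapping pattern used in this diff.
# The objective and hessian_vec below are illustrative stand-ins, not optimism code.
import jax
import jax.numpy as jnp
import numpy as onp
from scipy.sparse.linalg import cg, LinearOperator

jax.config.update("jax_enable_x64", True)  # optimism runs JAX in double precision

def objective(x):
    return 0.5 * jnp.dot(x, x) + jnp.sum(jnp.sin(x))

def hessian_vec(x, v):
    # Hessian-vector product via forward-over-reverse differentiation
    return jax.jvp(jax.grad(objective), (x,), (v,))[1]

n = 5
x = jnp.linspace(0.1, 0.5, n)
b = onp.ones(n)

# Re-wrap the solver-supplied NumPy vector as float64 before it reaches JAX,
# mirroring hessian_vec(Uu, np.array(V, dtype=np.float64)) in the diff.
op = LinearOperator((n, n),
                    matvec=lambda v: onp.asarray(hessian_vec(x, jnp.array(v, dtype=jnp.float64))))
dx, info = cg(op, b, atol=0.0)
print(info)  # 0 indicates CG converged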