#ifndef IMPKERNEL_SINGLETON_MACROS_H
#define IMPKERNEL_SINGLETON_MACROS_H

#include "internal/TupleRestraint.h"
#include "internal/functors.h"
#include "container_macros.h"
#include "input_output_macros.h"
#include <algorithm>
#include <limits>

//! Implement a SingletonScore by declaring evaluate(); the index-based
//! methods forward to it (deprecated as of IMP 2.1)
#define IMP_SINGLETON_SCORE(Name)                                       \
  IMPKERNEL_DEPRECATED_MACRO(2.1, "Declare methods yourself and use "   \
                 "IMP_SINGLETON_SCORE_METHODS() to fill in the rest."); \
  IMP_IMPLEMENT(double evaluate(Particle* p,                            \
                                DerivativeAccumulator *da) const);      \
  IMP_IMPLEMENT_INLINE(double evaluate_index(kernel::Model *m,          \
                                  ParticleIndex p,                      \
                                  DerivativeAccumulator *da) const, {   \
    return evaluate(IMP::kernel::internal::get_particle(m, p), da);     \
                       });                                              \
  IMP_IMPLEMENT_INLINE(double evaluate_if_good_index(kernel::Model *m,  \
                                  ParticleIndex p,                      \
                                  DerivativeAccumulator *da,            \
                                  double max) const, {                  \
    IMP_UNUSED(max);                                                    \
    return evaluate_index(m, p, da);                                    \
                       });                                              \
  IMP_BACKWARDS_MACRO_INPUTS;                                           \
  IMP_OBJECT_NO_WARNING(Name)

//! Implement a SingletonScore from evaluate(); the inputs are just the
//! passed particles (deprecated as of IMP 2.1)
#define IMP_SIMPLE_SINGLETON_SCORE(Name)                                \
  IMPKERNEL_DEPRECATED_MACRO(2.1, "Declare methods yourself and use "   \
                 "IMP_SINGLETON_SCORE_METHODS() to fill in the rest."); \
  IMP_IMPLEMENT(double evaluate(Particle* p,                            \
                                DerivativeAccumulator *da) const);      \
  IMP_IMPLEMENT_INLINE(kernel::ModelObjectsTemp                         \
                       do_get_inputs(kernel::Model *m,                  \
                                      const ParticleIndexes &pis) const, { \
    kernel::ModelObjectsTemp ret;                                       \
    ret += IMP::kernel::get_particles(m, pis);                          \
    return ret;                                                         \
                       });                                              \
  IMP_IMPLEMENT_INLINE(Restraints do_create_current_decomposition       \
                       (kernel::Model *m,                               \
                        ParticleIndex vt) const, {                      \
    return IMP::kernel::internal                                        \
        ::create_score_current_decomposition(this, m, vt);              \
                       });                                              \
  IMP_OBJECT_NO_WARNING(Name)

//! Implement the Particle*-based and bulk SingletonScore methods in terms of
//! the index-based methods, which the class must declare itself
#define IMP_COMPOSITE_SINGLETON_SCORE(Name)                              \
  IMPKERNEL_DEPRECATED_MACRO(2.1, "Do it yourself.");                    \
  IMP_IMPLEMENT_INLINE(double evaluate(Particle* p,                      \
                                       DerivativeAccumulator *da) const, { \
    return evaluate_index(IMP::kernel::internal::get_model(p),           \
                          IMP::kernel::internal::get_index(p), da);      \
                       });                                               \
  IMP_IMPLEMENT(double evaluate_index(kernel::Model *m, ParticleIndex p, \
                                      DerivativeAccumulator *da) const); \
  IMP_IMPLEMENT(double evaluate_if_good_index(kernel::Model *m,          \
                                              ParticleIndex p,           \
                                              DerivativeAccumulator *da, \
                                              double max) const);        \
  IMP_IMPLEMENT_INLINE(double evaluate_indexes(kernel::Model *m,         \
                                        const ParticleIndexes &p,        \
                                        DerivativeAccumulator *da,       \
                                        unsigned int lower_bound,        \
                                        unsigned int upper_bound) const, \
  {                                                                      \
    double ret = 0;                                                      \
    for (unsigned int i = lower_bound; i < upper_bound; ++i) {           \
      ret += evaluate_index(m, p[i], da);                                \
    }                                                                    \
    return ret;                                                          \
                       });                                               \
  IMP_IMPLEMENT_INLINE(double                                            \
                       evaluate_if_good_indexes(kernel::Model *m,        \
                                        const ParticleIndexes &p,        \
                                        DerivativeAccumulator *da,       \
                                        double max,                      \
                                        unsigned int lower_bound,        \
                                        unsigned int upper_bound) const, { \
    double ret = 0;                                                      \
    for (unsigned int i = lower_bound; i < upper_bound; ++i) {           \
      ret += evaluate_if_good_index(m, p[i], da, max - ret);             \
      if (ret > max) return std::numeric_limits<double>::max();          \
    }                                                                    \
    return ret;                                                          \
                       });                                               \
  IMP_IMPLEMENT(kernel::ModelObjectsTemp                                 \
                do_get_inputs(kernel::Model *m,                          \
                              const ParticleIndexes &pis) const);        \
  IMP_OBJECT_NO_WARNING(Name)

//! Define evaluate_indexes() and evaluate_if_good_indexes() by looping over
//! evaluate_index() and evaluate_if_good_index()
#define IMP_SINGLETON_SCORE_METHODS(Name)                                \
  double evaluate_indexes(kernel::Model *m,                              \
                          const ParticleIndexes &p,                      \
                          DerivativeAccumulator *da,                     \
                          unsigned int lower_bound,                      \
                          unsigned int upper_bound) const IMP_FINAL {    \
    double ret = 0;                                                      \
    for (unsigned int i = lower_bound; i < upper_bound; ++i) {           \
      ret += evaluate_index(m, p[i], da);                                \
    }                                                                    \
    return ret;                                                          \
  }                                                                      \
  double evaluate_if_good_indexes(kernel::Model *m,                      \
                                  const ParticleIndexes &p,              \
                                  DerivativeAccumulator *da,             \
                                  double max,                            \
                                  unsigned int lower_bound,              \
                                  unsigned int upper_bound) const {      \
    double ret = 0;                                                      \
    for (unsigned int i = lower_bound; i < upper_bound; ++i) {           \
      ret += evaluate_if_good_index(m, p[i], da, max - ret);             \
      if (ret > max) return std::numeric_limits<double>::max();          \
    }                                                                    \
    return ret;                                                          \
  }
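
/** Example usage (a minimal sketch, not part of the original header): a
    hypothetical SingletonScore subclass that declares evaluate_index(),
    evaluate_if_good_index() and do_get_inputs() itself and uses
    IMP_SINGLETON_SCORE_METHODS() to fill in the bulk-evaluation methods.
    The class name is illustrative, and any remaining SingletonScore
    virtual methods are assumed to have defaults in this IMP version.
    \code
    class MyExampleSingletonScore : public SingletonScore {
     public:
      virtual double evaluate_index(kernel::Model *m, ParticleIndex pi,
                                    DerivativeAccumulator *da) const;
      virtual double evaluate_if_good_index(kernel::Model *m,
                                            ParticleIndex pi,
                                            DerivativeAccumulator *da,
                                            double max) const {
        IMP_UNUSED(max);
        return evaluate_index(m, pi, da);
      }
      virtual kernel::ModelObjectsTemp
          do_get_inputs(kernel::Model *m, const ParticleIndexes &pis) const;
      IMP_SINGLETON_SCORE_METHODS(MyExampleSingletonScore);
      IMP_OBJECT_METHODS(MyExampleSingletonScore);
    };
    \endcode
*/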

//! Implement all SingletonScore methods except evaluate_index() and
//! do_get_inputs(), which the class must define out of line
#define IMP_INDEX_SINGLETON_SCORE(Name)                                  \
  IMPKERNEL_DEPRECATED_MACRO(2.1, "Declare methods yourself and use "    \
                 "IMP_SINGLETON_SCORE_METHODS() to fill in the rest.");  \
  double evaluate(Particle* p, DerivativeAccumulator *da) const {        \
    return evaluate_index(IMP::kernel::internal::get_model(p),           \
                          IMP::kernel::internal::get_index(p),           \
                          da);                                           \
  }                                                                      \
  double evaluate_index(kernel::Model *m, ParticleIndex p,               \
                        DerivativeAccumulator *da) const IMP_FINAL;      \
  double evaluate_if_good_index(kernel::Model *m, ParticleIndex p,       \
                                DerivativeAccumulator *da,               \
                                double max) const {                      \
    IMP_UNUSED(max);                                                     \
    return evaluate_index(m, p, da);                                     \
  }                                                                      \
  double evaluate_indexes(kernel::Model *m,                              \
                          const ParticleIndexes &p,                      \
                          DerivativeAccumulator *da,                     \
                          unsigned int lower_bound,                      \
                          unsigned int upper_bound) const IMP_FINAL {    \
    double ret = 0;                                                      \
    for (unsigned int i = lower_bound; i < upper_bound; ++i) {           \
      ret += evaluate_index(m, p[i], da);                                \
    }                                                                    \
    return ret;                                                          \
  }                                                                      \
  double evaluate_if_good_indexes(kernel::Model *m,                      \
                                  const ParticleIndexes &p,              \
                                  DerivativeAccumulator *da,             \
                                  double max,                            \
                                  unsigned int lower_bound,              \
                                  unsigned int upper_bound) const {      \
    double ret = 0;                                                      \
    for (unsigned int i = lower_bound; i < upper_bound; ++i) {           \
      ret += evaluate_if_good_index(m, p[i], da, max - ret);             \
      if (ret > max) return std::numeric_limits<double>::max();          \
    }                                                                    \
    return ret;                                                          \
  }                                                                      \
  kernel::ModelObjectsTemp do_get_inputs(kernel::Model *m,               \
                                    const ParticleIndexes &pis) const;   \
  IMP_OBJECT_METHODS(Name)

//! Implement the index-based SingletonPredicate methods in terms of
//! get_value(Particle*), which the class must declare itself
#define IMP_SINGLETON_PREDICATE(Name)                                    \
  IMPKERNEL_DEPRECATED_MACRO(2.1, "Use IMP_SINGLETON_PREDICATE_METHODS"  \
                             " and declare the methods you implement."); \
  IMP_IMPLEMENT(int get_value(Particle* a) const);                       \
  IMP_IMPLEMENT_INLINE(Ints get_value(const                              \
                                      ParticlesTemp &o) const, {         \
    Ints ret(o.size());                                                  \
    for (unsigned int i = 0; i < o.size(); ++i) {                        \
      ret[i] += Name::get_value(o[i]);                                   \
    }                                                                    \
    return ret;                                                          \
                       });                                               \
  IMP_IMPLEMENT_INLINE(int get_value_index(kernel::Model *m,             \
                                           ParticleIndex vt)             \
                       const, {                                          \
    return Name::get_value(IMP::kernel::internal::get_particle(m, vt));  \
                       });                                               \
  IMP_IMPLEMENT_INLINE(Ints get_value_index(kernel::Model *m,            \
                                  const ParticleIndexes &o) const, {     \
    Ints ret(o.size());                                                  \
    for (unsigned int i = 0; i < o.size(); ++i) {                        \
      ret[i] += Name::get_value_index(m, o[i]);                          \
    }                                                                    \
    return ret;                                                          \
                       });                                               \
  IMP_BACKWARDS_MACRO_INPUTS;                                            \
  IMP_OBJECT_NO_WARNING(Name)

//! Define the Particle*-based, bulk and remove_if_* SingletonPredicate
//! methods in terms of get_value_index(), which the class must declare itself
#define IMP_SINGLETON_PREDICATE_METHODS(Name)                             \
  int get_value(Particle* a) const {                                      \
    return get_value_index(IMP::kernel::internal::get_model(a),           \
                           IMP::kernel::internal::get_index(a));          \
  }                                                                       \
  Ints get_value(const ParticlesTemp &o) const {                          \
    Ints ret(o.size());                                                   \
    for (unsigned int i = 0; i < o.size(); ++i) {                         \
      ret[i] += Name::get_value(o[i]);                                    \
    }                                                                     \
    return ret;                                                           \
  }                                                                       \
  Ints get_value_index(kernel::Model *m, const ParticleIndexes &o) const { \
    Ints ret(o.size());                                                   \
    for (unsigned int i = 0; i < o.size(); ++i) {                         \
      ret[i] += Name::get_value_index(m, o[i]);                           \
    }                                                                     \
    return ret;                                                           \
  }                                                                       \
  IMP_IMPLEMENT_INLINE_NO_SWIG(void remove_if_equal(kernel::Model *m,     \
                                                    ParticleIndexes& ps,  \
                                                    int value) const, {   \
    ps.erase(std::remove_if(ps.begin(), ps.end(),                         \
                 IMP::kernel::internal::PredicateEquals<Name, true>(      \
                     this, m, value)),                                    \
             ps.end());                                                   \
                       });                                                \
  IMP_IMPLEMENT_INLINE_NO_SWIG(void remove_if_not_equal(kernel::Model *m, \
                                                    ParticleIndexes& ps,  \
                                                    int value) const, {   \
    ps.erase(std::remove_if(ps.begin(), ps.end(),                         \
                 IMP::kernel::internal::PredicateEquals<Name, false>(     \
                     this, m, value)),                                    \
             ps.end());                                                   \
                       });
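
/** Example usage (a minimal sketch, not part of the original header): a
    hypothetical predicate that tests a particle attribute. Only
    get_value_index() and do_get_inputs() are written by hand; the
    Particle*-based, bulk and remove_if_* methods come from
    IMP_SINGLETON_PREDICATE_METHODS(). The class and key names are
    illustrative.
    \code
    class MyExampleSingletonPredicate : public SingletonPredicate {
     public:
      virtual int get_value_index(kernel::Model *m, ParticleIndex pi) const {
        return m->get_has_attribute(IntKey("my example key"), pi) ? 1 : 0;
      }
      virtual kernel::ModelObjectsTemp
          do_get_inputs(kernel::Model *m, const ParticleIndexes &pis) const {
        kernel::ModelObjectsTemp ret;
        ret += IMP::kernel::get_particles(m, pis);
        return ret;
      }
      IMP_SINGLETON_PREDICATE_METHODS(MyExampleSingletonPredicate);
      IMP_OBJECT_METHODS(MyExampleSingletonPredicate);
    };
    \endcode
*/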

//! Implement a SingletonPredicate whose get_value_index() body is given by
//! the return_value argument and whose do_get_inputs() body is given by the
//! return_inputs argument
#define IMP_INDEX_SINGLETON_PREDICATE(Name, return_value, return_inputs) \
  IMPKERNEL_DEPRECATED_MACRO(2.1, "Declare the methods yourself and use" \
               " IMP_SINGLETON_PREDICATE_METHODS to fill in the rest."); \
  int get_value(Particle* a) const {                                     \
    return get_value_index(IMP::kernel::internal::get_model(a),          \
                           IMP::kernel::internal::get_index(a));         \
  }                                                                      \
  Ints get_value(const ParticlesTemp &o) const {                         \
    Ints ret(o.size());                                                  \
    for (unsigned int i = 0; i < o.size(); ++i) {                        \
      ret[i] += Name::get_value(o[i]);                                   \
    }                                                                    \
    return ret;                                                          \
  }                                                                      \
  int get_value_index(kernel::Model *m, ParticleIndex pi) const {        \
    return_value;                                                        \
  }                                                                      \
  Ints get_value_index(kernel::Model *m, const ParticleIndexes &o) const { \
    Ints ret(o.size());                                                  \
    for (unsigned int i = 0; i < o.size(); ++i) {                        \
      ret[i] += Name::get_value_index(m, o[i]);                          \
    }                                                                    \
    return ret;                                                          \
  }                                                                      \
  IMP_IMPLEMENT_INLINE_NO_SWIG(void remove_if_equal(kernel::Model *m,    \
                                                    ParticleIndexes& ps, \
                                                    int value) const, {  \
    ps.erase(std::remove_if(ps.begin(), ps.end(),                        \
                 IMP::kernel::internal::PredicateEquals<Name, true>(     \
                     this, m, value)),                                   \
             ps.end());                                                  \
                       });                                               \
  IMP_IMPLEMENT_INLINE_NO_SWIG(void remove_if_not_equal(kernel::Model *m,\
                                                    ParticleIndexes& ps, \
                                                    int value) const, {  \
    ps.erase(std::remove_if(ps.begin(), ps.end(),                        \
                 IMP::kernel::internal::PredicateEquals<Name, false>(    \
                     this, m, value)),                                   \
             ps.end());                                                  \
                       });                                               \
  kernel::ModelObjectsTemp do_get_inputs(kernel::Model *m,               \
                                    const ParticleIndexes &pi) const {   \
    return_inputs;                                                       \
  }                                                                      \
  IMP_OBJECT_METHODS(Name)
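
/** Example usage (a minimal sketch, not part of the original header): the
    return_value argument becomes the body of get_value_index() (where m and
    pi name the model and index) and return_inputs becomes the body of
    do_get_inputs(). The constant predicate shown here is purely
    illustrative, as is the assumed SingletonPredicate(name) constructor.
    \code
    class MyConstantSingletonPredicate : public SingletonPredicate {
      int value_;
     public:
      MyConstantSingletonPredicate(int value,
                                   std::string name = "MyConstantPredicate%1%")
          : SingletonPredicate(name), value_(value) {}
      IMP_INDEX_SINGLETON_PREDICATE(MyConstantSingletonPredicate,
                                    return value_,
                                    return kernel::ModelObjectsTemp());
    };
    \endcode
*/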

//! Implement apply_index() by forwarding to apply(Particle*), which the
//! class must declare itself
#define IMP_SINGLETON_MODIFIER(Name)                                     \
  IMPKERNEL_DEPRECATED_MACRO(2.1, "Declare methods yourself and"         \
          " use IMP_SINGLETON_MODIFIER_METHODS to fill in the rest");    \
  IMP_IMPLEMENT(void apply(Particle* a) const);                          \
  IMP_IMPLEMENT_INLINE(void apply_index(kernel::Model *m,                \
                                        ParticleIndex a) const, {        \
    return Name::apply(IMP::kernel::internal::get_particle(m, a));       \
                       });                                               \
  IMP_BACKWARDS_MACRO_INPUTS;                                            \
  IMP_BACKWARDS_MACRO_OUTPUTS;                                           \
  IMP_OBJECT_NO_WARNING(Name)

//! Use IMP_SINGLETON_MODIFIER() instead
#define IMP_SINGLETON_DERIVATIVE_MODIFIER(Name) \
  IMP_SINGLETON_MODIFIER(Name)

//! Define apply_indexes() by looping over apply_index(), which the class
//! must declare itself
#define IMP_SINGLETON_MODIFIER_METHODS(Name)                              \
  virtual void apply_indexes(kernel::Model *m, const ParticleIndexes &o,  \
                             unsigned int lower_bound,                    \
                             unsigned int upper_bound) const IMP_FINAL {  \
    for (unsigned int i = lower_bound; i < upper_bound; ++i) {            \
      apply_index(m, o[i]);                                               \
    }                                                                     \
  }
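
/** Example usage (a minimal sketch, not part of the original header): a
    hypothetical modifier that defines apply_index(), do_get_inputs() and
    do_get_outputs() out of line and uses IMP_SINGLETON_MODIFIER_METHODS()
    to provide apply_indexes(). The class name is illustrative.
    \code
    class MyExampleSingletonModifier : public SingletonModifier {
     public:
      virtual void apply_index(kernel::Model *m, ParticleIndex pi) const;
      virtual kernel::ModelObjectsTemp
          do_get_inputs(kernel::Model *m, const ParticleIndexes &pis) const;
      virtual kernel::ModelObjectsTemp
          do_get_outputs(kernel::Model *m, const ParticleIndexes &pis) const;
      IMP_SINGLETON_MODIFIER_METHODS(MyExampleSingletonModifier);
      IMP_OBJECT_METHODS(MyExampleSingletonModifier);
    };
    \endcode
*/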

//! Implement all SingletonModifier methods except apply_index(),
//! do_get_inputs() and do_get_outputs(), which the class defines out of line
#define IMP_INDEX_SINGLETON_MODIFIER(Name)                                \
  IMPKERNEL_DEPRECATED_MACRO(2.1, "Declare methods yourself and"          \
          " use IMP_SINGLETON_MODIFIER_METHODS to fill in the rest");     \
  void apply(Particle* a) const {                                         \
    apply_index(IMP::kernel::internal::get_model(a),                      \
                IMP::kernel::internal::get_index(a));                     \
  }                                                                       \
  void apply_index(kernel::Model *m, ParticleIndex a) const IMP_FINAL;    \
  void apply_indexes(kernel::Model *m, const ParticleIndexes &o,          \
                     unsigned int lower_bound,                            \
                     unsigned int upper_bound) const IMP_FINAL {          \
    for (unsigned int i = lower_bound; i < upper_bound; ++i) {            \
      apply_index(m, o[i]);                                               \
    }                                                                     \
  }                                                                       \
  kernel::ModelObjectsTemp do_get_inputs(kernel::Model *m,                \
                                    const ParticleIndexes &pis) const;    \
  kernel::ModelObjectsTemp do_get_outputs(kernel::Model *m,               \
                                     const ParticleIndexes &pis) const;   \
  IMP_OBJECT_METHODS(Name)

#define IMP_INDEX_SINGLETON_DERIVATIVE_MODIFIER(Name) \
  IMP_INDEX_SINGLETON_MODIFIER(Name)

//! Implement do_apply() by forwarding to apply_generic() and declare
//! get_all_possible_indexes()
#define IMP_IMPLEMENT_SINGLETON_CONTAINER(Name)                           \
  IMP_IMPLEMENT_INLINE(void do_apply(const SingletonModifier *sm) const, {\
    apply_generic(sm);                                                    \
                       });                                                \
  virtual ParticleIndexes get_all_possible_indexes() const IMP_OVERRIDE;  \
  IMP_OBJECT_NO_WARNING(Name)

//! Define SingletonContainer::do_apply() by forwarding to apply_generic()
#define IMP_SINGLETON_CONTAINER_METHODS(Name)                             \
  void do_apply(const SingletonModifier *sm) const {                      \
    apply_generic(sm);                                                    \
  }

//! Declare the SingletonContainer methods and implement do_get_inputs()
//! from the input containers and particles (deprecated as of IMP 2.1)
#define IMP_SINGLETON_CONTAINER(Name)                                     \
  IMPKERNEL_DEPRECATED_MACRO(2.1, "Declare the methods yourself and use " \
                             "IMP_SINGLETON_CONTAINER_METHODS");          \
  IMP_IMPLEMENT(ParticleIndexes get_indexes() const);                     \
  IMP_IMPLEMENT(ParticleIndexes get_range_indexes() const);               \
  IMP_IMPLEMENT(void do_before_evaluate());                               \
  IMP_IMPLEMENT(ParticlesTemp get_input_particles() const);               \
  IMP_IMPLEMENT(ContainersTemp get_input_containers() const);             \
  IMP_IMPLEMENT_INLINE(kernel::ModelObjectsTemp do_get_inputs() const, {  \
    kernel::ModelObjectsTemp ret;                                         \
    ret += get_input_containers();                                        \
    ret += get_input_particles();                                         \
    return ret;                                                           \
                       });                                                \
  IMP_IMPLEMENT_SINGLETON_CONTAINER(Name)

//! The active container macro is now the same as IMP_SINGLETON_CONTAINER()
#define IMP_ACTIVE_SINGLETON_CONTAINER(Name) \
  IMP_SINGLETON_CONTAINER(Name)

//! Loop over the Particles in a SingletonContainer; inside the operation,
//! _1 names the current Particle (deprecated, see IMP_CONTAINER_FOREACH)
#define IMP_FOREACH_SINGLETON(sequence, operation) do {                   \
    IMPKERNEL_DEPRECATED_MACRO(2.1, "Use IMP_CONTAINER_FOREACH instead"); \
    IMP::kernel::ParticlesTemp imp_all = sequence->get();                 \
    for (unsigned int _2 = 0;                                             \
         _2 != imp_all.size();                                            \
         ++_2) {                                                          \
      IMP::kernel::Particle* _1 = imp_all[_2];                            \
      bool imp_foreach_break = false;                                     \
      operation                                                           \
      if (imp_foreach_break) break;                                       \
    }                                                                     \
  } while (false)

#define IMP_FOREACH_SINGLETON_INDEX(sequence, operation)                  \
  IMPKERNEL_DEPRECATED_MACRO(2.1, "Use IMP_CONTAINER_FOREACH instead");   \
  IMP_CONTAINER_FOREACH(SingletonContainer, sequence, operation)
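
/** Example usage (a minimal sketch, not part of the original header): the
    replacement macro IMP_CONTAINER_FOREACH from container_macros.h, where
    _1 names the current ParticleIndex inside the operation. The variables
    sc (a SingletonContainer*) and indexes are illustrative.
    \code
    ParticleIndexes indexes;
    IMP_CONTAINER_FOREACH(SingletonContainer, sc, {
      indexes.push_back(_1);
    });
    \endcode
*/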

#endif /* IMPKERNEL_SINGLETON_MACROS_H */