IMP logo
IMP Reference Guide  2.8.0
The Integrative Modeling Platform
output.py
1 """@namespace IMP.pmi.output
2  Classes for writing output files and processing them.
3 """
4 
5 from __future__ import print_function, division
6 import IMP
7 import IMP.atom
8 import IMP.core
9 import IMP.pmi
10 import IMP.pmi.tools
11 import os
12 import sys
13 import ast
14 import RMF
15 import numpy as np
16 import operator
17 try:
18  import cPickle as pickle
19 except ImportError:
20  import pickle
21 
class ProtocolOutput(object):
    """Base class for capturing a modeling protocol.
    Unlike simple output of model coordinates, a complete
    protocol includes the input data used, details on the restraints,
    sampling, and clustering, as well as output models.
    Use via IMP.pmi.representation.Representation.add_protocol_output()
    (for PMI 1) or
    IMP.pmi.topology.System.add_protocol_output() (for PMI 2).

    @see IMP.pmi.mmcif.ProtocolOutput for a concrete subclass that outputs
         mmCIF files.
    """
    pass
35 
36 def _flatten(seq):
37  l = []
38  for elt in seq:
39  t = type(elt)
40  if t is tuple or t is list:
41  for elt2 in _flatten(elt):
42  l.append(elt2)
43  else:
44  l.append(elt)
45  return l
46 
47 class Output(object):
48  """Class for easy writing of PDBs, RMFs, and stat files"""
    def __init__(self, ascii=True, atomistic=False):
        """Set up the output handler.
        @param ascii if True, stat files are written as text; otherwise
               they are written as pickled binary
        @param atomistic if True, write atomic detail (automatically
               enabled when a PMI2 hierarchy is detected)
        """
        self.dictionary_pdbs = {}    # pdb filename -> hierarchy
        self.dictionary_rmfs = {}    # rmf filename -> RMF file handle
        self.dictionary_stats = {}   # stat filename -> list of output objects
        self.dictionary_stats2 = {}  # stat2 filename -> bookkeeping tuple
        self.best_score_list = None
        self.nbestscoring = None
        self.suffixes = []
        self.replica_exchange = False
        self.ascii = ascii
        self.initoutput = {}
        self.residuetypekey = IMP.StringKey("ResidueName")
        # pool of one-character chain IDs handed out to molecules in order
        self.chainids = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
        self.dictchain = {}  # keys are molecule names, values are chain ids
        self.particle_infos_for_pdb = {}
        self.atomistic = atomistic
        self.use_pmi2 = False
66 
67  def get_pdb_names(self):
68  return list(self.dictionary_pdbs.keys())
69 
70  def get_rmf_names(self):
71  return list(self.dictionary_rmfs.keys())
72 
73  def get_stat_names(self):
74  return list(self.dictionary_stats.keys())
75 
    def _init_dictchain(self, name, prot):
        """Populate self.dictchain[name] with a molecule-name -> chain-id
        mapping for the hierarchy *prot*, autodetecting PMI2 layouts."""
        self.dictchain[name] = {}
        self.use_pmi2 = False

        # attempt to find PMI objects.
        if IMP.pmi.get_is_canonical(prot):
            # PMI2: chain ids come from the Chain decorator on each molecule
            self.use_pmi2 = True
            self.atomistic = True  # detects automatically
            for n, mol in enumerate(IMP.atom.get_by_type(prot, IMP.atom.MOLECULE_TYPE)):
                chid = IMP.atom.Chain(mol).get_id()
                self.dictchain[name][IMP.pmi.get_molecule_name_and_copy(mol)] = chid
        else:
            # PMI1: assign chain ids to top-level children in order
            for n, i in enumerate(self.dictionary_pdbs[name].get_children()):
                self.dictchain[name][i.get_name()] = self.chainids[n]
90 
91  def init_pdb(self, name, prot):
92  """Init PDB Writing.
93  @param name The PDB filename
94  @param prot The hierarchy to write to this pdb file
95  \note if the PDB name is 'System' then will use Selection to get molecules
96  """
97  flpdb = open(name, 'w')
98  flpdb.close()
99  self.dictionary_pdbs[name] = prot
100  self._init_dictchain(name, prot)
101 
    def write_psf(self, filename, name):
        """Write a CHARMM-style PSF topology for the system registered
        under *name* (see init_pdb()), with one atom record per bead
        and bonds between beads of consecutive residues in each chain.
        @param filename the output PSF filename
        @param name key of a previously registered hierarchy
        """
        flpsf = open(filename, 'w')
        flpsf.write("PSF CMAP CHEQ" + "\n")
        index_residue_pair_list = {}
        (particle_infos_for_pdb, geometric_center) = self.get_particle_infos_for_pdb_writing(name)
        nparticles = len(particle_infos_for_pdb)
        flpsf.write(str(nparticles) + " !NATOM" + "\n")
        for n, p in enumerate(particle_infos_for_pdb):
            atom_index = n + 1
            residue_type = p[2]
            chain = p[3]
            resid = p[4]
            # fixed-column atom record; charge/mass columns are dummies
            flpsf.write('{0:8d}{1:1s}{2:4s}{3:1s}{4:4s}{5:1s}{6:4s}{7:1s}{8:4s}{9:1s}{10:4s}{11:14.6f}{12:14.6f}{13:8d}{14:14.6f}{15:14.6f}'.format(atom_index, " ", chain, " ", str(resid), " ", '"' + residue_type.get_string() + '"', " ", "C", " ", "C", 1.0, 0.0, 0, 0.0, 0.0))
            flpsf.write('\n')
            # remember (atom index, residue index) per chain to build bonds
            if chain not in index_residue_pair_list:
                index_residue_pair_list[chain] = [(atom_index, resid)]
            else:
                index_residue_pair_list[chain].append((atom_index, resid))

        # now write the connectivity
        indexes_pairs = []
        for chain in sorted(index_residue_pair_list.keys()):

            ls = index_residue_pair_list[chain]
            # sort by residue
            ls = sorted(ls, key=lambda tup: tup[1])
            # get the index list
            indexes = [x[0] for x in ls]
            # get the contiguous pairs
            indexes_pairs += list(IMP.pmi.tools.sublist_iterator(indexes, lmin=2, lmax=2))
        nbonds = len(indexes_pairs)
        flpsf.write(str(nbonds) + " !NBOND: bonds" + "\n")

        # group the bond pairs four per output line
        sublists = [indexes_pairs[i:i + 4] for i in range(0, len(indexes_pairs), 4)]

        # save bonds in fixed column format
        for ip in sublists:
            if len(ip) == 4:
                flpsf.write('{0:8d}{1:8d}{2:8d}{3:8d}{4:8d}{5:8d}{6:8d}{7:8d}'.format(ip[0][0], ip[0][1],
                            ip[1][0], ip[1][1], ip[2][0], ip[2][1], ip[3][0], ip[3][1]))
            elif len(ip) == 3:
                flpsf.write('{0:8d}{1:8d}{2:8d}{3:8d}{4:8d}{5:8d}'.format(ip[0][0], ip[0][1], ip[1][0],
                            ip[1][1], ip[2][0], ip[2][1]))
            elif len(ip) == 2:
                flpsf.write('{0:8d}{1:8d}{2:8d}{3:8d}'.format(ip[0][0], ip[0][1], ip[1][0], ip[1][1]))
            elif len(ip) == 1:
                flpsf.write('{0:8d}{1:8d}'.format(ip[0][0], ip[0][1]))
            flpsf.write('\n')

        del particle_infos_for_pdb
        flpsf.close()
155 
156  def write_pdb(self,name,
157  appendmode=True,
158  translate_to_geometric_center=False,
159  write_all_residues_per_bead=False):
160  if appendmode:
161  flpdb = open(name, 'a')
162  else:
163  flpdb = open(name, 'w')
164 
165  (particle_infos_for_pdb,
166  geometric_center) = self.get_particle_infos_for_pdb_writing(name)
167 
168  if not translate_to_geometric_center:
169  geometric_center = (0, 0, 0)
170 
171  for n,tupl in enumerate(particle_infos_for_pdb):
172  (xyz, atom_type, residue_type,
173  chain_id, residue_index, all_indexes, radius) = tupl
174  if atom_type is None:
175  atom_type = IMP.atom.AT_CA
176  if ( (write_all_residues_per_bead) and (all_indexes is not None) ):
177  for residue_number in all_indexes:
178  flpdb.write(IMP.atom.get_pdb_string((xyz[0] - geometric_center[0],
179  xyz[1] - geometric_center[1],
180  xyz[2] - geometric_center[2]),
181  n+1, atom_type, residue_type,
182  chain_id, residue_number,' ',1.00,radius))
183  else:
184  flpdb.write(IMP.atom.get_pdb_string((xyz[0] - geometric_center[0],
185  xyz[1] - geometric_center[1],
186  xyz[2] - geometric_center[2]),
187  n+1, atom_type, residue_type,
188  chain_id, residue_index,' ',1.00,radius))
189  flpdb.write("ENDMDL\n")
190  flpdb.close()
191 
192  del particle_infos_for_pdb
193 
194  def get_prot_name_from_particle(self, name, p):
195  """Get the protein name from the particle.
196  This is done by traversing the hierarchy."""
197  if self.use_pmi2:
198  return IMP.pmi.get_molecule_name_and_copy(p), True
199  else:
201  p, self.dictchain[name])
202 
203  def get_particle_infos_for_pdb_writing(self, name):
204  # index_residue_pair_list={}
205 
206  # the resindexes dictionary keep track of residues that have been already
207  # added to avoid duplication
208  # highest resolution have highest priority
209  resindexes_dict = {}
210 
211  # this dictionary dill contain the sequence of tuples needed to
212  # write the pdb
213  particle_infos_for_pdb = []
214 
215  geometric_center = [0, 0, 0]
216  atom_count = 0
217  atom_index = 0
218 
219  if self.use_pmi2:
220  # select highest resolution
221  ps = IMP.atom.Selection(self.dictionary_pdbs[name],resolution=0).get_selected_particles()
222  else:
223  ps = IMP.atom.get_leaves(self.dictionary_pdbs[name])
224 
225  for n, p in enumerate(ps):
226  protname, is_a_bead = self.get_prot_name_from_particle(name, p)
227 
228  if protname not in resindexes_dict:
229  resindexes_dict[protname] = []
230 
231  if IMP.atom.Atom.get_is_setup(p) and self.atomistic:
232  residue = IMP.atom.Residue(IMP.atom.Atom(p).get_parent())
233  rt = residue.get_residue_type()
234  resind = residue.get_index()
235  atomtype = IMP.atom.Atom(p).get_atom_type()
236  xyz = list(IMP.core.XYZ(p).get_coordinates())
237  radius = IMP.core.XYZR(p).get_radius()
238  geometric_center[0] += xyz[0]
239  geometric_center[1] += xyz[1]
240  geometric_center[2] += xyz[2]
241  atom_count += 1
242  particle_infos_for_pdb.append((xyz,
243  atomtype, rt, self.dictchain[name][protname], resind, None, radius))
244  resindexes_dict[protname].append(resind)
245 
247 
248  residue = IMP.atom.Residue(p)
249  resind = residue.get_index()
250  # skip if the residue was already added by atomistic resolution
251  # 0
252  if resind in resindexes_dict[protname]:
253  continue
254  else:
255  resindexes_dict[protname].append(resind)
256  rt = residue.get_residue_type()
257  xyz = IMP.core.XYZ(p).get_coordinates()
258  radius = IMP.core.XYZR(p).get_radius()
259  geometric_center[0] += xyz[0]
260  geometric_center[1] += xyz[1]
261  geometric_center[2] += xyz[2]
262  atom_count += 1
263  particle_infos_for_pdb.append((xyz, None,
264  rt, self.dictchain[name][protname], resind, None, radius))
265 
266  elif IMP.atom.Fragment.get_is_setup(p) and not is_a_bead:
267  resindexes = IMP.pmi.tools.get_residue_indexes(p)
268  resind = resindexes[len(resindexes) // 2]
269  if resind in resindexes_dict[protname]:
270  continue
271  else:
272  resindexes_dict[protname].append(resind)
273  rt = IMP.atom.ResidueType('BEA')
274  xyz = IMP.core.XYZ(p).get_coordinates()
275  radius = IMP.core.XYZR(p).get_radius()
276  geometric_center[0] += xyz[0]
277  geometric_center[1] += xyz[1]
278  geometric_center[2] += xyz[2]
279  atom_count += 1
280  particle_infos_for_pdb.append((xyz, None,
281  rt, self.dictchain[name][protname], resind, resindexes, radius))
282 
283  else:
284  if is_a_bead:
285  rt = IMP.atom.ResidueType('BEA')
286  resindexes = IMP.pmi.tools.get_residue_indexes(p)
287  if len(resindexes) > 0:
288  resind = resindexes[len(resindexes) // 2]
289  xyz = IMP.core.XYZ(p).get_coordinates()
290  radius = IMP.core.XYZR(p).get_radius()
291  geometric_center[0] += xyz[0]
292  geometric_center[1] += xyz[1]
293  geometric_center[2] += xyz[2]
294  atom_count += 1
295  particle_infos_for_pdb.append((xyz, None,
296  rt, self.dictchain[name][protname], resind, resindexes, radius))
297 
298  if atom_count > 0:
299  geometric_center = (geometric_center[0] / atom_count,
300  geometric_center[1] / atom_count,
301  geometric_center[2] / atom_count)
302 
303  particle_infos_for_pdb = sorted(particle_infos_for_pdb, key=operator.itemgetter(3, 4))
304 
305  return (particle_infos_for_pdb, geometric_center)
306 
307 
308  def write_pdbs(self, appendmode=True):
309  for pdb in self.dictionary_pdbs.keys():
310  self.write_pdb(pdb, appendmode)
311 
    def init_pdb_best_scoring(self,
                              suffix,
                              prot,
                              nbestscoring,
                              replica_exchange=False):
        """Prepare to keep only the *nbestscoring* best-scoring models,
        creating one (initially empty) PDB file per rank, named
        suffix.<i>.pdb.
        @param suffix prefix for the output PDB filenames
        @param prot the hierarchy to write
        @param nbestscoring how many best-scoring models to keep
        @param replica_exchange if True, replicas share scores through
               a common file
        """
        # save only the nbestscoring conformations
        # create as many pdbs as needed

        self.suffixes.append(suffix)
        self.replica_exchange = replica_exchange
        if not self.replica_exchange:
            # common usage
            # if you are not in replica exchange mode
            # initialize the array of scores internally
            self.best_score_list = []
        else:
            # otherwise the replicas must communicate
            # through a common file to know what are the best scores
            self.best_score_file_name = "best.scores.rex.py"
            self.best_score_list = []
            best_score_file = open(self.best_score_file_name, "w")
            best_score_file.write(
                "self.best_score_list=" + str(self.best_score_list))
            best_score_file.close()

        self.nbestscoring = nbestscoring
        for i in range(self.nbestscoring):
            name = suffix + "." + str(i) + ".pdb"
            flpdb = open(name, 'w')
            flpdb.close()
            self.dictionary_pdbs[name] = prot
            self._init_dictchain(name, prot)
344 
    def write_pdb_best_scoring(self, score):
        """If *score* ranks among the best seen so far, insert the
        current model into the ranked best-scoring PDB files, shifting
        worse-ranked files down by one.
        @param score score of the current model (lower is better)
        """
        if self.nbestscoring is None:
            print("Output.write_pdb_best_scoring: init_pdb_best_scoring not run")

        # update the score list
        if self.replica_exchange:
            # read the self.best_score_list from the file
            # NOTE(review): replicas communicate by exec'ing a shared
            # Python file written below; trusted input only
            exec(open(self.best_score_file_name).read())

        if len(self.best_score_list) < self.nbestscoring:
            # list not yet full: always insert the new score
            self.best_score_list.append(score)
            self.best_score_list.sort()
            index = self.best_score_list.index(score)
            for suffix in self.suffixes:
                # shift files at rank >= index down by one
                for i in range(len(self.best_score_list) - 2, index - 1, -1):
                    oldname = suffix + "." + str(i) + ".pdb"
                    newname = suffix + "." + str(i + 1) + ".pdb"
                    # rename on Windows fails if newname already exists
                    if os.path.exists(newname):
                        os.unlink(newname)
                    os.rename(oldname, newname)
                filetoadd = suffix + "." + str(index) + ".pdb"
                self.write_pdb(filetoadd, appendmode=False)

        else:
            if score < self.best_score_list[-1]:
                # new score beats the worst kept model: replace it
                self.best_score_list.append(score)
                self.best_score_list.sort()
                self.best_score_list.pop(-1)
                index = self.best_score_list.index(score)
                for suffix in self.suffixes:
                    for i in range(len(self.best_score_list) - 1, index - 1, -1):
                        oldname = suffix + "." + str(i) + ".pdb"
                        newname = suffix + "." + str(i + 1) + ".pdb"
                        os.rename(oldname, newname)
                    # the worst model falls off the end of the list
                    filenametoremove = suffix + \
                        "." + str(self.nbestscoring) + ".pdb"
                    os.remove(filenametoremove)
                    filetoadd = suffix + "." + str(index) + ".pdb"
                    self.write_pdb(filetoadd, appendmode=False)

        if self.replica_exchange:
            # write the self.best_score_list to the file
            best_score_file = open(self.best_score_file_name, "w")
            best_score_file.write(
                "self.best_score_list=" + str(self.best_score_list))
            best_score_file.close()
392 
393  def init_rmf(self, name, hierarchies, rs=None, geometries=None):
394  rh = RMF.create_rmf_file(name)
395  IMP.rmf.add_hierarchies(rh, hierarchies)
396  if rs is not None:
398  if geometries is not None:
399  IMP.rmf.add_geometries(rh,geometries)
400  self.dictionary_rmfs[name] = rh
401 
402  def add_restraints_to_rmf(self, name, objectlist):
403  flatobjectlist=_flatten(objectlist)
404  for o in flatobjectlist:
405  try:
406  rs = o.get_restraint_for_rmf()
407  except:
408  rs = o.get_restraint()
410  self.dictionary_rmfs[name],
411  rs.get_restraints())
412 
413  def add_geometries_to_rmf(self, name, objectlist):
414  for o in objectlist:
415  geos = o.get_geometries()
416  IMP.rmf.add_geometries(self.dictionary_rmfs[name], geos)
417 
418  def add_particle_pair_from_restraints_to_rmf(self, name, objectlist):
419  for o in objectlist:
420 
421  pps = o.get_particle_pairs()
422  for pp in pps:
424  self.dictionary_rmfs[name],
426 
427  def write_rmf(self, name):
428  IMP.rmf.save_frame(self.dictionary_rmfs[name])
429  self.dictionary_rmfs[name].flush()
430 
431  def close_rmf(self, name):
432  del self.dictionary_rmfs[name]
433 
434  def write_rmfs(self):
435  for rmf in self.dictionary_rmfs.keys():
436  self.write_rmf(rmf)
437 
438  def init_stat(self, name, listofobjects):
439  if self.ascii:
440  flstat = open(name, 'w')
441  flstat.close()
442  else:
443  flstat = open(name, 'wb')
444  flstat.close()
445 
446  # check that all objects in listofobjects have a get_output method
447  for l in listofobjects:
448  if not "get_output" in dir(l):
449  raise ValueError("Output: object %s doesn't have get_output() method" % str(l))
450  self.dictionary_stats[name] = listofobjects
451 
452  def set_output_entry(self, key, value):
453  self.initoutput.update({key: value})
454 
455  def write_stat(self, name, appendmode=True):
456  output = self.initoutput
457  for obj in self.dictionary_stats[name]:
458  d = obj.get_output()
459  # remove all entries that begin with _ (private entries)
460  dfiltered = dict((k, v) for k, v in d.items() if k[0] != "_")
461  output.update(dfiltered)
462 
463  if appendmode:
464  writeflag = 'a'
465  else:
466  writeflag = 'w'
467 
468  if self.ascii:
469  flstat = open(name, writeflag)
470  flstat.write("%s \n" % output)
471  flstat.close()
472  else:
473  flstat = open(name, writeflag + 'b')
474  cPickle.dump(output, flstat, 2)
475  flstat.close()
476 
477  def write_stats(self):
478  for stat in self.dictionary_stats.keys():
479  self.write_stat(stat)
480 
481  def get_stat(self, name):
482  output = {}
483  for obj in self.dictionary_stats[name]:
484  output.update(obj.get_output())
485  return output
486 
487  def write_test(self, name, listofobjects):
488 # write the test:
489 # output=output.Output()
490 # output.write_test("test_modeling11_models.rmf_45492_11Sep13_veena_imp-020713.dat",outputobjects)
491 # run the test:
492 # output=output.Output()
493 # output.test("test_modeling11_models.rmf_45492_11Sep13_veena_imp-020713.dat",outputobjects)
494  flstat = open(name, 'w')
495  output = self.initoutput
496  for l in listofobjects:
497  if not "get_test_output" in dir(l) and not "get_output" in dir(l):
498  raise ValueError("Output: object %s doesn't have get_output() or get_test_output() method" % str(l))
499  self.dictionary_stats[name] = listofobjects
500 
501  for obj in self.dictionary_stats[name]:
502  try:
503  d = obj.get_test_output()
504  except:
505  d = obj.get_output()
506  # remove all entries that begin with _ (private entries)
507  dfiltered = dict((k, v) for k, v in d.items() if k[0] != "_")
508  output.update(dfiltered)
509  #output.update({"ENVIRONMENT": str(self.get_environment_variables())})
510  #output.update(
511  # {"IMP_VERSIONS": str(self.get_versions_of_relevant_modules())})
512  flstat.write("%s \n" % output)
513  flstat.close()
514 
    def test(self, name, listofobjects, tolerance=1e-5):
        """Compare current object outputs against the reference file
        written by write_test().
        @param name the reference filename
        @param listofobjects objects providing get_test_output() or
               get_output()
        @param tolerance absolute tolerance for numeric comparisons
        @return True if every recorded key matches, False otherwise
        """
        output = self.initoutput
        for l in listofobjects:
            if not "get_test_output" in dir(l) and not "get_output" in dir(l):
                raise ValueError("Output: object %s doesn't have get_output() or get_test_output() method" % str(l))
        for obj in listofobjects:
            # prefer the dedicated test output when available
            try:
                output.update(obj.get_test_output())
            except:
                output.update(obj.get_output())
        #output.update({"ENVIRONMENT": str(self.get_environment_variables())})
        #output.update(
        #    {"IMP_VERSIONS": str(self.get_versions_of_relevant_modules())})

        flstat = open(name, 'r')

        passed = True
        for l in flstat:
            test_dict = ast.literal_eval(l)
            for k in test_dict:
                if k in output:
                    old_value = str(test_dict[k])
                    new_value = str(output[k])
                    # decide whether the recorded value is numeric
                    try:
                        float(old_value)
                        is_float = True
                    except ValueError:
                        is_float = False

                    if is_float:
                        # numeric values compare within tolerance
                        fold = float(old_value)
                        fnew = float(new_value)
                        diff = abs(fold - fnew)
                        if diff > tolerance:
                            print("%s: test failed, old value: %s new value %s; "
                                  "diff %f > %f" % (str(k), str(old_value),
                                                    str(new_value), diff,
                                                    tolerance), file=sys.stderr)
                            passed = False
                    elif test_dict[k] != output[k]:
                        # non-numeric values must match exactly
                        if len(old_value) < 50 and len(new_value) < 50:
                            print("%s: test failed, old value: %s new value %s"
                                  % (str(k), old_value, new_value), file=sys.stderr)
                            passed = False
                        else:
                            print("%s: test failed, omitting results (too long)"
                                  % str(k), file=sys.stderr)
                            passed = False

                else:
                    # key recorded in the reference but absent now; note
                    # that this does not set passed to False
                    print("%s from old objects (file %s) not in new objects"
                          % (str(k), str(name)), file=sys.stderr)
        return passed
568 
569  def get_environment_variables(self):
570  import os
571  return str(os.environ)
572 
573  def get_versions_of_relevant_modules(self):
574  import IMP
575  versions = {}
576  versions["IMP_VERSION"] = IMP.get_module_version()
577  try:
578  import IMP.pmi
579  versions["PMI_VERSION"] = IMP.pmi.get_module_version()
580  except (ImportError):
581  pass
582  try:
583  import IMP.isd2
584  versions["ISD2_VERSION"] = IMP.isd2.get_module_version()
585  except (ImportError):
586  pass
587  try:
588  import IMP.isd_emxl
589  versions["ISD_EMXL_VERSION"] = IMP.isd_emxl.get_module_version()
590  except (ImportError):
591  pass
592  return versions
593 
594 #-------------------
    def init_stat2(
            self,
            name,
            listofobjects,
            extralabels=None,
            listofsummedobjects=None):
        """Initialize a v2 ("stat2") file: one header line maps integer
        column ids to key names, so later records are much smaller.
        @param name the stat2 filename
        @param listofobjects objects with a get_output() method
        @param extralabels extra keys (set via set_output_entry()) to record
        @param listofsummedobjects list of ([obj1, obj2, ...], label)
               tuples whose _TotalScore entries are summed under label
        """
        # this is a new stat file that should be less
        # space greedy!
        # listofsummedobjects must be in the form [([obj1,obj2,obj3,obj4...],label)]
        # extralabels

        if listofsummedobjects is None:
            listofsummedobjects = []
        if extralabels is None:
            extralabels = []
        flstat = open(name, 'w')
        output = {}
        stat2_keywords = {"STAT2HEADER": "STAT2HEADER"}
        stat2_keywords.update(
            {"STAT2HEADER_ENVIRON": str(self.get_environment_variables())})
        stat2_keywords.update(
            {"STAT2HEADER_IMP_VERSIONS": str(self.get_versions_of_relevant_modules())})
        stat2_inverse = {}

        for l in listofobjects:
            if not "get_output" in dir(l):
                raise ValueError("Output: object %s doesn't have get_output() method" % str(l))
            else:
                d = l.get_output()
                # remove all entries that begin with _ (private entries)
                dfiltered = dict((k, v)
                                 for k, v in d.items() if k[0] != "_")
                output.update(dfiltered)

        # check for customizable entries
        for l in listofsummedobjects:
            for t in l[0]:
                if not "get_output" in dir(t):
                    raise ValueError("Output: object %s doesn't have get_output() method" % str(t))
                else:
                    if "_TotalScore" not in t.get_output():
                        raise ValueError("Output: object %s doesn't have _TotalScore entry to be summed" % str(t))
                    else:
                        output.update({l[1]: 0.0})

        for k in extralabels:
            output.update({k: 0.0})

        # number every key; records are later written as {id: value}
        for n, k in enumerate(output):
            stat2_keywords.update({n: k})
            stat2_inverse.update({k: n})

        flstat.write("%s \n" % stat2_keywords)
        flstat.close()
        self.dictionary_stats2[name] = (
            listofobjects,
            stat2_inverse,
            listofsummedobjects,
            extralabels)
654 
    def write_stat2(self, name, appendmode=True):
        """Append one record to a stat2 file prepared with init_stat2();
        keys are written as their integer column ids.
        @param appendmode append to the file rather than overwrite
        """
        output = {}
        (listofobjects, stat2_inverse, listofsummedobjects,
         extralabels) = self.dictionary_stats2[name]

        # writing objects
        for obj in listofobjects:
            od = obj.get_output()
            # skip private entries (leading underscore)
            dfiltered = dict((k, v) for k, v in od.items() if k[0] != "_")
            for k in dfiltered:
                output.update({stat2_inverse[k]: od[k]})

        # writing summedobjects
        for l in listofsummedobjects:
            partial_score = 0.0
            for t in l[0]:
                d = t.get_output()
                partial_score += float(d["_TotalScore"])
            output.update({stat2_inverse[l[1]]: str(partial_score)})

        # writing extralabels
        for k in extralabels:
            if k in self.initoutput:
                output.update({stat2_inverse[k]: self.initoutput[k]})
            else:
                output.update({stat2_inverse[k]: "None"})

        if appendmode:
            writeflag = 'a'
        else:
            writeflag = 'w'

        flstat = open(name, writeflag)
        flstat.write("%s \n" % output)
        flstat.close()
690 
691  def write_stats2(self):
692  for stat in self.dictionary_stats2.keys():
693  self.write_stat2(stat)
694 
695 
696 class ProcessOutput(object):
697  """A class for reading stat files"""
    def __init__(self, filename):
        """Open *filename*, read its first line, and determine whether
        it is a (deprecated) stat1 or a stat2 file, populating
        self.klist with the available field names.
        @param filename the stat file to read
        """
        self.filename = filename
        self.isstat1 = False
        self.isstat2 = False

        # open the file
        if not self.filename is None:
            f = open(self.filename, "r")
        else:
            raise ValueError("No file name provided. Use -h for help")

        # get the keys from the first line
        for line in f.readlines():
            d = ast.literal_eval(line)
            self.klist = list(d.keys())
            # check if it is a stat2 file
            if "STAT2HEADER" in self.klist:
                self.isstat2 = True
                # drop the STAT2HEADER* bookkeeping entries, leaving
                # only the id -> key-name mapping
                for k in self.klist:
                    if "STAT2HEADER" in str(k):
                        # if print_header: print k, d[k]
                        del d[k]
                stat2_dict = d
                # get the list of keys sorted by value
                kkeys = [k[0]
                         for k in sorted(stat2_dict.items(), key=operator.itemgetter(1))]
                self.klist = [k[1]
                              for k in sorted(stat2_dict.items(), key=operator.itemgetter(1))]
                # build the inverse mapping: key name -> integer column id
                self.invstat2_dict = {}
                for k in kkeys:
                    self.invstat2_dict.update({stat2_dict[k]: k})
            else:
                IMP.handle_use_deprecated("statfile v1 is deprecated. "
                                          "Please convert to statfile v2.\n")
                self.isstat1 = True
                self.klist.sort()

            # only the header (first) line is needed
            break
        f.close()
737 
    def get_keys(self):
        """Return the list of field names found in the stat file."""
        return self.klist
740 
    def show_keys(self, ncolumns=2, truncate=65):
        """Print the available field names in *ncolumns* columns,
        truncating each name to *truncate* characters."""
        IMP.pmi.tools.print_multicolumn(self.get_keys(), ncolumns, truncate)
743 
    def get_fields(self, fields, filtertuple=None, filterout=None, get_every=1):
        '''
        Get the desired field names, and return a dictionary.
        Namely, "fields" are the queried keys in the stat file (eg. ["Total_Score",...])
        The returned data structure is a dictionary, where each key is a field and the value
        is the time series (ie, frame ordered series)
        of that field (ie, {"Total_Score":[Score_0,Score_1,Score_2,Score_3,...],....} )

        @param fields (list of strings) queried keys in the stat file (eg. "Total_Score"....)
        @param filterout specify if you want to "grep" out something from
               the file, so that it is faster
        @param filtertuple a tuple that contains
               ("TheKeyToBeFiltered",relationship,value)
               where relationship = "<", "==", or ">"
        @param get_every only read every Nth line from the file
        '''

        outdict = {}
        for field in fields:
            outdict[field] = []

        # print fields values
        f = open(self.filename, "r")
        line_number = 0

        for line in f.readlines():
            # cheap textual pre-filter before parsing the line
            if not filterout is None:
                if filterout in line:
                    continue
            line_number += 1

            if line_number % get_every != 0:
                continue
            #if line_number % 1000 == 0:
            #    print "ProcessOutput.get_fields: read line %s from file %s" % (str(line_number), self.filename)
            try:
                d = ast.literal_eval(line)
            except:
                print("# Warning: skipped line number " + str(line_number) + " not a valid line")
                continue

            if self.isstat1:

                # apply the optional numeric filter on the raw key
                if not filtertuple is None:
                    keytobefiltered = filtertuple[0]
                    relationship = filtertuple[1]
                    value = filtertuple[2]
                    if relationship == "<":
                        if float(d[keytobefiltered]) >= value:
                            continue
                    if relationship == ">":
                        if float(d[keytobefiltered]) <= value:
                            continue
                    if relationship == "==":
                        if float(d[keytobefiltered]) != value:
                            continue
                [outdict[field].append(d[field]) for field in fields]

            elif self.isstat2:
                # skip the stat2 header line
                if line_number == 1:
                    continue

                # same filter, but keys must be translated to their
                # integer column ids via invstat2_dict
                if not filtertuple is None:
                    keytobefiltered = filtertuple[0]
                    relationship = filtertuple[1]
                    value = filtertuple[2]
                    if relationship == "<":
                        if float(d[self.invstat2_dict[keytobefiltered]]) >= value:
                            continue
                    if relationship == ">":
                        if float(d[self.invstat2_dict[keytobefiltered]]) <= value:
                            continue
                    if relationship == "==":
                        if float(d[self.invstat2_dict[keytobefiltered]]) != value:
                            continue

                [outdict[field].append(d[self.invstat2_dict[field]])
                 for field in fields]
        f.close()
        return outdict
824 
825 
826 
class CrossLinkIdentifierDatabase(object):
    """Pickleable store of per-crosslink attributes.

    Each cross-link is identified by a caller-chosen key; typed
    attributes (protein names, residue indices, scores, nuisance
    values, and arbitrary named features) are kept in a nested dict.
    """

    def __init__(self):
        self.clidb = dict()  # key -> {attribute name: value}

    def check_key(self, key):
        """Create an empty record for *key* if it is not present."""
        self.clidb.setdefault(key, {})

    def _set(self, key, field, value):
        # shared setter: ensure the record exists, then store the value
        self.check_key(key)
        self.clidb[key][field] = value

    def set_unique_id(self, key, value):
        self._set(key, "XLUniqueID", str(value))

    def set_protein1(self, key, value):
        self._set(key, "Protein1", str(value))

    def set_protein2(self, key, value):
        self._set(key, "Protein2", str(value))

    def set_residue1(self, key, value):
        self._set(key, "Residue1", int(value))

    def set_residue2(self, key, value):
        self._set(key, "Residue2", int(value))

    def set_idscore(self, key, value):
        self._set(key, "IDScore", float(value))

    def set_state(self, key, value):
        self._set(key, "State", int(value))

    def set_sigma1(self, key, value):
        self._set(key, "Sigma1", str(value))

    def set_sigma2(self, key, value):
        self._set(key, "Sigma2", str(value))

    def set_psi(self, key, value):
        self._set(key, "Psi", str(value))

    def get_unique_id(self, key):
        return self.clidb[key]["XLUniqueID"]

    def get_protein1(self, key):
        return self.clidb[key]["Protein1"]

    def get_protein2(self, key):
        return self.clidb[key]["Protein2"]

    def get_residue1(self, key):
        return self.clidb[key]["Residue1"]

    def get_residue2(self, key):
        return self.clidb[key]["Residue2"]

    def get_idscore(self, key):
        return self.clidb[key]["IDScore"]

    def get_state(self, key):
        return self.clidb[key]["State"]

    def get_sigma1(self, key):
        return self.clidb[key]["Sigma1"]

    def get_sigma2(self, key):
        return self.clidb[key]["Sigma2"]

    def get_psi(self, key):
        return self.clidb[key]["Psi"]

    def set_float_feature(self, key, value, feature_name):
        self._set(key, feature_name, float(value))

    def set_int_feature(self, key, value, feature_name):
        self._set(key, feature_name, int(value))

    def set_string_feature(self, key, value, feature_name):
        self._set(key, feature_name, str(value))

    def get_feature(self, key, feature_name):
        return self.clidb[key][feature_name]

    def write(self, filename):
        """Pickle the database to *filename*."""
        import pickle
        with open(filename, 'wb') as handle:
            pickle.dump(self.clidb, handle)

    def load(self, filename):
        """Replace the database contents with those pickled in *filename*."""
        import pickle
        with open(filename, 'rb') as handle:
            self.clidb = pickle.load(handle)
929 
def plot_fields(fields, framemin=None, framemax=None):
    """Plot each time series in *fields* as one subplot per key and
    show the resulting figure.
    @param fields dict mapping a label to a list of values
    @param framemin first frame to plot (defaults to 0)
    @param framemax last frame to plot (defaults to the series length)
    """
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt

    plt.rc('lines', linewidth=4)
    fig, axs = plt.subplots(nrows=len(fields))
    fig.set_size_inches(10.5, 5.5 * len(fields))
    # NOTE(review): 'axes.color_cycle' is removed in newer matplotlib
    # ('axes.prop_cycle' replaces it) -- confirm the targeted version
    plt.rc('axes', color_cycle=['r'])

    n = 0
    for key in fields:
        if framemin is None:
            framemin = 0
        if framemax is None:
            framemax = len(fields[key])
        x = list(range(framemin, framemax))
        y = [float(y) for y in fields[key][framemin:framemax]]
        # with a single subplot, plt.subplots returns a bare Axes
        # rather than an array, hence the two branches
        if len(fields) > 1:
            axs[n].plot(x, y)
            axs[n].set_title(key, size="xx-large")
            axs[n].tick_params(labelsize=18, pad=10)
        else:
            axs.plot(x, y)
            axs.set_title(key, size="xx-large")
            axs.tick_params(labelsize=18, pad=10)
        n += 1

    # Tweak spacing between subplots to prevent labels from overlapping
    plt.subplots_adjust(hspace=0.3)
    plt.show()
961 
962 
def plot_field_histogram(
        name, values_lists, valuename=None, bins=40, colors=None,
        format="png", reference_xline=None, yplotrange=None,
        xplotrange=None, normalized=True, leg_names=None):
    '''Plot a list of histograms from a value list.
    @param name the name of the plot
    @param values_lists the list of list of values eg: [[...],[...],[...]]
    @param valuename the y-label
    @param bins the number of bins
    @param colors If None, will use rainbow. Else will use specific list
    @param format output format
    @param reference_xline plot a reference line parallel to the y-axis
    @param yplotrange the range for the y-axis
    @param xplotrange the range for the x-axis
    @param normalized whether the histogram is normalized or not
    @param leg_names names for the legend
    '''
    # NOTE(review): the function header line was lost in the copy under
    # review; restored to match the upstream IMP.pmi signature.

    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    plt.figure(figsize=(18.0, 9.0))

    if colors is None:
        colors = cm.rainbow(np.linspace(0, 1, len(values_lists)))
    for nv, values in enumerate(values_lists):
        col = colors[nv]
        if leg_names is not None:
            label = leg_names[nv]
        else:
            label = str(nv)
        plt.hist(
            [float(y) for y in values],
            bins=bins,
            color=col,
            normed=normalized, histtype='step', lw=4,
            label=label)

    # plt.title(name,size="xx-large")
    plt.tick_params(labelsize=12, pad=10)
    if valuename is None:
        plt.xlabel(name, size="xx-large")
    else:
        plt.xlabel(valuename, size="xx-large")
    plt.ylabel("Frequency", size="xx-large")

    # BUGFIX: plt.ylim() was called without arguments, silently
    # ignoring the requested yplotrange
    if yplotrange is not None:
        plt.ylim(yplotrange)
    if xplotrange is not None:
        plt.xlim(xplotrange)

    plt.legend(loc=2)

    if reference_xline is not None:
        plt.axvline(
            reference_xline,
            color='red',
            linestyle='dashed',
            linewidth=1)

    plt.savefig(name + "." + format, dpi=150, transparent=True)
    plt.show()
1026 
1027 
def plot_fields_box_plots(name, values, positions, frequencies=None,
                          valuename="None", positionname="None",
                          xlabels=None, scale_plot_length=1.0):
    '''
    Plot time series as boxplots.
    `values` is a list of time series, `positions` are the x-values,
    `valuename` is the y-label, `positionname` is the x-label.
    The figure is saved to `name` + ".pdf".
    '''

    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    from matplotlib.patches import Polygon

    box_sets = []
    # Figure width scales with the number of positions so that crowded
    # x-axes remain readable.
    fig = plt.figure(
        figsize=(float(len(positions)) * scale_plot_length, 5.0))
    fig.canvas.set_window_title(name)

    ax1 = fig.add_subplot(111)

    plt.subplots_adjust(left=0.1, right=0.990, top=0.95, bottom=0.4)

    box_sets.append(plt.boxplot(values, notch=0, sym='', vert=1,
                                whis=1.5, positions=positions))

    plt.setp(box_sets[-1]['boxes'], color='black', lw=1.5)
    plt.setp(box_sets[-1]['whiskers'], color='black', ls=":", lw=1.5)

    # Overlay the raw data points as green crosses at each position.
    if frequencies is not None:
        for idx, series in enumerate(values):
            xs = [positions[idx]] * len(series)
            ax1.plot(xs, series, 'gx', alpha=0.7, markersize=7)

    if xlabels is not None:
        ax1.set_xticklabels(xlabels)
    plt.xticks(rotation=90)
    plt.xlabel(positionname)
    plt.ylabel(valuename)

    plt.savefig(name + ".pdf", dpi=150)
    plt.show()
1069 
1070 
def plot_xy_data(x, y, title=None, out_fn=None, display=True,
                 set_plot_yaxis_range=None, xlabel=None, ylabel=None):
    """Draw a single red x/y line plot.

    Optionally saves the figure to `out_fn` + ".pdf" and/or shows it,
    then closes the figure to free its memory.
    """
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt

    plt.rc('lines', linewidth=2)
    fig, ax = plt.subplots(nrows=1)
    fig.set_size_inches(8, 4.5)
    if title is not None:
        fig.canvas.set_window_title(title)

    ax.plot(x, y, color='r')
    if set_plot_yaxis_range is not None:
        # Keep the autoscaled x-range but force the requested y-range.
        xlo, xhi, _, _ = plt.axis()
        plt.axis((xlo, xhi,
                  set_plot_yaxis_range[0], set_plot_yaxis_range[1]))
    if title is not None:
        ax.set_title(title)
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    if out_fn is not None:
        plt.savefig(out_fn + ".pdf")
    if display:
        plt.show()
    plt.close(fig)
1101 
def plot_scatter_xy_data(x, y, labelx="None", labely="None",
                         xmin=None, xmax=None, ymin=None, ymax=None,
                         savefile=False, filename="None.eps", alpha=0.75):
    """Draw an x/y scatter plot.

    @param x,y data sequences
    @param labelx,labely axis labels
    @param xmin,xmax,ymin,ymax optional axis limits (each pair must be
           fully specified to take effect)
    @param savefile if True, write the figure to `filename`
    @param alpha marker transparency
    """
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    from matplotlib import rc
    # rc('font', **{'family':'serif','serif':['Palatino']})
    rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
    # rc('text', usetex=True)

    fig, axs = plt.subplots(1)

    axs0 = axs

    axs0.set_xlabel(labelx, size="xx-large")
    axs0.set_ylabel(labely, size="xx-large")
    axs0.tick_params(labelsize=18, pad=10)

    plot2 = []

    # NOTE(review): c="w" overrides color='k' here, so the markers are
    # drawn white; kept as-is to preserve the existing plot appearance.
    plot2.append(axs0.plot(x, y, 'o', color='k', lw=2, ms=0.1,
                           alpha=alpha, c="w"))

    axs0.legend(
        loc=0,
        frameon=False,
        scatterpoints=1,
        numpoints=1,
        columnspacing=1)

    fig.set_size_inches(8.0, 8.0)
    fig.subplots_adjust(left=0.161, right=0.850, top=0.95, bottom=0.11)
    if (ymin is not None) and (ymax is not None):
        axs0.set_ylim(ymin, ymax)
    if (xmin is not None) and (xmax is not None):
        axs0.set_xlim(xmin, xmax)

    # plt.show()
    if savefile:
        fig.savefig(filename, dpi=300)
1144 
1145 
def get_graph_from_hierarchy(hier):
    """Build an edge graph from an IMP hierarchy and draw it.

    Node labels are blanked for nodes deeper than 2 levels so the
    resulting plot stays readable.
    """
    graph = []
    depth_dict = {}
    depth = 0
    (graph, depth, depth_dict) = recursive_graph(
        hier, graph, depth, depth_dict)

    # filters node labels according to depth_dict
    node_labels_dict = {}
    node_size_dict = {}
    for key in depth_dict:
        # Bug fix: the original assigned the scalar to the whole dict
        # (node_size_dict = 10 / depth_dict[key]), clobbering it on each
        # iteration instead of recording one size per node.
        node_size_dict[key] = 10 / depth_dict[key]
        if depth_dict[key] < 3:
            node_labels_dict[key] = key
        else:
            node_labels_dict[key] = ""
    draw_graph(graph, labels_dict=node_labels_dict)
1163 
1164 
def recursive_graph(hier, graph, depth, depth_dict):
    """Recursively collect (parent, child) name edges from an IMP hierarchy.

    Each node name is "<hierarchy name>|#<particle index>". `depth_dict`
    maps each visited node name to its depth. Recursion stops at nodes
    with a single child (treated as a leaf representation).

    @return (graph, depth, depth_dict) with `graph` extended in place.
    """
    depth = depth + 1
    nameh = IMP.atom.Hierarchy(hier).get_name()
    index = str(hier.get_particle().get_index())
    name1 = nameh + "|#" + index
    depth_dict[name1] = depth

    children = IMP.atom.Hierarchy(hier).get_children()

    # Bug fix: test for None before calling len(); the original evaluated
    # len(children) first, which would raise TypeError if children were
    # ever None instead of taking the early-return branch.
    if children is None or len(children) == 1:
        depth = depth - 1
        return (graph, depth, depth_dict)

    else:
        for c in children:
            (graph, depth, depth_dict) = recursive_graph(
                c, graph, depth, depth_dict)
            nameh = IMP.atom.Hierarchy(c).get_name()
            index = str(c.get_particle().get_index())
            namec = nameh + "|#" + index
            graph.append((name1, namec))

        depth = depth - 1
        return (graph, depth, depth_dict)
1189 
1190 
def draw_graph(graph, labels_dict=None, graph_layout='spring',
               node_size=5, node_color=None, node_alpha=0.3,
               node_text_size=11, fixed=None, pos=None,
               edge_color='blue', edge_alpha=0.3, edge_thickness=1,
               edge_text_pos=0.3,
               validation_edges=None,
               text_font='sans-serif',
               out_filename=None):
    """Draw a graph of (node, node) edge tuples with networkx.

    Also writes the annotated graph to 'out.gml' and, if `out_filename`
    is given, saves the matplotlib figure there. `node_color` maps node
    name to a hex color code; `node_size` may be a scalar or a dict of
    per-node areas. Edges in `validation_edges` found in the graph are
    marked green in the GML output, missing ones are added and marked red.
    """
    import matplotlib as mpl
    mpl.use('Agg')
    import networkx as nx
    import matplotlib.pyplot as plt
    from math import sqrt, pi

    # create networkx graph
    G = nx.Graph()

    # add edges (optionally with per-edge weights)
    if type(edge_thickness) is list:
        for edge, weight in zip(graph, edge_thickness):
            G.add_edge(edge[0], edge[1], weight=weight)
    else:
        for edge in graph:
            G.add_edge(edge[0], edge[1])

    if node_color is None:
        node_color_rgb = (0, 0, 0)
        node_color_hex = "000000"
    else:
        # Translate per-node hex color codes into 0-1 RGB tuples.
        cc = IMP.pmi.tools.ColorChange()
        tmpcolor_rgb = []
        tmpcolor_hex = []
        for node in G.nodes():
            cctuple = cc.rgb(node_color[node])
            tmpcolor_rgb.append(
                (cctuple[0] / 255, cctuple[1] / 255, cctuple[2] / 255))
            tmpcolor_hex.append(node_color[node])
        node_color_rgb = tmpcolor_rgb
        node_color_hex = tmpcolor_hex

    # get node sizes if dictionary
    if type(node_size) is dict:
        tmpsize = []
        for node in G.nodes():
            size = sqrt(node_size[node]) / pi * 10.0
            tmpsize.append(size)
        node_size = tmpsize

    for n, node in enumerate(G.nodes()):
        # Bug fix: with the scalar defaults the original indexed the hex
        # string ("000000"[n]) and the int (5[n], a TypeError); support
        # both scalars and per-node lists here.
        if isinstance(node_color_hex, list):
            color = node_color_hex[n]
        else:
            color = node_color_hex
        if isinstance(node_size, list):
            size = node_size[n]
        else:
            size = node_size
        nx.set_node_attributes(
            G, "graphics",
            {node: {'type': 'ellipse', 'w': size, 'h': size,
                    'fill': '#' + color, 'label': node}})
        nx.set_node_attributes(
            G, "LabelGraphics",
            {node: {'type': 'text', 'text': node, 'color': '#000000',
                    'visible': 'true'}})

    for edge in G.edges():
        nx.set_edge_attributes(
            G, "graphics", {edge: {'width': 1, 'fill': '#000000'}})

    # Bug fix: the default validation_edges=None crashed the original
    # loop; only annotate when validation edges were supplied.
    if validation_edges is not None:
        for ve in validation_edges:
            print(ve)
            if (ve[0], ve[1]) in G.edges():
                print("found forward")
                nx.set_edge_attributes(
                    G, "graphics", {ve: {'width': 1, 'fill': '#00FF00'}})
            elif (ve[1], ve[0]) in G.edges():
                print("found backward")
                nx.set_edge_attributes(
                    G, "graphics",
                    {(ve[1], ve[0]): {'width': 1, 'fill': '#00FF00'}})
            else:
                G.add_edge(ve[0], ve[1])
                print("not found")
                nx.set_edge_attributes(
                    G, "graphics", {ve: {'width': 1, 'fill': '#FF0000'}})

    # these are different layouts for the network you may try
    # shell seems to work best
    if graph_layout == 'spring':
        print(fixed, pos)
        graph_pos = nx.spring_layout(G, k=1.0 / 8.0, fixed=fixed, pos=pos)
    elif graph_layout == 'spectral':
        graph_pos = nx.spectral_layout(G)
    elif graph_layout == 'random':
        graph_pos = nx.random_layout(G)
    else:
        graph_pos = nx.shell_layout(G)

    # draw graph
    nx.draw_networkx_nodes(G, graph_pos, node_size=node_size,
                           alpha=node_alpha, node_color=node_color_rgb,
                           linewidths=0)
    nx.draw_networkx_edges(G, graph_pos, width=edge_thickness,
                           alpha=edge_alpha, edge_color=edge_color)
    nx.draw_networkx_labels(
        G, graph_pos, labels=labels_dict, font_size=node_text_size,
        font_family=text_font)
    if out_filename:
        plt.savefig(out_filename)
        nx.write_gml(G, 'out.gml')
    plt.show()
1287 
1288 
def draw_table():
    """Render an example HTML table with d3js inside an IPython notebook.

    Still an example: the data, row, and column headings are hard-coded
    sample values. Requires the ipyD3 package and an IPython display.
    """
    from ipyD3 import d3object
    from IPython.display import display

    d3 = d3object(width=800,
                  height=400,
                  style='JFTable',
                  number=1,
                  d3=None,
                  title='Example table with d3js',
                  desc='An example table created with d3js with data '
                       'generated with Python.')
    # One row per product, one column per month (transposed below).
    data = [
        [1277.0, 654.0, 288.0, 1976.0, 3281.0, 3089.0, 10336.0, 4650.0,
         4441.0, 4670.0, 944.0, 110.0],
        [1318.0, 664.0, 418.0, 1952.0, 3581.0, 4574.0, 11457.0, 6139.0,
         7078.0, 6561.0, 2354.0, 710.0],
        [1783.0, 774.0, 564.0, 1470.0, 3571.0, 3103.0, 9392.0, 5532.0,
         5661.0, 4991.0, 2032.0, 680.0],
        [1301.0, 604.0, 286.0, 2152.0, 3282.0, 3369.0, 10490.0, 5406.0,
         4727.0, 3428.0, 1559.0, 620.0],
        [1537.0, 1714.0, 724.0, 4824.0, 5551.0, 8096.0, 16589.0, 13650.0,
         9552.0, 13709.0, 2460.0, 720.0],
        [5691.0, 2995.0, 1680.0, 11741.0, 16232.0, 14731.0, 43522.0,
         32794.0, 26634.0, 31400.0, 7350.0, 3010.0],
        [1650.0, 2096.0, 60.0, 50.0, 1180.0, 5602.0, 15728.0, 6874.0,
         5115.0, 3510.0, 1390.0, 170.0],
        [72.0, 60.0, 60.0, 10.0, 120.0, 172.0, 1092.0, 675.0, 408.0,
         360.0, 156.0, 100.0]]
    # Transpose so rows become months and columns become products.
    data = [list(i) for i in zip(*data)]
    sRows = [['January',
              'February',
              'March',
              'April',
              'May',
              'June',
              'July',
              'August',
              'September',
              'October',
              'November',
              'December']]
    sColumns = [['Prod {0}'.format(i) for i in range(1, 9)],
                [None, '', None, None, 'Group 1', None, None, 'Group 2']]
    d3.addSimpleTable(data,
                      fontSizeCells=[12, ],
                      sRows=sRows,
                      sColumns=sColumns,
                      sRowsMargins=[5, 50, 0],
                      sColsMargins=[5, 20, 10],
                      spacing=0,
                      addBorders=1,
                      addOutsideBorders=-1,
                      rectWidth=45,
                      rectHeight=0
                      )
    html = d3.render(mode=['html', 'show'])
    display(html)
static bool get_is_setup(const IMP::ParticleAdaptor &p)
Definition: Residue.h:155
A class for reading stat files.
Definition: output.py:696
RMF::FrameID save_frame(RMF::FileHandle file, std::string name="")
Save the current state of the linked objects as a new RMF frame.
static bool get_is_setup(const IMP::ParticleAdaptor &p)
Definition: atom/Atom.h:241
def plot_field_histogram
Plot a list of histograms from a value list.
Definition: output.py:963
def plot_fields_box_plots
Plot time series as boxplots.
Definition: output.py:1028
Miscellaneous utilities.
Definition: tools.py:1
void handle_use_deprecated(std::string message)
std::string get_module_version()
Change color code to hexadecimal to rgb.
Definition: tools.py:1452
void write_pdb(const Selection &mhd, TextOutput out, unsigned int model=1)
def get_prot_name_from_particle
Return the component name provided a particle and a list of names.
Definition: tools.py:1017
def get_fields
Get the desired field names, and return a dictionary.
Definition: output.py:744
static bool get_is_setup(Model *m, ParticleIndex pi)
Definition: Fragment.h:46
std::string get_molecule_name_and_copy(atom::Hierarchy h)
Walk up a PMI2 hierarchy/representations and get the "molname.copynum".
Definition: utilities.h:85
The standard decorator for manipulating molecular structures.
Ints get_index(const ParticlesTemp &particles, const Subset &subset, const Subsets &excluded)
def init_pdb
Init PDB Writing.
Definition: output.py:91
A decorator for a particle representing an atom.
Definition: atom/Atom.h:234
Base class for capturing a modeling protocol.
Definition: output.py:22
The type for a residue.
A decorator for a particle with x,y,z coordinates.
Definition: XYZ.h:30
A base class for Keys.
Definition: Key.h:44
void add_hierarchies(RMF::NodeHandle fh, const atom::Hierarchies &hs)
Class for easy writing of PDBs, RMFs, and stat files.
Definition: output.py:47
void add_geometries(RMF::NodeHandle parent, const display::GeometriesTemp &r)
Add geometries to a given parent node.
void add_restraints(RMF::NodeHandle fh, const Restraints &hs)
bool get_is_canonical(atom::Hierarchy h)
Walk up a PMI2 hierarchy/representations and check if the root is named System.
Definition: utilities.h:91
Display a segment connecting a pair of particles.
Definition: XYZR.h:170
A decorator for a residue.
Definition: Residue.h:134
Basic functionality that is expected to be used by a wide variety of IMP users.
def get_prot_name_from_particle
Get the protein name from the particle.
Definition: output.py:194
void add_geometry(RMF::FileHandle file, display::Geometry *r)
Add a single geometry to the file.
Store info for a chain of a protein.
Definition: Chain.h:21
Python classes to represent, score, sample and analyze models.
Functionality for loading, creating, manipulating and scoring atomic structures.
Hierarchies get_leaves(const Selection &h)
Select hierarchy particles identified by the biological name.
Definition: Selection.h:66
def get_residue_indexes
Retrieve the residue indexes for the given particle.
Definition: tools.py:1037
std::string get_module_version()
def sublist_iterator
Yield all sublists of length >= lmin and <= lmax.
Definition: tools.py:1133
A decorator for a particle with x,y,z coordinates and a radius.
Definition: XYZR.h:27