
Source Code for Module madgraph.interface.madgraph_interface

   1  ################################################################################ 
   2  # 
   3  # Copyright (c) 2009 The MadGraph Development team and Contributors 
   4  # 
   5  # This file is a part of the MadGraph 5 project, an application which  
   6  # automatically generates Feynman diagrams and matrix elements for arbitrary 
   7  # high-energy processes in the Standard Model and beyond. 
   8  # 
   9  # It is subject to the MadGraph license which should accompany this  
  10  # distribution. 
  11  # 
  12  # For more information, please visit: http://madgraph.phys.ucl.ac.be 
  13  # 
  14  ################################################################################ 
  15  """A user friendly command line interface to access MadGraph features at LO. 
  16     Uses the cmd package for command interpretation and tab completion. 
  17  """ 
  18   
  19  import atexit 
  20  import logging 
  21  import optparse 
  22  import os 
  23  import pydoc 
  24  import re 
  25  import subprocess 
  26  import sys 
  27  import shutil 
  28  import traceback 
  29  import time 
  30   
   31  # useful shortcut 
  32  pjoin = os.path.join 
  33   
  34  try: 
  35      import readline 
  36      GNU_SPLITTING = ('GNU' in readline.__doc__) 
  37  except: 
  38      GNU_SPLITTING = True 
  39   
  40   
  41  import madgraph 
  42  from madgraph import MG4DIR, MG5DIR, MadGraph5Error 
  43   
  44   
  45  import madgraph.core.base_objects as base_objects 
  46  import madgraph.core.diagram_generation as diagram_generation 
  47  import madgraph.core.drawing as draw_lib 
  48  import madgraph.core.helas_objects as helas_objects 
  49   
  50  import madgraph.iolibs.drawing_eps as draw 
  51  import madgraph.iolibs.export_cpp as export_cpp 
  52  import madgraph.iolibs.export_v4 as export_v4 
  53  import madgraph.iolibs.helas_call_writers as helas_call_writers 
  54  import madgraph.iolibs.file_writers as writers 
  55  import madgraph.iolibs.files as files 
  56  import madgraph.iolibs.group_subprocs as group_subprocs 
  57  import madgraph.iolibs.import_v4 as import_v4 
  58  import madgraph.iolibs.save_load_object as save_load_object 
  59   
  60  import madgraph.interface.extended_cmd as cmd 
  61  import madgraph.interface.tutorial_text as tutorial_text 
  62  import madgraph.interface.launch_ext_program as launch_ext 
  63  import madgraph.interface.madevent_interface as madevent_interface 
  64   
  65  import madgraph.various.process_checks as process_checks 
  66  import madgraph.various.banner as banner_module 
  67  import madgraph.various.misc as misc 
  68  import madgraph.various.cluster as cluster 
  69   
  70  import models as ufomodels 
  71  import models.import_ufo as import_ufo 
  72   
  73  import aloha.aloha_fct as aloha_fct 
  74  import aloha.create_aloha as create_aloha 
  75   
  76  # Special logger for the Cmd Interface 
  77  logger = logging.getLogger('cmdprint') # -> stdout 
  78  logger_stderr = logging.getLogger('fatalerror') # ->stderr 
   79  logger_tuto = logging.getLogger('tutorial') # -> stdout, includes instructions in 
   80  # order to learn MG5 
   81   
   82  #=============================================================================== 
   83  # CmdExtended 
   84  #=============================================================================== 
   85  class CmdExtended(cmd.Cmd): 
86 """Particularisation of the cmd command for MG5""" 87 88 #suggested list of command 89 next_possibility = { 90 'start': ['import model ModelName', 'import command PATH', 91 'import proc_v4 PATH', 'tutorial'], 92 'import model' : ['generate PROCESS','define MULTIPART PART1 PART2 ...', 93 'display particles', 'display interactions'], 94 'define': ['define MULTIPART PART1 PART2 ...', 'generate PROCESS', 95 'display multiparticles'], 96 'generate': ['add process PROCESS','output [OUTPUT_TYPE] [PATH]','display diagrams'], 97 'add process':['output [OUTPUT_TYPE] [PATH]', 'display processes'], 98 'output':['launch','open index.html','history PATH', 'exit'], 99 'display': ['generate PROCESS', 'add process PROCESS', 'output [OUTPUT_TYPE] [PATH]'], 100 'import proc_v4' : ['launch','exit'], 101 'launch': ['open index.html','exit'], 102 'tutorial': ['generate PROCESS', 'import model MODEL', 'help TOPIC'] 103 } 104 105 debug_output = 'MG5_debug' 106 error_debug = 'Please report this bug on https://bugs.launchpad.net/madgraph5\n' 107 error_debug += 'More information is found in \'%(debug)s\'.\n' 108 error_debug += 'Please attach this file to your report.' 109 110 config_debug = 'If you need help with this issue please contact us on https://answers.launchpad.net/madgraph5\n' 111 112 keyboard_stop_msg = """stopping all operation 113 in order to quit mg5 please enter exit""" 114 115 # Define the Error Class # Define how error are handle 116 InvalidCmd = madgraph.InvalidCmd 117 ConfigurationError = MadGraph5Error 118
119 - def __init__(self, *arg, **opt):
120 """Init history and line continuation""" 121 122 # If possible, build an info line with current version number 123 # and date, from the VERSION text file 124 info = misc.get_pkg_info() 125 info_line = "" 126 127 if info.has_key('version') and info.has_key('date'): 128 len_version = len(info['version']) 129 len_date = len(info['date']) 130 if len_version + len_date < 30: 131 info_line = "#* VERSION %s %s %s *\n" % \ 132 (info['version'], 133 (30 - len_version - len_date) * ' ', 134 info['date']) 135 136 # Create a header for the history file. 137 # Remember to fill in time at writeout time! 138 self.history_header = \ 139 '#************************************************************\n' + \ 140 '#* MadGraph 5 *\n' + \ 141 '#* *\n' + \ 142 "#* * * *\n" + \ 143 "#* * * * * *\n" + \ 144 "#* * * * * 5 * * * * *\n" + \ 145 "#* * * * * *\n" + \ 146 "#* * * *\n" + \ 147 "#* *\n" + \ 148 "#* *\n" + \ 149 info_line + \ 150 "#* *\n" + \ 151 "#* The MadGraph Development Team - Please visit us at *\n" + \ 152 "#* https://server06.fynu.ucl.ac.be/projects/madgraph *\n" + \ 153 '#* *\n' + \ 154 '#************************************************************\n' + \ 155 '#* *\n' + \ 156 '#* Command File for MadGraph 5 *\n' + \ 157 '#* *\n' + \ 158 '#* run as ./bin/mg5 filename *\n' + \ 159 '#* *\n' + \ 160 '#************************************************************\n' 161 162 if info_line: 163 info_line = info_line[1:] 164 165 logger.info(\ 166 "************************************************************\n" + \ 167 "* *\n" + \ 168 "* W E L C O M E to M A D G R A P H 5 *\n" + \ 169 "* *\n" + \ 170 "* *\n" + \ 171 "* * * *\n" + \ 172 "* * * * * *\n" + \ 173 "* * * * * 5 * * * * *\n" + \ 174 "* * * * * *\n" + \ 175 "* * * *\n" + \ 176 "* *\n" + \ 177 info_line + \ 178 "* *\n" + \ 179 "* The MadGraph Development Team - Please visit us at *\n" + \ 180 "* https://server06.fynu.ucl.ac.be/projects/madgraph *\n" + \ 181 "* *\n" + \ 182 "* Type 'help' for in-line help. *\n" + \ 183 "* Type 'tutorial' to learn how MG5 works *\n" + \ 184 "* *\n" + \ 185 "************************************************************") 186 187 cmd.Cmd.__init__(self, *arg, **opt)
188
189 - def postcmd(self,stop, line):
190 """ finishing a command 191 This looks if we have to write an additional text for the tutorial.""" 192 193 # Print additional information in case of routines fails 194 if stop == False: 195 return False 196 197 args=line.split() 198 # Return for empty line 199 if len(args)==0: 200 return stop 201 202 # try to print linked to the first word in command 203 #as import_model,... if you don't find then try print with only 204 #the first word. 205 if len(args)==1: 206 command=args[0] 207 else: 208 command = args[0]+'_'+args[1].split('.')[0] 209 210 try: 211 logger_tuto.info(getattr(tutorial_text, command).replace('\n','\n\t')) 212 except: 213 try: 214 logger_tuto.info(getattr(tutorial_text, args[0]).replace('\n','\n\t')) 215 except: 216 pass 217 218 return stop
219 220
221 - def get_history_header(self):
222 """return the history header""" 223 return self.history_header % misc.get_time_info()
224
225 #=============================================================================== 226 # HelpToCmd 227 #=============================================================================== 228 -class HelpToCmd(cmd.HelpCmd):
229 """ The Series of help routine for the MadGraphCmd""" 230
  231      def help_save(self): 
  232          logger.info("syntax: save %s FILENAME" % "|".join(self._save_opts)) 
  233          logger.info("-- save information as file FILENAME") 
  234          logger.info("   FILENAME is optional for saving 'options'.") 
235
  236      def help_load(self): 
  237          logger.info("syntax: load %s FILENAME" % "|".join(self._save_opts)) 
  238          logger.info("-- load information from file FILENAME") 
239
  240      def help_import(self): 
  241          logger.info("syntax: import " + "|".join(self._import_formats) + \ 
  242                      " FILENAME") 
  243          logger.info("-- imports file(s) in various formats") 
  244          logger.info("") 
  245          logger.info("   import model MODEL[-RESTRICTION] [--modelname]:") 
  246          logger.info("      Import a UFO model.") 
  247          logger.info("      MODEL should be a valid UFO model name") 
  248          logger.info("      Model restrictions are specified by MODEL-RESTRICTION") 
  249          logger.info("      with the file restrict_RESTRICTION.dat in the model dir.") 
  250          logger.info("      By default, restrict_default.dat is used.") 
  251          logger.info("      Specify MODEL-full to get the unrestricted model.") 
  252          logger.info("      '--modelname' keeps the original particle names for the model") 
  253          logger.info("") 
  254          logger.info("   import model_v4 MODEL [--modelname] :") 
  255          logger.info("      Import an MG4 model.") 
  256          logger.info("      MODEL should be the name of the model") 
  257          logger.info("      or the path to the MG4 model directory.") 
  258          logger.info("      '--modelname' keeps the original particle names for the model") 
  259          logger.info("") 
  260          logger.info("   import proc_v4 [PATH] :") 
  261          logger.info("      Execute MG5 based on a proc_card.dat in MG4 format.") 
  262          logger.info("      The path to the proc_card is optional if you are in a") 
  263          logger.info("      madevent directory.") 
  264          logger.info("") 
  265          logger.info("   import command PATH :") 
  266          logger.info("      Execute the list of commands in the file at PATH") 
  267          logger.info("") 
  268          logger.info("   import banner PATH  [--no_launch]:") 
  269          logger.info("      Rerun the exact same run defined in the given banner.") 
270
  271      def help_install(self): 
  272          logger.info("syntax: install " + "|".join(self._install_opts)) 
  273          logger.info("-- Download the latest version of the program and install it") 
  274          logger.info("   locally in the current MadGraph version. In order to have") 
  275          logger.info("   a successful installation, you will need to have an up-to-date") 
  276          logger.info("   F77 and/or C compiler and ROOT.") 
277
  278      def help_display(self): 
  279          logger.info("syntax: display " + "|".join(self._display_opts)) 
  280          logger.info("-- display the status of various internal state variables") 
  281          logger.info("   for particles/interactions you can specify the name or id of the") 
  282          logger.info("   particles/interactions to get more detailed information.") 
  283          logger.info("   Example: display particles e+.") 
  284          logger.info("   For \"checks\", you can specify \"failed\" to see only the failed checks.") 
  285          logger.info("   For \"diagrams\", you can specify where the file will be written.") 
  286          logger.info("   Example: display diagrams ./") 
287 288
  289      def help_launch(self): 
  290          """help for launch command""" 
  291          _launch_parser.print_help() 
292
  293      def help_tutorial(self): 
  294          logger.info("syntax: tutorial [" + "|".join(self._tutorial_opts) + "]") 
  295          logger.info("-- start/stop the tutorial mode") 
296
  297      def help_open(self): 
  298          logger.info("syntax: open FILE") 
  299          logger.info("-- open a file with the appropriate editor.") 
  300          logger.info('   If FILE is one of index.html, param_card.dat or run_card.dat,') 
  301          logger.info('   the path to the last created/used directory is used.') 
  302          logger.info('   The program used to open those files can be chosen in the') 
  303          logger.info('   configuration file ./input/mg5_configuration.txt') 
304
305 - def help_output(self):
306 logger.info("syntax: output [" + "|".join(self._export_formats) + \ 307 "] [path|.|auto] [options]") 308 logger.info("-- Output any generated process(es) to file.") 309 logger.info(" mode: Default mode is madevent. Default path is \'.\' or auto.") 310 logger.info(" - If mode is madevent, create a MadEvent process directory.") 311 logger.info(" - If mode is standalone, create a Standalone directory") 312 logger.info(" - If mode is matrix, output the matrix.f files for all") 313 logger.info(" generated processes in directory \"path\".") 314 logger.info(" - If mode is standalone_cpp, create a standalone C++") 315 logger.info(" directory in \"path\".") 316 logger.info(" - If mode is pythia8, output all files needed to generate") 317 logger.info(" the processes using Pythia 8. The files are written in") 318 logger.info(" the Pythia 8 directory (default).") 319 logger.info(" NOTE: The Pythia 8 directory is set in the ./input/mg5_configuration.txt") 320 logger.info(" - If mode is aloha: Special syntax output:") 321 logger.info(" syntax: aloha [ROUTINE] [--options]" ) 322 logger.info(" valid options for aloha output are:") 323 logger.info(" --format=Fortran|Python|Cpp : defining the output language") 324 logger.info(" --output= : defining output directory") 325 logger.info(" path: The path of the process directory.") 326 logger.info(" If you put '.' as path, your pwd will be used.") 327 logger.info(" If you put 'auto', an automatic directory PROC_XX_n will be created.") 328 logger.info(" options:") 329 logger.info(" -f: force cleaning of the directory if it already exists") 330 logger.info(" -d: specify other MG/ME directory") 331 logger.info(" -noclean: no cleaning performed in \"path\".") 332 logger.info(" -nojpeg: no jpeg diagrams will be generated.") 333 logger.info(" -name: the postfix of the main file in pythia8 mode.") 334 logger.info(" Examples:") 335 logger.info(" output") 336 logger.info(" output standalone MYRUN -f") 337 logger.info(" output pythia8 ../pythia8/ -name qcdprocs")
338
  339      def help_check(self): 
  340   
  341          logger.info("syntax: check [" + "|".join(self._check_opts) + "] [param_card] process_definition") 
  342          logger.info("-- check a process or set of processes. Options:") 
  343          logger.info("full: Perform all three checks described below:") 
  344          logger.info("   permutation, gauge and lorentz_invariance.") 
  345          logger.info("permutation: Check that the model and MG5 are working") 
  346          logger.info("   properly by generating permutations of the process and") 
  347          logger.info("   checking that the resulting matrix elements give the") 
  348          logger.info("   same value.") 
  349          logger.info("gauge: Check that processes with massless gauge bosons") 
  350          logger.info("   are gauge invariant.") 
  351          logger.info("lorentz_invariance: Check that the amplitude is Lorentz") 
  352          logger.info("   invariant by comparing the amplitude in different frames.") 
  353          logger.info("If param_card is given, that param_card is used instead") 
  354          logger.info("   of the default values for the model.") 
  355          logger.info("For process syntax, please see help generate") 
356
  357      def help_generate(self): 
  358   
  359          logger.info("syntax: generate INITIAL STATE > REQ S-CHANNEL > FINAL STATE $ EXCL S-CHANNEL / FORBIDDEN PARTICLES COUP1=ORDER1 COUP2=ORDER2 @N") 
  360          logger.info("-- generate diagrams for a given process") 
  361          logger.info("   Syntax example: l+ vl > w+ > l+ vl a $ z / a h QED=3 QCD=0 @1") 
  362          logger.info("   Alternative required s-channels can be separated by \"|\":") 
  363          logger.info("   b b~ > W+ W- | H+ H- > ta+ vt ta- vt~") 
  364          logger.info("   If no coupling orders are given, MG5 will try to determine") 
  365          logger.info("   orders to ensure the maximum number of QCD vertices.") 
  366          logger.info("   Note that if there is more than one non-QCD coupling type,") 
  367          logger.info("   coupling orders need to be specified by hand.") 
  368          logger.info("Decay chain syntax:") 
  369          logger.info("   core process, decay1, (decay2, (decay2', ...)), ... etc") 
  370          logger.info("   Example: p p > t~ t QED=0, (t~ > W- b~, W- > l- vl~), t > j j b @2") 
  371          logger.info("   Note that identical particles will all be decayed.") 
  372          logger.info("To generate a second process use the \"add process\" command") 
373
  374      def help_add(self): 
  375   
  376          logger.info("syntax: add process INITIAL STATE > REQ S-CHANNEL > FINAL STATE $ EXCL S-CHANNEL / FORBIDDEN PARTICLES COUP1=ORDER1 COUP2=ORDER2") 
  377          logger.info("-- generate diagrams for a process and add to existing processes") 
  378          logger.info("   Syntax example: l+ vl > w+ > l+ vl a $ z / a h QED=3 QCD=0 @1") 
  379          logger.info("   Alternative required s-channels can be separated by \"|\":") 
  380          logger.info("   b b~ > W+ W- | H+ H- > ta+ vt ta- vt~") 
  381          logger.info("   If no coupling orders are given, MG5 will try to determine") 
  382          logger.info("   orders to ensure the maximum number of QCD vertices.") 
  383          logger.info("Decay chain syntax:") 
  384          logger.info("   core process, decay1, (decay2, (decay2', ...)), ... etc") 
  385          logger.info("   Example: p p > t~ t QED=0, (t~ > W- b~, W- > l- vl~), t > j j b @2") 
  386          logger.info("   Note that identical particles will all be decayed.") 
387
  388      def help_define(self): 
  389          logger.info("syntax: define multipart_name [=] part_name_list") 
  390          logger.info("-- define a multiparticle") 
  391          logger.info("   Example: define p = g u u~ c c~ d d~ s s~ b b~") 
392 393
394 - def help_set(self):
395 logger.info("syntax: set %s argument" % "|".join(self._set_options)) 396 logger.info("-- set options for generation or output") 397 logger.info(" group_subprocesses True/False/Auto: ") 398 logger.info(" (default Auto) Smart grouping of subprocesses into ") 399 logger.info(" directories, mirroring of initial states, and ") 400 logger.info(" combination of integration channels.") 401 logger.info(" Example: p p > j j j w+ gives 5 directories and 184 channels") 402 logger.info(" (cf. 65 directories and 1048 channels for regular output)") 403 logger.info(" Auto means False for decay computation and True for") 404 logger.info(" collisions.") 405 logger.info(" ignore_six_quark_processes multi_part_label") 406 logger.info(" (default none) ignore processes with at least 6 of any") 407 logger.info(" of the quarks given in multi_part_label.") 408 logger.info(" These processes give negligible contribution to the") 409 logger.info(" cross section but have subprocesses/channels.") 410 logger.info(" stdout_level DEBUG|INFO|WARNING|ERROR|CRITICAL") 411 logger.info(" change the default level for printed information") 412 logger.info(" fortran_compiler NAME") 413 logger.info(" (default None) Force a specific fortran compiler.") 414 logger.info(" If None, it tries first g77 and if not present gfortran.") 415 logger.info(" timeout VALUE") 416 logger.info(" (default 20) Seconds allowed to answer questions.") 417 logger.info(" Note that pressing tab always stops the timer.")
418
419 #=============================================================================== 420 # CheckValidForCmd 421 #=============================================================================== 422 -class CheckValidForCmd(cmd.CheckCmd):
423 """ The Series of help routine for the MadGraphCmd""" 424
425 - class RWError(MadGraph5Error):
426 """a class for read/write errors"""
427
428 - def check_add(self, args):
429 """check the validity of line 430 syntax: add process PROCESS 431 """ 432 433 if len(args) < 2: 434 self.help_add() 435 raise self.InvalidCmd('\"add\" requires at least two arguments') 436 437 if args[0] != 'process': 438 raise self.InvalidCmd('\"add\" requires the argument \"process\"') 439 440 if not self._curr_model: 441 logger.info("No model currently active, so we import the Standard Model") 442 self.do_import('model sm') 443 444 self.check_process_format(' '.join(args[1:]))
445
  446      def check_define(self, args): 
  447          """check the validity of line 
  448          syntax: define multipart_name [ part_name_list ] 
  449          """ 
  450   
  451   
  452          if len(args) < 2: 
  453              self.help_define() 
  454              raise self.InvalidCmd('\"define\" command requires at least two arguments') 
  455   
  456          if args[1] == '=': 
  457              del args[1] 
  458              if len(args) < 2: 
  459                  self.help_define() 
  460                  raise self.InvalidCmd('\"define\" command requires at least one particle name after \"=\"') 
  461   
  462          if '=' in args: 
  463              self.help_define() 
  464              raise self.InvalidCmd('\"define\" command: the symbol \"=\" is only allowed in the second position') 
  465   
  466          if not self._curr_model: 
  467              logger.info('No model currently active. Try with the Standard Model') 
  468              self.do_import('model sm') 
  469   
  470          if self._curr_model['particles'].find_name(args[0]): 
  471              raise self.InvalidCmd("label %s is a particle name in this model\n\ 
  472              Please retry with another name." % args[0]) 
473
474 - def check_display(self, args):
475 """check the validity of line 476 syntax: display XXXXX 477 """ 478 479 if len(args) < 1: 480 self.help_display() 481 raise self.InvalidCmd, 'display requires an argument specifying what to display' 482 if args[0] not in self._display_opts: 483 self.help_display() 484 raise self.InvalidCmd, 'Invalid arguments for display command: %s' % args[0] 485 486 if not self._curr_model: 487 raise self.InvalidCmd("No model currently active, please import a model!") 488 489 if args[0] in ['processes', 'diagrams'] and not self._curr_amps: 490 raise self.InvalidCmd("No process generated, please generate a process!") 491 if args[0] == 'checks' and not self._comparisons: 492 raise self.InvalidCmd("No check results to display.") 493 494 if args[0] == 'variable' and len(args) !=2: 495 raise self.InvalidCmd('variable need a variable name')
496 497
498 - def check_draw(self, args):
499 """check the validity of line 500 syntax: draw DIRPATH [option=value] 501 """ 502 503 if len(args) < 1: 504 args.append('/tmp') 505 506 if not self._curr_amps: 507 raise self.InvalidCmd("No process generated, please generate a process!") 508 509 if not os.path.isdir(args[0]): 510 raise self.InvalidCmd( "%s is not a valid directory for export file" % args[0])
511
512 - def check_check(self, args):
513 """check the validity of args""" 514 515 if not self._curr_model: 516 raise self.InvalidCmd("No model currently active, please import a model!") 517 518 if self._model_v4_path: 519 raise self.InvalidCmd(\ 520 "\"check\" not possible for v4 models") 521 522 if len(args) < 2: 523 self.help_check() 524 raise self.InvalidCmd("\"check\" requires a process.") 525 526 param_card = None 527 if os.path.isfile(args[1]): 528 param_card = args.pop(1) 529 530 if args[0] not in self._check_opts: 531 args.insert(0, 'full') 532 533 if any([',' in elem for elem in args]): 534 raise self.InvalidCmd('Decay chains not allowed in check') 535 536 self.check_process_format(" ".join(args[1:])) 537 538 return param_card
539
540 - def check_generate(self, args):
541 """check the validity of args""" 542 # Not called anymore see check_add 543 return self.check_add(args)
544
  545      def check_process_format(self, process): 
  546          """ check the validity of the string given to describe a format """ 
  547   
  548          # check balance of parentheses 
  549          if process.count('(') != process.count(')'): 
  550              raise self.InvalidCmd('Invalid Format, no balance between open and close parentheses') 
  551          # remove parentheses for future introspection 
  552          process = process.replace('(',' ').replace(')',' ') 
  553   
  554          # split following , (for decay chains) 
  555          subprocesses = process.split(',') 
  556          if len(subprocesses) > 1: 
  557              for subprocess in subprocesses: 
  558                  self.check_process_format(subprocess) 
  559              return 
  560   
  561          # request that we have one or two > in the process 
  562          if process.count('>') not in [1,2]: 
  563              raise self.InvalidCmd( 
  564                 'wrong format for \"%s\" this part requires one or two symbols \'>\', %s found' 
  565                 % (process, process.count('>'))) 
  566   
  567          # we need at least one particle in each piece 
  568          particles_parts = process.split('>') 
  569          for particles in particles_parts: 
  570              if re.match(r'^\s*$', particles): 
  571                  raise self.InvalidCmd( 
  572                      '\"%s\" is a wrong process format. Please try again' % process) 
  573   
  574          # '/' and '$' should be used only after the process definition 
  575          for particles in particles_parts[:-1]: 
  576              if re.search('\D/', particles): 
  577                  raise self.InvalidCmd( 
  578                  'wrong process format: restriction should be placed after the final states') 
  579              if re.search('\D\$', particles): 
  580                  raise self.InvalidCmd( 
  581                  'wrong process format: restriction should be placed after the final states') 
582 583
584 - def check_import(self, args):
585 """check the validity of line""" 586 587 modelname = False 588 if '-modelname' in args: 589 args.remove('-modelname') 590 modelname = True 591 elif '--modelname' in args: 592 args.remove('--modelname') 593 modelname = True 594 595 if not args: 596 self.help_import() 597 raise self.InvalidCmd('wrong \"import\" format') 598 599 if len(args) >= 2 and args[0] not in self._import_formats: 600 self.help_import() 601 raise self.InvalidCmd('wrong \"import\" format') 602 elif len(args) == 1: 603 if args[0] in self._import_formats: 604 if args[0] != "proc_v4": 605 self.help_import() 606 raise self.InvalidCmd('wrong \"import\" format') 607 elif not self._export_dir: 608 self.help_import() 609 raise self.InvalidCmd('PATH is mandatory in the current context\n' + \ 610 'Did you forget to run the \"output\" command') 611 # The type of the import is not given -> guess it 612 format = self.find_import_type(args[0]) 613 logger.info('The import format was not given, so we guess it as %s' % format) 614 args.insert(0, format) 615 if self.history[-1].startswith('import'): 616 self.history[-1] = 'import %s %s' % \ 617 (format, ' '.join(self.history[-1].split()[1:])) 618 619 if modelname: 620 args.append('-modelname')
621 622 623
  624      def check_install(self, args): 
  625          """check that the install command is valid""" 
  626   
  627          if len(args) != 1: 
  628              self.help_install() 
  629              raise self.InvalidCmd('install command requires exactly one argument') 
  630   
  631          if args[0] not in self._install_opts: 
  632              if not args[0].startswith('td'): 
  633                  self.help_install() 
  634                  raise self.InvalidCmd('Unrecognized program %s ' % args[0]) 
  635   
  636          if args[0] in ["ExRootAnalysis", "Delphes"]: 
  637              if not misc.which('root'): 
  638                  raise self.InvalidCmd( 
  639  '''In order to install ExRootAnalysis, you need to install Root on your computer first. 
  640  Please follow the information on http://root.cern.ch/drupal/content/downloading-root''') 
  641              if 'ROOTSYS' not in os.environ: 
  642                  raise self.InvalidCmd( 
  643  '''The environment variable ROOTSYS is not configured. 
  644  You can set it by adding the following lines in your .bashrc [.bash_profile for mac]: 
  645  export ROOTSYS=%s 
  646  export PATH=$PATH:$ROOTSYS/bin 
  647  export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$ROOTSYS/lib 
  648  export DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH:$ROOTSYS/lib 
  649  This will take effect only in a NEW terminal 
  650  ''' % os.path.realpath(pjoin(misc.which('root'), \ 
  651                                  os.path.pardir, os.path.pardir))) 
652 653
654 - def check_launch(self, args, options):
655 """check the validity of the line""" 656 # modify args in order to be MODE DIR 657 # mode being either standalone or madevent 658 if not( 0 <= int(options.cluster) <= 2): 659 return self.InvalidCmd, 'cluster mode should be between 0 and 2' 660 661 if not args: 662 if self._done_export: 663 mode = self.find_output_type(self._done_export[0]) 664 if mode != self._done_export[1]: 665 raise self.InvalidCmd, \ 666 '%s not valid directory for launch' % self._done_export[0] 667 args.append(self._done_export[1]) 668 args.append(self._done_export[0]) 669 return 670 else: 671 self.help_launch() 672 raise self.InvalidCmd, \ 673 'No default location available, please specify location.' 674 675 if len(args) != 1: 676 self.help_launch() 677 return self.InvalidCmd, 'Invalid Syntax: Too many argument' 678 679 # search for a valid path 680 if os.path.isdir(args[0]): 681 path = os.path.realpath(args[0]) 682 elif os.path.isdir(pjoin(MG5DIR,args[0])): 683 path = pjoin(MG5DIR,args[0]) 684 elif MG4DIR and os.path.isdir(pjoin(MG4DIR,args[0])): 685 path = pjoin(MG4DIR,args[0]) 686 else: 687 raise self.InvalidCmd, '%s is not a valid directory' % args[0] 688 689 mode = self.find_output_type(path) 690 691 args[0] = mode 692 args.append(path) 693 # inform where we are for future command 694 self._done_export = [path, mode]
695 696
697 - def find_import_type(self, path):
698 """ identify the import type of a given path 699 valid output: model/model_v4/proc_v4/command""" 700 701 possibility = [pjoin(MG5DIR,'models',path), \ 702 pjoin(MG5DIR,'models',path+'_v4'), path] 703 if '-' in path: 704 name = path.rsplit('-',1)[0] 705 possibility = [pjoin(MG5DIR,'models',name), name] + possibility 706 # Check if they are a valid directory 707 for name in possibility: 708 if os.path.isdir(name): 709 if os.path.exists(pjoin(name,'particles.py')): 710 return 'model' 711 elif os.path.exists(pjoin(name,'particles.dat')): 712 return 'model_v4' 713 714 # Not valid directory so maybe a file 715 if os.path.isfile(path): 716 text = open(path).read() 717 pat = re.compile('(Begin process|<MGVERSION>)', re.I) 718 matches = pat.findall(text) 719 if not matches: 720 return 'command' 721 elif len(matches) > 1: 722 return 'banner' 723 elif matches[0].lower() == 'begin process': 724 return 'proc_v4' 725 else: 726 return 'banner' 727 else: 728 return 'proc_v4'
729 730 731 732
733 - def find_output_type(self, path):
734 """ identify the type of output of a given directory: 735 valid output: madevent/standalone/standalone_cpp""" 736 737 card_path = pjoin(path,'Cards') 738 bin_path = pjoin(path,'bin') 739 src_path = pjoin(path,'src') 740 include_path = pjoin(path,'include') 741 subproc_path = pjoin(path,'SubProcesses') 742 743 if os.path.isfile(pjoin(include_path, 'Pythia.h')): 744 return 'pythia8' 745 elif not os.path.isdir(os.path.join(path, 'SubProcesses')): 746 raise self.InvalidCmd, '%s : Not a valid directory' % path 747 748 if os.path.isdir(src_path): 749 return 'standalone_cpp' 750 elif os.path.isfile(pjoin(bin_path,'generate_events')): 751 return 'madevent' 752 elif os.path.isdir(card_path): 753 return 'standalone' 754 755 raise self.InvalidCmd, '%s : Not a valid directory' % path
756
757 - def check_load(self, args):
758 """ check the validity of the line""" 759 760 if len(args) != 2 or args[0] not in self._save_opts: 761 self.help_load() 762 raise self.InvalidCmd('wrong \"load\" format')
763 764
765 - def check_save(self, args):
766 """ check the validity of the line""" 767 if len(args) == 0: 768 args.append('options') 769 770 if args[0] not in self._save_opts: 771 self.help_save() 772 raise self.InvalidCmd('wrong \"save\" format') 773 if args[0] != 'options' and len(args) != 2: 774 self.help_save() 775 raise self.InvalidCmd('wrong \"save\" format') 776 777 if len(args) == 2: 778 basename = os.path.dirname(args[1]) 779 if not os.path.exists(basename): 780 raise self.InvalidCmd('%s is not a valid path, please retry' % \ 781 args[1]) 782 783 elif args[0] == 'options' and len(args) == 1: 784 args.append(pjoin(MG5DIR,'input','mg5_configuration.txt'))
785 786
  787      def check_set(self, args): 
  788          """ check the validity of the line""" 
  789   
  790          if len(args) < 2: 
  791              self.help_set() 
  792              raise self.InvalidCmd('set needs an option and an argument') 
  793   
  794          if args[0] not in self._set_options: 
  795              if args[0] not in self.options: 
  796                  self.help_set() 
  797                  raise self.InvalidCmd('Possible options for set are %s' % \ 
  798                                        self._set_options) 
  799   
  800          if args[0] in ['group_subprocesses']: 
  801              if args[1] not in ['False', 'True', 'Auto']: 
  802                  raise self.InvalidCmd('%s needs argument False, True or Auto' % \ 
  803                                        args[0]) 
  804          if args[0] in ['ignore_six_quark_processes']: 
  805              if args[1] not in self._multiparticles.keys() and args[1] != 'False': 
  806                  raise self.InvalidCmd('ignore_six_quark_processes needs ' + \ 
  807                                        'a multiparticle name as argument') 
  808   
  809          if args[0] in ['stdout_level']: 
  810              if args[1] not in ['DEBUG','INFO','WARNING','ERROR','CRITICAL']: 
  811                  raise self.InvalidCmd('stdout_level needs ' + \ 
  812                                        'a valid level') 
  813   
  814          if args[0] in ['timeout']: 
  815              if not args[1].isdigit(): 
  816                  raise self.InvalidCmd('timeout value should be an integer') 
817 818
819 - def check_open(self, args):
820 """ check the validity of the line """ 821 822 if len(args) != 1: 823 self.help_open() 824 raise self.InvalidCmd('OPEN command requires exactly one argument') 825 826 if args[0].startswith('./'): 827 if not os.path.isfile(args[0]): 828 raise self.InvalidCmd('%s: not such file' % args[0]) 829 return True 830 831 # if special : create the path. 832 if not self._done_export: 833 if not os.path.isfile(args[0]): 834 self.help_open() 835 raise self.InvalidCmd('No command \"output\" or \"launch\" used. Impossible to associate this name to a file') 836 else: 837 return True 838 839 path = self._done_export[0] 840 if os.path.isfile(pjoin(path,args[0])): 841 args[0] = pjoin(path,args[0]) 842 elif os.path.isfile(pjoin(path,'Cards',args[0])): 843 args[0] = pjoin(path,'Cards',args[0]) 844 elif os.path.isfile(pjoin(path,'HTML',args[0])): 845 args[0] = pjoin(path,'HTML',args[0]) 846 # special for card with _default define: copy the default and open it 847 elif '_card.dat' in args[0]: 848 name = args[0].replace('_card.dat','_card_default.dat') 849 if os.path.isfile(pjoin(path,'Cards', name)): 850 files.cp(path + '/Cards/' + name, path + '/Cards/'+ args[0]) 851 args[0] = pjoin(path,'Cards', args[0]) 852 else: 853 raise self.InvalidCmd('No default path for this file') 854 elif not os.path.isfile(args[0]): 855 raise self.InvalidCmd('No default path for this file')
856 857
858 - def check_output(self, args):
859 """ check the validity of the line""" 860 861 if args and args[0] in self._export_formats: 862 self._export_format = args.pop(0) 863 else: 864 self._export_format = 'madevent' 865 866 if not self._curr_model: 867 text = 'No model found. Please import a model first and then retry.' 868 raise self.InvalidCmd(text) 869 870 if self._model_v4_path and \ 871 (self._export_format not in self._v4_export_formats): 872 text = " The Model imported (MG4 format) does not contain enough\n " 873 text += " information for this type of output. In order to create\n" 874 text += " output for " + args[0] + ", you have to use a UFO model.\n" 875 text += " Those model can be imported with mg5> import model NAME." 876 logger.warning(text) 877 raise self.InvalidCmd('') 878 879 if self._export_format == 'aloha': 880 return 881 882 883 if not self._curr_amps: 884 text = 'No processes generated. Please generate a process first.' 885 raise self.InvalidCmd(text) 886 887 888 889 890 891 if args and args[0][0] != '-': 892 # This is a path 893 path = args.pop(0) 894 forbiden_chars = ['>','<',';','&'] 895 for char in forbiden_chars: 896 if char in path: 897 raise self.invalidCmd('%s is not allowed in the output path' % char) 898 # Check for special directory treatment 899 if path == 'auto' and self._export_format in \ 900 ['madevent', 'standalone', 'standalone_cpp']: 901 self.get_default_path() 902 elif path != 'auto': 903 self._export_dir = path 904 elif path == 'auto': 905 if self.options['pythia8_path']: 906 self._export_dir = self.options['pythia8_path'] 907 else: 908 self._export_dir = '.' 909 else: 910 if self._export_format != 'pythia8': 911 # No valid path 912 self.get_default_path() 913 else: 914 if self.options['pythia8_path']: 915 self._export_dir = self.options['pythia8_path'] 916 else: 917 self._export_dir = '.' 918 919 self._export_dir = os.path.realpath(self._export_dir)
920
921 - def get_default_path(self):
922 """Set self._export_dir to the default (\'auto\') path""" 923 924 if self._export_format in ['madevent', 'standalone']: 925 # Detect if this script is launched from a valid copy of the Template, 926 # if so store this position as standard output directory 927 if 'TemplateVersion.txt' in os.listdir('.'): 928 #Check for ./ 929 self._export_dir = os.path.realpath('.') 930 return 931 elif 'TemplateVersion.txt' in os.listdir('..'): 932 #Check for ../ 933 self._export_dir = os.path.realpath('..') 934 return 935 elif self.stdin != sys.stdin: 936 #Check for position defined by the input files 937 input_path = os.path.realpath(self.stdin.name).split(os.path.sep) 938 print "Not standard stdin, use input path" 939 if input_path[-2] == 'Cards': 940 self._export_dir = os.path.sep.join(input_path[:-2]) 941 if 'TemplateVersion.txt' in self._export_dir: 942 return 943 944 if self._export_format.startswith('madevent'): 945 name_dir = lambda i: 'PROC_%s_%s' % \ 946 (self._curr_model['name'], i) 947 auto_path = lambda i: pjoin(self.writing_dir, 948 name_dir(i)) 949 elif self._export_format == 'standalone': 950 name_dir = lambda i: 'PROC_SA_%s_%s' % \ 951 (self._curr_model['name'], i) 952 auto_path = lambda i: pjoin(self.writing_dir, 953 name_dir(i)) 954 elif self._export_format == 'standalone_cpp': 955 name_dir = lambda i: 'PROC_SA_CPP_%s_%s' % \ 956 (self._curr_model['name'], i) 957 auto_path = lambda i: pjoin(self.writing_dir, 958 name_dir(i)) 959 elif self._export_format == 'pythia8': 960 if self.options['pythia8_path']: 961 self._export_dir = self.options['pythia8_path'] 962 else: 963 self._export_dir = '.' 964 return 965 else: 966 self._export_dir = '.' 967 return 968 for i in range(500): 969 if os.path.isdir(auto_path(i)): 970 continue 971 else: 972 self._export_dir = auto_path(i) 973 break 974 if not self._export_dir: 975 raise self.InvalidCmd('Can\'t use auto path,' + \ 976 'more than 500 dirs already')
977
978 979 #=============================================================================== 980 # CheckValidForCmdWeb 981 #=============================================================================== 982 -class CheckValidForCmdWeb(CheckValidForCmd):
983 """ Check the validity of input line for web entry 984 (no explicit path authorized)""" 985
986 - class WebRestriction(MadGraph5Error):
987 """class for WebRestriction"""
988
989 - def check_draw(self, args):
990 """check the validity of line 991 syntax: draw FILEPATH [option=value] 992 """ 993 raise self.WebRestriction('direct call to draw is forbidden on the web')
994
995 - def check_display(self, args):
996 """ check the validity of line in web mode """ 997 998 if args[0] == 'mg5_variable': 999 raise self.WebRestriction('Display internal variable is forbidden on the web') 1000 1001 CheckValidForCmd.check_history(self, args)
1002
1003 - def check_check(self, args):
1004 """ Not authorize for the Web""" 1005 1006 raise self.WebRestriction('Check call is forbidden on the web')
1007
 1008      def check_history(self, args): 
 1009          """check the validity of line 
 1010          No Path authorized for the Web""" 
 1011   
 1012          CheckValidForCmd.check_history(self, args) 
 1013   
 1014          if len(args) == 2 and args[1] not in ['.', 'clean']: 
 1015              raise self.WebRestriction('Path can\'t be specified on the web.') 
1016 1017
1018 - def check_import(self, args):
1019 """check the validity of line 1020 No Path authorize for the Web""" 1021 1022 if not args: 1023 raise self.WebRestriction, 'import requires at least one option' 1024 1025 if args[0] not in self._import_formats: 1026 args[:] = ['command', './Cards/proc_card_mg5.dat'] 1027 elif args[0] == 'proc_v4': 1028 args[:] = [args[0], './Cards/proc_card.dat'] 1029 elif args[0] == 'command': 1030 args[:] = [args[0], './Cards/proc_card_mg5.dat'] 1031 1032 CheckValidForCmd.check_import(self, args)
1033
1034 - def check_install(self, args):
1035 """ No possibility to install new software on the web """ 1036 raise self.WebRestriction('Impossible to install program on the cluster')
1037
1038 - def check_load(self, args):
1039 """ check the validity of the line 1040 No Path authorize for the Web""" 1041 1042 CheckValidForCmd.check_load(self, args) 1043 1044 if len(args) == 2: 1045 if args[0] != 'model': 1046 raise self.WebRestriction('only model can be loaded online') 1047 if 'model.pkl' not in args[1]: 1048 raise self.WebRestriction('not valid pkl file: wrong name') 1049 if not os.path.realpath(args[1]).startswith(pjoin(MG4DIR, \ 1050 'Models')): 1051 raise self.WebRestriction('Wrong path to load model')
1052
 1053      def check_save(self, args): 
 1054          """ not authorized on the web""" 
 1055          raise self.WebRestriction('\"save\" command not authorized online') 
1056
 1057      def check_open(self, args): 
 1058          """ not authorized on the web""" 
 1059          raise self.WebRestriction('\"open\" command not authorized online') 
1060
1061 - def check_output(self, args):
1062 """ check the validity of the line""" 1063 1064 1065 # first pass to the default 1066 CheckValidForCmd.check_output(self, args) 1067 1068 args[:] = [self._export_format, '.', '-f'] 1069 1070 # Check that we output madevent 1071 if 'madevent' != self._export_format: 1072 raise self.WebRestriction, 'only available output format is madevent (at current stage)'
1073
1074 #=============================================================================== 1075 # CompleteForCmd 1076 #=============================================================================== 1077 -class CompleteForCmd(cmd.CompleteCmd):
1078 """ The Series of help routine for the MadGraphCmd""" 1079 1080
1081 - def model_completion(self, text, process):
1082 """ complete the line with model information """ 1083 1084 while ',' in process: 1085 process = process[process.index(',')+1:] 1086 args = self.split_arg(process) 1087 couplings = [] 1088 1089 # Force '>' if two initial particles. 1090 if len(args) == 2 and args[-1] != '>': 1091 return self.list_completion(text, '>') 1092 1093 # Add non-particle names 1094 if len(args) > 0 and args[-1] != '>': 1095 couplings = ['>'] 1096 if '>' in args and args.index('>') < len(args) - 1: 1097 couplings = [c + "=" for c in self._couplings] + \ 1098 ['@','$','/','>',','] 1099 return self.list_completion(text, self._particle_names + \ 1100 self._multiparticles.keys() + couplings)
1101 1102
 1103      def complete_generate(self, text, line, begidx, endidx): 
 1104          "Complete the generate command" 
 1105   
 1106          # Return list of particle names and multiparticle names, as well as 
 1107          # coupling orders and allowed symbols 
 1108          args = self.split_arg(line[0:begidx]) 
 1109          if len(args) > 2 and args[-1] == '@' or args[-1].endswith('='): 
 1110              return 
 1111   
 1112          try: 
 1113              return self.model_completion(text, ' '.join(args[1:])) 
 1114          except Exception as error: 
 1115              print error 
1116 1117 #if len(args) > 1 and args[-1] != '>': 1118 # couplings = ['>'] 1119 #if '>' in args and args.index('>') < len(args) - 1: 1120 # couplings = [c + "=" for c in self._couplings] + ['@','$','/','>'] 1121 #return self.list_completion(text, self._particle_names + \ 1122 # self._multiparticles.keys() + couplings) 1123
1124 - def complete_add(self, text, line, begidx, endidx):
1125 "Complete the add command" 1126 1127 args = self.split_arg(line[0:begidx]) 1128 1129 # Format 1130 if len(args) == 1: 1131 return self.list_completion(text, self._add_opts) 1132 1133 return self.complete_generate(text, " ".join(args[1:]), begidx, endidx) 1134 1135 # Return list of particle names and multiparticle names, as well as 1136 # coupling orders and allowed symbols 1137 couplings = [] 1138 if len(args) > 2 and args[-1] != '>': 1139 couplings = ['>'] 1140 if '>' in args and args.index('>') < len(args) - 1: 1141 couplings = [c + "=" for c in self._couplings] + ['@','$','/','>'] 1142 return self.list_completion(text, self._particle_names + \ 1143 self._multiparticles.keys() + couplings)
1144
1145 - def complete_check(self, text, line, begidx, endidx):
1146 "Complete the add command" 1147 1148 args = self.split_arg(line[0:begidx]) 1149 1150 # Format 1151 if len(args) == 1: 1152 return self.list_completion(text, self._check_opts) 1153 1154 1155 1156 1157 # Directory continuation 1158 if args[-1].endswith(os.path.sep): 1159 return self.path_completion(text, pjoin(*[a for a in args \ 1160 if a.endswith(os.path.sep)])) 1161 # autocompletion for particles/couplings 1162 model_comp = self.model_completion(text, ' '.join(args[2:])) 1163 1164 if len(args) == 2: 1165 return model_comp + self.path_completion(text) 1166 1167 if len(args) > 2: 1168 return model_comp
1169 1170
1171 - def complete_tutorial(self, text, line, begidx, endidx):
1172 "Complete the tutorial command" 1173 1174 # Format 1175 if len(self.split_arg(line[0:begidx])) == 1: 1176 return self.list_completion(text, self._tutorial_opts)
1177
1178 - def complete_define(self, text, line, begidx, endidx):
1179 """Complete particle information""" 1180 return self.model_completion(text, line[6:])
1181
1182 - def complete_display(self, text, line, begidx, endidx):
1183 "Complete the display command" 1184 1185 args = self.split_arg(line[0:begidx]) 1186 # Format 1187 if len(args) == 1: 1188 return self.list_completion(text, self._display_opts) 1189 1190 if len(args) == 2 and args[1] == 'checks': 1191 return self.list_completion(text, ['failed']) 1192 1193 if len(args) == 2 and args[1] == 'particles': 1194 return self.model_completion(text, line[begidx:])
1195
1196 - def complete_draw(self, text, line, begidx, endidx):
1197 "Complete the draw command" 1198 1199 args = self.split_arg(line[0:begidx]) 1200 1201 # Directory continuation 1202 if args[-1].endswith(os.path.sep): 1203 return self.path_completion(text, 1204 pjoin(*[a for a in args if a.endswith(os.path.sep)]), 1205 only_dirs = True) 1206 # Format 1207 if len(args) == 1: 1208 return self.path_completion(text, '.', only_dirs = True) 1209 1210 1211 #option 1212 if len(args) >= 2: 1213 opt = ['horizontal', 'external=', 'max_size=', 'add_gap=', 1214 'non_propagating', '--'] 1215 return self.list_completion(text, opt)
1216
1217 - def complete_launch(self, text, line, begidx, endidx):
1218 """ complete the launch command""" 1219 args = self.split_arg(line[0:begidx]) 1220 1221 # Directory continuation 1222 if args[-1].endswith(os.path.sep): 1223 return self.path_completion(text, 1224 pjoin(*[a for a in args if a.endswith(os.path.sep)]), 1225 only_dirs = True) 1226 # Format 1227 if len(args) == 1: 1228 out = {'Path from ./': self.path_completion(text, '.', only_dirs = True)} 1229 if MG5DIR != os.path.realpath('.'): 1230 out['Path from %s' % MG5DIR] = self.path_completion(text, 1231 MG5DIR, only_dirs = True, relative=False) 1232 if MG4DIR and MG4DIR != os.path.realpath('.') and MG4DIR != MG5DIR: 1233 out['Path from %s' % MG4DIR] = self.path_completion(text, 1234 MG4DIR, only_dirs = True, relative=False) 1235 1236 1237 #option 1238 if len(args) >= 2: 1239 out={} 1240 1241 if line[0:begidx].endswith('--laststep='): 1242 opt = ['parton', 'pythia', 'pgs','delphes','auto'] 1243 out['Options'] = self.list_completion(text, opt, line) 1244 else: 1245 opt = ['--cluster', '--multicore', '-i', '--name=', '-f','-m', '-n', 1246 '--interactive', '--laststep=parton', '--laststep=pythia', 1247 '--laststep=pgs', '--laststep=delphes','--laststep=auto'] 1248 out['Options'] = self.list_completion(text, opt, line) 1249 1250 1251 return self.deal_multiple_categories(out)
1252
1253 - def complete_load(self, text, line, begidx, endidx):
1254 "Complete the load command" 1255 1256 args = self.split_arg(line[0:begidx]) 1257 1258 # Format 1259 if len(args) == 1: 1260 return self.list_completion(text, self._save_opts) 1261 1262 # Directory continuation 1263 if args[-1].endswith(os.path.sep): 1264 return self.path_completion(text, 1265 pjoin(*[a for a in args if \ 1266 a.endswith(os.path.sep)])) 1267 1268 # Filename if directory is not given 1269 if len(args) == 2: 1270 return self.path_completion(text)
1271
1272 - def complete_save(self, text, line, begidx, endidx):
1273 "Complete the save command" 1274 1275 args = self.split_arg(line[0:begidx]) 1276 1277 # Format 1278 if len(args) == 1: 1279 return self.list_completion(text, self._save_opts) 1280 1281 # Directory continuation 1282 if args[-1].endswith(os.path.sep): 1283 return self.path_completion(text, 1284 pjoin(*[a for a in args if a.endswith(os.path.sep)]), 1285 only_dirs = True) 1286 1287 # Filename if directory is not given 1288 if len(args) == 2: 1289 return self.path_completion(text)
1290 1291 @cmd.debug()
1292 - def complete_open(self, text, line, begidx, endidx):
1293 """ complete the open command """ 1294 1295 args = self.split_arg(line[0:begidx]) 1296 1297 # Directory continuation 1298 if os.path.sep in args[-1] + text: 1299 return self.path_completion(text, 1300 pjoin(*[a for a in args if \ 1301 a.endswith(os.path.sep)])) 1302 1303 possibility = [] 1304 if self._done_export: 1305 path = self._done_export[0] 1306 possibility = ['index.html'] 1307 if os.path.isfile(pjoin(path,'README')): 1308 possibility.append('README') 1309 if os.path.isdir(pjoin(path,'Cards')): 1310 possibility += [f for f in os.listdir(pjoin(path,'Cards')) 1311 if f.endswith('.dat')] 1312 if os.path.isdir(pjoin(path,'HTML')): 1313 possibility += [f for f in os.listdir(pjoin(path,'HTML')) 1314 if f.endswith('.html') and 'default' not in f] 1315 else: 1316 possibility.extend(['./','../']) 1317 if os.path.exists('MG5_debug'): 1318 possibility.append('MG5_debug') 1319 if os.path.exists('ME5_debug'): 1320 possibility.append('ME5_debug') 1321 1322 return self.list_completion(text, possibility)
1323 1324 @cmd.debug()
1325 - def complete_output(self, text, line, begidx, endidx, 1326 possible_options = ['f', 'noclean', 'nojpeg'], 1327 possible_options_full = ['-f', '-noclean', '-nojpeg']):
1328 "Complete the output command" 1329 1330 possible_format = self._export_formats 1331 #don't propose directory use by MG_ME 1332 forbidden_names = ['MadGraphII', 'Template', 'pythia-pgs', 'CVS', 1333 'Calculators', 'MadAnalysis', 'SimpleAnalysis', 1334 'mg5', 'DECAY', 'EventConverter', 'Models', 1335 'ExRootAnalysis', 'HELAS', 'Transfer_Fct', 'aloha'] 1336 1337 #name of the run =>proposes old run name 1338 args = self.split_arg(line[0:begidx]) 1339 if len(args) >= 1: 1340 if len(args) > 1 and args[1] == 'aloha': 1341 try: 1342 return self.aloha_complete_output(text, line, begidx, endidx) 1343 except Exception, error: 1344 print error 1345 # Directory continuation 1346 if args[-1].endswith(os.path.sep): 1347 return [name for name in self.path_completion(text, 1348 pjoin(*[a for a in args if a.endswith(os.path.sep)]), 1349 only_dirs = True) if name not in forbidden_names] 1350 # options 1351 if args[-1][0] == '-' or len(args) > 1 and args[-2] == '-': 1352 return self.list_completion(text, possible_options) 1353 if len(args) > 2: 1354 return self.list_completion(text, possible_options_full) 1355 # Formats 1356 if len(args) == 1: 1357 format = possible_format + ['.' + os.path.sep, '..' + os.path.sep, 'auto'] 1358 return self.list_completion(text, format) 1359 1360 # directory names 1361 content = [name for name in self.path_completion(text, '.', only_dirs = True) \ 1362 if name not in forbidden_names] 1363 content += ['auto'] 1364 return self.list_completion(text, content)
1365
1366 - def aloha_complete_output(self, text, line, begidx, endidx):
1367 "Complete the output aloha command" 1368 args = self.split_arg(line[0:begidx]) 1369 completion_categories = {} 1370 1371 forbidden_names = ['MadGraphII', 'Template', 'pythia-pgs', 'CVS', 1372 'Calculators', 'MadAnalysis', 'SimpleAnalysis', 1373 'mg5', 'DECAY', 'EventConverter', 'Models', 1374 'ExRootAnalysis', 'Transfer_Fct', 'aloha', 1375 'apidoc','vendor'] 1376 1377 1378 # options 1379 options = ['--format=Fortran', '--format=Python','--format=CPP','--output='] 1380 options = self.list_completion(text, options) 1381 if options: 1382 completion_categories['options'] = options 1383 1384 if args[-1] == '--output=' or args[-1].endswith(os.path.sep): 1385 # Directory continuation 1386 completion_categories['path'] = [name for name in self.path_completion(text, 1387 pjoin(*[a for a in args if a.endswith(os.path.sep)]), 1388 only_dirs = True) if name not in forbidden_names] 1389 1390 else: 1391 ufomodel = ufomodels.load_model(self._curr_model.get('name')) 1392 wf_opt = [] 1393 amp_opt = [] 1394 opt_conjg = [] 1395 for lor in ufomodel.all_lorentz: 1396 amp_opt.append('%s_0' % lor.name) 1397 for i in range(len(lor.spins)): 1398 wf_opt.append('%s_%i' % (lor.name,i+1)) 1399 if i % 2 == 0 and lor.spins[i] == 2: 1400 opt_conjg.append('%sC%i_%i' % (lor.name,i //2 +1,i+1)) 1401 completion_categories['amplitude routines'] = self.list_completion(text, amp_opt) 1402 completion_categories['Wavefunctions routines'] = self.list_completion(text, wf_opt) 1403 completion_categories['conjugate_routines'] = self.list_completion(text, opt_conjg) 1404 1405 return self.deal_multiple_categories(completion_categories)
1406
1407 - def complete_set(self, text, line, begidx, endidx):
1408 "Complete the set command" 1409 args = self.split_arg(line[0:begidx]) 1410 1411 # Format 1412 if len(args) == 1: 1413 opts = self.options.keys() 1414 return self.list_completion(text, opts) 1415 1416 if len(args) == 2: 1417 if args[1] in ['group_subprocesses']: 1418 return self.list_completion(text, ['False', 'True', 'Auto']) 1419 1420 elif args[1] in ['ignore_six_quark_processes']: 1421 return self.list_completion(text, self._multiparticles.keys()) 1422 1423 elif args[1] == 'stdout_level': 1424 return self.list_completion(text, ['DEBUG','INFO','WARNING','ERROR','CRITICAL']) 1425 1426 elif args[1] == 'fortran_compiler': 1427 return self.list_completion(text, ['f77','g77','gfortran']) 1428 elif args[1] == 'nb_core': 1429 return self.list_completion(text, [str(i) for i in range(100)]) 1430 elif args[1] == 'run_mode': 1431 return self.list_completion(text, [str(i) for i in range(3)]) 1432 elif args[1] == 'cluster_type': 1433 return self.list_completion(text, cluster.from_name.keys()) 1434 elif args[1] == 'cluster_queue': 1435 return [] 1436 elif args[1] == 'automatic_html_opening': 1437 return self.list_completion(text, ['False', 'True']) 1438 else: 1439 # directory names 1440 second_set = [name for name in self.path_completion(text, '.', only_dirs = True)] 1441 return self.list_completion(text, first_set + second_set) 1442 elif len(args) >2 and args[-1].endswith(os.path.sep): 1443 return self.path_completion(text, 1444 pjoin(*[a for a in args if a.endswith(os.path.sep)]), 1445 only_dirs = True)
1446
1447 - def complete_import(self, text, line, begidx, endidx):
1448 "Complete the import command" 1449 1450 args=self.split_arg(line[0:begidx]) 1451 1452 # Format 1453 if len(args) == 1: 1454 opt = self.list_completion(text, self._import_formats) 1455 if opt: 1456 return opt 1457 mode = 'all' 1458 elif args[1] in self._import_formats: 1459 mode = args[1] 1460 else: 1461 args.insert(1, 'all') 1462 mode = 'all' 1463 1464 1465 completion_categories = {} 1466 # restriction continuation (for UFO) 1467 if mode in ['model', 'all'] and '-' in text: 1468 # deal with - in readline splitting (different on some computer) 1469 path = '-'.join([part for part in text.split('-')[:-1]]) 1470 # remove the final - for the model name 1471 # find the different possibilities 1472 all_name = self.find_restrict_card(path, no_restrict=False) 1473 all_name += self.find_restrict_card(path, no_restrict=False, 1474 base_dir=pjoin(MG5DIR,'models')) 1475 1476 # select the possibility according to the current line 1477 all_name = [name+' ' for name in all_name if name.startswith(text) 1478 and name.strip() != text] 1479 1480 1481 if all_name: 1482 completion_categories['Restricted model'] = all_name 1483 1484 # Path continuation 1485 if os.path.sep in args[-1]: 1486 if mode.startswith('model') or mode == 'all': 1487 # Directory continuation 1488 try: 1489 cur_path = pjoin(*[a for a in args \ 1490 if a.endswith(os.path.sep)]) 1491 except: 1492 pass 1493 else: 1494 all_dir = self.path_completion(text, cur_path, only_dirs = True) 1495 if mode in ['model_v4','all']: 1496 completion_categories['Path Completion'] = all_dir 1497 # Only UFO model here 1498 new = [] 1499 data = [new.__iadd__(self.find_restrict_card(name, base_dir=cur_path)) 1500 for name in all_dir] 1501 if data: 1502 completion_categories['Path Completion'] = all_dir + new 1503 else: 1504 try: 1505 cur_path = pjoin(*[a for a in args \ 1506 if a.endswith(os.path.sep)]) 1507 except: 1508 pass 1509 else: 1510 all_path = self.path_completion(text, cur_path) 1511 if mode == 'all': 1512 new = [] 1513 data = [new.__iadd__(self.find_restrict_card(name, base_dir=cur_path)) 1514 for name in all_path] 1515 if data: 1516 completion_categories['Path Completion'] = data[0] 1517 else: 1518 completion_categories['Path Completion'] = all_path 1519 1520 # Model directory name if directory is not given 1521 if (len(args) == 2): 1522 is_model = True 1523 if mode == 'model': 1524 file_cond = lambda p : os.path.exists(pjoin(MG5DIR,'models',p,'particles.py')) 1525 mod_name = lambda name: name 1526 elif mode == 'model_v4': 1527 file_cond = lambda p : (os.path.exists(pjoin(MG5DIR,'models',p,'particles.dat')) 1528 or os.path.exists(pjoin(self._mgme_dir,'Models',p,'particles.dat'))) 1529 mod_name = lambda name :(name[-3:] != '_v4' and name or name[:-3]) 1530 elif mode == 'all': 1531 mod_name = lambda name: name 1532 file_cond = lambda p : os.path.exists(pjoin(MG5DIR,'models',p,'particles.py')) \ 1533 or os.path.exists(pjoin(MG5DIR,'models',p,'particles.dat')) \ 1534 or os.path.exists(pjoin(self._mgme_dir,'Models',p,'particles.dat')) 1535 else: 1536 cur_path = pjoin(*[a for a in args \ 1537 if a.endswith(os.path.sep)]) 1538 all_path = self.path_completion(text, cur_path) 1539 completion_categories['model name'] = all_path 1540 is_model = False 1541 1542 if is_model: 1543 model_list = [mod_name(name) for name in \ 1544 self.path_completion(text, 1545 pjoin(MG5DIR,'models'), 1546 only_dirs = True) \ 1547 if file_cond(name)] 1548 1549 if mode == 'model_v4': 1550 completion_categories['model name'] = model_list 1551 else: 1552 # need to update the list 
with the possible restriction 1553 all_name = [] 1554 for model_name in model_list: 1555 all_name += self.find_restrict_card(model_name, 1556 base_dir=pjoin(MG5DIR,'models')) 1557 if mode == 'all': 1558 cur_path = pjoin(*[a for a in args \ 1559 if a.endswith(os.path.sep)]) 1560 all_path = self.path_completion(text, cur_path) 1561 completion_categories['model name'] = all_path + all_name 1562 elif mode == 'model': 1563 completion_categories['model name'] = all_name 1564 1565 # Options 1566 if mode == 'all' and len(args)>1: 1567 mode = self.find_import_type(args[2]) 1568 1569 if len(args) >= 3 and mode.startswith('model') and not '-modelname' in line: 1570 if not text and not completion_categories: 1571 return ['--modelname'] 1572 elif not (os.path.sep in args[-1] and line[-1] != ' '): 1573 completion_categories['options'] = self.list_completion(text, ['--modelname','-modelname']) 1574 if len(args) >= 3 and mode.startswith('banner') and not '--no_launch' in line: 1575 completion_categories['options'] = self.list_completion(text, ['--no_launch']) 1576 return self.deal_multiple_categories(completion_categories)
1577 1578 1579
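# --------------------------------------------------------------------------
# Illustrative sketch, not part of the MadGraph source: how the '-' handling
# in complete_import above recovers the model part of a partially typed
# "model-restriction" name when readline splits on '-'.  The model and
# restriction names below are made up for the example.
text = 'sm-no_b'
path = '-'.join([part for part in text.split('-')[:-1]])    # -> 'sm'
candidates = ['sm-full', 'sm-no_b_mass']   # as built by find_restrict_card
matches = [name + ' ' for name in candidates
           if name.startswith(text) and name.strip() != text]
print(matches)   # ['sm-no_b_mass ']
# --------------------------------------------------------------------------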
1580 - def find_restrict_card(self, model_name, base_dir='./', no_restrict=True):
1581          """find the restriction file associated with a given model"""
1582  
1583          # check if the model_name should be kept as a possibility
1584          if no_restrict:
1585              output = [model_name]
1586          else:
1587              output = []
1588  
1589          # check that the model is a valid model
1590          if not os.path.exists(pjoin(base_dir, model_name, 'couplings.py')):
1591              # not valid UFO model
1592              return output
1593  
1594          if model_name.endswith(os.path.sep):
1595              model_name = model_name[:-1]
1596  
1597          # look for _default and treat this case
1598          if os.path.exists(pjoin(base_dir, model_name, 'restrict_default.dat')):
1599              output.append('%s-full' % model_name)
1600  
1601          # look for other restriction files
1602          for name in os.listdir(pjoin(base_dir, model_name)):
1603              if name.startswith('restrict_') and not name.endswith('default.dat') \
1604                  and name.endswith('.dat'):
1605                  tag = name[9:-4]  # remove 'restrict_' and '.dat'
1606                  while model_name.endswith(os.path.sep):
1607                      model_name = model_name[:-1]
1608                  output.append('%s-%s' % (model_name, tag))
1609  
1610          # return
1611          return output
1612
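# --------------------------------------------------------------------------
# Illustrative sketch, not part of the MadGraph source: the restrict_*.dat
# tagging scheme used by find_restrict_card above, shown on a throw-away
# directory.  The file names are made up for the example.
import os, tempfile
mdir = tempfile.mkdtemp()
for fname in ['couplings.py', 'restrict_default.dat', 'restrict_no_b_mass.dat']:
    open(os.path.join(mdir, fname), 'w').close()
tags = [name[9:-4] for name in os.listdir(mdir)
        if name.startswith('restrict_') and name.endswith('.dat')
        and not name.endswith('default.dat')]
print(tags)   # ['no_b_mass']  -> offered to the user as 'MODEL-no_b_mass'
# --------------------------------------------------------------------------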
1613 - def complete_install(self, text, line, begidx, endidx):
1614          "Complete the install command"
1615  
1616          args = self.split_arg(line[0:begidx])
1617  
1618          # Format
1619          if len(args) == 1:
1620              return self.list_completion(text, self._install_opts)
1621
1622 #=============================================================================== 1623 # MadGraphCmd 1624 #=============================================================================== 1625 -class MadGraphCmd(HelpToCmd, CheckValidForCmd, CompleteForCmd, CmdExtended):
1626 """The command line processor of MadGraph""" 1627 1628 writing_dir = '.' 1629 1630 # Options and formats available 1631 _display_opts = ['particles', 'interactions', 'processes', 'diagrams', 1632 'diagrams_text', 'multiparticles', 'couplings', 'lorentz', 1633 'checks', 'parameters', 'options', 'coupling_order','variable'] 1634 _add_opts = ['process'] 1635 _save_opts = ['model', 'processes', 'options'] 1636 _tutorial_opts = ['start', 'stop'] 1637 _check_opts = ['full', 'permutation', 'gauge', 'lorentz_invariance'] 1638 _import_formats = ['model_v4', 'model', 'proc_v4', 'command', 'banner'] 1639 _install_opts = ['pythia-pgs', 'Delphes', 'MadAnalysis', 'ExRootAnalysis'] 1640 _v4_export_formats = ['madevent', 'standalone', 'matrix'] 1641 _export_formats = _v4_export_formats + ['standalone_cpp', 'pythia8', 'aloha'] 1642 _set_options = ['group_subprocesses', 1643 'ignore_six_quark_processes', 1644 'stdout_level', 1645 'fortran_compiler'] 1646 # Variables to store object information 1647 _curr_model = None #base_objects.Model() 1648 _curr_amps = diagram_generation.AmplitudeList() 1649 _curr_matrix_elements = helas_objects.HelasMultiProcess() 1650 _curr_fortran_model = None 1651 _curr_cpp_model = None 1652 _curr_exporter = None 1653 _done_export = False 1654
1655 - def preloop(self):
1656 """Initializing before starting the main loop""" 1657 1658 self.prompt = 'mg5>' 1659 1660 # By default, load the UFO Standard Model 1661 logger.info("Loading default model: sm") 1662 self.do_import('model sm') 1663 self.history.append('import model sm') 1664 1665 # preloop mother 1666 CmdExtended.preloop(self)
1667 1668
1669 - def __init__(self, mgme_dir = '', *completekey, **stdin):
1670 """ add a tracker of the history """ 1671 1672 CmdExtended.__init__(self, *completekey, **stdin) 1673 1674 # Set MG/ME directory path 1675 if mgme_dir: 1676 if os.path.isdir(pjoin(mgme_dir, 'Template')): 1677 self._mgme_dir = mgme_dir 1678 logger.info('Setting MG/ME directory to %s' % mgme_dir) 1679 else: 1680 logger.warning('Warning: Directory %s not valid MG/ME directory' % \ 1681 mgme_dir) 1682 self._mgme_dir = MG4DIR 1683 1684 # Variables to store state information 1685 self._multiparticles = {} 1686 self.options = {} 1687 self._generate_info = "" # store the first generated process 1688 self._model_v4_path = None 1689 self._use_lower_part_names = False 1690 self._export_dir = None 1691 self._export_format = 'madevent' 1692 self._mgme_dir = MG4DIR 1693 self._comparisons = None 1694 1695 # Load the configuration file 1696 self.set_configuration()
1697
1698 - def do_quit(self, line):
1699 """Do quit""" 1700 1701 if self._done_export and \ 1702 os.path.exists(pjoin(self._done_export[0],'RunWeb')): 1703 os.remove(pjoin(self._done_export[0],'RunWeb')) 1704 1705 value = super(MadGraphCmd, self).do_quit(line) 1706 print 1707 return value
1708 1709 # Add a process to the existing multiprocess definition 1710 # Generate a new amplitude
1711 - def do_add(self, line):
1712 """Generate an amplitude for a given process and add to 1713 existing amplitudes 1714 """ 1715 1716 args = self.split_arg(line) 1717 1718 # Check the validity of the arguments 1719 self.check_add(args) 1720 1721 if args[0] == 'process': 1722 # Rejoin line 1723 line = ' '.join(args[1:]) 1724 1725 # store the first process (for the perl script) 1726 if not self._generate_info: 1727 self._generate_info = line 1728 1729 # Reset Helas matrix elements 1730 self._curr_matrix_elements = helas_objects.HelasMultiProcess() 1731 1732 # Extract process from process definition 1733 if ',' in line: 1734 myprocdef, line = self.extract_decay_chain_process(line) 1735 else: 1736 myprocdef = self.extract_process(line) 1737 1738 # Check that we have something 1739 if not myprocdef: 1740 raise self.InvalidCmd("Empty or wrong format process, please try again.") 1741 # Check that we have the same number of initial states as 1742 # existing processes 1743 if self._curr_amps and self._curr_amps[0].get_ninitial() != \ 1744 myprocdef.get_ninitial(): 1745 raise self.InvalidCmd("Can not mix processes with different number of initial states.") 1746 1747 cpu_time1 = time.time() 1748 1749 # Generate processes 1750 if self.options['group_subprocesses'] == 'Auto': 1751 collect_mirror_procs = True 1752 else: 1753 collect_mirror_procs = self.options['group_subprocesses'] 1754 ignore_six_quark_processes = \ 1755 self.options['ignore_six_quark_processes'] if \ 1756 "ignore_six_quark_processes" in self.options \ 1757 else [] 1758 1759 myproc = diagram_generation.MultiProcess(myprocdef, 1760 collect_mirror_procs =\ 1761 collect_mirror_procs, 1762 ignore_six_quark_processes = \ 1763 ignore_six_quark_processes) 1764 1765 for amp in myproc.get('amplitudes'): 1766 if amp not in self._curr_amps: 1767 self._curr_amps.append(amp) 1768 else: 1769 raise self.InvalidCmd, "Duplicate process %s found. Please check your processes." % \ 1770 amp.nice_string_processes() 1771 1772 1773 # Reset _done_export, since we have new process 1774 self._done_export = False 1775 1776 cpu_time2 = time.time() 1777 1778 nprocs = len(myproc.get('amplitudes')) 1779 ndiags = sum([amp.get_number_of_diagrams() for \ 1780 amp in myproc.get('amplitudes')]) 1781 logger.info("%i processes with %i diagrams generated in %0.3f s" % \ 1782 (nprocs, ndiags, (cpu_time2 - cpu_time1))) 1783 ndiags = sum([amp.get_number_of_diagrams() for \ 1784 amp in self._curr_amps]) 1785 logger.info("Total: %i processes with %i diagrams" % \ 1786 (len(self._curr_amps), ndiags))
1787 1788 # Define a multiparticle label
1789 - def do_define(self, line, log=True):
1790 """Define a multiparticle""" 1791 1792 if self._use_lower_part_names: 1793 # Particle names lowercase 1794 line = line.lower() 1795 # Make sure there are spaces around = and | 1796 line = line.replace("=", " = ") 1797 line = line.replace("|", " | ") 1798 args = self.split_arg(line) 1799 # check the validity of the arguments 1800 self.check_define(args) 1801 1802 label = args[0] 1803 1804 pdg_list = self.extract_particle_ids(args[1:]) 1805 self.optimize_order(pdg_list) 1806 self._multiparticles[label] = pdg_list 1807 if log: 1808 logger.info("Defined multiparticle %s" % \ 1809 self.multiparticle_string(label))
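# --------------------------------------------------------------------------
# Illustrative sketch, not part of the MadGraph source: the tokenisation that
# do_define above relies on -- '=' and '|' are padded with spaces so that
# split_arg sees them as separate tokens.  The definition below is an example.
line = 'p=g u c d s u~ c~ d~ s~'
line = line.replace('=', ' = ').replace('|', ' | ')
tokens = line.split()
label, members = tokens[0], [t for t in tokens[1:] if t != '=']
print(label)     # 'p'
print(members)   # ['g', 'u', 'c', 'd', 's', 'u~', 'c~', 'd~', 's~']
# --------------------------------------------------------------------------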
1810 1811 # Display
1812 - def do_display(self, line, output=sys.stdout):
1813 """Display current internal status""" 1814 1815 args = self.split_arg(line) 1816 #check the validity of the arguments 1817 self.check_display(args) 1818 1819 if args[0] == 'diagrams': 1820 self.draw(' '.join(args[1:])) 1821 1822 if args[0] == 'particles' and len(args) == 1: 1823 propagating_particle = [] 1824 nb_unpropagating = 0 1825 for particle in self._curr_model['particles']: 1826 if particle.get('propagating'): 1827 propagating_particle.append(particle) 1828 else: 1829 nb_unpropagating += 1 1830 1831 print "Current model contains %i particles:" % \ 1832 len(propagating_particle) 1833 part_antipart = [part for part in propagating_particle \ 1834 if not part['self_antipart']] 1835 part_self = [part for part in propagating_particle \ 1836 if part['self_antipart']] 1837 for part in part_antipart: 1838 print part['name'] + '/' + part['antiname'], 1839 print '' 1840 for part in part_self: 1841 print part['name'], 1842 print '' 1843 if nb_unpropagating: 1844 print 'In addition of %s un-physical particle mediating new interactions.' \ 1845 % nb_unpropagating 1846 1847 elif args[0] == 'particles': 1848 for arg in args[1:]: 1849 if arg.isdigit() or (arg[0] == '-' and arg[1:].isdigit()): 1850 particle = self._curr_model.get_particle(abs(int(arg))) 1851 else: 1852 particle = self._curr_model['particles'].find_name(arg) 1853 if not particle: 1854 raise self.InvalidCmd, 'no particle %s in current model' % arg 1855 1856 print "Particle %s has the following properties:" % particle.get_name() 1857 print str(particle) 1858 1859 elif args[0] == 'interactions' and len(args) == 1: 1860 text = "Current model contains %i interactions\n" % \ 1861 len(self._curr_model['interactions']) 1862 for i, inter in enumerate(self._curr_model['interactions']): 1863 text += str(i+1) + ':' 1864 for part in inter['particles']: 1865 if part['is_part']: 1866 text += part['name'] 1867 else: 1868 text += part['antiname'] 1869 text += " " 1870 text += " ".join(order + '=' + str(inter['orders'][order]) \ 1871 for order in inter['orders']) 1872 text += '\n' 1873 pydoc.pager(text) 1874 1875 elif args[0] == 'interactions' and len(args)==2 and args[1].isdigit(): 1876 for arg in args[1:]: 1877 if int(arg) > len(self._curr_model['interactions']): 1878 raise self.InvalidCmd, 'no interaction %s in current model' % arg 1879 if int(arg) == 0: 1880 print 'Special interactions which identify two particles' 1881 else: 1882 print "Interactions %s has the following property:" % arg 1883 print self._curr_model['interactions'][int(arg)-1] 1884 1885 elif args[0] == 'interactions': 1886 request_part = args[1:] 1887 text = '' 1888 for i, inter in enumerate(self._curr_model['interactions']): 1889 present_part = [part['is_part'] and part['name'] or part['antiname'] 1890 for part in inter['particles'] 1891 if (part['is_part'] and part['name'] in request_part) or 1892 (not part['is_part'] and part['antiname'] in request_part)] 1893 if len(present_part) < len(request_part): 1894 continue 1895 # check that all particles are selected at least once 1896 if set(present_part) != set(request_part): 1897 continue 1898 # check if a particle is asked more than once 1899 if len(request_part) > len(set(request_part)): 1900 for p in request_part: 1901 if request_part.count(p) > present_part.count(p): 1902 continue 1903 1904 name = str(i+1) + ' : ' 1905 for part in inter['particles']: 1906 if part['is_part']: 1907 name += part['name'] 1908 else: 1909 name += part['antiname'] 1910 name += " " 1911 text += "\nInteractions %s has the following property:\n" % name 
1912 text += str(self._curr_model['interactions'][i]) 1913 1914 text += '\n' 1915 print name 1916 if text =='': 1917 text += 'No matching for any interactions' 1918 pydoc.pager(text) 1919 1920 1921 elif args[0] == 'parameters' and len(args) == 1: 1922 text = "Current model contains %i parameters\n" % \ 1923 sum([len(part) for part in 1924 self._curr_model['parameters'].values()]) 1925 1926 for key, item in self._curr_model['parameters'].items(): 1927 text += '\nparameter type: %s\n' % str(key) 1928 for value in item: 1929 if hasattr(value, 'expr'): 1930 if value.value is not None: 1931 text+= ' %s = %s = %s\n' % (value.name, value.expr ,value.value) 1932 else: 1933 text+= ' %s = %s\n' % (value.name, value.expr) 1934 else: 1935 if value.value is not None: 1936 text+= ' %s = %s\n' % (value.name, value.value) 1937 else: 1938 text+= ' %s \n' % (value.name) 1939 pydoc.pager(text) 1940 1941 elif args[0] == 'processes': 1942 for amp in self._curr_amps: 1943 print amp.nice_string_processes() 1944 1945 elif args[0] == 'diagrams_text': 1946 text = "\n".join([amp.nice_string() for amp in self._curr_amps]) 1947 pydoc.pager(text) 1948 1949 elif args[0] == 'multiparticles': 1950 print 'Multiparticle labels:' 1951 for key in self._multiparticles: 1952 print self.multiparticle_string(key) 1953 1954 elif args[0] == 'coupling_order': 1955 hierarchy = self._curr_model['order_hierarchy'].items() 1956 #self._curr_model.get_order_hierarchy().items() 1957 def order(first, second): 1958 if first[1] < second[1]: 1959 return -1 1960 else: 1961 return 1
1962 hierarchy.sort(order) 1963 for order in hierarchy: 1964 print ' %s : weight = %s' % order 1965 1966 elif args[0] == 'couplings' and len(args) == 1: 1967 if self._model_v4_path: 1968 print 'No couplings information available in V4 model' 1969 return 1970 text = '' 1971 text = "Current model contains %i couplings\n" % \ 1972 sum([len(part) for part in 1973 self._curr_model['couplings'].values()]) 1974 keys = self._curr_model['couplings'].keys() 1975 def key_sort(x, y): 1976 if ('external',) == x: 1977 return -1 1978 elif ('external',) == y: 1979 return +1 1980 elif len(x) < len(y): 1981 return -1 1982 else: 1983 return 1
1984 keys.sort(key_sort) 1985 for key in keys: 1986 item = self._curr_model['couplings'][key] 1987 text += '\ncouplings type: %s\n' % str(key) 1988 for value in item: 1989 if value.value is not None: 1990 text+= ' %s = %s = %s\n' % (value.name, value.expr ,value.value) 1991 else: 1992 text+= ' %s = %s\n' % (value.name, value.expr) 1993 1994 pydoc.pager(text) 1995 1996 elif args[0] == 'couplings': 1997 if self._model_v4_path: 1998 print 'No couplings information available in V4 model' 1999 return 2000 try: 2001 ufomodel = ufomodels.load_model(self._curr_model.get('name')) 2002 print eval('ufomodel.couplings.%s.nice_string()'%args[1]) 2003 except: 2004 raise self.InvalidCmd, 'no couplings %s in current model' % args[1] 2005 2006 elif args[0] == 'lorentz': 2007 if self._model_v4_path: 2008 print 'No lorentz information available in V4 model' 2009 return 2010 elif len(args) == 1: 2011 raise self.InvalidCmd,\ 2012 'display lorentz require an argument: the name of the lorentz structure.' 2013 return 2014 try: 2015 ufomodel = ufomodels.load_model(self._curr_model.get('name')) 2016 print eval('ufomodel.lorentz.%s.nice_string()'%args[1]) 2017 except: 2018 raise self.InvalidCmd, 'no lorentz %s in current model' % args[1] 2019 2020 elif args[0] == 'checks': 2021 comparisons = self._comparisons[0] 2022 if len(args) > 1 and args[1] == 'failed': 2023 comparisons = [c for c in comparisons if not c['passed']] 2024 outstr = "Process check results:" 2025 for comp in comparisons: 2026 outstr += "\n%s:" % comp['process'].nice_string() 2027 outstr += "\n Phase space point: (px py pz E)" 2028 for i, p in enumerate(comp['momenta']): 2029 outstr += "\n%2s %+.9e %+.9e %+.9e %+.9e" % tuple([i] + p) 2030 outstr += "\n Permutation values:" 2031 outstr += "\n " + str(comp['values']) 2032 if comp['passed']: 2033 outstr += "\n Process passed (rel. difference %.9e)" % \ 2034 comp['difference'] 2035 else: 2036 outstr += "\n Process failed (rel. difference %.9e)" % \ 2037 comp['difference'] 2038 2039 used_aloha = sorted(self._comparisons[1]) 2040 outstr += "\nChecked ALOHA routines:" 2041 for aloha in used_aloha: 2042 aloha_str = aloha[0] 2043 if aloha[1]: 2044 aloha_str += 'C' + 'C'.join([str(ia) for ia in aloha[1]]) 2045 aloha_str += "_%d" % aloha[2] 2046 outstr += "\n" + aloha_str 2047 2048 pydoc.pager(outstr) 2049 2050 elif args[0] in ["options", "variable"]: 2051 super(MadGraphCmd, self).do_display(line, output) 2052 2053
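# --------------------------------------------------------------------------
# Illustrative sketch, not part of the MadGraph source: the ordering produced
# by 'display coupling_order' above (the source uses a Python 2 cmp function;
# a key function gives the same ordering).  The hierarchy values are examples.
hierarchy = {'QCD': 1, 'QED': 2}
for name, weight in sorted(hierarchy.items(), key=lambda item: item[1]):
    print(' %s : weight = %s' % (name, weight))
#  QCD : weight = 1
#  QED : weight = 2
# --------------------------------------------------------------------------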
2054 - def multiparticle_string(self, key):
2055 """Returns a nicely formatted string for the multiparticle""" 2056 2057 if self._multiparticles[key] and \ 2058 isinstance(self._multiparticles[key][0], list): 2059 return "%s = %s" % (key, "|".join([" ".join([self._curr_model.\ 2060 get('particle_dict')[part_id].get_name() \ 2061 for part_id in id_list]) \ 2062 for id_list in self._multiparticles[key]])) 2063 else: 2064 return "%s = %s" % (key, " ".join([self._curr_model.\ 2065 get('particle_dict')[part_id].get_name() \ 2066 for part_id in self._multiparticles[key]]))
2067 2068 2069
2070 - def do_tutorial(self, line):
2071 """Activate/deactivate the tutorial mode.""" 2072 2073 args = self.split_arg(line) 2074 if len(args) > 0 and args[0] == "stop": 2075 logger_tuto.info("\n\tThanks for using the tutorial!") 2076 logger_tuto.setLevel(logging.ERROR) 2077 else: 2078 logger_tuto.setLevel(logging.INFO) 2079 2080 if not self._mgme_dir: 2081 logger_tuto.info(\ 2082 "\n\tWarning: To use all features in this tutorial, " + \ 2083 "please run from a" + \ 2084 "\n\t valid MG_ME directory.")
2085
2086 - def draw(self, line):
2087 """ draw the Feynman diagram for the given process """ 2088 2089 args = self.split_arg(line) 2090 # Check the validity of the arguments 2091 self.check_draw(args) 2092 2093 # Check if we plot a decay chain 2094 if any([isinstance(a, diagram_generation.DecayChainAmplitude) for \ 2095 a in self._curr_amps]) and not self._done_export: 2096 warn = 'WARNING: You try to draw decay chain diagrams without first running output.\n' 2097 warn += '\t The decay processes will be drawn separately' 2098 logger.warning(warn) 2099 2100 (options, args) = _draw_parser.parse_args(args) 2101 options = draw_lib.DrawOption(options) 2102 start = time.time() 2103 2104 # Collect amplitudes 2105 amplitudes = diagram_generation.AmplitudeList() 2106 2107 for amp in self._curr_amps: 2108 amplitudes.extend(amp.get_amplitudes()) 2109 2110 for amp in amplitudes: 2111 filename = pjoin(args[0], 'diagrams_' + \ 2112 amp.get('process').shell_string() + ".eps") 2113 plot = draw.MultiEpsDiagramDrawer(amp['diagrams'], 2114 filename, 2115 model=self._curr_model, 2116 amplitude='', 2117 legend=amp.get('process').input_string()) 2118 2119 logger.info("Drawing " + \ 2120 amp.get('process').nice_string()) 2121 plot.draw(opt=options) 2122 logger.info("Wrote file " + filename) 2123 self.exec_cmd('open %s' % filename) 2124 2125 stop = time.time() 2126 logger.info('time to draw %s' % (stop - start))
2127 2128 # Generate a new amplitude
2129 - def do_check(self, line):
2130 """Check a given process or set of processes""" 2131 2132 args = self.split_arg(line) 2133 2134 # Check args validity 2135 param_card = self.check_check(args) 2136 2137 line = " ".join(args[1:]) 2138 myprocdef = self.extract_process(line) 2139 2140 # Check that we have something 2141 if not myprocdef: 2142 raise self.InvalidCmd("Empty or wrong format process, please try again.") 2143 2144 # Disable diagram generation logger 2145 diag_logger = logging.getLogger('madgraph.diagram_generation') 2146 old_level = diag_logger.getEffectiveLevel() 2147 diag_logger.setLevel(logging.WARNING) 2148 2149 # run the check 2150 cpu_time1 = time.time() 2151 # Run matrix element generation check on processes 2152 2153 comparisons = [] 2154 gauge_result = [] 2155 lorentz_result =[] 2156 nb_processes = 0 2157 2158 if args[0] in ['permutation', 'full']: 2159 comparisons = process_checks.check_processes(myprocdef, 2160 param_card = param_card, 2161 quick = True) 2162 nb_processes += len(comparisons[0]) 2163 2164 if args[0] in ['gauge', 'full']: 2165 gauge_result = process_checks.check_gauge(myprocdef, 2166 param_card = param_card) 2167 nb_processes += len(gauge_result) 2168 2169 if args[0] in ['lorentz_invariance', 'full']: 2170 lorentz_result = process_checks.check_lorentz(myprocdef, 2171 param_card = param_card) 2172 nb_processes += len(lorentz_result) 2173 2174 cpu_time2 = time.time() 2175 2176 logger.info("%i processes checked in %0.3f s" \ 2177 % (nb_processes, 2178 (cpu_time2 - cpu_time1))) 2179 2180 text = "" 2181 2182 if gauge_result: 2183 text += 'Gauge results:\n' 2184 text += process_checks.output_gauge(gauge_result) + '\n' 2185 2186 if lorentz_result: 2187 text += 'Lorentz invariance results:\n' 2188 text += process_checks.output_lorentz_inv(lorentz_result) + '\n' 2189 2190 if comparisons: 2191 text += 'Process permutation results:\n' 2192 text += process_checks.output_comparisons(comparisons[0]) + '\n' 2193 self._comparisons = comparisons 2194 2195 logger.info(text) 2196 pydoc.pager(text) 2197 # Restore diagram logger 2198 diag_logger.setLevel(old_level) 2199 2200 return
2201 2202 # Generate a new amplitude
2203 - def do_generate(self, line):
2204 """Generate an amplitude for a given process""" 2205 2206 # Reset amplitudes 2207 self._curr_amps = diagram_generation.AmplitudeList() 2208 # Reset Helas matrix elements 2209 self._curr_matrix_elements = None 2210 self._generate_info = line 2211 # Reset _done_export, since we have new process 2212 self._done_export = False 2213 # Also reset _export_format and _export_dir 2214 self._export_format = None 2215 2216 # Remove previous generations from history 2217 self.clean_history(to_remove=['add process'], remove_bef_lb1='generate', 2218 to_keep=['add','import','set','load']) 2219 2220 # Call add process 2221 args = self.split_arg(line) 2222 args.insert(0, 'process') 2223 2224 self.do_add(" ".join(args))
2225
2226 - def extract_process(self, line, proc_number = 0, overall_orders = {}):
2227 """Extract a process definition from a string. Returns 2228 a ProcessDefinition.""" 2229 2230 # Check basic validity of the line 2231 if not line.count('>') in [1,2]: 2232 self.do_help('generate') 2233 print 2234 raise self.InvalidCmd('Wrong use of \">\" special character.') 2235 2236 2237 # Perform sanity modifications on the lines: 2238 # Add a space before and after any > , $ / | 2239 space_before = re.compile(r"(?P<carac>\S)(?P<tag>[/\,\\$\\>|])(?P<carac2>\S)") 2240 line = space_before.sub(r'\g<carac> \g<tag> \g<carac2>', line) 2241 2242 # Use regular expressions to extract s-channel propagators, 2243 # forbidden s-channel propagators/particles, coupling orders 2244 # and process number, starting from the back 2245 2246 # Start with process number (identified by "@") 2247 proc_number_pattern = re.compile("^(.+)@\s*(\d+)\s*(.*)$") 2248 proc_number_re = proc_number_pattern.match(line) 2249 if proc_number_re: 2250 proc_number = int(proc_number_re.group(2)) 2251 line = proc_number_re.group(1) + \ 2252 proc_number_re.group(3) 2253 2254 # Then take coupling orders (identified by "=") 2255 order_pattern = re.compile("^(.+)\s+(\w+)\s*=\s*(\d+)\s*$") 2256 order_re = order_pattern.match(line) 2257 orders = {} 2258 while order_re: 2259 orders[order_re.group(2)] = int(order_re.group(3)) 2260 line = order_re.group(1) 2261 order_re = order_pattern.match(line) 2262 2263 if self._use_lower_part_names: 2264 # Particle names lowercase 2265 line = line.lower() 2266 2267 # Now check for forbidden particles, specified using "/" 2268 slash = line.find("/") 2269 dollar = line.find("$") 2270 forbidden_particles = "" 2271 if slash > 0: 2272 if dollar > slash: 2273 forbidden_particles_re = re.match("^(.+)\s*/\s*(.+\s*)(\$.*)$", line) 2274 else: 2275 forbidden_particles_re = re.match("^(.+)\s*/\s*(.+\s*)$", line) 2276 if forbidden_particles_re: 2277 forbidden_particles = forbidden_particles_re.group(2) 2278 line = forbidden_particles_re.group(1) 2279 if len(forbidden_particles_re.groups()) > 2: 2280 line = line + forbidden_particles_re.group(3) 2281 2282 # Now check for forbidden schannels, specified using "$$" 2283 forbidden_schannels_re = re.match("^(.+)\s*\$\s*\$\s*(.+)\s*$", line) 2284 forbidden_schannels = "" 2285 if forbidden_schannels_re: 2286 forbidden_schannels = forbidden_schannels_re.group(2) 2287 line = forbidden_schannels_re.group(1) 2288 2289 # Now check for forbidden onshell schannels, specified using "$" 2290 forbidden_onsh_schannels_re = re.match("^(.+)\s*\$\s*(.+)\s*$", line) 2291 forbidden_onsh_schannels = "" 2292 if forbidden_onsh_schannels_re: 2293 forbidden_onsh_schannels = forbidden_onsh_schannels_re.group(2) 2294 line = forbidden_onsh_schannels_re.group(1) 2295 2296 # Now check for required schannels, specified using "> >" 2297 required_schannels_re = re.match("^(.+?)>(.+?)>(.+)$", line) 2298 required_schannels = "" 2299 if required_schannels_re: 2300 required_schannels = required_schannels_re.group(2) 2301 line = required_schannels_re.group(1) + ">" + \ 2302 required_schannels_re.group(3) 2303 2304 args = self.split_arg(line) 2305 2306 myleglist = base_objects.MultiLegList() 2307 state = False 2308 2309 # Extract process 2310 for part_name in args: 2311 if part_name == '>': 2312 if not myleglist: 2313 raise self.InvalidCmd, "No final state particles" 2314 state = True 2315 continue 2316 2317 mylegids = [] 2318 if part_name in self._multiparticles: 2319 if isinstance(self._multiparticles[part_name][0], list): 2320 raise self.InvalidCmd,\ 2321 "Multiparticle %s is or-multiparticle" % 
part_name + \ 2322 " which can be used only for required s-channels" 2323 mylegids.extend(self._multiparticles[part_name]) 2324 else: 2325 mypart = self._curr_model['particles'].find_name(part_name) 2326 if mypart: 2327 mylegids.append(mypart.get_pdg_code()) 2328 2329 if mylegids: 2330 myleglist.append(base_objects.MultiLeg({'ids':mylegids, 2331 'state':state})) 2332 else: 2333 raise self.InvalidCmd, \ 2334 "No particle %s in model" % part_name 2335 2336 if filter(lambda leg: leg.get('state') == True, myleglist): 2337 # We have a valid process 2338 2339 # Now extract restrictions 2340 forbidden_particle_ids = \ 2341 self.extract_particle_ids(forbidden_particles) 2342 if forbidden_particle_ids and \ 2343 isinstance(forbidden_particle_ids[0], list): 2344 raise self.InvalidCmd,\ 2345 "Multiparticle %s is or-multiparticle" % part_name + \ 2346 " which can be used only for required s-channels" 2347 forbidden_onsh_schannel_ids = \ 2348 self.extract_particle_ids(forbidden_onsh_schannels) 2349 forbidden_schannel_ids = \ 2350 self.extract_particle_ids(forbidden_schannels) 2351 if forbidden_onsh_schannel_ids and \ 2352 isinstance(forbidden_onsh_schannel_ids[0], list): 2353 raise self.InvalidCmd,\ 2354 "Multiparticle %s is or-multiparticle" % part_name + \ 2355 " which can be used only for required s-channels" 2356 if forbidden_schannel_ids and \ 2357 isinstance(forbidden_schannel_ids[0], list): 2358 raise self.InvalidCmd,\ 2359 "Multiparticle %s is or-multiparticle" % part_name + \ 2360 " which can be used only for required s-channels" 2361 required_schannel_ids = \ 2362 self.extract_particle_ids(required_schannels) 2363 if required_schannel_ids and not \ 2364 isinstance(required_schannel_ids[0], list): 2365 required_schannel_ids = [required_schannel_ids] 2366 2367 2368 return \ 2369 base_objects.ProcessDefinition({'legs': myleglist, 2370 'model': self._curr_model, 2371 'id': proc_number, 2372 'orders': orders, 2373 'forbidden_particles': forbidden_particle_ids, 2374 'forbidden_onsh_s_channels': forbidden_onsh_schannel_ids, 2375 'forbidden_s_channels': \ 2376 forbidden_schannel_ids, 2377 'required_s_channels': required_schannel_ids, 2378 'overall_orders': overall_orders 2379 })
2380 # 'is_decay_chain': decay_process\ 2381
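# --------------------------------------------------------------------------
# Illustrative sketch, not part of the MadGraph source: the '@' and 'ORDER=N'
# regular expressions used in extract_process above, applied to a sample line.
import re
line = 'p p > t t~ j QED=0 QCD=3 @2'
m = re.compile("^(.+)@\s*(\d+)\s*(.*)$").match(line)
proc_number, line = int(m.group(2)), m.group(1) + m.group(3)
orders = {}
order_pattern = re.compile("^(.+)\s+(\w+)\s*=\s*(\d+)\s*$")
m = order_pattern.match(line)
while m:
    orders[m.group(2)] = int(m.group(3))
    line = m.group(1)
    m = order_pattern.match(line)
print(proc_number)   # 2
print(orders)        # {'QCD': 3, 'QED': 0}  (dict order may vary)
print(line)          # 'p p > t t~ j'
# --------------------------------------------------------------------------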
2382 - def extract_particle_ids(self, args):
2383 """Extract particle ids from a list of particle names. If 2384 there are | in the list, this corresponds to an or-list, which 2385 is represented as a list of id lists. An or-list is used to 2386 allow multiple required s-channel propagators to be specified 2387 (e.g. Z/gamma).""" 2388 2389 if isinstance(args, basestring): 2390 args.replace("|", " | ") 2391 args = self.split_arg(args) 2392 all_ids = [] 2393 ids=[] 2394 for part_name in args: 2395 mypart = self._curr_model['particles'].find_name(part_name) 2396 if mypart: 2397 ids.append([mypart.get_pdg_code()]) 2398 elif part_name in self._multiparticles: 2399 ids.append(self._multiparticles[part_name]) 2400 elif part_name == "|": 2401 # This is an "or-multiparticle" 2402 if ids: 2403 all_ids.append(ids) 2404 ids = [] 2405 else: 2406 raise self.InvalidCmd("No particle %s in model" % part_name) 2407 all_ids.append(ids) 2408 # Flatten id list, to take care of multiparticles and 2409 # or-multiparticles 2410 res_lists = [] 2411 for i, id_list in enumerate(all_ids): 2412 res_lists.extend(diagram_generation.expand_list_list(id_list)) 2413 # Trick to avoid duplication while keeping ordering 2414 for ilist, idlist in enumerate(res_lists): 2415 set_dict = {} 2416 res_lists[ilist] = [set_dict.setdefault(i,i) for i in idlist \ 2417 if i not in set_dict] 2418 2419 if len(res_lists) == 1: 2420 res_lists = res_lists[0] 2421 2422 return res_lists
2423
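# --------------------------------------------------------------------------
# Illustrative sketch, not part of the MadGraph source: the setdefault trick
# used in extract_particle_ids above to drop duplicate pdg codes while keeping
# their original order.  The id list is an example.
idlist = [1, 2, 1, 3, 2]
set_dict = {}
deduped = [set_dict.setdefault(i, i) for i in idlist if i not in set_dict]
print(deduped)   # [1, 2, 3]
# --------------------------------------------------------------------------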
2424 - def optimize_order(self, pdg_list):
2425 """Optimize the order of particles in a pdg list, so that 2426 similar particles are next to each other. Sort according to: 2427 1. pdg > 0, 2. spin, 3. color, 4. mass > 0""" 2428 2429 if not pdg_list: 2430 return 2431 if not isinstance(pdg_list[0], int): 2432 return 2433 2434 model = self._curr_model 2435 pdg_list.sort(key = lambda i: i < 0) 2436 pdg_list.sort(key = lambda i: model.get_particle(i).is_fermion()) 2437 pdg_list.sort(key = lambda i: model.get_particle(i).get('color'), 2438 reverse = True) 2439 pdg_list.sort(key = lambda i: \ 2440 model.get_particle(i).get('mass').lower() != 'zero')
2441
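# --------------------------------------------------------------------------
# Illustrative sketch, not part of the MadGraph source: optimize_order above
# chains several stable sorts, so the last key applied dominates and earlier
# keys only break ties.  Plain integers stand in for pdg codes here.
values = [5, -2, 3, -8, 1]
values.sort(key=lambda i: i < 0)        # non-negative entries first
values.sort(key=lambda i: abs(i) > 4)   # small magnitudes first (applied last)
print(values)   # [3, 1, -2, 5, -8]
# --------------------------------------------------------------------------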
2442 - def extract_decay_chain_process(self, line, level_down=False):
2443 """Recursively extract a decay chain process definition from a 2444 string. Returns a ProcessDefinition.""" 2445 2446 # Start with process number (identified by "@") and overall orders 2447 proc_number_pattern = re.compile("^(.+)@\s*(\d+)\s*((\w+\s*=\s*\d+\s*)*)$") 2448 proc_number_re = proc_number_pattern.match(line) 2449 proc_number = 0 2450 overall_orders = {} 2451 if proc_number_re: 2452 proc_number = int(proc_number_re.group(2)) 2453 line = proc_number_re.group(1) 2454 if proc_number_re.group(3): 2455 order_pattern = re.compile("^(.*?)\s*(\w+)\s*=\s*(\d+)\s*$") 2456 order_line = proc_number_re.group(3) 2457 order_re = order_pattern.match(order_line) 2458 while order_re: 2459 overall_orders[order_re.group(2)] = int(order_re.group(3)) 2460 order_line = order_re.group(1) 2461 order_re = order_pattern.match(order_line) 2462 logger.info(line) 2463 2464 index_comma = line.find(",") 2465 index_par = line.find(")") 2466 min_index = index_comma 2467 if index_par > -1 and (index_par < min_index or min_index == -1): 2468 min_index = index_par 2469 2470 if min_index > -1: 2471 core_process = self.extract_process(line[:min_index], proc_number, 2472 overall_orders) 2473 else: 2474 core_process = self.extract_process(line, proc_number, 2475 overall_orders) 2476 2477 #level_down = False 2478 2479 while index_comma > -1: 2480 line = line[index_comma + 1:] 2481 if not line.strip(): 2482 break 2483 index_par = line.find(')') 2484 if line.lstrip()[0] == '(': 2485 # Go down one level in process hierarchy 2486 #level_down = True 2487 line = line.lstrip()[1:] 2488 # This is where recursion happens 2489 decay_process, line = \ 2490 self.extract_decay_chain_process(line, 2491 level_down=True) 2492 index_comma = line.find(",") 2493 index_par = line.find(')') 2494 else: 2495 index_comma = line.find(",") 2496 min_index = index_comma 2497 if index_par > -1 and \ 2498 (index_par < min_index or min_index == -1): 2499 min_index = index_par 2500 if min_index > -1: 2501 decay_process = self.extract_process(line[:min_index]) 2502 else: 2503 decay_process = self.extract_process(line) 2504 2505 core_process.get('decay_chains').append(decay_process) 2506 2507 if level_down: 2508 if index_par == -1: 2509 raise self.InvalidCmd, \ 2510 "Missing ending parenthesis for decay process" 2511 2512 if index_par < index_comma: 2513 line = line[index_par + 1:] 2514 level_down = False 2515 break 2516 2517 if level_down: 2518 index_par = line.find(')') 2519 if index_par == -1: 2520 raise self.InvalidCmd, \ 2521 "Missing ending parenthesis for decay process" 2522 line = line[index_par + 1:] 2523 2524 # Return the core process (ends recursion when there are no 2525 # more decays) 2526 return core_process, line
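# --------------------------------------------------------------------------
# Illustrative sketch, not part of the MadGraph source: the decay-chain syntax
# walked by extract_decay_chain_process above.  Commas attach decays to the
# core process and parentheses open a nested chain handled by the recursive
# call.  The process string is an example.
line = 'p p > t t~, (t > w+ b, w+ > l+ vl), t~ > w- b~'
cuts = [i for i in (line.find(','), line.find(')')) if i > -1]
core = line[:min(cuts)] if cuts else line
print(core)   # 'p p > t t~'  (the remainder is parsed as its decays)
# --------------------------------------------------------------------------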
2527 2528 2529 # Import files
2530 - def do_import(self, line):
2531 """Import files with external formats""" 2532 2533 args = self.split_arg(line) 2534 # Check argument's validity 2535 self.check_import(args) 2536 2537 if args[0].startswith('model'): 2538 self._model_v4_path = None 2539 # Clear history, amplitudes and matrix elements when a model is imported 2540 # Remove previous imports, generations and outputs from history 2541 self.clean_history(remove_bef_lb1='import') 2542 # Reset amplitudes and matrix elements 2543 self._curr_amps = diagram_generation.AmplitudeList() 2544 self._curr_matrix_elements = helas_objects.HelasMultiProcess() 2545 # Import model 2546 if args[0].endswith('_v4'): 2547 self._curr_model, self._model_v4_path = \ 2548 import_v4.import_model(args[1], self._mgme_dir) 2549 self._curr_fortran_model = \ 2550 helas_call_writers.FortranHelasCallWriter(\ 2551 self._curr_model) 2552 else: 2553 try: 2554 self._curr_model = import_ufo.import_model(args[1]) 2555 except import_ufo.UFOImportError, error: 2556 logger_stderr.warning('WARNING: %s' % error) 2557 logger_stderr.info('Trying to run `import model_v4 %s` instead.' \ 2558 % args[1]) 2559 self.exec_cmd('import model_v4 %s ' % args[1], precmd=True) 2560 return 2561 self._curr_fortran_model = \ 2562 helas_call_writers.FortranUFOHelasCallWriter(\ 2563 self._curr_model) 2564 self._curr_cpp_model = \ 2565 helas_call_writers.CPPUFOHelasCallWriter(\ 2566 self._curr_model) 2567 2568 if '-modelname' not in args: 2569 self._curr_model.pass_particles_name_in_mg_default() 2570 2571 # Do post-processing of model 2572 self.process_model() 2573 2574 # Reset amplitudes and matrix elements and global checks 2575 self._curr_amps = diagram_generation.AmplitudeList() 2576 self._curr_matrix_elements = helas_objects.HelasMultiProcess() 2577 process_checks.store_aloha = [] 2578 2579 elif args[0] == 'command': 2580 # Remove previous imports, generations and outputs from history 2581 self.clean_history(to_remove=['import', 'generate', 'add process', 2582 'open','display','launch']) 2583 2584 if not os.path.isfile(args[1]): 2585 raise self.InvalidCmd("Path %s is not a valid pathname" % args[1]) 2586 else: 2587 # Check the status of export and try to use file position if no 2588 #self._export dir are define 2589 self.check_for_export_dir(args[1]) 2590 # Execute the card 2591 self.use_rawinput = False 2592 self.import_command_file(args[1]) 2593 self.use_rawinput = True 2594 2595 elif args[0] == 'banner': 2596 type = madevent_interface.MadEventCmd.detect_card_type(args[1]) 2597 if type != 'banner': 2598 raise self.InvalidCmd, 'The File should be a valid banner' 2599 ban = banner_module.Banner(args[1]) 2600 # Check that this is MG5 banner 2601 if 'mg5proccard' in ban: 2602 for line in ban['mg5proccard'].split('\n'): 2603 if line.startswith('#') or line.startswith('<'): 2604 continue 2605 self.exec_cmd(line) 2606 else: 2607 raise self.InvalidCmd, 'Only MG5 banner are supported' 2608 2609 if not self._done_export: 2610 self.exec_cmd('output . 
-f')
2611  
2612              ban.split(self._done_export[0])
2613              logger.info('All Cards from the banner have been placed in directory %s' % pjoin(self._done_export[0], 'Cards'))
2614              if '--no_launch' not in args:
2615                  self.exec_cmd('launch')
2616  
2617  
2618          elif args[0] == 'proc_v4':
2619  
2620              # Remove previous imports, generations and outputs from history
2621              self.clean_history(to_remove=['import', 'generate', 'add process',
2622                                            'open','display','launch'])
2623  
2624              if len(args) == 1 and self._export_dir:
2625                  proc_card = pjoin(self._export_dir, 'Cards', \
2626                                    'proc_card.dat')
2627              elif len(args) == 2:
2628                  proc_card = args[1]
2629                  # Check the status of export and try to use the file position if no
2630                  # self._export_dir is defined
2631                  self.check_for_export_dir(os.path.realpath(proc_card))
2632              else:
2633                  raise MadGraph5Error('No default directory in output')
2634  
2635  
2636              # convert and execute the card
2637              self.import_mg4_proc_card(proc_card)
2638 2639
2640 - def import_ufo_model(self, model_name):
2641 """ import the UFO model """ 2642 2643 self._curr_model = import_ufo.import_model(model_name) 2644 self._curr_fortran_model = \ 2645 helas_call_writers.FortranUFOHelasCallWriter(self._curr_model) 2646 self._curr_cpp_model = \ 2647 helas_call_writers.CPPUFOHelasCallWriter(self._curr_model)
2648
2649 - def process_model(self):
2650 """Set variables _particle_names and _couplings for tab 2651 completion, define multiparticles""" 2652 2653 # Set variables for autocomplete 2654 self._particle_names = [p.get('name') for p in self._curr_model.get('particles')\ 2655 if p.get('propagating')] + \ 2656 [p.get('antiname') for p in self._curr_model.get('particles') \ 2657 if p.get('propagating')] 2658 2659 self._couplings = list(set(sum([i.get('orders').keys() for i in \ 2660 self._curr_model.get('interactions')], []))) 2661 # Check if we can use case-independent particle names 2662 self._use_lower_part_names = \ 2663 (self._particle_names == \ 2664 [p.get('name').lower() for p in self._curr_model.get('particles')] + \ 2665 [p.get('antiname').lower() for p in self._curr_model.get('particles')]) 2666 2667 self.add_default_multiparticles()
2668 2669
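# --------------------------------------------------------------------------
# Illustrative sketch, not part of the MadGraph source: the test in
# process_model above that enables case-insensitive particle names -- it only
# holds when every (anti)particle name is already lower case.  The names are
# examples.
names = ['e-', 'mu-', 'ta-']
use_lower_part_names = (names == [n.lower() for n in names])
print(use_lower_part_names)   # True
# --------------------------------------------------------------------------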
2670 - def import_mg4_proc_card(self, filepath):
2671 """ read a V4 proc card, convert it and run it in mg5""" 2672 2673 # change the status of this line in the history -> pass in comment 2674 if self.history and self.history[-1].startswith('import proc_v4'): 2675 self.history[-1] = '#%s' % self.history[-1] 2676 2677 # read the proc_card.dat 2678 reader = files.read_from_file(filepath, import_v4.read_proc_card_v4) 2679 if not reader: 2680 raise self.InvalidCmd('\"%s\" is not a valid path' % filepath) 2681 2682 if self._mgme_dir: 2683 # Add comment to history 2684 self.exec_cmd("# Import the model %s" % reader.model, precmd=True) 2685 line = self.exec_cmd('import model_v4 %s -modelname' % \ 2686 (reader.model), precmd=True) 2687 else: 2688 logging.error('No MG_ME installation detected') 2689 return 2690 2691 2692 # Now that we have the model we can split the information 2693 lines = reader.extract_command_lines(self._curr_model) 2694 2695 for line in lines: 2696 self.exec_cmd(line, precmd=True) 2697 2698 return
2699
2700 - def add_default_multiparticles(self):
2701 """ add default particle from file interface.multiparticles_default.txt 2702 """ 2703 2704 defined_multiparticles = self._multiparticles.keys() 2705 removed_multiparticles = [] 2706 # First check if the defined multiparticles are allowed in the 2707 # new model 2708 for key in self._multiparticles.keys(): 2709 try: 2710 for part in self._multiparticles[key]: 2711 self._curr_model.get('particle_dict')[part] 2712 except: 2713 del self._multiparticles[key] 2714 defined_multiparticles.remove(key) 2715 removed_multiparticles.append(key) 2716 2717 # Now add default multiparticles 2718 for line in open(pjoin(MG5DIR, 'input', \ 2719 'multiparticles_default.txt')): 2720 if line.startswith('#'): 2721 continue 2722 try: 2723 if self._use_lower_part_names: 2724 multipart_name = line.lower().split()[0] 2725 else: 2726 multipart_name = line.split()[0] 2727 if multipart_name not in self._multiparticles: 2728 self.do_define(line) 2729 2730 except self.InvalidCmd, why: 2731 logger_stderr.warning('impossible to set default multiparticles %s because %s' % 2732 (line.split()[0],why)) 2733 if defined_multiparticles: 2734 logger.info("Kept definitions of multiparticles %s unchanged" % \ 2735 " / ".join(defined_multiparticles)) 2736 2737 for removed_part in removed_multiparticles: 2738 if removed_part in self._multiparticles: 2739 removed_multiparticles.remove(removed_part) 2740 2741 if removed_multiparticles: 2742 logger.info("Removed obsolete multiparticles %s" % \ 2743 " / ".join(removed_multiparticles))
2744
2745 - def do_install(self, line):
2746 """Install optional package from the MG suite.""" 2747 2748 args = self.split_arg(line) 2749 #check the validity of the arguments 2750 self.check_install(args) 2751 2752 if sys.platform == "darwin": 2753 program = "curl" 2754 else: 2755 program = "wget" 2756 2757 # Load file with path of the different program: 2758 import urllib 2759 path = {} 2760 try: 2761 data = urllib.urlopen('http://madgraph.phys.ucl.ac.be/package_info.dat') 2762 except: 2763 raise MadGraph5Error, '''Impossible to connect the server. 2764 Please check your internet connection or retry later''' 2765 for line in data: 2766 split = line.split() 2767 path[split[0]] = split[1] 2768 2769 name = {'td_mac': 'td', 'td_linux':'td', 'Delphes':'Delphes', 2770 'pythia-pgs':'pythia-pgs', 'ExRootAnalysis': 'ExRootAnalysis', 2771 'MadAnalysis':'MadAnalysis'} 2772 name = name[args[0]] 2773 2774 try: 2775 os.system('rm -rf %s' % pjoin(MG5DIR, name)) 2776 except: 2777 pass 2778 2779 # Load that path 2780 logger.info('Downloading %s' % path[args[0]]) 2781 if sys.platform == "darwin": 2782 misc.call(['curl', path[args[0]], '-o%s.tgz' % name], cwd=MG5DIR) 2783 else: 2784 misc.call(['wget', path[args[0]], '--output-document=%s.tgz'% name], cwd=MG5DIR) 2785 # Untar the file 2786 returncode = misc.call(['tar', '-xzpvf', '%s.tgz' % name], cwd=MG5DIR, 2787 stdout=open(os.devnull, 'w')) 2788 if returncode: 2789 raise MadGraph5Error, 'Fail to download correctly the File. Stop' 2790 2791 # Check that the directory has the correct name 2792 if not os.path.exists(pjoin(MG5DIR, name)): 2793 created_name = [n for n in os.listdir(MG5DIR) if n.startswith(name) 2794 and not n.endswith('gz')] 2795 if not created_name: 2796 raise MadGraph5Error, 'The file was not loaded correctly. Stop' 2797 else: 2798 created_name = created_name[0] 2799 files.mv(pjoin(MG5DIR, created_name), pjoin(MG5DIR, name)) 2800 logger.info('compile %s. This might takes a while.' 
% name) 2801 2802 # Modify Makefile for pythia-pgs on Mac 64 bit 2803 if args[0] == "pythia-pgs" and sys.maxsize > 2**32: 2804 path = os.path.join(MG5DIR, 'pythia-pgs', 'src', 'make_opts') 2805 text = open(path).read() 2806 text = text.replace('MBITS=32','MBITS=64') 2807 open(path, 'w').writelines(text) 2808 2809 # Compile the file 2810 # Check for F77 compiler 2811 if 'FC' not in os.environ or not os.environ['FC']: 2812 if self.options['fortran_compiler']: 2813 compiler = self.options['fortran_compiler'] 2814 elif misc.which('gfortran'): 2815 compiler = 'gfortran' 2816 elif misc.which('g77'): 2817 compiler = 'g77' 2818 else: 2819 raise self.InvalidCmd('Require g77 or Gfortran compiler') 2820 if compiler == 'gfortran' and args[0] == "pythia-pgs": 2821 path = os.path.join(MG5DIR, 'pythia-pgs', 'src', 'make_opts') 2822 text = open(path).read() 2823 text = text.replace('FC=g77','FC=gfortran') 2824 open(path, 'w').writelines(text) 2825 elif compiler == 'gfortran' and args[0] == 'MadAnalysis': 2826 path = os.path.join(MG5DIR, 'MadAnalysis', 'makefile') 2827 text = open(path).read() 2828 text = text.replace('FC=g77','FC=gfortran') 2829 open(path, 'w').writelines(text) 2830 2831 if logger.level <= logging.INFO: 2832 devnull = open(os.devnull,'w') 2833 misc.call(['make', 'clean'], stdout=devnull, stderr=-2) 2834 status = misc.call(['make'], cwd = os.path.join(MG5DIR, name)) 2835 else: 2836 misc.compile(['clean'], mode='', cwd = os.path.join(MG5DIR, name)) 2837 status = misc.compile(mode='', cwd = os.path.join(MG5DIR, name)) 2838 if not status: 2839 logger.info('compilation succeeded') 2840 else: 2841 logger.warning('Error detected during the compilation. Please check the compilation error and run make manually.') 2842 2843 2844 # Special treatment for TD/Ghostscript program (require by MadAnalysis) 2845 if args[0] == 'MadAnalysis': 2846 try: 2847 os.system('rm -rf td') 2848 os.mkdir(pjoin(MG5DIR, 'td')) 2849 except Exception, error: 2850 print error 2851 pass 2852 2853 if sys.platform == "darwin": 2854 logger.info('Downloading TD for Mac') 2855 target = 'http://theory.fnal.gov/people/parke/TD/td_mac_intel.tar.gz' 2856 misc.call(['curl', target, '-otd.tgz'], 2857 cwd=pjoin(MG5DIR,'td')) 2858 misc.call(['tar', '-xzpvf', 'td.tgz'], 2859 cwd=pjoin(MG5DIR,'td')) 2860 files.mv(MG5DIR + '/td/td_mac_intel',MG5DIR+'/td/td') 2861 else: 2862 logger.info('Downloading TD for Linux 32 bit') 2863 target = 'http://madgraph.phys.ucl.ac.be/Downloads/td' 2864 misc.call(['wget', target], cwd=pjoin(MG5DIR,'td')) 2865 os.chmod(pjoin(MG5DIR,'td','td'), 0775) 2866 if sys.maxsize > 2**32: 2867 logger.warning('''td program (needed by MadAnalysis) is not compile for 64 bit computer 2868 Please follow instruction in http://cp3wks05.fynu.ucl.ac.be/twiki/bin/view/Software/TopDrawer.''') 2869 2870 if not misc.which('gs'): 2871 logger.warning('''gosthscript not install on your system. This is not required to run MA. 2872 but this prevent to create jpg files and therefore to have the plots in the html output.''') 2873 if sys.platform == "darwin": 2874 logger.warning('''You can download this program at the following link: 2875 http://www.macupdate.com/app/mac/9980/gpl-ghostscript''')
2876 2877 2878
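# --------------------------------------------------------------------------
# Illustrative sketch, not part of the MadGraph source: the download-and-unpack
# pattern used by do_install above, written with the Python 2 standard library
# (urllib + tarfile) instead of the external wget/curl and tar calls.  The
# function name and arguments are placeholders, not a real MadGraph API.
import os, tarfile, urllib

def fetch_package(url, name, dest):
    tgz = os.path.join(dest, '%s.tgz' % name)
    urllib.urlretrieve(url, tgz)            # download the tarball
    archive = tarfile.open(tgz, 'r:gz')     # unpack it next to the download
    archive.extractall(dest)
    archive.close()
# --------------------------------------------------------------------------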
2879 - def set_configuration(self, config_path=None, test=False):
2880 """ assign all configuration variable from file 2881 ./input/mg5_configuration.txt. assign to default if not define """ 2882 2883 self.options = {'pythia8_path': './pythia8', 2884 'timeout': 20, 2885 'web_browser':None, 2886 'eps_viewer':None, 2887 'text_editor':None, 2888 'fortran_compiler':None, 2889 'automatic_html_opening':True, 2890 'group_subprocesses': 'Auto', 2891 'ignore_six_quark_processes': False} 2892 2893 if not config_path: 2894 try: 2895 config_file = open(pjoin(os.environ['HOME'],'.mg5', 'mg5_configuration.txt')) 2896 except: 2897 config_file = open(os.path.relpath( 2898 pjoin(MG5DIR,'input','mg5_configuration.txt'))) 2899 else: 2900 config_file = open(config_path) 2901 2902 # read the file and extract information 2903 logger.info('load MG5 configuration from %s ' % config_file.name) 2904 for line in config_file: 2905 if '#' in line: 2906 line = line.split('#',1)[0] 2907 line = line.replace('\n','').replace('\r\n','') 2908 try: 2909 name, value = line.split('=') 2910 except ValueError: 2911 pass 2912 else: 2913 name = name.strip() 2914 value = value.strip() 2915 self.options[name] = value 2916 if value.lower() == "none": 2917 self.options[name] = None 2918 2919 if test: 2920 return self.options 2921 2922 # Treat each expected input 2923 # 1: Pythia8_path 2924 # try relative path 2925 for key in self.options: 2926 if key == 'pythia8_path': 2927 if self.options['pythia8_path'] in ['None', None]: 2928 self.options['pythia8_path'] = None 2929 continue 2930 pythia8_dir = pjoin(MG5DIR, self.options['pythia8_path']) 2931 if not os.path.isfile(pjoin(pythia8_dir, 'include', 'Pythia.h')): 2932 if not os.path.isfile(pjoin(self.options['pythia8_path'], 'include', 'Pythia.h')): 2933 self.options['pythia8_path'] = None 2934 else: 2935 continue 2936 2937 elif key.endswith('path'): 2938 pass 2939 elif key in ['cluster_type', 'automatic_html_opening']: 2940 pass 2941 elif key not in ['text_editor','eps_viewer','web_browser']: 2942 # Default: try to set parameter 2943 try: 2944 self.do_set("%s %s" % (key, self.options[key]), log=False) 2945 except MadGraph5Error, error: 2946 print error 2947 logger.warning("Option %s from config file not understood" \ 2948 % key) 2949 2950 # Configure the way to open a file: 2951 launch_ext.open_file.configure(self.options) 2952 2953 return self.options
2954
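# --------------------------------------------------------------------------
# Illustrative sketch, not part of the MadGraph source: the "name = value"
# parsing that set_configuration above applies to each line of
# mg5_configuration.txt.  The sample lines are made up for the example.
options = {}
for line in ['timeout = 60   # in seconds', 'web_browser = None', '# a comment']:
    if '#' in line:
        line = line.split('#', 1)[0]
    try:
        name, value = line.split('=')
    except ValueError:
        continue
    name, value = name.strip(), value.strip()
    options[name] = None if value.lower() == 'none' else value
print(options)   # {'timeout': '60', 'web_browser': None}  (order may vary)
# --------------------------------------------------------------------------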
2955 - def check_for_export_dir(self, filepath):
2956          """Check if the file is in a valid export directory and assign it to
2957          the export path if it is"""
2958  
2959          # keep previous if a previous one is defined
2960          if self._export_dir:
2961              return
2962  
2963          if os.path.exists(pjoin(os.getcwd(), 'Cards')):
2964              self._export_dir = os.getcwd()
2965              return
2966  
2967          path_split = filepath.split(os.path.sep)
2968          if len(path_split) > 2 and path_split[-2] == 'Cards':
2969              self._export_dir = os.path.sep.join(path_split[:-2])
2970              return
2971
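# --------------------------------------------------------------------------
# Illustrative sketch, not part of the MadGraph source: the path test used by
# check_for_export_dir above to recognise a file living inside an output
# directory's Cards/ folder.  The path is an example.
import os
filepath = os.path.join('PROC_sm_0', 'Cards', 'proc_card.dat')
path_split = filepath.split(os.path.sep)
if len(path_split) > 2 and path_split[-2] == 'Cards':
    print(os.path.sep.join(path_split[:-2]))   # 'PROC_sm_0'
# --------------------------------------------------------------------------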
2972 - def do_launch(self, line):
2973 """Ask for editing the parameter and then 2974 Execute the code (madevent/standalone/...) 2975 """ 2976 start_cwd = os.getcwd() 2977 2978 args = self.split_arg(line) 2979 # check argument validity and normalise argument 2980 (options, args) = _launch_parser.parse_args(args) 2981 self.check_launch(args, options) 2982 options = options.__dict__ 2983 # args is now MODE PATH 2984 2985 if args[0].startswith('standalone'): 2986 ext_program = launch_ext.SALauncher(self, args[1], **options) 2987 elif args[0] == 'madevent': 2988 if options['interactive']: 2989 if hasattr(self, 'do_shell'): 2990 ME = madevent_interface.MadEventCmdShell(me_dir=args[1]) 2991 else: 2992 ME = madevent_interface.MadEventCmd(me_dir=args[1]) 2993 # transfer interactive configuration 2994 config_line = [l for l in self.history if l.strip().startswith('set')] 2995 for line in config_line: 2996 ME.exec_cmd(line) 2997 stop = self.define_child_cmd_interface(ME) 2998 return stop 2999 3000 #check if this is a cross-section 3001 if not self._generate_info: 3002 # This relaunch an old run -> need to check if this is a 3003 # cross-section or a width 3004 info = open(pjoin(args[1],'SubProcesses','procdef_mg5.dat')).read() 3005 generate_info = info.split('# Begin PROCESS',1)[1].split('\n')[1] 3006 generate_info = generate_info.split('#')[0] 3007 else: 3008 generate_info = self._generate_info 3009 3010 if len(generate_info.split('>')[0].strip().split())>1: 3011 ext_program = launch_ext.MELauncher(args[1], self, 3012 shell = hasattr(self, 'do_shell'), 3013 **options) 3014 else: 3015 # This is a width computation 3016 ext_program = launch_ext.MELauncher(args[1], self, unit='GeV', 3017 shell = hasattr(self, 'do_shell'), 3018 **options) 3019 3020 elif args[0] == 'pythia8': 3021 ext_program = launch_ext.Pythia8Launcher( args[1], self, **options) 3022 else: 3023 os.chdir(start_cwd) #ensure to go to the initial path 3024 raise self.InvalidCmd , '%s cannot be run from MG5 interface' % args[0] 3025 3026 3027 ext_program.run() 3028 os.chdir(start_cwd) #ensure to go to the initial path
3029 3030 3031 3032
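# --------------------------------------------------------------------------
# Illustrative sketch, not part of the MadGraph source: the test in do_launch
# above that distinguishes a cross-section run (two initial-state particles)
# from a width computation (one).  The process strings are examples.
for generate_info in ['p p > t t~', 't > w+ b']:
    n_initial = len(generate_info.split('>')[0].strip().split())
    kind = 'cross-section' if n_initial > 1 else 'width (unit GeV)'
    print('%s  ->  %s' % (generate_info, kind))
# --------------------------------------------------------------------------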
3033 - def do_load(self, line):
3034 """Not in help: Load information from file""" 3035 3036 args = self.split_arg(line) 3037 # check argument validity 3038 self.check_load(args) 3039 3040 cpu_time1 = time.time() 3041 if args[0] == 'model': 3042 self._curr_model = save_load_object.load_from_file(args[1]) 3043 if self._curr_model.get('parameters'): 3044 # This is a UFO model 3045 self._model_v4_path = None 3046 self._curr_fortran_model = \ 3047 helas_call_writers.FortranUFOHelasCallWriter(self._curr_model) 3048 else: 3049 # This is a v4 model 3050 self._model_v4_path = import_v4.find_model_path(\ 3051 self._curr_model.get('name').replace("_v4", ""), 3052 self._mgme_dir) 3053 self._curr_fortran_model = \ 3054 helas_call_writers.FortranHelasCallWriter(self._curr_model) 3055 3056 # Do post-processing of model 3057 self.process_model() 3058 3059 #save_model.save_model(args[1], self._curr_model) 3060 if isinstance(self._curr_model, base_objects.Model): 3061 cpu_time2 = time.time() 3062 logger.info("Loaded model from file in %0.3f s" % \ 3063 (cpu_time2 - cpu_time1)) 3064 else: 3065 raise self.RWError('Could not load model from file %s' \ 3066 % args[1]) 3067 elif args[0] == 'processes': 3068 amps = save_load_object.load_from_file(args[1]) 3069 if isinstance(amps, diagram_generation.AmplitudeList): 3070 cpu_time2 = time.time() 3071 logger.info("Loaded processes from file in %0.3f s" % \ 3072 (cpu_time2 - cpu_time1)) 3073 if amps: 3074 model = amps[0].get('process').get('model') 3075 if not model.get('parameters'): 3076 # This is a v4 model. Look for path. 3077 self._model_v4_path = import_v4.find_model_path(\ 3078 model.get('name').replace("_v4", ""), 3079 self._mgme_dir) 3080 self._curr_fortran_model = \ 3081 helas_call_writers.FortranHelasCallWriter(\ 3082 model) 3083 else: 3084 self._model_v4_path = None 3085 self._curr_fortran_model = \ 3086 helas_call_writers.FortranUFOHelasCallWriter(\ 3087 model) 3088 # If not exceptions from previous steps, set 3089 # _curr_amps and _curr_model 3090 self._curr_amps = amps 3091 self._curr_model = model 3092 logger.info("Model set from process.") 3093 # Do post-processing of model 3094 self.process_model() 3095 self._done_export = None 3096 else: 3097 raise self.RWError('Could not load processes from file %s' % args[1])
3098
3099 - def do_save(self, line, check=True):
3100 """Not in help: Save information to file""" 3101 3102 args = self.split_arg(line) 3103 # Check argument validity 3104 if check: 3105 self.check_save(args) 3106 3107 if args[0] == 'model': 3108 if self._curr_model: 3109 #save_model.save_model(args[1], self._curr_model) 3110 if save_load_object.save_to_file(args[1], self._curr_model): 3111 logger.info('Saved model to file %s' % args[1]) 3112 else: 3113 raise self.InvalidCmd('No model to save!') 3114 elif args[0] == 'processes': 3115 if self._curr_amps: 3116 if save_load_object.save_to_file(args[1], self._curr_amps): 3117 logger.info('Saved processes to file %s' % args[1]) 3118 else: 3119 raise self.InvalidCmd('No processes to save!') 3120 3121 elif args[0] == 'options': 3122 CmdExtended.do_save(self, line)
3123 3124 3125 # Set an option
3126 - def do_set(self, line, log=True):
3127 """Set an option, which will be default for coming generations/outputs 3128 """ 3129 3130 args = self.split_arg(line) 3131 3132 # Check the validity of the arguments 3133 self.check_set(args) 3134 3135 if args[0] == 'ignore_six_quark_processes': 3136 if args[1] == 'False': 3137 self.options[args[0]] = False 3138 return 3139 self.options[args[0]] = list(set([abs(p) for p in \ 3140 self._multiparticles[args[1]]\ 3141 if self._curr_model.get_particle(p).\ 3142 is_fermion() and \ 3143 self._curr_model.get_particle(abs(p)).\ 3144 get('color') == 3])) 3145 if log: 3146 logger.info('Ignore processes with >= 6 quarks (%s)' % \ 3147 ",".join([\ 3148 self._curr_model.get_particle(q).get('name') \ 3149 for q in self.options[args[0]]])) 3150 3151 elif args[0] == 'group_subprocesses': 3152 if args[1] != 'Auto': 3153 self.options[args[0]] = eval(args[1]) 3154 else: 3155 self.options[args[0]] = 'Auto' 3156 if log: 3157 logger.info('Set group_subprocesses to %s' % \ 3158 str(self.options[args[0]])) 3159 logger.info('Note that you need to regenerate all processes') 3160 self._curr_amps = diagram_generation.AmplitudeList() 3161 self._curr_matrix_elements = helas_objects.HelasMultiProcess() 3162 3163 elif args[0] == "stdout_level": 3164 logging.root.setLevel(eval('logging.' + args[1])) 3165 logging.getLogger('madgraph').setLevel(eval('logging.' + args[1])) 3166 if log: 3167 logger.info('set output information to level: %s' % args[1]) 3168 3169 elif args[0] == 'fortran_compiler': 3170 if args[1] != 'None': 3171 if log: 3172 logger.info('set fortran compiler to %s' % args[1]) 3173 self.options['fortran_compiler'] = args[1] 3174 else: 3175 self.options['fortran_compiler'] = None 3176 elif args[0] == 'timeout': 3177 self.options[args[0]] = int(args[1]) 3178 elif args[0] in self.options: 3179 if args[1] in ['None','True','False']: 3180 self.options[args[0]] = eval(args[1]) 3181 else: 3182 self.options[args[0]] = args[1]
3183 3184
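# --------------------------------------------------------------------------
# Illustrative sketch, not part of the MadGraph source: resolving a level name
# such as 'DEBUG' or 'WARNING' to its numeric value, equivalent to the
# eval('logging.' + args[1]) call used by do_set above for stdout_level.
import logging
level_name = 'WARNING'
logging.getLogger('madgraph').setLevel(getattr(logging, level_name))
print(getattr(logging, level_name))   # 30
# --------------------------------------------------------------------------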
3185 - def do_open(self, line):
3186 """Open a text file/ eps file / html file""" 3187 3188 args = self.split_arg(line) 3189 # Check Argument validity and modify argument to be the real path 3190 self.check_open(args) 3191 file_path = args[0] 3192 3193 launch_ext.open_file(file_path)

    def do_output(self, line):
        """Initialize a new Template or reinitialize one"""

        args = self.split_arg(line)
        # Check argument validity
        self.check_output(args)

        # Remove previous outputs from history
        self.clean_history(to_remove=['display','open','history','launch','output'],
                           remove_bef_lb1='generate',
                           keep_last=True)

        noclean = '-noclean' in args
        force = '-f' in args
        nojpeg = '-nojpeg' in args
        main_file_name = ""
        try:
            main_file_name = args[args.index('-name') + 1]
        except:
            pass

        ################
        # ALOHA OUTPUT #
        ################
        if self._export_format == 'aloha':
            # catch format
            format = [d[9:] for d in args if d.startswith('--format=')]
            if not format:
                format = 'Fortran'
            else:
                format = format[-1]
            # catch output dir
            output = [d for d in args if d.startswith('--output=')]
            if not output:
                output = import_ufo.find_ufo_path(self._curr_model['name'])
                output = pjoin(output, format)
                if not os.path.isdir(output):
                    os.mkdir(output)
            else:
                output = output[-1]
                if not os.path.isdir(output):
                    raise self.InvalidCmd('%s is not a valid directory' % output)
            logger.info('creating routines in directory %s ' % output)
            # build the calling list for aloha
            names = [d for d in args if not d.startswith('-')]
            wanted_lorentz = aloha_fct.guess_routine_from_name(names)
            # Create and write ALOHA Routine
            aloha_model = create_aloha.AbstractALOHAModel(self._curr_model.get('name'))
            if wanted_lorentz:
                aloha_model.compute_subset(wanted_lorentz)
            else:
                aloha_model.compute_all(save=False)
            aloha_model.write(output, format)
            return

        #################
        ## Other Output #
        #################
        if not force and not noclean and os.path.isdir(self._export_dir)\
               and self._export_format in ['madevent', 'standalone']:
            # Don't ask if user already specified force or noclean
            logger.info('INFO: directory %s already exists.' % self._export_dir)
            logger.info('If you continue this directory will be deleted and replaced.')
            answer = self.ask('Do you want to continue?', 'y', ['y', 'n'])
            if answer != 'y':
                raise self.InvalidCmd('Stopped by user request')
            else:
                shutil.rmtree(self._export_dir)

        # check if we need to group processes
        group_subprocesses = False
        if self._export_format == 'madevent' and \
                                            self.options['group_subprocesses']:
            if self.options['group_subprocesses'] is True:
                group_subprocesses = True
            elif self._curr_amps[0].get_ninitial() == 2:
                group_subprocesses = True

        # Make a Template Copy
        if self._export_format == 'madevent':
            if group_subprocesses:
                self._curr_exporter = export_v4.ProcessExporterFortranMEGroup(\
                                          self._mgme_dir, self._export_dir,
                                          not noclean)
            else:
                self._curr_exporter = export_v4.ProcessExporterFortranME(\
                                          self._mgme_dir, self._export_dir,
                                          not noclean)

        elif self._export_format in ['standalone', 'matrix']:
            self._curr_exporter = export_v4.ProcessExporterFortranSA(\
                                      self._mgme_dir, self._export_dir, not noclean)
        elif self._export_format == 'standalone_cpp':
            export_cpp.setup_cpp_standalone_dir(self._export_dir, self._curr_model)
        elif not os.path.isdir(self._export_dir):
            os.makedirs(self._export_dir)

        if self._export_format in ['madevent', 'standalone']:
            self._curr_exporter.copy_v4template(modelname=self._curr_model.get('name'))

        # Reset _done_export, since we have new directory
        self._done_export = False

        # Perform export and finalize right away
        self.export(nojpeg, main_file_name)

        # Automatically run finalize
        self.finalize(nojpeg)

        # Remember that we have done export
        self._done_export = (self._export_dir, self._export_format)

        # Reset _export_dir, so we don't overwrite by mistake later
        self._export_dir = None
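    # Illustrative invocations handled above (directory and routine names are
    # examples):
    #
    #   mg5> output madevent PROC_ttbar -nojpeg
    #   mg5> output standalone MY_SA_DIR -f
    #   mg5> output aloha FFV1 FFV2 --format=Fortran
    #
    # '-f' skips the "directory already exists" question, '-noclean' keeps an
    # existing directory, and '-name' sets the main file name (pythia8 output).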

    # Export a matrix element
    def export(self, nojpeg=False, main_file_name=""):
        """Export a generated amplitude to file"""

        def generate_matrix_elements(self):
            """Helper function to generate the matrix elements before
            exporting"""

            # Sort amplitudes according to number of diagrams,
            # to get most efficient multichannel output
            self._curr_amps.sort(lambda a1, a2: a2.get_number_of_diagrams() - \
                                 a1.get_number_of_diagrams())

            # Check if we need to group the SubProcesses or not
            group = True
            if self.options['group_subprocesses'] is False:
                group = False
            elif self.options['group_subprocesses'] == 'Auto' and \
                                         self._curr_amps[0].get_ninitial() == 1:
                group = False

            cpu_time1 = time.time()
            ndiags = 0
            if not self._curr_matrix_elements.get_matrix_elements():
                if group:
                    cpu_time1 = time.time()
                    dc_amps = diagram_generation.DecayChainAmplitudeList(\
                        [amp for amp in self._curr_amps if isinstance(amp, \
                                        diagram_generation.DecayChainAmplitude)])
                    non_dc_amps = diagram_generation.AmplitudeList(\
                        [amp for amp in self._curr_amps if not \
                         isinstance(amp, diagram_generation.DecayChainAmplitude)])
                    subproc_groups = group_subprocs.SubProcessGroupList()
                    if non_dc_amps:
                        subproc_groups.extend(\
                            group_subprocs.SubProcessGroup.group_amplitudes(\
                                                                   non_dc_amps))
                    if dc_amps:
                        dc_subproc_group = \
                            group_subprocs.DecayChainSubProcessGroup.\
                                                       group_amplitudes(dc_amps)
                        subproc_groups.extend(\
                            dc_subproc_group.\
                                     generate_helas_decay_chain_subproc_groups())

                    ndiags = sum([len(m.get('diagrams')) for m in \
                                  subproc_groups.get_matrix_elements()])
                    self._curr_matrix_elements = subproc_groups
                    # assign a unique id number to all groups
                    uid = 0
                    for group in subproc_groups:
                        uid += 1  # update the identification number
                        for me in group.get('matrix_elements'):
                            me.get('processes')[0].set('uid', uid)
                else:  # Not grouped subprocesses
                    self._curr_matrix_elements = \
                        helas_objects.HelasMultiProcess(self._curr_amps)
                    ndiags = sum([len(me.get('diagrams')) for \
                                  me in self._curr_matrix_elements.\
                                                         get_matrix_elements()])
                    # assign a unique id number to all processes
                    uid = 0
                    for me in self._curr_matrix_elements.get_matrix_elements():
                        uid += 1  # update the identification number
                        me.get('processes')[0].set('uid', uid)

            cpu_time2 = time.time()
            return ndiags, cpu_time2 - cpu_time1

        # Start of the actual routine

        ndiags, cpu_time = generate_matrix_elements(self)

        calls = 0

        path = self._export_dir
        if self._export_format in ['standalone_cpp', 'madevent', 'standalone']:
            path = pjoin(path, 'SubProcesses')

        cpu_time1 = time.time()

        # First treat madevent and pythia8 exports, where we need to
        # distinguish between grouped and ungrouped subprocesses

        # MadEvent
        if self._export_format == 'madevent':
            if isinstance(self._curr_matrix_elements, group_subprocs.SubProcessGroupList):
                for (group_number, me_group) in enumerate(self._curr_matrix_elements):
                    calls = calls + \
                            self._curr_exporter.generate_subprocess_directory_v4(\
                                me_group, self._curr_fortran_model,
                                group_number)
            else:
                for me_number, me in \
                        enumerate(self._curr_matrix_elements.get_matrix_elements()):
                    calls = calls + \
                            self._curr_exporter.generate_subprocess_directory_v4(\
                                me, self._curr_fortran_model, me_number)

            # Write the procdef_mg5.dat file with process info
            card_path = pjoin(path, os.path.pardir, 'SubProcesses', \
                              'procdef_mg5.dat')
            if self._generate_info:
                self._curr_exporter.write_procdef_mg5(card_path,
                                                      self._curr_model['name'],
                                                      self._generate_info)
            try:
                cmd.Cmd.onecmd(self, 'history .')
            except:
                pass

        # Pythia 8
        if self._export_format == 'pythia8':
            # Output the process files
            process_names = []
            if isinstance(self._curr_matrix_elements, group_subprocs.SubProcessGroupList):
                for (group_number, me_group) in enumerate(self._curr_matrix_elements):
                    exporter = export_cpp.generate_process_files_pythia8(\
                        me_group.get('matrix_elements'), self._curr_cpp_model,
                        process_string=me_group.get('name'),
                        process_number=group_number, path=path)
                    process_names.append(exporter.process_name)
            else:
                exporter = export_cpp.generate_process_files_pythia8(\
                    self._curr_matrix_elements, self._curr_cpp_model,
                    process_string=self._generate_info, path=path)
                process_names.append(exporter.process_file_name)

            # Output the model parameter and ALOHA files
            model_name, model_path = export_cpp.convert_model_to_pythia8(\
                self._curr_model, self._export_dir)

            # Generate the main program file
            filename, make_filename = \
                export_cpp.generate_example_file_pythia8(path,
                                                         model_path,
                                                         process_names,
                                                         exporter,
                                                         main_file_name)

        # Pick out the matrix elements in a list
        matrix_elements = \
            self._curr_matrix_elements.get_matrix_elements()

        # Fortran MadGraph Standalone
        if self._export_format == 'standalone':
            for me in matrix_elements:
                calls = calls + \
                        self._curr_exporter.generate_subprocess_directory_v4(\
                            me, self._curr_fortran_model)

        # Just the matrix.f files
        if self._export_format == 'matrix':
            for me in matrix_elements:
                filename = pjoin(path, 'matrix_' + \
                                 me.get('processes')[0].shell_string() + ".f")
                if os.path.isfile(filename):
                    logger.warning("Overwriting existing file %s" % filename)
                else:
                    logger.info("Creating new file %s" % filename)
                calls = calls + self._curr_exporter.write_matrix_element_v4(\
                    writers.FortranWriter(filename), \
                    me, self._curr_fortran_model)

        # C++ standalone
        if self._export_format == 'standalone_cpp':
            for me in matrix_elements:
                export_cpp.generate_subprocess_directory_standalone_cpp(\
                    me, self._curr_cpp_model, path=path)

        cpu_time2 = time.time() - cpu_time1

        logger.info(("Generated helas calls for %d subprocesses " + \
                     "(%d diagrams) in %0.3f s") % \
                    (len(matrix_elements),
                     ndiags, cpu_time))

        if calls:
            if "cpu_time2" in locals():
                logger.info("Wrote files for %d helas calls in %0.3f s" % \
                            (calls, cpu_time2))
            else:
                logger.info("Wrote files for %d helas calls" % \
                            (calls))

        if self._export_format == 'pythia8':
            logger.info("- All necessary files for Pythia 8 generated.")
            logger.info("- Run \"launch\" and select %s.cc," % filename)
            logger.info("  or go to %s/examples and run" % path)
            logger.info("      make -f %s" % make_filename)
            logger.info("  (with process_name replaced by process name).")
            logger.info("  You can then run ./%s to produce events for the process" % \
                        filename)

        # Replace the amplitudes with the actual amplitudes from the
        # matrix elements, which allows proper diagram drawing also of
        # decay chain processes
        self._curr_amps = diagram_generation.AmplitudeList(\
            [me.get('base_amplitude') for me in \
             matrix_elements])
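    # Note on the amplitude sort at the top of generate_matrix_elements():
    # the cmp-style lambda orders amplitudes by decreasing number of diagrams
    # and is Python 2 syntax. A key-based equivalent (sketch, not part of the
    # original code) would be:
    #
    #   self._curr_amps.sort(key=lambda amp: amp.get_number_of_diagrams(),
    #                        reverse=True)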
    def finalize(self, nojpeg, online=False):
        """Make the html output, write proc_card_mg5.dat and create
        madevent.tar.gz for a MadEvent directory"""

        if self._export_format in ['madevent', 'standalone']:
            # For v4 models, copy the model/HELAS information.
            if self._model_v4_path:
                logger.info('Copy %s model files to directory %s' % \
                            (os.path.basename(self._model_v4_path), self._export_dir))
                self._curr_exporter.export_model_files(self._model_v4_path)
                self._curr_exporter.export_helas(pjoin(self._mgme_dir, 'HELAS'))
            else:
                logger.info('Export UFO model to MG4 format')
                # wanted_lorentz are the lorentz structures which are
                # actually used in the wavefunctions and amplitudes in
                # these processes
                wanted_lorentz = self._curr_matrix_elements.get_used_lorentz()
                wanted_couplings = self._curr_matrix_elements.get_used_couplings()
                self._curr_exporter.convert_model_to_mg4(self._curr_model,
                                                         wanted_lorentz,
                                                         wanted_couplings)
        if self._export_format == 'standalone_cpp':
            logger.info('Export UFO model to C++ format')
            # wanted_lorentz are the lorentz structures which are
            # actually used in the wavefunctions and amplitudes in
            # these processes
            wanted_lorentz = self._curr_matrix_elements.get_used_lorentz()
            wanted_couplings = self._curr_matrix_elements.get_used_couplings()
            export_cpp.convert_model_to_cpp(self._curr_model,
                                            pjoin(self._export_dir),
                                            wanted_lorentz,
                                            wanted_couplings)
            export_cpp.make_model_cpp(self._export_dir)

        elif self._export_format == 'madevent':
            # Create configuration file [path to executable] for madevent
            filename = os.path.join(self._export_dir, 'Cards', 'me5_configuration.txt')
            self.do_save('options %s' % filename.replace(' ', '\ '), check=False)

        if self._export_format in ['madevent', 'standalone']:

            self._curr_exporter.finalize_v4_directory( \
                self._curr_matrix_elements,
                [self.history_header] + \
                self.history,
                not nojpeg,
                online,
                self.options['fortran_compiler'])

        if self._export_format in ['madevent', 'standalone', 'standalone_cpp']:
            logger.info('Output to directory ' + self._export_dir + ' done.')

        if self._export_format == 'madevent':
            logger.info('Type \"launch\" to generate events from this process, or see')
            logger.info(self._export_dir + '/README')
            logger.info('Run \"open index.html\" to see more information about this process.')

    def do_help(self, line):
        """ propose some useful possible actions """

        super(MadGraphCmd, self).do_help(line)

        if line:
            return

        if len(self.history) == 0:
            last_action_2 = 'mg5_start'
            last_action = 'mg5_start'
        else:
            args = self.history[-1].split()
            last_action = args[0]
            if len(args) > 1:
                last_action_2 = '%s %s' % (last_action, args[1])
            else:
                last_action_2 = 'none'

class MadGraphCmdWeb(CheckValidForCmdWeb, MadGraphCmd):
    """Temporary parser"""

#===============================================================================
# Command Parser
#===============================================================================
# DRAW
_draw_usage = "draw FILEPATH [options]\n" + \
              "-- draw the diagrams in eps format\n" + \
              "   Files will be FILEPATH/diagrams_\"process_string\".eps \n" + \
              "   Example: draw plot_dir . \n"
_draw_parser = optparse.OptionParser(usage=_draw_usage)
_draw_parser.add_option("", "--horizontal", default=False,
                        action='store_true', help="force S-channel to be horizontal")
_draw_parser.add_option("", "--external", default=0, type='float',
                        help="authorize external particles to end at the top or " + \
                        "bottom of the diagram. If bigger than zero, this tunes the " + \
                        "length of those lines.")
_draw_parser.add_option("", "--max_size", default=1.5, type='float',
                        help="forbid external lines bigger than max_size")
_draw_parser.add_option("", "--non_propagating", default=True, \
                        dest="contract_non_propagating", action='store_false',
                        help="avoid contractions of non-propagating lines")
_draw_parser.add_option("", "--add_gap", default=0, type='float', \
                        help="set the x-distance between external particles")

# LAUNCH PROGRAM
_launch_usage = "launch [DIRPATH] [options]\n" + \
                "-- execute the madevent/standalone/standalone_cpp/pythia8 output present in DIRPATH\n" + \
                "   By default DIRPATH is the latest created directory \n" + \
                "   (for pythia8, it should be the Pythia 8 main directory) \n" + \
                "   Example: launch PROC_sm_1 --name=run2 \n" + \
                "   Example: launch ../pythia8 \n"
_launch_parser = optparse.OptionParser(usage=_launch_usage)
_launch_parser.add_option("-f", "--force", default=False, action='store_true',
                          help="Use the cards present in the directory in order to launch the different programs")
_launch_parser.add_option("-n", "--name", default='', type='str',
                          help="Provide a name for the run (for madevent runs)")
_launch_parser.add_option("-c", "--cluster", default=False, action='store_true',
                          help="submit the job on the cluster")
_launch_parser.add_option("-m", "--multicore", default=False, action='store_true',
                          help="run the job on multiple cores")

_launch_parser.add_option("-i", "--interactive", default=False, action='store_true',
                          help="Use Interactive Console [if available]")
_launch_parser.add_option("-s", "--laststep", default='',
                          help="last program run in MadEvent run. [auto|parton|pythia|pgs|delphes]")


#===============================================================================
# __main__
#===============================================================================

if __name__ == '__main__':

    run_option = sys.argv
    if len(run_option) > 1:
        # The first argument of sys.argv is the name of the program
        input_file = open(run_option[1], 'rU')
        cmd_line = MadGraphCmd(stdin=input_file)
        cmd_line.use_rawinput = False  # put it in non-interactive mode
        cmd_line.cmdloop()
    else:
        # Interactive mode
        MadGraphCmd().cmdloop()
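# Illustrative batch use, mirroring the __main__ block above. The command file
# name 'proc_card.dat' and the 'bin/mg5' wrapper are examples; any way of
# passing the file as sys.argv[1] to this module behaves the same:
#
#   $ cat proc_card.dat
#   import model sm
#   generate p p > t t~
#   output madevent PROC_ttbar -nojpeg
#   launch
#
#   $ ./bin/mg5 proc_card.dat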