def configure_analyst_opsvm():
    '''
    Configure the PLUMgrid Analyst service for OPSVM.

    Stops any running plumgrid-sigmund instance inside the plumgrid
    LXC container and relaunches it configured against the OPSVM IP
    with autoboot enabled.
    '''
    if not service_running('plumgrid'):
        restart_pg()
    opsvm_ip = pg_gw_context._pg_dir_context()['opsvm_ip']
    # sigmund lives inside the plumgrid libvirt LXC container, so every
    # command is wrapped in nsenter targeting the container's init
    # process namespaces (-m mount, -n net, -u uts, -i ipc, -p pid).
    NS_ENTER = ('/opt/local/bin/nsenter -t $(ps ho pid --ppid $(cat '
                '/var/run/libvirt/lxc/plumgrid.pid)) -m -n -u -i -p ')
    sigmund_stop = NS_ENTER + '/usr/bin/service plumgrid-sigmund stop'
    sigmund_status = NS_ENTER \
        + '/usr/bin/service plumgrid-sigmund status'
    sigmund_autoboot = NS_ENTER \
        + '/usr/bin/sigmund-configure --ip {0} --start --autoboot' \
        .format(opsvm_ip)
    try:
        status = subprocess.check_output(sigmund_status, shell=True)
        if 'start/running' in status:
            # Service is up: stop it before reconfiguring.  call()
            # returns a non-zero exit status on failure.
            if subprocess.call(sigmund_stop, shell=True):
                log('plumgrid-sigmund couldn\'t be stopped!')
                return
        subprocess.check_call(sigmund_autoboot, shell=True)
    except Exception:
        # Narrowed from a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt.  The redundant status re-read
        # after the autoboot call (its result was never used) is gone.
        log('plumgrid-sigmund couldn\'t be started!')
# Python check_call() usage examples (translated scraped-page header; kept as a comment so the file stays parseable).
def import_key(keyid):
    """Import a PGP key into apt's trusted keyring.

    Accepts either a full ASCII-armored key block or a Radix64 key id.
    Armored keys are written to a temporary file and fed to
    ``apt-key add``; key ids are fetched from the Ubuntu keyserver.
    """
    key = keyid.strip()
    is_armored = (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----')
                  and key.endswith('-----END PGP PUBLIC KEY BLOCK-----'))
    if is_armored:
        juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
        juju_log("Importing ASCII Armor PGP key", level=DEBUG)
        with tempfile.NamedTemporaryFile() as keyfile:
            # Re-open by name so the key text is flushed to disk before
            # apt-key reads it.
            with open(keyfile.name, 'w') as fd:
                fd.write(key)
                fd.write("\n")
            try:
                subprocess.check_call(['apt-key', 'add', keyfile.name])
            except subprocess.CalledProcessError:
                error_out("Error importing PGP key '%s'" % key)
    else:
        juju_log("PGP key found (looks like Radix64 format)", level=DEBUG)
        juju_log("Importing PGP key from keyserver", level=DEBUG)
        fetch_cmd = ['apt-key', 'adv', '--keyserver',
                     'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
        try:
            subprocess.check_call(fetch_cmd)
        except subprocess.CalledProcessError:
            error_out("Error importing PGP key '%s'" % key)
def _git_update_requirements(venv, package_dir, reqs_dir):
    """
    Update from global requirements.

    Update an OpenStack git directory's requirements.txt and
    test-requirements.txt from global-requirements.txt, via the
    update.py helper in the requirements checkout.

    :param venv: virtualenv path whose bin/python runs update.py.
    :param package_dir: path of the package to update.
    :param reqs_dir: path of the global-requirements checkout.
    """
    orig_dir = os.getcwd()
    os.chdir(reqs_dir)
    python = os.path.join(venv, 'bin/python')
    cmd = [python, 'update.py', package_dir]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        package = os.path.basename(package_dir)
        error_out("Error updating {} from "
                  "global-requirements.txt".format(package))
    finally:
        # Always restore the caller's working directory; the original
        # leaked the chdir if check_call raised anything other than
        # CalledProcessError (e.g. OSError for a missing python).
        os.chdir(orig_dir)
def _clean_check(cmd, target):
    """Run *cmd*, which downloads *target*.

    If the command exits non-zero, delete any partially written
    *target* file and re-raise the CalledProcessError so the caller
    sees the failure.
    """
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        # Don't leave a partial/corrupt download behind.
        if os.access(target, os.F_OK):
            os.unlink(target)
        raise
def stopScheduler():
try:
subprocess.check_call(["sudo", "service", "supervisor", "stop"])
except subprocess.CalledProcessError as e:
print "ERROR: couldn't stop the scheduler (supervisor): {reason}".format(reason=e)
exit(-1)
try:
subprocess.check_call(["sudo", "service", "rabbitmq-server", "stop"])
except subprocess.CalledProcessError as e:
print "ERROR: couldn't stop the scheduler (rabbitmq): {reason}".format(reason=e)
exit(-1)
print "Scheduler stopped successfully!"
def sed(filename, before, after, flags='g'):
    """In-place search-and-replace in *filename* using GNU sed.

    :param filename: relative or absolute file path (``~`` expanded).
    :param before: extended-regex pattern to replace (see 'man sed').
    :param after: replacement expression.
    :param flags: sed ``s///`` flags; defaults to ``'g'`` (global).
        Note that overriding this *replaces* the default, so pass
        e.g. ``'gi'`` to keep global replacement case-insensitive.
    :returns: 0 if the sed command succeeded; raises
        CalledProcessError otherwise.
    """
    target = os.path.expanduser(filename)
    script = r's/{0}/{1}/{2}'.format(before, after, flags)
    # -r selects extended regular expressions; -i edits in place.
    return subprocess.check_call(["sed", "-i", "-r", "-e", script, target])
def add_group(group_name, system_group=False):
    """Ensure a group exists on the system and return its grp entry.

    :param group_name: name of the group to look up or create.
    :param system_group: when True create with ``--system``, otherwise
        as a regular ``--group`` group.
    :returns: the ``grp`` entry for the group.
    """
    try:
        existing = grp.getgrnam(group_name)
        log('group {0} already exists!'.format(group_name))
        return existing
    except KeyError:
        # Group missing: create it, then look it up again.
        log('creating group {0}'.format(group_name))
        cmd = ['addgroup',
               '--system' if system_group else '--group',
               group_name]
        subprocess.check_call(cmd)
        return grp.getgrnam(group_name)
def signalp():
singleline()
command = ("signalp3.0 -t euk -short -m hmm " + path + "singleline.fasta | grep ' S ' > " + path + "signalpOUT.txt")
print "\nRunning SignalP"
signalpRUN = subprocess.check_call([command], shell=True)
print "SignalP Complete"
# Generate the list of sequences with siganal peptides using the mature sequences
print "\nCreating SignalP protein list"
command_list = ("cut -d ' ' -f 1 " + path + "signalpOUT.txt")
file_out4 = open(path + "goodlistSigP.txt", "w")
tab = subprocess.check_call([command_list], stdout=file_out4, shell=True)
file_out4.close()
# This function creates a fasta file containing the complete sequences with signal peptides
def tmhmm():
command = ("tmhmm -short " + path + "signalP_pass.fasta")
file_out = open(path + "tmhmmOUT.txt", "w")
print "\nRunning tmhmm on mature signalp sequences only"
tmhmmRUN = subprocess.check_call([command], stdout=file_out, shell=True)
file_out.close()
print "tmhmm complete"
print "\nIdentifying sequences without tm regions."
# This section of code parses the output from tmhmm and collects fastas with no TM regions
openfile = open(path + "tmhmmOUT.txt", "r")
file_out2 = open(path + "tmhmmGoodlist.txt", "a")
for line in openfile:
if "\tPredHel=0\t" in line:
goodname = line.partition('\t')[0] + '\n'
file_out2.write(goodname)
openfile.close()
file_out2.close()
#This function uses targetp to verify the destination of the signal peptide
#NOTE for plant networks use -P over -N
def try_initialize_swauth():
    """One-time swauth initialisation, performed on the leader only.

    Generates (or reuses) the swauth admin key, runs ``swauth-prep``
    against the local proxy, and records completion in leader storage
    so later hook invocations skip the work.
    """
    if not (is_leader() and config('auth-type') == 'swauth'):
        return
    if leader_get('swauth-init') is True:
        return
    try:
        admin_key = config('swauth-admin-key')
        if not admin_key:  # covers both '' and None, as before
            # Fall back to a previously shared key, else mint one.
            admin_key = leader_get('swauth-admin-key')
            if admin_key is None:
                # BUG FIX: uuid.uuid4() returns a UUID object, but
                # subprocess argv entries must be strings.
                admin_key = str(uuid.uuid4())
                leader_set({'swauth-admin-key': admin_key})
        bind_port = determine_api_port(config('bind-port'),
                                       singlenode_mode=True)
        subprocess.check_call([
            'swauth-prep',
            '-A',
            'http://localhost:{}/auth'.format(bind_port),
            '-K', admin_key])
        leader_set({'swauth-init': True})
    except subprocess.CalledProcessError:
        log("had a problem initializing swauth!")
def sed(filename, before, after, flags='g'):
    """In-place search-and-replace in *filename* using GNU sed.

    :param filename: relative or absolute file path (``~`` expanded).
    :param before: extended-regex pattern to replace (see 'man sed').
    :param after: replacement expression.
    :param flags: sed ``s///`` flags; defaults to ``'g'`` (global).
        Note that overriding this *replaces* the default, so pass
        e.g. ``'gi'`` to keep global replacement case-insensitive.
    :returns: 0 if the sed command succeeded; raises
        CalledProcessError otherwise.
    """
    target = os.path.expanduser(filename)
    script = r's/{0}/{1}/{2}'.format(before, after, flags)
    # -r selects extended regular expressions; -i edits in place.
    return subprocess.check_call(["sed", "-i", "-r", "-e", script, target])
def add_metric(*args, **kwargs):
    """Record metric values via the ``add-metric`` hook tool.

    Metrics may be passed as 'key=value' positional strings (required
    for names containing dashes) or as keyword arguments.  May only be
    called from the collect-metrics hook.  If the tool is missing
    (older juju), the failure is logged rather than raised.
    """
    pairs = list(args)
    pairs.extend('{}={}'.format(k, v) for k, v in kwargs.items())
    try:
        subprocess.check_call(['add-metric'] + sorted(pairs))
        return
    except EnvironmentError as e:
        # Only swallow "tool not found"; anything else is unexpected.
        if e.errno != errno.ENOENT:
            raise
    log('add-metric failed: {}'.format(' '.join(pairs)), level='INFO')
def _git_update_requirements(venv, package_dir, reqs_dir):
    """
    Update from global requirements.

    Update an OpenStack git directory's requirements.txt and
    test-requirements.txt from global-requirements.txt, via the
    update.py helper in the requirements checkout.

    :param venv: virtualenv path whose bin/python runs update.py.
    :param package_dir: path of the package to update.
    :param reqs_dir: path of the global-requirements checkout.
    """
    orig_dir = os.getcwd()
    os.chdir(reqs_dir)
    python = os.path.join(venv, 'bin/python')
    cmd = [python, 'update.py', package_dir]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        package = os.path.basename(package_dir)
        error_out("Error updating {} from "
                  "global-requirements.txt".format(package))
    finally:
        # Always restore the caller's working directory; the original
        # leaked the chdir if check_call raised anything other than
        # CalledProcessError (e.g. OSError for a missing python).
        os.chdir(orig_dir)
def sed(filename, before, after, flags='g'):
    """In-place search-and-replace in *filename* using GNU sed.

    :param filename: relative or absolute file path (``~`` expanded).
    :param before: extended-regex pattern to replace (see 'man sed').
    :param after: replacement expression.
    :param flags: sed ``s///`` flags; defaults to ``'g'`` (global).
        Note that overriding this *replaces* the default, so pass
        e.g. ``'gi'`` to keep global replacement case-insensitive.
    :returns: 0 if the sed command succeeded; raises
        CalledProcessError otherwise.
    """
    target = os.path.expanduser(filename)
    script = r's/{0}/{1}/{2}'.format(before, after, flags)
    # -r selects extended regular expressions; -i edits in place.
    return subprocess.check_call(["sed", "-i", "-r", "-e", script, target])
def add_metric(*args, **kwargs):
    """Record metric values via the ``add-metric`` hook tool.

    Metrics may be passed as 'key=value' positional strings (required
    for names containing dashes) or as keyword arguments.  May only be
    called from the collect-metrics hook.  If the tool is missing
    (older juju), the failure is logged rather than raised.
    """
    pairs = list(args)
    pairs.extend('{}={}'.format(k, v) for k, v in kwargs.items())
    try:
        subprocess.check_call(['add-metric'] + sorted(pairs))
        return
    except EnvironmentError as e:
        # Only swallow "tool not found"; anything else is unexpected.
        if e.errno != errno.ENOENT:
            raise
    log('add-metric failed: {}'.format(' '.join(pairs)), level='INFO')
def _git_update_requirements(venv, package_dir, reqs_dir):
    """
    Update from global requirements.

    Update an OpenStack git directory's requirements.txt and
    test-requirements.txt from global-requirements.txt, via the
    update.py helper in the requirements checkout.

    :param venv: virtualenv path whose bin/python runs update.py.
    :param package_dir: path of the package to update.
    :param reqs_dir: path of the global-requirements checkout.
    """
    orig_dir = os.getcwd()
    os.chdir(reqs_dir)
    python = os.path.join(venv, 'bin/python')
    cmd = [python, 'update.py', package_dir]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        package = os.path.basename(package_dir)
        error_out("Error updating {} from "
                  "global-requirements.txt".format(package))
    finally:
        # Always restore the caller's working directory; the original
        # leaked the chdir if check_call raised anything other than
        # CalledProcessError (e.g. OSError for a missing python).
        os.chdir(orig_dir)
def index_reference(self, in_fasta_fn, in_gtf_fn, num_threads=1, sa_sparse_d=None, sa_index_n_bases=None,
                    chr_bin_n_bits=None, limit_ram=None):
    """Build a STAR genome index under ``self.reference_star_path``.

    :param in_fasta_fn: genome FASTA file.
    :param in_gtf_fn: annotation GTF file.
    :param num_threads: value for ``--runThreadN``.
    :param sa_sparse_d: optional ``--genomeSAsparseD`` value.
    :param sa_index_n_bases: optional ``--genomeSAindexNbases`` value.
    :param chr_bin_n_bits: optional ``--genomeChrBinNbits`` value.
    :param limit_ram: optional ``--limitGenomeGenerateRAM`` value.
    :raises Exception: if the index directory already exists.
    """
    if os.path.exists(self.reference_star_path):
        raise Exception('STAR reference path %s already exists' % self.reference_star_path)
    os.mkdir(self.reference_star_path)
    cmd = ['STAR', '--runMode', 'genomeGenerate',
           '--genomeDir', self.reference_star_path,
           '--runThreadN', str(num_threads),
           '--genomeFastaFiles', in_fasta_fn,
           '--sjdbGTFfile', in_gtf_fn]
    # Optional tuning knobs, appended in the same order as before.
    optional = [('--limitGenomeGenerateRAM', limit_ram),
                ('--genomeSAsparseD', sa_sparse_d),
                ('--genomeSAindexNbases', sa_index_n_bases),
                ('--genomeChrBinNbits', chr_bin_n_bits)]
    for flag, value in optional:
        if value is not None:
            cmd += [flag, str(value)]
    subprocess.check_call(cmd)
def _index_reference(self, index_path, **kwargs):
    """
    Build a bowtie2 index for ``self.reference_fasta_path``.

    Args:
        index_path (str): path prefix for the generated index files.
        **kwargs: extra ``bowtie2-build`` options.  Flags may map to
            None; values are passed through unvalidated; underscores in
            option names are converted to hyphens.
    Notes:
        bowtie2-build writes temporary files as a side effect of
        indexing.
    Examples:
        myBowtie2._index_reference(index_path, large_index=None, bmax=4)
    """
    extra = cr_utils.kwargs_to_command_line_options(
        set(), replace_chars={'_': '-'}, **kwargs)
    subprocess.check_call(
        'bowtie2-build %s %s %s' % (extra, self.reference_fasta_path, index_path),
        shell=True)
    # Record where the index lives for later alignment calls.
    self.index_path = index_path
    self.indexed = True
def main(args, outs):
    """Martian stage entry point: split FASTQ reads into chunks.

    Invokes the external Rust ``chunk_reads`` tool, then emits one
    out-chunk entry per produced chunk.  Reads ``args.read_chunk``
    (a dict — presumably describing the input chunk; TODO confirm
    schema against the caller) and ``args.reads_per_file``; populates
    ``outs.out_chunks``.  (Python 2 module: print statements.)
    """
    # Write read_chunk for consumption by Rust
    with open("chunk_args.json", "w") as f:
        json.dump(args.read_chunk, f)
    output_path = martian.make_path("")
    prefix = "fastq_chunk"
    chunk_reads_args = ['chunk_reads', '--reads-per-fastq', str(args.reads_per_file), output_path, prefix, "--martian-args", "chunk_args.json"]
    print "running chunk reads: [%s]" % str(chunk_reads_args)
    subprocess.check_call(chunk_reads_args)
    # chunk_reads reports the chunks it produced in read_chunks.json.
    with open(os.path.join(output_path, "read_chunks.json")) as f:
        chunk_results = json.load(f)
    outs.out_chunks = []
    # Write out a new chunk entry for each resulting chunk
    for chunk in chunk_results:
        print args.read_chunk
        # Shallow copy: entries differ only in their 'read_chunks' key.
        chunk_copy = args.read_chunk.copy()
        print chunk_copy
        chunk_copy['read_chunks'] = chunk
        outs.out_chunks.append(chunk_copy)
def get_bcl2fastq_v2(hostname):
    """Locate bcl2fastq (v2) on PATH and report its version.

    :param hostname: machine name, used only in the error message.
    :returns: ``(version_string, None)`` on success, or
        ``(None, error_message)`` when the tool is missing or its
        version output is unrecognized.
    """
    try:
        subprocess.check_call(["which", "bcl2fastq"])
        # Restore the LD_LIBRARY_PATH set aside by sourceme.bash/shell10x.
        # Required for some installations of bcl2fastq.
        env = dict(os.environ)
        env['LD_LIBRARY_PATH'] = os.environ.get('_TENX_LD_LIBRARY_PATH', '')
        output = subprocess.check_output(["bcl2fastq", "--version"],
                                         env=env, stderr=subprocess.STDOUT)
        for line in output.split("\n"):
            m = re.match("bcl2fastq v([0-9.]+)", line)
            if m:
                return (m.groups()[0], None)
        return (None, "bcl2fastq version not recognized -- please check the output of bcl2fastq --version")
    except subprocess.CalledProcessError:
        return (None, "On machine: %s, bcl2fastq not found on PATH." % hostname)
def combine_vcfs(output_filename, input_vcf_filenames):
    """Concatenate VCFs (header from the first), then sort and index.

    :param output_filename: final sorted VCF path (tabix-indexed too).
    :param input_vcf_filenames: input VCF paths; the first contributes
        its header, later files contribute records only.
    """
    tmp_filename = output_filename + ".tmp"
    for i, fn in enumerate(input_vcf_filenames):
        if i == 0:
            # First file: keep everything, including the header.
            subprocess.check_call('cat ' + fn + " > " + tmp_filename, shell=True)
        else:
            # Later files: strip header lines ('^#') and append.
            args = 'grep -v "^#" ' + fn
            ret = subprocess.call(args + " >> " + tmp_filename, shell=True)
            # grep exits 2 on real errors (exit 1 just means no match).
            if ret == 2:
                raise Exception("grep call failed: " + args)
    # Sort and index the combined file.
    tk_tabix.sort_vcf(tmp_filename, output_filename)
    tk_tabix.index_vcf(output_filename)
    os.remove(tmp_filename)
def start_ab3(tmp_dir_loc, repo_dir, pkg_info, rm_abdir=False):
    """Run an autobuild3 build for a package.

    :param tmp_dir_loc: temporary build directory to chdir into.
    :param repo_dir: repository directory holding autobuild defines.
    :param pkg_info: package metadata dict (uses 'NAME' for logging).
    :param rm_abdir: when True, remove the autobuild/ tree afterwards.
    :returns: True on success, False on any failure.
    """
    start_time = int(time.time())
    os.chdir(tmp_dir_loc)
    if not copy_abd(tmp_dir_loc, repo_dir, pkg_info):
        return False
    # For logging support: ptyprocess.PtyProcessUnicode.spawn(['autobuild'])
    shadow_defines_loc = os.path.abspath(os.path.curdir)
    if not parser_pass_through(pkg_info, shadow_defines_loc):
        return False
    try:
        subprocess.check_call(['autobuild'])
    except Exception:
        # Narrowed from a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt; any build failure => False.
        return False
    time_span = int(time.time()) - start_time
    print('>>>>>>>>>>>>>>>>>> Time for building\033[36m {} \033[0m:\033[36m {} \033[0mseconds'.format(
        pkg_info['NAME'], time_span))
    if rm_abdir is True:
        shutil.rmtree(os.path.abspath(os.path.curdir) + '/autobuild/')
    # Will get better display later
    return True
def aria_get(url, threads=3, output=None):
    """Download *url* with aria2c using multiple connections.

    :param url: source URL to fetch.
    :param threads: value for ``--max-connection-per-server``.
    :param output: optional target filename; when given, the file is
        placed in the module-global ``dump_loc`` directory under this
        basename, and the download is skipped when the finished file
        already exists (no leftover ``.aria2`` control file).
    :raises AssertionError: if aria2c fails (kept for caller
        compatibility); KeyboardInterrupt is re-raised as-is.
    """
    # BUG FIX: guard output=None before os.path.exists(), which raises
    # TypeError on None -- the parameter's own default value.
    if output is not None and os.path.exists(output) and \
            not os.path.exists(output + '.aria2'):
        return
    aria_cmd = ['aria2c', '--max-connection-per-server={}'.format(threads),
                url, '--auto-file-renaming=false']  # ,'--check-certificate=false'
    if output is not None:
        # NOTE(review): dump_loc is a module-level global defined
        # elsewhere in this file -- confirm it is always set.
        aria_cmd[2:2] = ['-d', dump_loc, '-o', output.split('/')[-1]]
    try:
        subprocess.check_call(aria_cmd)
    except KeyboardInterrupt:
        raise
    except Exception:
        # Narrowed from a bare ``except:``; failure surface unchanged.
        raise AssertionError('Failed to fetch source with Aria2!')
def compile_so(libs):
    """Link the given libraries into one shared object via clang.

    The result is cached in the temp directory as
    ``lib<name1>.<name2>....so`` (names sorted) and reused on later
    calls.  Honors the $CLANG environment variable for the compiler.

    :param libs: iterable of library basenames (without ``-l``).
    :returns: path to the generated (or cached) .so file.
    """
    # I don't know how else to find these .so files other than just
    # asking clang to make a .so file out of all of them.
    clang = os.getenv('CLANG', 'clang')
    libname = '.'.join(sorted(libs))
    target = join(tempfile.gettempdir(), 'lib' + libname + '.so')
    if not os.path.exists(target):
        cmd = [clang, '-o', target, '-shared'] + ['-l' + lib for lib in libs]
        subprocess.check_call(cmd)
    return target
def share(self):
    '''Compile a single Rain file into a shared object file.

    No-op if this module was already compiled; honors the $CLANG
    environment variable for the compiler binary.
    '''
    self.build()
    if self.compiled:
        return
    self.compiled = True
    self.compile_links()
    with self.okay('sharing'):
        out_name = self.target or self.qname + '.so'
        compiler = os.getenv('CLANG', 'clang')
        link_flags = ['-O2', '-shared', '-fPIC']
        # Verbose trace of what is about to be linked.
        self.vprint('{:>10} {}', 'target', X(out_name, 'yellow'))
        self.vprint('{:>10} {}', 'flags', X(' '.join(link_flags), 'yellow'))
        self.vprint('{:>10} {}', 'src', X(self.ll, 'yellow'))
        subprocess.check_call([compiler, '-o', out_name, self.ll] + link_flags)
def CreatePods(pod_name, yaml_file):
    """Create kubernetes pods from the given config and wait for them.

    Args:
        pod_name: 'name-prefix' selector for the pods.
        yaml_file: kubernetes yaml config.
    Raises:
        TimeoutError: if jobs didn't come up for a long time.
    """
    cmd = [_KUBECTL, 'create', '--filename=%s' % yaml_file]
    logging.info('Creating pods: %s', subprocess.list2cmdline(cmd))
    subprocess.check_call(cmd)
    # Poll (up to 100 attempts) until pods matching pod_name appear.
    if not _WaitUntil(100, _GetPodNames, pod_name):
        raise TimeoutError(
            'Timed out waiting for %s pod to come up.' % pod_name)
def clone_helpers(work_dir, branch):
    """Check out the charm-helpers *branch* into *work_dir*.

    :returns: destination path of the lightweight bzr checkout.
    """
    dest = os.path.join(work_dir, 'charm-helpers')
    logging.info('Checking out %s to %s.' % (branch, dest))
    subprocess.check_call(
        ['bzr', 'checkout', '--lightweight', branch, dest])
    return dest
def interface_exists(interface):
    '''
    Return True if *interface* is a network interface on this node.

    Probes with ``ip link show``; a non-zero exit status is treated as
    "does not exist".
    '''
    # BUG FIX: open devnull in a context manager -- the original
    # leaked the file handle on every call.
    with open(os.devnull, 'w') as devnull:
        try:
            subprocess.check_call(['ip', 'link', 'show', interface],
                                  stdout=devnull,
                                  stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError:
            return False
    return True
def _exec_cmd(cmd=None, error_msg='Command exited with ERRORs', fatal=False):
    '''
    Execute a command (argv list) on the node.

    :param cmd: command as an argv list; logged and skipped if None.
    :param error_msg: message logged when the command fails in
        non-fatal mode.
    :param fatal: when True, let CalledProcessError propagate instead
        of merely logging it.
    '''
    if cmd is None:
        log("No command specified")
        return
    if fatal:
        # Fatal mode: any failure propagates to the caller.
        subprocess.check_call(cmd)
        return
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        log(error_msg)
def service_resume(service_name, init_dir="/etc/init",
                   initd_dir="/etc/init.d"):
    """Resume a system service.

    Re-enables starting at boot under whichever init system manages
    the service (systemd, upstart, or sysv), then starts the service
    if it is not already running.

    :returns: truthy if the service is running when done.
    :raises ValueError: if the service's init system can't be detected.
    """
    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
    sysv_file = os.path.join(initd_dir, service_name)
    if init_is_systemd():
        service('enable', service_name)
    elif os.path.exists(upstart_file):
        # Upstart: "enabling" just means removing any .override file.
        override_path = os.path.join(
            init_dir, '{}.override'.format(service_name))
        if os.path.exists(override_path):
            os.unlink(override_path)
    elif os.path.exists(sysv_file):
        subprocess.check_call(["update-rc.d", service_name, "enable"])
    else:
        raise ValueError(
            "Unable to detect {0} as SystemD, Upstart {1} or"
            " SysV {2}".format(
                service_name, upstart_file, sysv_file))
    # Start only when not already running; return the running status.
    return service_running(service_name) or service_start(service_name)