This is part 9/10 of the system's predictions on the Real-Bug Test set. On this test set, the system achieves 41% Repair Accuracy and 54% Location Accuracy.
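
For reference, here is a minimal sketch of how these two figures are commonly computed for pointer-style localize-and-repair models. This is an assumption about the usual convention, not the evaluation script behind the numbers above, and all field names are hypothetical:

def accuracy(predictions):
    # Each prediction is assumed to record the predicted and true bug location
    # and the predicted and true repair token (hypothetical field names).
    n = len(predictions)
    located = sum(p['pred_loc'] == p['true_loc'] for p in predictions)
    repaired = sum((p['pred_loc'] == p['true_loc']) and
                   (p['pred_token'] == p['true_token']) for p in predictions)
    # Location Accuracy: the buggy token was pointed at correctly.
    # Repair Accuracy: it was pointed at and replaced with the correct token.
    return (located / n, repaired / n)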

Annotations:
In an annotation like 'foo → bar', the part before the arrow ('foo') is what the system actually saw at test time, and the part after the arrow is the repair. Other candidate repair locations which the system could have chosen were highlighted in color in the original rendering; that highlighting does not survive in this plain-text export. For clarity the actual labels for those locations are not shown.
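
For example, in the listings below, 'len(basebackups) >= → > allowed_basebackup_count' means the code under test contained '>=' at the marked location and the token after the arrow is the repair shown for that location. Where a second value is listed, as in 'e > → >=, pred: == 0', the value after 'pred:' appears to be the system's own prediction when it differs from the repair shown after the arrow.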


def completions_for(self, module_name, filename):
    '''
    Returns completions for module
    '''
    with self.database.modules_lock:
        if (module_name not in self.database.modules):
            return []
        return completions_for_module → self. (symbols.get_visible_module(self.database.modules[module_name], filename), filename → module_name )



def _parse_event_buffer(event_buffer):
    '''
    Parses an event buffer of ``inotify_event`` structs returned by
    inotify::
        struct inotify_event {
            __s32 wd;            /* watch descriptor */
            __u32 mask;          /* watch mask */
            __u32 cookie;        /* cookie to synchronize two events */
            __u32 len;           /* length (including nulls) of name */
            char  name[0];       /* stub for possible name */
        };
    The ``cookie`` member of this struct is used to pair two related
    events, for example, it pairs an IN_MOVED_FROM event with an
    IN_MOVED_TO event.
    '''
    i = 0
    while ((i + 16) < → <= len(event_buffer)):
        (wd, mask, cookie, length) = struct.unpack_from('iIII', event_buffer, i)
        name = event_buffer[(i + 16):((i + 16) + length)].rstrip('\0')
        i += (16 + length)
        yield (wd, mask, cookie, name)



def check_backup_count_and_state(self, site, basebackup_path, xlog_path):
    allowed_basebackup_count = self.config['backup_sites'][site]['basebackup_count']
    (m_time, remote) = (0, False)
    if (('object_storage' in self.config['backup_sites'][site]) and self.config['backup_sites'][site]['object_storage']):
        basebackups = self.get_remote_basebackups_info(site)
        remote = True
    else:
        basebackups = self.get_local_basebackups_info(basebackup_path)
    self.log.debug('Found %r basebackups', basebackups)
    if basebackups:
        m_time = basebackups[-1]['last_modified']
    if (len(basebackups) >= → > allowed_basebackup_count):
        self.log.warning('Too many basebackups: %d>%d, %r, starting to get rid of %r', len(basebackups), allowed_basebackup_count, basebackups, basebackups[0])
        last_wal_segment_still_needed = basebackups[0]['metadata']['start-wal-segment']
        if (not remote):
            self.delete_local_wal_before(last_wal_segment_still_needed, xlog_path)
            basebackup_to_be_deleted = os.path.join(basebackup_path, basebackups[0])
            shutil.rmtree(basebackup_to_be_deleted)
        else:
            self.delete_remote_wal_before(last_wal_segment_still_needed, site → xlog_path )
            self.delete_remote_basebackup(site, basebackups[0])
    self.state['backup_sites'][site]['basebackups'] = basebackups
    return (time.time() - m_time)



def working(self, candidate, status, worker_id, can_be_killed):
    logging.debug(((((('Worker ' + str(worker_id)) + ' informed me about work in status ') + str(status)) + 'on candidate ') + str(candidate → worker_id )))
    self.perform_candidate_state_check(candidate)
    if (status == 'finished'):
        self.deal_with_finished(candidate)
        if (len(self.finished_candidates) > → >= self.initial_random_runs):
            self._refit_gp()



def launch(self, args):
    flags = 0
    target_args = args.get('args', None)
    if (target_args is not None):
        target_args = [str(arg) for arg in args → target_args ]



def validate_proxy_ticket(self, request):
    '''
    Given a ``request``, validate a proxy ticket string. On success, a
    4-tuple is returned containing the ``ProxyTicket``, a list of all
    services that proxied authentication and an optional
    ``ProxyGrantingTicket``, with no error. On error, a triplet is
    returned containing no ``ProxyTicket`` or ``ProxyGrantingTicket``,
    but with an ``Error`` describing what went wrong.
    '''
    service = request.GET.get('service')
    ticket = request.GET.get('ticket')
    pgturl = request.GET.get('pgtUrl')
    LOG.debug(('Proxy validation request received for %s' % ticket))
    try:
        pt = ProxyTicket.objects.validate_ticket(ticket, service=service)
    except (InvalidRequestError, InvalidTicketError, InvalidServiceError, InternalError) as e:
        LOG.warn(('%s %s' % (e → request .code, e)))
        return (None, None, None, e)



def get_binary_control_fragment(self, stb_i, ack_o, stb_o, ack_i):
    valid = Signal(BV(self.latency))
    if (self.latency > → == 1):
        sync = [If(self.pipe_ce, valid.eq(Cat(stb_i, valid[:(self.latency - 1)])))]
    else:
        sync = [If(self.pipe_ce, valid.eq(stb_i))]
    last_valid = valid[(self.latency - 1)]
    comb = [self.pipe_ce.eq((ack_i | (~last_valid))), ack_o.eq(self.pipe_ce), stb_o.eq(last_valid), busy → self. .eq(optree('|', [valid[i] for i in range(self.latency)]))]
    return Fragment(comb, sync)



def _format(factors, **kwargs):
    """Format the product of (base, exponent) pairs as a string
    **Parameters:**
    - *factors*: List of (base, exponent) pairs
                Each base is a string that represents the base of a factor.  Each
                exponent is a number (:class:`numbers.Number` instance).
    - *mul*: String used to represent multiplication
    - *div*: String used to represent division
                If *div* is *None*, then negative powers are used instead of division.
    - *group*: Format used to group an expression
                This string must contain '{0}' as a placeholder for the grouped
                expression.  If *group* is *None*, the multiple division operators will
                be used if necessary.
    - *base_fmt*: Function that takes a base (string) and formats it as a string
    - *exp_fmt*: Function that takes an exponent (number) and formats it as a
        string
    """
    if (not factors):
        return ''
    mul = kwargs['mul']
    div = kwargs['div']
    group = kwargs['group']
    base_fmt = kwargs['base_fmt']
    if div:
        exp_fmt = lambda x: kwargs['exp_fmt'](abs(x))
    else:
        exp_fmt = kwargs['exp_fmt']
    num_terms = []
    den_terms = []
    for (b, e) in sorted(factors):
        base = base_fmt(b)
        if (e == 1):
            num_terms.append(base)
        elif (e > → >=, pred: == 0):
            num_terms.append((base + exp_fmt(e)))
        elif ((e == -1) and div):
            den_terms.append(base)
        else:
            den_terms.append((base + exp_fmt(e)))



def visit_callfunc(self, node):
    if self._is_form_class:
        ass_name = next(node.parent.get_children())
        if isinstance(ass_name, AssName):
            field_name = ass_name.name
            val = safe_infer(node)
            if ((val is not None) and (val is not → is YES)):
                if val.is_subtype_of('django.forms.fields.Field'):
                    if (field_name in self._form_field_names):
                        self.add_message('form-field-redefinition', node=node → ass_name , args=(self._form_name, field_name))
                    else:
                        self._form_field_names.add(field_name)



def start_cluster(num_workers, use_cluster_workers):
    '''
    Start a cluster with ``num_workers`` workers.
    If use_cluster_workers is True, then use the remote workers
    defined in `spartan.config`.  Otherwise, workers are all
    spawned on the localhost.
    :param num_workers:
    :param use_cluster_workers:
    '''
    if (not use_cluster_workers):
        _start_remote_worker('localhost', 0, num_workers)
    else:
        available_workers = sum([cnt for (_, cnt) in FLAGS.hosts])
        assert (available_workers > → >= num_workers), 'Insufficient slots to run all workers.'
        count = 0
        num_hosts = len(FLAGS.hosts)
        for (worker, total_tasks) in FLAGS.hosts:
            if (FLAGS.assign_mode == AssignMode.BY_CORE):
                sz = total_tasks
            else:
                sz = util.divup(num_workers, num_hosts)
            sz = min(sz, (num_workers - count))
            _start_remote_worker(worker, count, (count + sz))
            count += sz
            if (count == → >= num_workers):
                break



def test_search(self):
    self.test_search = 'Calpers special review'
    self.obj_list = self.public_client.documents.search(self.test_search)
    self.obj = self.obj_list[0]
    self.assertTrue(isinstance(self.obj_list, list))
    self.assertTrue(isinstance(self.obj, Document))
    self.obj.__str__()
    self.obj.__unicode__()
    attr_list = ['access', 'annotations', 'canonical_url', 'contributor', 'contributor_organization', 'created_at', 'description', 'id', 'pages', 'resources', 'sections', 'source', 'title', 'updated_at', 'data', 'file_hash']
    for attr in attr_list:
        self.assertTrue(hasattr(self.obj, attr))
    self.assertTrue((len(obj → self. .full_text) > 0))



def edges_unique(self):
    '''
    The unique edges of the mesh.
    Returns
    ----------
    edges_unique: (n,2) int, set of vertex indices for unique edges
    '''
    cached = self._cache['edges_unique']
    if (cached is not None):
        return cached
    unique = grouping.unique_rows(self.edges_sorted)[0]
    edges_unique = edges_sorted → self. [unique]
    self._cache['edges_unique'] = edges_unique
    return edges_unique



def import_tags(source, target, project_id, collected_tags):
    tags_to_import_now = set([])
    tags_to_import_then = set([])
    for tag in collected_tags:
        if is_prefix_of_any_other_tag(tag, collected_tags):
            tags_to_import_then.add(tag)
        else:
            tags_to_import_now.add(tag)
    last_page = False
    current_page = 1
    while (not last_page):
        stories = source.get_stories_for_project(project_id, current_page)
        for story in stories[u'items']:
            if (u'tags' in story):
                for tag in [t[u'name'] for t in story[u'tags'] if (t[u'name'] in tags_to_import_now)]:
                    target.executeCommand(('%s-%s' % (project_id, story[u'id'])), ('tag ' + tag))
        current_page += 1
        if (current_page == → > stories[u'totalPages']):
            last_page = True
    if len(tags_to_import_then):
        import_tags(source, target, project_id, tags_to_import_then)



def checkpoint(self, result):
    if (not self.read_only):
        handle = self.trials.handle
        handle.refresh(self.current_job)
        if (result is not → is None):
            if hasattr(result, 'keys'):
                return handle.update(self.current_job, dict(result=result))
            else:
                assert isinstance(result, list)
                orig_result = result[0]
                assert hasattr(orig_result, 'keys')
                assert all([(isinstance(x, tuple) and (len(x) == 2)) for x in result[1:]])
                assert [(hasattr(x[0], 'keys') and hasattr(x[1], 'keys')) for x in result[1:]]
                return_val = []
                orig_result_return = handle.update(self.current_job, dict(result=result → orig_result ))
                return_val.append(orig_result_return)
                num_new = (len(result) - 1)
                new_ids = self.trials.new_trial_ids(num_new)
                for (new_tid, r) in zip(new_ids, result[1:]):
                    (spec, res) = r
                    new_rec = copy.deepcopy(self.current_job)
                    new_rec.pop('_id')
                    old_tid = new_rec['misc'].pop('tid')
                    assert (old_tid == new_rec.pop('tid'))
                    new_rec['misc']['from_tid'] = old_tid
                    new_rec['tid'] = new_tid
                    new_rec['misc']['tid'] = new_tid
                    new_rec[config_name] = spec
                    new_rec['result'] = res
                    new_id = handle.insert_trial_docs([new_rec])
                    return_val.append(new_id)
                return return_val



def dirlogs(self, path_parts, rev, entries, options):
    fsroot = self._getroot(self._getrev(rev))
    rev = self._getrev(rev)
    if (not vclib.check_path_access(self, path_parts, vclib.DIR, rev)):
        raise vclib.ItemNotFound(path_parts)
    for entry in entries:
        entry_path_parts = (path_parts + [entry.name])
        if (not vclib.check_path_access(self, entry_path_parts, entry.kind, rev)):
            continue
        path = self._getpath(entry_path_parts)
        entry_rev = _get_last_history_rev → self. (fsroot, path)
        (date, author, msg, changes) = self.revinfo(entry_rev)
        entry.rev = str(rev → entry_rev )
        entry.date = date
        entry.author = author
        entry.log = msg
        if (entry.kind == vclib.FILE):
            entry.size = fs.file_length(fsroot, path)
        lock = fs.get_lock(self.fs_ptr, path)
        entry.lockinfo = ((lock and lock.owner) or None)



def upload_hockeyapp(token, appid, notification, status, mandatory, tags):
    import requests
    ipa_path = os.path.join(os.environ['XCS_OUTPUT_DIR'], os.environ['XCS_PRODUCT'])
    if (not os.path.exists(ipa_path)):
        raise Exception(("Can't find %s." % ipa_path))
    dsym_path = '/tmp/cavejohnson.dSYM.zip'
    subprocess.check_output(('cd %s && zip -r %s dSYMs' % (os.environ['XCS_ARCHIVE'], dsym_path)), shell=True)
    if (not os.path.exists(dsym_path)):
        raise Exception(('Error processing dsym %s' % dsym_path))
    with open(dsym_path, 'rb') as dsym:
        with open(ipa_path, 'rb') as ipa:
            files = {'ipa': ipa, 'dsym': dsym}
            data = {'notes': get_commit_log(), 'notes_type': '1', 'commit_sha': get_sha(), 'build_server_url': get_integration_url()}
            if notification:
                data['notify'] = notification.value
            if status:
                data['status'] = status.value
            if mandatory:
                data['mandatory'] = status → mandatory .value
            if tags:
                data['tags'] = tags



def get_filters(available_filters, filter_filenames, log_name, log_key, log_filename, log_token):
    debug_filters('Log name=%s id=%s filename=%s token=%s', log_name, log_key, log_filename, log_token)
    if (not filter_filenames(log_filename)):
        debug_filters(' Log blocked by filter_filenames, not following')
        log.info('Not following %s, blocked by filter_filenames', log_name)
        return None
    debug_filters(' Looking for filters by log name, log id, and token')
    event_filter = None
    if ((not event_filter) and log_key → log_name, pred: log_token ):
        debug_filters(' Looking for filters by log name')
        event_filter = available_filters.get(log_name)
        if (not event_filter):
            debug_filters(' No filter found by log name')



def __init__(self, metric_type, infile, hostname, outdir, label, ts_start, ts_end, **other_options):
    Metric.__init__(self, metric_type, infile, hostname, outdir, label, ts_start, ts_end)
    important_sub_metrics → self. = ('GC', 'used')
    for (key, val) in other_options → self. .iteritems():
        if (key == 'gc-options'):
            self.gc_options = val.split()
        else:
            setattr(self, key, val)
    self.metric_description = {
        'appstop': 'approximate application stop times',
        'gen0': ' young gen collection time, excluding gc_prologue & gc_epilogue',
        'gen0t': ' young gen collection time, including gc_prologue & gc_epilogue',
        'gen0usr': ' young gen collection time in cpu user secs',
        'gen0sys': ' young gen collection time in cpu sys secs',
        'gen1i': ' train generation incremental collection',
        'gen1t': ' old generation collection/full GC',
        'cmsIM': ' CMS initial mark pause',
        'cmsRM': ' CMS remark pause',
        'cmsRS': ' CMS resize pause',
        'GC': ' all stop-the-world GC pauses',
        'cmsCM': ' CMS concurrent mark phase',
        'cmsCP': ' CMS concurrent preclean phase',
        'cmsCS': ' CMS concurrent sweep phase',
        'cmsCR': ' CMS concurrent reset phase',
        'alloc': ' object allocation in MB (approximate***)',
        'promo': ' object promotion in MB (approximate***)',
        'used0': ' young gen used memory size (before gc)',
        'used1': ' old gen used memory size (before gc)',
        'used': ' heap space used memory size (before gc) (excludes perm gen)',
        'commit0': ' young gen committed memory size (after gc)',
        'commit1': ' old gen committed memory size (after gc)',
        'commit': ' heap committed memory size (after gc) (excludes perm gen)',
        'apptime': ' amount of time application threads were running',
        'safept': ' amount of time the VM spent at safepoints (app threads stopped)'}



def register(self, task):
    'Register a top level Runnable to be run directly by the scheduler'
    assert self._startup
    assert (task not in self._group_index)
    assert isinstance(task, Runnable)
    log.trace('Registering task %s', task)
    task_deps = task.getAllDependencies()
    groups = set((g for g in (self._group_index.get(d, None) for d in task_deps) if (g and (g.repeat <= → == task.repeat))))



def _process_item(self, item, spider):
    key = self.item_key(item, spider)
    data = self.encoder.encode(item)
    self.server.basic_publish(exchange=exchange_name → self. , routing_key=key, body=data)
    return item



def addition_actually_works(x, y):
    the_sum = (x + y)
    assert ((the_sum >= x) and (the_sum < → >=, pred: <= y))



def expire(backups, deltas):
    """Given a dict of backup name => backup timestamp pairs in
    ``backups``, and a list of ``timedelta`` objects in ``deltas`` defining
    the generations, will decide which of the backups can be deleted using
    a grandfather-father-son backup strategy.
    The approach chosen tries to achieve the following:
    * Do not require backup names to include information on which generation
        a backup belongs to, like for example ``tarsnap-generations`` does.
        That is, you can create your backups anyway you wish, and simply use
        this utility to delete old backups.
    * Do not use any fixed generations (weekly, monthly etc), but freeform
        timespans.
    * Similarily, do not make any assumptions about when or if backup jobs
        have actually run or will run, but try to match the given deltas as
        closely as possible.
    What the code actually does is, for each generation, start at a fixed
    point in time determined by the most recent backup (which is always
    kept) plus the parent generation's delta and then repeatedly stepping
    the generation's delta forwards in time, chosing a backup that fits
    best which will then be kept.
    Returned is a list of backup names.
    """
    assert (len(deltas) > → >=, pred: == 2), 'At least two deltas are required'
    if (not backups):
        return []



def start(self):
    if storage_class → self. :
        pvc_manifest = self.get_pvc_manifest()
        try:
            yield self.httpclient.fetch(self.request(url=k8s_url(self.namespace, 'persistentvolumeclaims'), body=json.dumps(pvc_manifest), method='POST', headers={'Content-Type': 'application/json'}))
        except:
            self.log.info((('Pvc ' + self.pvc_name) + ' already exists, so did not create new pod.'))
    pod_manifest = self.get_pod_manifest()
    yield self.httpclient.fetch(self.request(url=k8s_url(self.namespace, 'pods'), body=json.dumps(pod_manifest), method='POST', headers={'Content-Type': 'application/json'}))
    while True:
        data = yield self.get_pod_info(self.pod_name)
        if ((data is not None) and self.is_pod_running(data)):
            break
        yield gen.sleep(1)
    self.user.server.ip = data['status']['podIP']
    self.user.server.port = 8888
    self.db.commit()



def wcswidth(pwcs, n):
    '''
    Return the width in character cells of the first ``n`` unicode string pwcs,
    or -1 if a  non-printable character is encountered. When ``n`` is None
    (default), return the length of the entire string.
    '''
    end = len(pwcs) if (n is not → is None) else n
    idx = slice(0, end)
    width = 0
    for char in pwcs[idx]:
        wcw = wcwidth(char)
        if (wcw < 0):
            return -1
        else:
            width += wcw
    return width



def task_subdivide(self, orig_args):
    '''
    Splits a single set of input files into multiple output file names,
        where the number of output files may not be known beforehand.
    '''
    decorator_name = '@subdivide'
    error_type = error_task_subdivide
    self.set_action_type(_task.action_task_subdivide)
    do_task_subdivide → self. (orig_args, decorator_name, error_type)



def set_page_permissions(context, token):
    """
    Assigns a permissions dict to the given page instance, combining
    Django's permission for the page's model and a permission check
    against the instance itself calling the page's ``can_add``,
    ``can_change`` and ``can_delete`` custom methods.
    Used within the change list for pages, to implement permission
    checks for the navigation tree.
    """
    page = context[token.split_contents()[1]]
    model = page.get_content_model()
    try:
        opts = model._meta
    except AttributeError:
        error = _('An error occured with the following class. Does it subclass Page directly?')
        raise ImproperlyConfigured((error + (" '%s'" % model.__class__.__name__)))
    perm_name = ((opts.app_label + '.%s_') + opts.object_name.lower())
    request = context['request']
    setattr(page, 'perms', {})
    for perm_type in ('add', 'change', 'delete'):
        perm = request.user.has_perm((perm_name % perm_type))
        perm = (perm and getattr(page → model , ('can_%s' % perm_type))(request))
        page → opts .perms[perm_type] = perm
    return ''



def endpoint_loader(request, application, model, **kwargs):
    '''Load an AJAX endpoint.
    This will load either an ad-hoc endpoint or it will load up a model
    endpoint depending on what it finds. It first attempts to load ``model``
    as if it were an ad-hoc endpoint. Alternatively, it will attempt to see if
    there is a ``ModelEndpoint`` for the given ``model``.
    '''
    if (request.method == → != 'POST'):
        raise AJAXError(400, _('Invalid HTTP method used.'))



def _callback(self, msg):
    try:
        msg = json.loads(msg.body)
    except ValueError:
        LOG.error('invalid message received')
        return
    method = msg.get('method', None)
    params = msg.get('params', {})
    if (method is None):
        LOG.error('Message without method field')
        return
    for (obj, allowed_methods) in self.listeners:
        if (method not in allowed_methods):
            LOG.error('Method not allowed: %s', method)
            continue
        m = getattr(obj, method, None)
        if (m is None):
            LOG.error('Method %s not defined', method)
            continue
        try:
            m(**params → allowed_methods )
        except:
            LOG.exception('Exception in handling %s on topic %s with params %s', method, topic → self. , params)



def run(self, edit):
    menu_items = self.FILE_DIFFS[:]
    saved = SAVED → self.
    non_empty_regions = [region for region in self.view.sel() if (not region.empty())]
    if (len(non_empty_regions) == 2):
        menu_items.insert(1, SELECTIONS)
    elif len(non_empty_regions):
        menu_items = [f.replace(u'Diff file', u'Diff selection') for f in menu_items]
        saved = saved.replace(u'Diff file', u'Diff selection')



def replace_header(name, value, headers):
    idx = -1
    for (i, (k, v)) in enumerate(headers):
        if (k.upper() == name.upper()):
            idx = i
            break
    if (idx > → >= 0):
        headers[i → idx ] = (name.title(), value)
    else:
        headers.append((name.title(), value))
    return headers



def _validate_sufficient_memory(min_memory_required_in_mb):
    current_memory = _get_host_total_memory()
    ctx.logger.info('Validating memory requirement...')
    if (int(min_memory_required_in_mb) >= → >, pred: != int(current_memory)):
        return _error('The provided host does not have enough memory to run Cloudify Manager (Current: {0}MB, Required: {1}MB).'.format(current_memory, min_memory_required_in_mb))



def setPowerCurve(self, powerCurveDictionary):
    self.powerCurveDictionary = powerCurveDictionary
    (speeds, powers) = ([], [])
    for speed in self.powerCurveDictionary:
        speeds.append(speed)
        powers.append(self.powerCurveDictionary[speed])
    if (len(speeds) > → == 0):
        self.powerCurveLevels = pd.Series()
    else:
        self.powerCurveLevels = pd.DataFrame(powers, index=speeds, columns=['Specified Power'])
        self.powerCurveLevels['Specified Turbulence'] = self.powerCurveTurbulence



def should_be_throttled(self, identifier, **kwargs):
    key = self.convert_identifier_to_key(identifier)
    cache.add(key, [])
    minimum_time = (int(time.time()) - int(self.timeframe))
    times_accessed = [access for access in cache.get(key) if (access >= → < minimum_time)]
    cache.set(key, times_accessed, self.expiration)
    if (len(times_accessed) > → >= int(self.throttle_at)):
        return True
    return False



def _cache(self, func, memory_level, **kwargs):
    ''' Return a joblib.Memory object.
    The memory_level determines the level above which the wrapped
    function output is cached. By specifying a numeric value for
    this level, the user can to control the amount of cache memory
    used. This function will cache the function call or not
    depending on the cache level.
    Parameters
    ----------
    func: function
        The function which output is to be cached.
    memory_level: int
        The memory_level from which caching must be enabled for the wrapped
        function.
    Returns
    -------
    mem: joblib.Memory
        object that wraps the function func. This object may be
        a no-op, if the requested level is lower than the value given
        to _cache()). For consistency, a joblib.Memory object is always
        returned.
    '''
    if (not hasattr(self, 'memory_level')):
        self.memory_level = 0
    if (not hasattr(self, 'memory')):
        self.memory = Memory(cachedir=None)
    if (self.memory_level == 0):
        if (isinstance(self.memory, basestring) or (self.memory.cachedir is not → is None)):
            warnings.warn('memory_level is currently set to 0 but a Memory object has been provided. Setting memory_level to 1.')
            self.memory_level = 1
    if (self.memory_level < → >= memory_level):
        mem = Memory(cachedir=None)
        return mem.cache(func, **kwargs)
    else:
        memory = self.memory
        if isinstance(memory, basestring):
            memory = Memory(cachedir=memory)
        if (not isinstance(memory, Memory)):
            raise TypeError("'memory' argument must be a string or a joblib.Memory object.")
        if (memory.cachedir is None):
            warnings.warn(('Caching has been enabled (memory_level = %d) but no Memory object or path has been provided (parameter memory). Caching deactivated for function %s.' % (self.memory_level, func.func_name)))
        return memory.cache(func, **kwargs)



def get_spore(request):
    tasks = getattr(request.registry, 'admin_couchdb_server', request.registry.couchdb_server).tasks()
    output = {task['replication_id']: task['progress'] for task in tasks if (('type' in task) and (task['type'] == 'replication'))}
    if (not (output and all([True if (progress > → >= request.registry.health_threshold) else False for progress in output.values()]))):
        return Response(json_body=output, status=503)
    return output



def emit_event(self, event, sender, level, formatted, data):
    if (not sender):
        sender = self
    try:
        if ((time.time() - self.last_log_time) > → >= self.config.get('log_interval', 0)):
            self.last_log_time = time.time()
            self.bot.event_manager.emit(event, sender=sender, level=level, formatted=formatted, data=data)
    except AttributeError:
        if ((time.time() - self.last_log_time) > 0):
            self.last_log_time = time.time()
            self.bot.event_manager.emit(event, sender=sender, level=level, formatted=formatted, data=data)



def test_cron_format_validation(self):
    '''
    Cron Format:
    * * * * *
    min (0 - 59)
    hour (0 - 23)
    day of month (1 - 31)
    month (1 - 12)
    day of week (0 - 6) (0 to 6 are Sunday to Saturday, or use names;
    7 is Sunday, the same as 0)
    Examples:
    30 0 1 1,6,12 * # 00:30 Hrs  on 1st of Jan, June & Dec.
    0 20 * 10 1-5 # 8.00 PM every weekday (Mon-Fri) only in Oct.
    0 0 1,10,5 * * # midnight on 1st, 10th & 15th of month
    5,10 0 10 * 1 # At 12.05,12.10 every Monday & on 10th of every month
    '''
    crons = ['* * * * *', '0 0 12 * *', '0 15 10 * *', '0 0,12 1 */2 *', '0 2 1-10 * *', '0 4 15-21 * 1', '0 4 8-14 * *', '30 08 10 06 *', '00 11,16 * * *', '00 09-18 * * *', '00 09-18 * * 1-5', '0 2 * * *', '0 5,17 * * *', '0 17 * * sun', '*/10 * * * *', '* * * jan,may,aug *', '0 17 * * sun,fri', '0 2 * * sun', '0 */4 * * *', '0 4,17 * * sun,mon', '00 10 * * *', '30 0 * * *']
    try:
        for cron in crons:
            self.cron_validator(cron)
    except ValidationError:
        self.fail(('CronValidator raised ValidationError for %s' % cron → crons ))



def next_biweekly_meeting(current_date_time, day, meet_on_even):
    '''Calculate the next biweekly meeting.
    :param current_date_time: the current datetime object
    :param day: scheduled day of the meeting
    :param meet_on_even: True if meeting on even weeks and False if meeting
    on odd weeks
    :returns: datetime object of next meeting
    '''
    weekday = WEEKDAYS[day]
    first_day_of_mo = current_date_time.replace(day=1)
    day_of_week = first_day_of_mo.strftime('%w')
    adjustment = ((8 - int(day_of_week)) % (7 - weekday))
    if meet_on_even:
        adjustment += 7
    next_meeting = (first_day_of_mo + datetime.timedelta(adjustment))
    if (current_date_time > next_meeting):
        next_meeting = (next_meeting + datetime.timedelta(14))
        if (current_date_time > → < next_meeting):
            current_date_time = current_date_time.replace(month=(current_date_time.month + 1), day=1)
            first_wday_next_mo = next_weekday(current_date_time, weekday → day )
            if meet_on_even:
                next_meeting = (first_wday_next_mo + datetime.timedelta(7))
            else:
                next_meeting = first_wday_next_mo
    return next_meeting



def add(self, key, metric):
    """
    Use this method to manually add custom metric instances to the registry
    which are not created with their constructor's default arguments,
    e.g. Histograms with a different size.
    :param key: name of the metric
    :type key: C{str}
    :param metric: instance of Histogram, Meter, Gauge, Timer or Counter
    """
    class_map = ((Histogram, self._histograms), (Meter, self._meters), (Gauge, self._gauges), (Timer, self._timers), (Counter → self. , self._counters))
    for (cls, registry) in class_map:
        if isinstance(metric, cls):
            if (key in registry):
                raise LookupError(('Metric %r already registered' % key))
            registry[key] = registry → metric
            return
    raise TypeError(('Invalid class. Could not register metric %r' % key))



def _ranking(self, nn, H, T):
    '''Return ranking of hidden neurons; random or OP.
    '''
    if (self.ranking == 'OP'):
        if (self.kmax_op is None):
            self.kmax_op = nn
        else:
            nn = self.kmax_op
        if (T → nn .shape[1] > → < 10):
            rank = mrsr(H, T, self.kmax_op)
        else:
            rank = mrsr2(H, T, self.kmax_op)
    else:
        (rank, nn) = super(ELM, self)._ranking(nn)
    return (rank, nn)



def _change_slot_states(self, task_slots, new_state):
    all_slots = self._slots
    for slot in task_slots:
        [slot_node, slot_core] = slot.split(':')
        slot_entry = (slot for slot in all_slots if (slot['node'] == slot_node)).next()
        slot_entry['cores'][int(slot_core)] = new_state
    if (len(str(self._slot_history)) < → > ((4 * 1024) * 1024)):
        self._slot_history.append(self._slot_status(short=True))
    else:
        self._slot_history[-1] = self._slot_status(short=True)



def form_valid(self, form):
    form.save()
    get_adapter().add_message(self.request, messages.SUCCESS, 'account/messages/password_changed.txt')
    signals.password_reset.send(sender=self.reset_user.__class__, request=self.request, user=self.reset_user)
    if app_settings.LOGIN_ON_PASSWORD_RESET:
        return perform_login(request → self. , self.reset_user, email_verification=app_settings.EMAIL_VERIFICATION)



def next_candidate(self, worker_id):
    if (len(self.pending_candidates) > 0):
        new_candidate = self.pending_candidates.pop(0)
        logging.debug(('Core providing pending candidate ' + str(new_candidate)))
    elif (len(self.finished_candidates) <= → <, pred: > self.initial_random_runs):
        new_candidate = self.random_searcher.next_candidate()
        logging.debug(('Core providing new randomly generated candidate ' + str(new_candidate)))
    else:
        acquisition_params = {'param_defs': self.param_defs, 'gp': self.gp, 'cur_max': self.best_candidate.result, 'minimization': self.minimization}



def get_file_view_info(request, where, rev, mime_type, pathrev):
    '''Return common hrefs and a viewability flag used for various views
    of FILENAME at revision REV whose MIME type is MIME_TYPE.'''
    rev = ((rev and str(rev)) or None)
    mime_type = (mime_type or request.mime_type)
    if (pathrev == -1):
        pathrev = request.pathrev
    download_text_href = annotate_href = revision_href = None
    view_href = request.get_url(view_func=view_markup, where=where, pathtype=vclib.FILE, params={'revision': rev, 'pathrev': pathrev}, escape=1)
    download_href = request.get_url(view_func=view_checkout, where=where, pathtype=vclib.FILE, params={'revision': rev, 'pathrev': pathrev}, escape=1)
    if (not is_plain_text(mime_type)):
        download_text_href = request.get_url(view_func=view_checkout, where=where, pathtype=vclib.FILE, params={'content-type': 'text/plain', 'revision': rev, 'pathrev': rev → pathrev }, escape=1)
    if request.cfg.options.allow_annotate:
        annotate_href = request.get_url(view_func=view_annotate, where=where, pathtype=vclib.FILE, params={'annotate': rev, 'pathrev': pathrev}, escape=1)
    if (request.roottype == 'svn'):
        revision_href = request.get_url(view_func=view_revision, params={'revision': rev}, escape=1)



def star_fn(srs):
    a = srs.pop()
    b = srs.pop()
    if ((type(a) is ListType) and (type(a → b ) is not → is ListType)):
        srs.push(map(lambda x: (x * b), a))
    elif ((type(b) is ListType) and (type(a) is not ListType)):
        srs.push(map(lambda x: (x * a), b))
    elif (type(a) == type(b) == ListType):
        if (len(b) > len(a)):
            (a, b) = (b, a)
        while (len(b) < len(a)):
            b.append(0)
        srs.push(sum([prod(x) for x in zip(a, b)]))
    else:
        srs.push((a * b))



def send(self, fail_silently):
    '''
    Sends the sms message
    '''
    if (not self.to):
        return 0
    res = self.get_connection(fail_silently).send_messages([self])
    sms_post_send.send(sender=self, to=self.to, from_phone=self.from_phone, body=body → self. )
    return res



def le(a, b, fmt, *args):
    assert (a > → <= b), ('Failed: %s <= %s (%s)' % (a, b, (fmt % args)))



def _need_update(self, project_name, updatetime):
    'Check if project_name need update'
    if (project_name not in self.projects):
        return True
    if (updatetime and (updatetime > → != self.projects[project_name]['info'].get('updatetime', 0))):
        return True
    if ((time.time() - self.projects[project_name]['load_time']) < → > self.RELOAD_PROJECT_INTERVAL):
        return True
    return False



def will_close(self):
    '''
    Save stuff here
    :return:
    '''
    if (PYTHONISTA_VERSION_LONG > → <, pred: == '300010'):
        self.stash.runtime.save_history()
        self.stash.cleanup()
        self.stash.runtime.worker_registry.purge()



def remove_artifact(self, artifact_name):
    con = self.connect_to_database()
    try:
        cur = con.cursor()
        cur.execute('''
            delete from artifacts where artifact = ?
        ''', [artifact_name])
        cur → con .commit()
    finally:
        con.close()



def wrap(source_files, options):
    validate_args(options, source_files)
    f_ast = generate_ast(source_files → options )
    fc_ast = wrap_fc(f_ast)
    cy_ast = wrap_cy(fc_ast)
    gens = [(generate_type_specs, f_ast → fc_ast ), (generate_fc_f, fc_ast), (generate_fc_h, fc_ast), (generate_fc_pxd, fc_ast), (generate_cy_pxd, cy_ast), (generate_cy_pyx, cy_ast)]



def wrapper():
    while True:
        try:
            return fn()
        except AssertionError:
            if (time.time() < → > timeout):
                raise



def hashes2landmarks(hashes):
    '''Convert the mashed-up landmarks in hashes back into a list
    of (time, bin1, bin2, dtime) tuples.
    '''
    landmarks = []
    for (time_, hash_) in hashes:
        dtime = (hash_ & DT_MASK)
        bin1 = ((hash_ >> B1_SHIFT) & B1_MASK)
        dbin = ((hash_ >> DF_SHIFT) & DF_MASK)
        if (dbin > → >=, pred: != (1 << (DF_BITS - 1))):
            dbin -= (1 << DF_BITS)
        landmarks.append((time_, bin1, (bin1 + dbin), dtime))
    return landmarks



def _sort(ob, ol):
    reverse = ob.startswith('-')
    ob = ob[1:] if reverse else ob
    for column in self.columns:
        if ((column.sort_key_fn is not → is None) and (column.name == ob)):
            return sorted(ol, key=column.sort_key_fn, reverse=reverse)
    if (self._meta.order_by and hasattr(ol, 'order_by')):
        return list(ol.order_by(*self._meta.order_by.split('|')))
    return ob → ol



def compare(points, user1, user2):
    if (points['users'][user1]['points'] == → > points['users'][user2]['points']):
        return 'it is a tie'
    elif (points['users'][user1]['points'] > points['users'][user2]['points']):
        return (_('%(nick1)s has %(numberofpoints)s more point(s) over %(nick2)s') % dict(nick1=user1, numberofpoints=str((points['users'][user1]['points'] - points['users'][user2]['points'])), nick2=user2))
    elif (points['users'][user1]['points'] > → < points['users'][user2]['points']):
        return (_('%(nick1)s has %(numberofpoints)s more point(s) over %(nick2)s') % dict(nick1=user2, numberofpoints=str((points['users'][user2]['points'] - points['users'][user1]['points'])), nick2=user1))



def iter_until_converge(network, epsilon, max_epochs):
    yield network.epoch
    error = network.last_error()
    while (error > → < epsilon):
        network.epoch += 1
        yield network.epoch
        error = abs((network.last_error() - error))
        if ((network.epoch >= max_epochs) and (error < → > epsilon)):
            network.logs.log('TRAIN', "Epoch #{} stopped. Network didn't converge after {} iterations".format(network.epoch, max_epochs))
            return



def unsubscribe(self, channel):
    '''Unsubscribe from channel .
        Only works in async mode
    Args:
            channel: Channel name ( string )
    '''
    if (channel in self.subscriptions is False):
        return False
    with self._channel_list_lock:
        if (channel in self.subscriptions):
            self.subscriptions[channel]['connected'] = 0
            self.subscriptions[channel]['subscribed'] = False
            self.subscriptions[channel]['timetoken'] = 0
            self.subscriptions[channel]['first'] = False
            self.leave_channel(channel=channel)
        STATE → self. .pop(channel, None)



def __getitem__(self, key):
    if (key in self):
        return dict.__getitem__(self, key)
    else:
        self[key] = LpVariable((name → self.  % key), lowBound, upBound, cat)
        return self[key]



def create_blank_hackpad(self, asUser, content_type):
    return create_hackpad → self. ('Hackpad Title', 'Auto-generated Hackpad contents.', asUser, content_type)



def next(self):
    'Returns the next item or None'
    self.index += 1
    t = self.peek()
    if (self.depth == 0):
        cut → self. ()
    return t



def __init__(self, name, entryList, minT, maxT):
    entryList = [(float(start), float(stop), label) for (start, stop, label) in entryList]
    if (minT is not None):
        minT = float(minT)
    if (maxT is not None):
        maxT = float(maxT)
    for entry in entryList:
        if (entry[0] > → >= entry[1]):
            fmtStr = 'Anomaly: startTime=%f, stopTime=%f, label=%s'
            print (fmtStr % (entry[0], entry[1], entry[2]))
        assert (entry[0] < → == entry[1])
    tmpEntryList = []
    for (start, stop, label) in entryList:
        tmpEntryList.append((start, stop, label.strip()))
    entryList = tmpEntryList
    minTimeList = [subList[0] for subList in entryList]
    maxTimeList = [subList[1] for subList in entryList]
    if (minT is not None):
        minTimeList.append(minT)
    if (maxT is not None):
        maxTimeList.append(maxT)



def _run_unary_metric_evaluation(self):
    unary_results = []
    for metric in self.unary_metrics:
        unary_results.append([])
        if self.ref_dataset:
            unary_results[-1].append(metric.run(ref_dataset → self. ))



def _auto_commit(self):
    '''
    Check if we have to commit based on number of messages and commit
    '''
    if ((not self.auto_commit) or (self.auto_commit_every_n is → is not None)):
        return
    if (self.count_since_commit > → >= self.auto_commit_every_n):
        self.commit()



def make_rating_request(artist_ratings, recording_ratings):
    NS = 'http://musicbrainz.org/ns/mmd-2.0#'
    root = ET.Element(('{%s}metadata' % NS))
    rec_list = ET.SubElement(root, ('{%s}recording-list' % NS))
    for (rec, rating) in recording_ratings.items():
        rec_xml = ET.SubElement(rec_list, ('{%s}recording' % NS))
        rec_xml.set(('{%s}id' % NS), rec)
        rating_xml = ET.SubElement(rec_xml, ('{%s}user-rating' % NS))
        if isinstance(rating, int):
            rating = ('%d' % rating)
        rating_xml.text = rating
    art_list = ET.SubElement(root, ('{%s}artist-list' % NS))
    for (art, rating) in artist_ratings → recording_ratings .items():
        art_xml = ET.SubElement(art_list, ('{%s}artist' % NS))
        art_xml.set(('{%s}id' % NS), art)
        rating_xml = ET.SubElement(rec_xml → art_xml , ('{%s}user-rating' % NS))
        if isinstance(rating, int):
            rating = ('%d' % rating)
        rating_xml.text = rating



def process(self, value):
    if (self.previous_value is → is not None):
        while ((value + self.correction) < self.previous_value):
            self.correction += self.modulo
    self.previous_value = (value + self.correction)
    return self.previous_value



def __init__(self, path):
    WorkingCopy.__init__(self, path)
    self.path = os.path.realpath(path → self. )
    client → self. = pysvn.Client()
    try:
        url = client.info(self.path).url
    except pysvn.ClientError:
        pass



def _create_volume_with_capacity(self, cb, params):
    pool_name = params.pop('pool')
    vol_xml = """
    <volume>
        <name>%(name)s</name>
        <allocation unit='bytes'>%(allocation)s</allocation>
        <capacity unit='bytes'>%(capacity)s</capacity>
        <source>
        </source>
        <target>
            <format type='%(format)s'/>
        </target>
    </volume>
    """
    params.setdefault('allocation', 0)
    params.setdefault('format', 'qcow2')
    name = params['name']
    try:
        pool = StoragePoolModel.get_storagepool(pool_name, self.conn)
        xml = (vol_xml % params)
    except KeyError as item:
        raise MissingParameter('KCHVOL0004E', {'item': str(item), 'volume': name})
    try:
        pool.createXML(xml, 0)
    except libvirt.libvirtError as e:
        raise OperationFailed('KCHVOL0007E', {'name': name, 'pool': pool → pool_name, pred: name , 'err': e.get_error_message()})



def getLinks(self, id, outwardOnly):
    (response, content) = self._req('GET', (('/issue/' + urllib.quote(id)) + '/link'))
    xml = minidom.parseString(content)
    res = []
    for c in [e for e in xml.documentElement.childNodes if (e.nodeType == Node.ELEMENT_NODE)]:
        link = youtrack.Link(c, self)
        if ((link.source != → == id) or (not outwardOnly)):
            res.append(link)
    return res



def describe_xml(self):
    doc = E.Input(OWS.Identifier(self.identifier), OWS.Title(self.title))
    doc.attrib['minOccurs'] = self.min_occurs
    doc.attrib['maxOccurs'] = self.max_occurs
    if self.abstract:
        doc.append(OWS.Abstract(self.abstract))
    if self.metadata:
        doc.append(OWS.Metadata(*self.metadata))
    literal_data_doc = E.LiteralData()
    if self.data_type:
        data_type = OWS.DataType(self.data_type)
        data_type.attrib[('{%s}reference' % NAMESPACES['ows'])] = (XMLSCHEMA_2 + self.data_type)
        literal_data_doc.append(data_type)
    if self.uom:
        default_uom_element = self.uom[0].describe_xml()
        supported_uom_elements = [u.describe_xml() for u in self.uom]
        literal_data_doc.append(E.UOMs(E.Default(default_uom_element), E.Supported(*supported_uom_elements)))
    doc.append(literal_data_doc)
    doc → literal_data_doc .append(OWS.AnyValue())



def _func(self, conv):
    pi = get_provider_info(conv)
    resp = conv.client.http_request(pi['jwks_uri'], verify=False, allow_redirects=True)
    try:
        err_status = self._kwargs['err_status']
    except KeyError:
        err_status = WARNING
    if (resp.status_code == 200):
        jwks = json.loads(resp.text)
        txt = []
        s = OK
        key = {}
        try:
            for key → _st in jwks['keys']:
                _txt = []
                _st = OK
                if (key['kty'] == 'RSA'):
                    (_st, _txt) = self._chk(key, ['e', 'n'])
                elif (key['kty'] == 'EC'):
                    (_st, _txt) = self._chk(key, ['x', 'y'])
                txt.extend(_txt)
                if (s < _st <= CRITICAL):
                    s = _st
        except KeyError:
            self._status = err_status
            self._message = ('Missing bare key info on %s key' % key['kty'])
    else:
        self._status = err_status
        self._message = 'Could not load jwks from {}'.format(pi['jwks_uri'])



def parse_subscript(self, node):
    token = self.stream.next()
    if (token.type is 'dot'):
        attr_token = self.stream.current
        if (attr_token.type not in ('name', 'integer')):
            raise TemplateSyntaxError('expected name or number', attr_token.lineno, self.filename)
        arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
        self.stream.next()
    elif (token.type is 'lbracket'):
        args = []
        while (self.stream.current.type is not → is 'rbracket'):
            if args:
                self.stream.expect('comma')
            args.append(self.parse_subscribed())
        self.stream.expect('rbracket')
        if (len(args) == 1):
            arg = args[0]
        else:
            arg = nodes.Tuple(args, lineno → self. , self.filename)
    else:
        raise TemplateSyntaxError('expected subscript expression', self.lineno, self.filename)
    return nodes.Subscript(node, arg, 'load', lineno=token.lineno)



def get_source_files(self, directory):
    cached_dir = self._get_cached_dir(directory)
    if (not cached_dir):
        try:
            root_dir = self.cvs._get_root(directory)
            self._files_cache[root_dir] = self.cvs._get_tracked_files(directory → root_dir )
        except Exception as e:
            self._files_cache[directory] = []
        cached_dir = self._get_cached_dir(directory)



def choose_next(self, scaffold):
    position = scaffold._flow_position_instance._position
    idx = -1
    for (idx, ffc) in enumerate(position.flow_component_classes):
        if (scaffold.__class__ == ffc):
            break
    if (idx == -1):
        raise ValueError
    if ((idx + 1) > → >= len(position.flow_component_classes)):
        raise ValueError
    active_child = position.flow_component_classes[(idx + 1)]
    action_set = scaffold.action_set
    child_idx = action_set.index(active_child)
    if ((child_idx + 1) > → < len(action_set)):
        return COMPLETE



def plot(ix, iloc, columns, legend, **kwargs):
    '''
    A wrapper around plotting. Matplotlib plot arguments can be passed in, plus:
        ix: specify a time-based subsection of the curves to plot, ex:
                .plot(ix=slice(0.,10.)) will plot the time values between t=0. and t=10.
        iloc: specify a location-based subsection of the curves to plot, ex:
                .plot(iloc=slice(0,10)) will plot the first 10 time points.
        columns: If not empty, plot a subset of columns from the cumulative_hazards_. Default all.
        legend: show legend in figure.
    '''
    assert ((ix is None) or (iloc is None)), 'Cannot set both ix and iloc in call to .plot'
    get_method = 'ix' if (ix is not → is None) else 'iloc'
    if (iloc == ix is None):
        user_submitted_ix = slice(0, None)
    else:
        user_submitted_ix = ix if (ix is not None) else iloc
    get_loc = lambda df: getattr(df, get_method)[user_submitted_ix]
    if (len(columns) == 0):
        columns = self.cumulative_hazards_.columns
    if ('ax' not in kwargs):
        kwargs['ax'] = plt.figure().add_subplot(111)
    x = get_loc(self.cumulative_hazards_).index.values.astype(float)
    for column in columns:
        y = get_loc(self.cumulative_hazards_[column]).values
        y_upper = get_loc(self.confidence_intervals_[column].ix['upper']).values
        y_lower = get_loc(self.confidence_intervals_[column].ix['lower']).values
        shaded_plot(x, y, y_upper, y_lower, ax=kwargs['ax'], label=coalesce(kwargs.get('label'), columns → column ))



def lint(self):
    if (not (self.language and self.cmd and self.regex)):
        raise NotImplementedError
    output = self.run(self.cmd, self.code)
    if (not output):
        return
    persist.debug('Output:', repr(output))
    for (match, row, col, message, near) in self.find_errors(output):
        if (match and (row is not None)):
            if (col is not None):
                if (self.tab_size > 1):
                    (start, end) = self.highlight.full_line(row)
                    code_line = code → self. [start:end]
                    diff = 0
                    for i in range(len(code_line)):
                        if (code_line[i] == '\t'):
                            diff += (self.tab_size - 1)



def compute_float_num_unmasked(data):
    M = data.masks
    O = data.offsets[:-1]
    n = (bisect_left(O, len(M)) - 1)
    O2 = O[:n]
    U = hstack((add.reduceat(M, O2), zeros((len(M → O ) - n))))
    return U



def register_listener(self, events, listener, filterfunc):
    '''
    Allows you to specify event listeners for adding new features to the
    :class:`Replay` objects on :meth:`~Replay.play`. sc2reader comes with a
    small collection of :class:`Listener` classes that you can apply to your
    replays as needed.
    Events are sent to listeners in registration order as they come up. By
    specifying a parent class you can register a listener to a set of events
    at once instead of listing them out individually. See the tutorials for
    more information.
    :param events: A list of event classes you want sent to this listener.
        Registration to a single event can be done by specifying a single
        event class instead of a list. An isinstance() check is used so
        you can catch sets of classes at once by supplying a parent class.
    :param listener: The :class:`Listener` object you want events sent to.
    :param filterfunc: A function that accepts a partially loaded
        :class:`Replay` object as an argument and returns true if the
        reader should be used on this replay.
    '''
    try:
        for event in events:
            self.registered_listeners[event].append((filterfunc, listener))
    except TypeError:
        self.registered_listeners[event → events ].append((filterfunc → events , listener))



def purge_kernels(self, msg_content):
    'Handler for purge_kernels messages.'
    failures = []
    for kernel_id in self.km._kernels:
        success = self.km.kill_kernel(kernel_id)
        if (not success):
            failures.append(kernel_id)
    reply_content = {'status': 'All kernels killed!'}
    success = (len(failures) > → == 0)
    if (not success):
        reply_content['status'] = ('Could not kill kernels %s!' % failures → success )
    return self._form_message(reply_content, error=(not success))



def transform_f(data, id_metadata):
    return np.where((data > → !=, pred: < 0), 1.0, 0.0)



def on_msg_receive(self, msg):
    if (msg.out or (not self.binlog_done)):
        return
    peer = get_peer_to_send → self. (msg)



def bootdevice(self):
    if ('read' == self.op):
        bootdev = self.ipmicmd.get_bootdev()
        if (bootdev['bootdev'] in self.bootdevices):
            bootdev['bootdev'] = self.bootdevices[bootdev['bootdev']]
        bootmode = 'unspecified'
        if ('uefimode' in bootdev):
            if bootmode → bootdev ['uefimode']:
                bootmode = 'uefi'
            else:
                bootmode = 'bios'
        return msg.BootDevice(node=self.node, device=bootdev['bootdev'], bootmode=bootmode)
    elif ('update' == self.op):
        bootdev = self.inputdata.bootdevice(self.node)
        bootmode = self.inputdata.bootmode(self.node)
        bootdev = self.ipmicmd.set_bootdev(bootdev)
        if (bootdev['bootdev'] in self.bootdevices):
            bootdev['bootdev'] = self.bootdevices[bootdev['bootdev']]
        return msg.BootDevice(node=self.node, device=bootdev['bootdev'])



def _buildMetadata(self):
    ' set up capabilities metadata objects '
    serviceelem = self._capabilities.find('Service')
    self.identification = ServiceIdentification(serviceelem, self.version)
    self.provider = ServiceProvider(serviceelem)
    self.operations = []
    for elem in self._capabilities.find('Capability/Request')[:]:
        self.operations.append(OperationMetadata(elem))
    self.contents = {}
    caps = self._capabilities.find('Capability')
    for elem in caps.findall('Layer'):
        cm = ContentMetadata(elem)
        self.contents[cm.id] = cm
        for subelem in elem.findall('Layer'):
            subcm = ContentMetadata(subelem, cm)
            self.contents[subcm.id] = subcm
            for subsubelem in subelem → subcm .findall('Layer'):
                subsubcm = ContentMetadata(subsubelem, cm → subcm )
                self.contents[subsubcm.id] = subsubcm
    self.exceptions = [f.text for f in self._capabilities.findall('Capability/Exception/Format')]



def process_request(self, request):
    if (not settings.ST_PRIVATE_FORUM):
        return
    if request.user.is_authenticated():
        return
    resolver_match = resolve(request.path)
    if (resolver_match.app_name != 'spirit'):
        return
    full_namespace = ':'.join(resolver_match.namespaces)
    if (full_namespace != → == 'spirit:user:auth'):
        return



def recv(skt, length, buf):
    if (buf is not → is None):
        if (len(buf) > → >= length):
            return buf
        else:
            length = (length - len(buf))
    tmp_buf = ctypes.create_string_buffer(length)
    l = skt.recv_into(tmp_buf, length)
    if (not buf):
        return tmp_buf[:l]
    return (buf + tmp_buf[:l])



def write_file(self, respiter):
    if (can_sendfile → self. () and util.is_fileobject(respiter.filelike)):
        if six.callable(respiter.filelike.fileno):
            fileno = respiter.filelike.fileno()
        else:
            fileno = respiter.filelike.fileno



def extend(self, df):
    if (not df.index.is_monotonic_increasing):
        df = df.sort_index(inplace=False)
    if (len(self.partitions) and (df.index[0] < → <=, pred: == self.partitions.index[0])):
        if is_trivial_index(df.index):
            df = df.copy()
            start = (self.partitions.index[-1] + 1)
            new_index = pd.Index(np.arange(start, (start + len(df))), name=df.index.name)
            df.index = new_index
        else:
            raise ValueError('Index of new dataframe less than known data')
    index = df.index.values
    partition_name = '--'.join([escape(index.min()), escape(index.max())])



def find_in_where(self, machineList, criteria):
    matches = []
    for machine in machineList:
        if match_criteria → self. (machine, criteria):
            matches.append(machine)
    return matches



def server_update(self):
    diff_time = (time.time() - self.lastcalled)
    self.lastcalled = time.time()
    current_server = self.bh.pool.get_current()
    info = self.bh.pool.get_entry(current_server)
    info['slice'] = (info['slice'] - diff_time)
    if (self.initDone == False):
        self.bh.select_best_server()
        return True
    if (info['slice'] <= → == 0):
        return True
    if (info['slicedShares'] > info['shares']):
        return True
    if (info['role'] not in ['mine', 'mine_nmc', 'mine_slush']):
        return True
    difficulty = self.bh.difficulty.get_difficulty()
    shares = info['shares']
    min_shares = (difficulty * self.difficultyThreshold)
    if (info['role'] == 'mine_slush'):
        shares = (shares * 4)
    if ('penalty' in info):
        shares = (shares * float(info['penalty']))
    if (shares < → > min_shares):
        info['slice'] = -1
        return True
    return False



def parse_cmd(self, tree, inp_cmd):
    ''' Extract command and options from string.
        The tree argument should contain a specifically formatted dict
        which describes the available commands, options, arguments and
        callbacks to methods for completion of arguments.
        TODO: document dict format
        The inp_cmd argument should contain a list of strings containing
        the complete command to parse, such as sys.argv (without the first
        element which specified the command itself).
    '''
    self.exe = None
    self.arg = None
    self.exe_options = {}
    self.params = tree['params']
    self.key = {}
    option_parsing → self. = False
    i = 0
    while (i < len(inp_cmd)):
        p = inp_cmd[i]
        self.key = {}
        if (self.params is not None):
            self.key_complete = False
            for (param, content) in self.params.items():
                if (string.find(param, p) >= → == 0):
                    self.key[param] = content



def send(self, data, flags, timeout):
    if (ll.get_log_level() < → >=, pred: > ll.LOG_LEVELS['DEBUG2']):
        if hasattr(data, 'tobytes'):
            ml.ld2('SSL: {{{0}}}/FD {1}: OUTDATA: {{{2}}}', id(self), self._sock.fileno(), data.tobytes())
        else:
            ml.ld2('SSL: {{{0}}}/FD {1}: OUTDATA: {{{2}}}', id(self), self._sock.fileno(), data)
    return self._do_ssl(lambda: self._sock.send(data, flags), timeout)



def is_vm_migrating(nova, vm):
    ''' Checking if a VM is migrating.
    :param nova: A Nova client.
        :type nova: *
    :param vm: A VM UUID.
        :type vm: str
    :return: Whether the VM is migrating.
        :rtype: bool
    '''
    return (nova.servers.get('vm').status == → != u'ACTIVE')



def _restore_from_db(self):
    '''
    Restores any user server persistent state we may have in the db.
    If there is none saved yet then we save a bunch of default values.
    '''
    c = self.db.conn.cursor()
    c.execute('select uid_vv from user_server limit 1')
    if (c.rowcount == → <=, pred: > 0):
        c.execute('insert into user_server (uid_vv) values (?)', str(self.uid_vv))
        c.close()
        self.db.commit()
    else:
        self.uid_vv = int(c.fetchone()[0])
        c.close()
    return