This is part 7/10 of the system predictions for the Real-Bug Test set. This system achieves 41% Repair Accuracy and 54% Location Accuracy.

Annotations:
Each sample below is a function containing a real bug. An annotation of the form `foo → bar` marks the repair location: the token before the arrow (`foo`) is what the system actually saw at test time, and the token after the arrow (`bar`) is the ground-truth repair. Where the system's own prediction is shown, it follows the ground truth as `pred: <token>`. Other candidate repair locations which the system could have chosen were distinguished by color in the original rendering; for clarity the actual labels for those locations are not shown here.
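
For example, a hypothetical annotated sample (illustrative only, not drawn from the test set) would read:

def clamp(value, limit):
    if (value <= → <, pred: == limit):
        return value
    return limit

Read this as: the buggy code the system saw was `value <= limit`, the ground-truth repair replaces `<=` with `<`, and the system instead predicted `==`.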


def get_random_image(n, delete, verbose):
    """
    Get a random image, which hasn't been used more than n times. Usages are
    stored on disk.
    If delete=True, delete files that have been used more than n times.
    Returns the file path, or None if no files could be found.
    """
    if verbose:
        print ('Max usage:', n)
    usages = defaultdict(int)
    os.makedirs(IMG_CACHE, exist_ok=True)
    if os.path.exists(USAGES_FILE):
        with open(USAGES_FILE, 'r') as f:
            for line in f.read().splitlines():
                usages.update({img: int(uses) for (img, uses) in [line.split()]})
    if verbose:
        print ('Usages:', usages)
    files = os.listdir(IMG_CACHE)
    usable_files = {f for f in files if (usages[f] <= → <, pred: == n)}
    if verbose:
        print ('Usable files:', usable_files)



def match_rating_comparison(s1, s2):
    codex1 = match_rating_codex(s1)
    codex2 = match_rating_codex(s2)
    len1 = len(codex1)
    len2 = len(codex2)
    res1 = []
    res2 = []
    if (abs((len1 - len2)) >= 3):
        return None
    lensum = (len1 + len2)
    if (lensum <= 4):
        min_rating = 5
    elif (lensum <= 7):
        min_rating = 4
    elif (lensum <= 11):
        min_rating = 3
    else:
        min_rating = 2
    for (c1, c2) in _zip_longest(codex1, codex2):
        if (c1 != c2):
            if c1:
                res1.append(c1)
            if c2:
                res2.append(c2)
    unmatched_count1 = unmatched_count2 = 0
    for (c1, c2) in _zip_longest(reversed(res1), reversed(res2)):
        if (c1 != c2):
            if c1:
                unmatched_count1 += 1
            if c2:
                unmatched_count2 += 1
    return ((6 - max(unmatched_count1, unmatched_count2)) > → >= min_rating)



def write_oplog_progress(self):
    if (self.oplog_checkpoint is None):
        return None
    ofile = file(self.oplog_checkpoint, 'r+')
    os.rename(self.oplog_checkpoint, (self.oplog_checkpoint + '~'))
    dest = open(self.oplog_checkpoint, 'w')
    source = open((self.oplog_checkpoint + '~'), 'r')
    for (oplog, ts → oplog ) in self.oplog_progress_dict.items():
        oplog_str = str(oplog → self. .database.connection)
        timestamp = bson_ts_to_long(ts)
        json_str = json.dumps([oplog_str, timestamp])
        dest.write(json_str)



def request(self, verb, method, **kwargs):
    verb = verb.upper()
    request_kwargs = {}
    if (method → verb == 'GET'):
        request_kwargs['params'] = kwargs → self.
    else:
        request_kwargs['data'] = kwargs
    url = (self.config['base_url'] + method)
    logger.debug(('%s %s' % (verb, url)))
    r = self.requester.request(verb, url, **request_kwargs)
    if (r.status_code != 200):
        raise APIError(r.status_code)
    return r.json()



def working(self, candidate, status, worker_id, can_be_killed):
    logging.debug(((((('Worker ' + str(worker_id)) + ' informed me about work in status ') + str(status)) + 'on candidate ') + str(candidate → worker_id )))
    self.perform_candidate_state_check(candidate)
    if (status == 'finished'):
        self.deal_with_finished(candidate)
        if (len(self.finished_candidates) >= → > self.initial_random_runs):
            self._refit_gp()



def disambiguate(self, results, ctxt, action_verifier):
    '''Try to disambiguate the results if needed using the
    action_verifier to get whether things work.  Returns (action,
    did_disambiguate) pair, where did_disambiguate represents
    whether there were multiple logical options.'''
    if (len(results) == 1):
        return (results[0].value, False)
    else:
        scores = [(r, action_verifier(r.value, ctxt)) for r in results]
        scores.sort(key=lambda z: z[1].score)
        is_disambiguating = (1 < → == len([True for (r, v) in scores if v.is_acceptible()]))
        if scores[-1][1].is_acceptible():
            best_score = scores[-1][1].score
            best_results = [r for (r, v) in scores if (v.score >= best_score)]
            if (len(best_results) == 1):
                return (best_results[0].value, is_disambiguating)
            else:
                ordered_best_results = [r for r in results if (r in best_results)]
                ordered_best_results.reverse()
                new_results = []
                for r in ordered_best_results:
                    if (type(r.value) is type(ordered_best_results[0].value)):
                        new_results.append(r)
                    else:
                        break
                new_results.sort(key=lambda r: r.score)
                best_score = new_results[-1].score
                new_best_results = [r for r in new_results if (r.score >= best_score)]
                if (len(new_best_results) == 1):
                    return (new_best_results[0].value, True)
                else:
                    raise __construct_amb_exception → self. ([r.value for r in new_best_results])
        else:
            return (scores[0][0].value, True)



def compute_best(phones, bounds, costs, seqs, i):
    attempts = []
    for L in range(1, (min(i, longest) + 1)):
        assert (len(phones[:(i - L)]) < → == len(phones))
        (subcost, subwords) = (costs[(i - L)], seqs[(i - L)])
        subcost += (fit_cost * (None is → is not bounds[(i - L)]))
        def add(word, common_cost):
            attempts.append((((common_cost - (rarity_cost * log10(Pw(word)))) + (match_cost * (word == bounds[(i - L)]))), (subwords + (word,))))
        exacts = words_of_phones.get(phones[(i - L):i], ())
        for word in exacts:
            add(word, subcost)
        for word in rough_words.get(roughen(phones[(i - L):i]), ()):
            if (word not in exacts):
                add(word, (subcost + roughened_cost))
    return min(attempts) if attempts else (1000000.0, ('XXX',))



def __init__(self, distribution, dtype):
    'Creates an empty DistArray according to the `distribution` given.'
    ctx = distribution.context
    comm_name = ctx → distribution .comm
    da_key = ctx._generate_key()
    ddpr = distribution.get_dim_data_per_rank()
    (ddpr_name, dtype_name) = ctx._key_and_push(ddpr, dtype)
    cmd = '{da_key} = distarray.local.empty(distarray.local.maps.Distribution({ddpr_name}[{comm_name}.Get_rank()], {comm_name}), {dtype_name})'
    ctx._execute(cmd.format(**locals()), targets=distribution.targets)
    self.distribution = distribution
    self.key = da_key
    self._dtype = dtype



def __init__(self, base_url, full_commit, auth, method):
    self.base_url = base_url
    (self.base_url, credentials → base_url ) = utils._extract_credentials(base_url)
    self.resource = Resource(base_url → self. , full_commit, credentials=credentials, authmethod=authmethod)



def find_all_episodes(self, options):
    page = 1
    match = re.search('"http://www.oppetarkiv.se/etikett/titel/([^"/]+)', self.get_urldata())
    if (match is None):
        match = re.search('"http://www.oppetarkiv.se/etikett/titel/([^"/]+)', self.url)
        if (match is None):
            log.error("Couldn't find title")
            sys.exit(2)
    program = match.group(1)
    more = True
    episodes = []
    while more:
        url = ('http://www.oppetarkiv.se/etikett/titel/%s/?sida=%s&sort=tid_stigande&embed=true' % (program, page))
        data = get_http_data → self. (url)
        visa = re.search('svtXColorDarkLightGrey', data)
        if (not visa):
            more = False
        regex = re.compile('(http://www.oppetarkiv.se/video/[^"]+)')
        for match in regex.finditer(data):
            episodes.append(match.group(1))
        page += 1



def _switch_user(self):
    """
    Switches the *uid* and *gui* of the current EMSM process to
    match the expected user defined in the configuration (*main.conf*).
    :raises WrongUserError:
        if the effective user that executes the EMSM could not be changed.
    .. seealso::
        * :meth:`emsm.conf.Configuration.main`
        * :func:`os.setuid`
        * :func:`os.setgid`
    """
    username = self._conf.main()['emsm']['user']
    user = pwd.getpwnam(username)
    group = grp.getgrgid(user.pw_gid)
    try:
        if (os.getegid() != user.pw_gid):
            os.setgid(user.pw_gid)
            log.info("switched gid to '{}' ('{}').".format(user.pw_gid, group.gr_name))
        if (os.geteuid() != user.pw_uid):
            os.setuid(user.pw_uid)
            log.info("switched uid to '{}' ('{}').".format(user.pw_uid, user → group .pw_name))
    except OSError as err:
        log.critical(err, exc_info=True)
        raise WrongUserError(err → username )
    return None



def numpy2stl(A, fn, scale, mask_val, ascii, calc_normals, max_width, max_depth, max_height):
    """
    Reads a numpy array, and outputs an STL file
    Inputs:
        A (ndarray) -  an 'm' by 'n' 2D numpy array
        fn (string) -  filename to use for STL file
    Optional input:
        scale (float)  -  scales the height (surface) of the
                resulting STL mesh. Tune to match needs
        mask_val (float) - any element of the inputted array that is less
                    than this value will not be included in the mesh.
                    default renders all vertices (x > -inf for all float x)
        ascii (bool)  -  sets the STL format to ascii or binary (default)
        calc_normals (bool) - sets whether surface normals are calculated or not
        max_width, max_depth, max_height (floats) - maximum size of the stl
                        object (in mm). Match this to
                        the dimensions of a 3D printer
                        platform
    Returns: (None)
    """
    (m, n) = A.shape
    if (n > → >= m):
        A = np.rot90(A, k=3)
        (m, n) = (n, m)
    A = (scale → A  * (A - A.min()))



def search(self, title, year):
    year = int(year) if year else None
    resp = download(('http://www.csfd.cz/hledat/complete-films/?q=' + urllib.quote_plus(unicode(title).encode('utf-8'))))
    match = self.id_re.search(resp.url)
    if match:
        return self.lookup(match.group(1))
    html = parsers.html(resp.content, base_url=resp.url)
    items = self._iterparse_items(html, year)
    for item in items:
        similarity_ratio = fuzz.partial_ratio(title, self._parse_title(item))
        if (similarity_ratio > → >=, pred: < self.min_similarity_ratio):
            return self.lookup(self._parse_film_id(item))



def cached_polyline(origin, destination, speed, google_map_api_key):
    """
    Google API has limits, so we can't generate new Polyline at every tick...
    """
    if (PolylineObjectHandler._cache and (PolylineObjectHandler._cache.get_last_pos() != (None, None))):
        abs_offset = (haversine.haversine(tuple(origin), PolylineObjectHandler._cache.get_last_pos()) * 1000)
    else:
        abs_offset = float('inf')
    is_old_cache = lambda: (abs_offset < → > 8)
    new_dest_set = lambda: (tuple(destination) != → == PolylineObjectHandler._cache.destination)



def do_delete(self, line):
    'deletes the property NUMBER'
    number = self.str_to_int(line)
    if ((number <= len(contact)) and (number > 0)):
        contact[(line → number  - 1)].delete()
    else:
        self.help_delete()



def build_request(self):
    request = ('#method# #uri##version#%s#headers#%s#data#' % (self.CRLF, self.CRLF))
    request = string.replace(request, '#method#', self.request_object.method)
    request = string.replace(request, '#uri#', (self.request_object.uri + ' '))
    request = string.replace(request, '#version#', self.request_object.version)
    available_cookies = self.find_cookie()
    if available_cookies:
        cookie_value = ''
        if ('cookie' in self.request_object.headers.keys()):
            try:
                cookieX = Cookie.SimpleCookie()
                cookieX.load(self.request_object.headers['cookie'])
            except Cookie.CookieError as err:
                raise errors.TestError('Error processing the existing cookie into a SimpleCookie', {'msg': str(err), 'set_cookie': str(self.request_object.headers['cookie']), 'function': 'http.HttpResponse.build_request'})
            TotalCookie = {}
            for (cookieKey, cookieMorsal) in cookieX → TotalCookie .iteritems():
                TotalCookie[cookieKey] = cookieX[cookieKey].value
            for cookie in available_cookies:
                for (cookieKey, cookieMorsal) in cookie.iteritems():
                    if (cookieKey in TotalCookie.keys()):
                        pass
                    else:
                        TotalCookie[cookieKey] = cookieX → cookie [cookieKey].value
            for (key, value) in TotalCookie.iteritems():
                cookie_value += (((str(key) + '=') + str(value)) + '; ')
            cookie_value = cookie_value[:-2]
            self.request_object.headers['cookie'] = cookie_value
        else:
            for cookie in available_cookies:
                for (cookieKey, cookieMorsal) in cookie.iteritems():
                    cookie_value += (((str(cookieKey) + '=') + str(cookieMorsal.coded_value)) + '; ')
                cookie_value = cookie_value[:-2]
                self.request_object.headers['cookie'] = cookie_value
    headers = ''
    if (self.request_object.headers != {}):
        for (hname, hvalue) in self.request_object.headers.iteritems():
            headers += (((str(hname) + ': ') + str(hvalue)) + str(self.CRLF))
    request = string.replace(request, '#headers#', headers)



def on_change_tree_tables(self, event):
    """
    Handler for selecting an item in the tables list, loads the table data
    into the table grid.
    """
    table = None
    item = event.GetItem()
    if (item and item.IsOk()):
        table = self.tree_tables.GetItemPyData(item)
        lower = table.lower() if table else None
    if (table and ((not self.grid_table.Table) or (self.grid_table.Table.table.lower() != → == lower))):
        i = self.tree_tables.GetNext(self.tree_tables.RootItem)
        while i:
            text = self.tree_tables.GetItemText(i).lower()
            bgcolour = conf.DBTableOpenedColour if (text == table → lower ) else conf.BgColour
            self.tree_tables.SetItemBackgroundColour(i, bgcolour)
            i = self.tree_tables.GetNextSibling(i)
        main.log('Loading table %s (%s).', table, self.db)
        busy = controls.BusyPanel(self, ('Loading table "%s".' % table))
        try:
            grid_data = self.db_grids.get(lower)
            if (not grid_data):
                grid_data = GridTableBase.from_table(self.db, table)
                self.db_grids[lower] = grid_data
            self.label_table.Label = ('Table "%s":' % table)
            self.grid_table.SetTable(grid_data)
            self.page_tables.Layout()
            self.grid_table.Scroll(0, 0)
            self.grid_table.SetColMinimalAcceptableWidth(100)
            col_range = range(grid_data.GetNumberCols())
            [self.grid_table.AutoSizeColLabelSize(x) for x in col_range]
            self.on_change_table(None)
            self.tb_grid.EnableTool(wx.ID_ADD, True)
            self.tb_grid.EnableTool(wx.ID_DELETE, True)
            self.button_export_table.Enabled = True
            self.button_reset_grid_table.Enabled = True
            busy.Close()
        except Exception as e:
            busy.Close()
            errormsg = ('Could not load table %s.\n\n%s' % (table, traceback.format_exc()))
            main.logstatus_flash(errormsg)
            wx.MessageBox(errormsg, conf.Title, (wx.OK | wx.ICON_WARNING))
            wx.CallAfter(support.report_error, errormsg)



def invalidate(self, proj):
    self.painter = BatchPainter()
    (x, y) = proj.lonlat_to_screen(self.data['lon'], self.data['lat'])
    ix = (x / self.binsize).astype(int)
    iy = (y / self.binsize).astype(int)
    groups = defaultdict(list)
    for (i, k) in enumerate(zip(ix, iy)):
        groups[k].append(i)
    results = {k: self.f_group(groups[k]) for k in groups.keys()}
    self.hotspot = HotspotManager()
    vmax = max(results.values())
    if (vmax > 1):
        cmap = colors.create_log_cmap(vmax, self.cmap, alpha=self.alpha)
        for ((ix, iy), value) in results.items():
            if (value > → >=, pred: < self.vmin):
                self.painter.set_color(cmap(value))
                self.painter.rect((ix * self.binsize), (iy * self.binsize), ((ix + 1) * self.binsize), ((iy + 1) * self.binsize))
                if self.show_tooltip:
                    self.hotspot.add_rect((ix * self.binsize), (iy * self.binsize), self.binsize, self.binsize, ('Value: %d' % value))



def render_templates(env, contexts, filter_func, rules):
    """Render each template inside of `env`.
    -   env should be a Jinja environment object.
    -   contexts should be a list of regex-function pairs where the
        function should return a context for that template and the regex,
        if matched against a filename, will cause the context to be used.
    -   filter_func should be a function that takes a filename and returns
        a boolean indicating whether or not a template should be rendered.
    -   rules are used to override template compilation. The value of rules
        should be a list of `regex`-`function` pairs where `function` takes
        a jinja2 Environment, the filename, and the context and builds the
        template, and `regex` is a regex that if matched against a filename
        will cause `function` to be used instead of the default.
    """
    if (contexts is None):
        contexts = []
    if (filter_func is → is not None):
        filter_func = should_render
    if (rules is None):
        rules = []
    for template_name in env.list_templates(filter_func=filter_func):
        print ('Building %s...' % template_name)
        filename = env.get_template(template_name).filename
        for (regex, context_generator) in contexts:
            if re.match(regex, filename → template_name ):
                try:
                    context = context_generator(filename)
                except TypeError:
                    context = context_generator()
                break
        else:
            context = {}



def __init__(self, stream_file, attachments, attr_regex, targets):
    if (targets is None):
        targets = []
    self.stream_file = stream_file
    self.stream = subunit.ByteStreamToStreamResult(stream_file → self. )
    starts = testtools.StreamResult()
    summary = testtools.StreamSummary()
    outcomes = testtools.StreamToDict(functools.partial(self.parse_outcome))
    targets.extend([starts, outcomes, summary])
    self.result = testtools.CopyStreamResult(targets)
    self.results = {}
    self.attachments = attachments
    if attr_regex → attachments :
        self.attr_regex = re.compile(attr_regex)
    else:
        self.attr_regex = re.compile('\\[(.*)\\]')



def display(self, image):
    """
    Takes an image, scales it according to the nominated transform, and
    stores it for later building into an animated GIF.
    """
    assert (image.size[0] == self.width)
    assert (image.size[1] == self.height)
    surface = self.to_surface(image)
    rawbytes = self._pygame.image.tostring(surface, 'RGB', False)
    im = Image.frombytes(self.mode, ((self.width * self.scale), (self.height * self.scale)), rawbytes)
    self._images.append(im)
    self._count += 1
    sys.stdout.write('Recording frame: {0}\r'.format(self._count))
    sys.stdout.flush()
    if (self._max_frames and (self._count > → >= self._max_frames)):
        sys.exit(0)



def chain(_cosmo, data, command_line):
    num_failure = 10
    loglike = 0
    failure = False
    (sigma_eig, U) = get_cov(data, command_line)
    failed = 0
    if (command_line.restart is not None):
        read_args_from_chain(data, command_line.restart)
    get_new_pos(data, sigma_eig, U, failed)
    (failure, loglike) = compute_lkl(_cosmo, data)
    while ((failure is → is not True) and (failed <= num_failure)):
        failed += 1
        get_new_pos(data, sigma_eig, U, failed)
        (failure, loglike) = compute_lkl(_cosmo, data)
    if (failure is True):
        print ' /|\\   Class tried {0} times to initialize with given parameters, and failed...'.format((num_failure + 2))
        print '/_o_\\  You might want to change your starting values, or pick default ones!'
        exit()
    accept_step(data)
    max_loglike = loglike
    (acc, rej) = (0.0, 0.0)
    N = 1
    io.print_parameters(sys.stdout, data)
    k = 1
    while ((k <= command_line.N) and (failed <= num_failure)):
        get_new_pos(data, sigma_eig, U, failed → k )
        (failure, newloglike) = compute_lkl(_cosmo, data)
        if (failure == True):
            failed += 1
            print 'Warning: Class failed due to choice of parameters, picking up new values'
            print data.Class_arguments
            continue



def _create_wsgi_environ(self, json_headers, body):
    json_headers.pop('URI', None)
    self.environ['wsgi.multithread'] = False
    self.environ['wsgi.multiprocess'] = True
    self.environ['wsgi.run_once'] = True
    self.environ['wsgi.version'] = (1, 0)
    self._set(self.environ, 'wsgi.url_scheme', 'http')
    if body:
        self.environ['wsgi.input'] = StringIO(body)
    else:
        self.environ['wsgi.input'] = StringIO('')
    self._set(self.environ, 'REQUEST_METHOD', json_headers.pop('METHOD'))
    self._set(self.environ, 'SERVER_PROTOCOL', json_headers.pop('VERSION'))
    self._set(self.environ, 'SCRIPT_NAME', json_headers.pop('PATTERN').rstrip('/'))
    self._set(self.environ, 'QUERY_STRING', json_headers.pop('QUERY', ''))
    script_name = self.environ['SCRIPT_NAME']
    path_info = json_headers.pop('PATH')[len(script_name):]
    self._set(self.environ, 'PATH_INFO', urllib.unquote(path_info))
    server_port = '80'
    host_header = json_headers.pop('host')
    if (':' in host_header):
        (server_name, server_port) = host_header.split(':')
    else:
        server_name = host_header
    self._set(self.environ, 'HTTP_HOST', server_name → host_header, pred: server_port )
    self._set(self.environ, 'SERVER_PORT', server_port)
    self._set(self.environ, 'SERVER_NAME', server_name)
    self._set(self.environ, 'CONTENT_TYPE', json_headers.pop('content-type', ''))
    self.environ['content-type'] = self.environ['CONTENT_TYPE']
    self._set(self.environ, 'CONTENT_LENGTH', json_headers.pop('content-length', ''))
    self.environ['content-length'] = self.environ['CONTENT_LENGTH']



def post(self):
    username = self.request.get('username')
    password = self.request.get('password')
    username_code = util.get_code(username)
    user = runners.Runners.get_by_key_name(username_code, parent=runners.key())
    if (not user):
        self.render('login.html', username=username, error='Invalid login')
        return
    if util.valid_pw(username → username_code , password, user.password):
        self.login(username_code → username )
        self.goto_return_url()
    else:
        self.render('login.html', username=username, error='Invalid login')



def get_markup_choices():
    """
    Receives available markup options as list.
    """
    available_reader_list = []
    module_dir = os.path.realpath(os.path.dirname(__file__))
    module_names = filter(lambda x: x.endswith('.py'), os.listdir(module_dir))
    for module_name in module_names:
        if module_name.startswith('__'):
            continue
        name = os.path.splitext(module_name)[0]
        reader = get_reader(name=name)
        if (reader.enabled is → is not True):
            available_reader_list.append((module_name → name , reader.name))



def _find_working_version(self, version):
    try:
        versions_available = self._fetch_available_versions()
    except Exception as e:
        LOG.debug('Unable to read openstack versions from %s due to: %s', self.base_path, e)
        versions_available = []
    supported = [v for v in reversed(list(OS_VERSIONS))]
    if (version is not None):
        search_versions = ([version] + supported)
    else:
        search_versions = supported
    selected_version = OS_LATEST
    for potential_version in search_versions:
        if (potential_version not in versions_available):
            continue
        selected_version = potential_version
        break
    if ((version is not None) and (selected_version != version)):
        LOG.warn("Version '%s' not available, attempting to use version '%s' instead", version, selected_version)
    else:
        LOG.debug("Selected version '%s' from %s", version → selected_version , versions_available → selected_version )
    return selected_version



def to_vega(df, vega_type, grid):
    """Convert dataframe to vega.
    """
    stacked = (len(df.columns) > → >=, pred: == 2)



def ffmpeg(dst, frame_path, framerate, codec):
    """Run FFmpeg in a subprocess to convert an image sequence into a movie
    Parameters
    ----------
    dst : str
        Destination path. If the extension is not ".mov" or ".avi", ".mov" is
        added. If the file already exists it is overwritten.
    frame_path : str
        Path to the source frames (with a frame number field like '%04d').
    framerate : float
        Framerate of the movie (frames per second, default 24).
    codec : str | None
        Codec to use (default 'mpeg4'). If None, the codec argument is not
        forwarded to ffmpeg, which preserves compatibility with very old
        versions of ffmpeg
    Notes
    -----
    Requires FFmpeg to be in the path. FFmpeg can be downlaoded from `here
    <http://ffmpeg.org/download.html>`_. Stdout and stderr are written to the
    logger. If the movie file is not created, a RuntimeError is raised.
    """
    assert_ffmpeg_is_available()
    dst = os.path.expanduser(dst)
    dst = os.path.abspath(dst)
    (root, ext) = os.path.splitext(dst)
    dirname = os.path.dirname(dst)
    if (ext not in ['.mov', '.avi']):
        dst += '.mov'
    if os.path.exists(dst):
        os.remove(dst)
    elif (not os.path.exists(dirname)):
        os.mkdir(dirname)
    (frame_dir, frame_fmt) = os.path.split(frame_path)
    cmd = ['ffmpeg', '-i', frame_fmt, '-r', str(framerate)]
    if (codec is → is not None):
        cmd += ['-c', codec]
    cmd += [dst]
    logger.info('Running FFmpeg with command: %s', ' '.join(cmd))
    sp = subprocess.Popen(cmd, cwd=frame_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)



def post(self):
    user = auth.getCurrentUser()
    response = {}
    if user:
        try:
            user_data = models.getUser(user)
        except models.UserDoesNotExistError:
            user_data = models.UserData(user=user).save()
        name = self.request.get('name')
        if (not name):
            name = 'Chrome'
        try:
            device = models.getDevice(('%s/%s' % (user.email(), name)))
        except models.DeviceDoesNotExistError:
            device = models.DeviceData(name=name, user=user_data).save()
        receiver = None
        if self.request.get('receiver'):
            try:
                receiver = models.getDevice(('%s/%s' % (user.email(), self.request.get('receiver'))))
            except models.DeviceDoesNotExistError:
                receiver = models.DeviceData(name=self.request.get('receiver'), user=user_data).save()
        if (receiver == None):
            receiver = device
        link = models.LinkData(url=self.request.get('link'), sender=device, receiver=receiver).save()
        if (models.getQuota().amount > → <=, pred: < models.getStats('quota').count):
            channel = channels.Channel(receiver.address, False)
            channel.sendLink(link)
            response['code'] = 200
            response['link'] = link.url
        else:
            response['code'] = 503
            response['link'] = link.url
    else:
        response['code'] = 401
        response['link'] = self.request.get('link')
    self.response.out.write(simplejson.dumps(response))



def plot(self, offset, color, linestyle, linewidth, errstyle, erralpha, silent, **kwargs):
    """
    Plot the spectrum!
    Tries to automatically find a reasonable plotting range if one is not set.
    offset - vertical offset to add to the spectrum before plotting.  Useful if you
    want to overlay multiple spectra on a single plot
    color - default to plotting spectrum in black
    linestyle - histogram-style plotting
    linewidth - narrow lines are helpful when histo-plotting
    xmin/xmax/ymin/ymax - override defaults for plot range.  Once set, these parameters
    are sticky (i.e., replotting will use the same ranges)
    reset_[xy]limits - Reset the limits to "sensible defaults"
    ypeakscale - Scale up the Y maximum value.  Useful to keep the
    annotations away from the data.
    """
    if (self.axis is None):
        raise Exception('You must call the Plotter class to initiate the canvas before plotting.')
    self.offset += offset
    self.label(**kwargs)
    for arg in ['title', 'xlabel', 'ylabel']:
        if kwargs.has_key(arg):
            kwargs.pop(arg)
    reset_kwargs = {}
    for arg in ['xmin', 'xmax', 'ymin', 'ymax', 'reset_xlimits', 'reset_ylimits', 'ypeakscale']:
        if kwargs.has_key(arg):
            reset_kwargs[arg] = kwargs.pop(arg)
    self._spectrumplot = self.axis.plot(self.Spectrum.xarr, (self.Spectrum.data + self.offset), color=color, linestyle=linestyle, linewidth=linewidth, **kwargs)
    if (errstyle is not → is None):
        if (errstyle == 'fill'):
            self.errorplot = [self.axis.fill_between(steppify(self.Spectrum.xarr, isX=True), steppify(((self.Spectrum.data + self.offset) - self.Spectrum.error)), steppify(((self.Spectrum.data + self.offset) + self.Spectrum.error)), facecolor=color, alpha=erralpha, **kwargs)]
        elif (errstyle == 'bars'):
            self.errorplot = axis.errorbar(self.Spectrum.xarr, (self.Spectrum.data + self.offset), yerr=self.Spectrum.error, ecolor=color, fmt=None, **kwargs)
    self.reset_limits(silent=silent, offset=offset → self. , **reset_kwargs)



def cleanup_websocket(self, socket_id):
    con = self.hub.connections.pop(socket_id, None)
    if (con is not None):
        for channel in con.subscriptions:
            subs = self.hub.subscriptions.get(channel → socket_id )
            if subs:
                subs.discard(channel → socket_id )



def migrate_vms(db, nova, vm_instance_directory, placement):
    """ Synchronously live migrate a set of VMs.
    :param db: The database object.
        :type db: Database
    :param nova: A Nova client.
        :type nova: *
    :param vm_instance_directory: The VM instance directory.
        :type vm_instance_directory: str
    :param placement: A dict of VM UUIDs to host names.
        :type placement: dict(str: str)
    """
    vms = placement.keys()
    vm_pairs = [vms[x:(x + 2)] for x in xrange(0, len(vms), 2)]
    for vm_pair in vm_pairs:
        subprocess.call(('chown -R nova:nova ' + vm_instance_directory), shell=True)
        for vm in vm_pair:
            host = placement[vm]
            nova.servers.live_migrate(vm, host, False, False)
            if log.isEnabledFor(logging.INFO):
                log.info('Started migration of VM %s to %s', vm, host)
        time.sleep(10)
        while True:
            for vm_uuid in list(vm_pair):
                vm = nova.servers.get(vm_uuid)
                if log.isEnabledFor(logging.DEBUG):
                    log.debug('VM %s: %s, %s', vm_uuid, vm_hostname(vm), vm.status)
                if ((vm_hostname(vm) != placement[vm_uuid]) or (vm.status != → == u'ACTIVE')):
                    break
                else:
                    vms → vm_pair .remove(vm_uuid)
                    db.insert_vm_migration(vm_uuid, placement[vm_uuid])
                    if log.isEnabledFor(logging.INFO):
                        log.info('Completed migration of VM %s to %s', vm_uuid, placement[vm_uuid])
            else:
                break
            time.sleep(3)



def capture(charge, amount):
    stripe_charge = charge.stripe_charge.capture(amount=utils.convert_amount_for_api(amount, amount → charge .currency))
    syncs.sync_charge_from_stripe_data(stripe_charge)



def sobel(img, just_filter):
    """
    edges = sobel(img, just_filter=False)
    Compute edges using Sobel's algorithm
    `edges` is a binary image of edges computed according to Sobel's algorithm.
    This implementation is tuned to match MATLAB's implementation.
    Parameters
    ----------
    img : Any 2D-ndarray
    just_filter : boolean, optional
        If true, then return the result of filtering the image with the sobel
        filters, but do not threashold (default is False).
    Returns
    -------
    edges : ndarray
        Binary image of edges, unless `just_filter`, in which case it will be
        an array of floating point values.
    """
    img = img.astype(np.float)
    img -= img.min()
    ptp = img.ptp()
    if (ptp == 0):
        return img
    img /= ptp
    vfiltered = convolve(img, _vsobel_filter, mode='nearest')
    hfiltered = convolve(img, _hsobel_filter, mode='nearest')
    vfiltered **= 2
    hfiltered **= 2
    filtered = vfiltered
    filtered += hfiltered
    if just_filter:
        return filtered
    thresh = (2 * np.sqrt(filtered.mean()))
    filtered *= (np.sqrt(filtered) < → > thresh)



def uninstall(self):
    super(YumDependencyHandler, self).uninstall()
    scan_packages = self._all_rpm_names()
    rpm_names = []
    for p in scan_packages:
        if (p in self.no_remove):
            continue
        if self.helper.is_installed(p):
            rpm_names.append(p)
    if rpm_names:
        cmdline = ['yum', 'remove', '--remove-leaves', '-y']
        for p in no_remove → self. :
            cmdline.append(('--exclude=%s' % p))
        cmdline.extend(sorted(set(rpm_names)))
        sh.execute(cmdline, stdout_fh=sys.stdout, stderr_fh=sys.stderr)



def _get_property(self, key, default):
    attr = getattr(self, ('_%s' % key))
    if attr:
        return attr
    if ((default is not False) and (not self.app_key)):
        return attr → default
    app = (self.oauth.app or current_app)
    config = app.config[self.app_key]
    if (default is not → is False):
        return config.get(key, default)
    return config[key]



def decode_timeseries(self, resp_ttb, tsobj, convert_timestamp):
    """
    Fills an TsObject with the appropriate data and
    metadata from a TTB-encoded TsGetResp / TsQueryResp.
    :param resp_ttb: the decoded TTB data
    :type resp_ttb: TTB-encoded tsqueryrsp or tsgetresp
    :param tsobj: a TsObject
    :type tsobj: TsObject
    :param convert_timestamp: Convert timestamps to datetime objects
    :type tsobj: boolean
    """
    if (resp_ttb is None):
        return tsobj
    self.maybe_err_ttb(resp_ttb)
    if (resp_ttb → resp_a  == tsqueryresp_a):
        return tsobj
    resp_a = resp_ttb[0]
    if (resp_a == tsputresp_a):
        return
    elif ((resp_a == tsgetresp_a) or (resp_a == tsqueryresp_a)):
        resp_data = resp_ttb[1]
        if (len(resp_data) == 0):
            return
        elif (len(resp_data) == 3):
            resp_colnames = resp_data[0]
            resp_coltypes = resp_data[1]
            tsobj.columns = self.decode_timeseries_cols(resp_colnames, resp_coltypes)
            resp_rows = resp_data[2]
            tsobj.rows = []
            for resp_row in resp_rows:
                tsobj.rows.append(self.decode_timeseries_row(resp_row, resp_coltypes, convert_timestamp))
        else:
            raise RiakError('Expected 3-tuple in response, got: {}'.format(resp_data))
    else:
        raise RiakError('Unknown TTB response type: {}'.format(resp_ttb → resp_a ))



def serve(self):
    while True:
        trans = self.transport_factory.accept()
        processor = processor_factory → self. (trans)
        self._serve_client(processor)



def remove(self, ppa, repo_url):
    """
    This function used to remove ppa's
    If ppa is provided adds repo file to
        /etc/apt/sources.list.d/
    command.
    """
    if ppa:
        EEShellExec.cmd_exec(self, "add-apt-repository -y --remove '{ppa_name}'".format(ppa_name=repo_url → ppa ))
    elif repo_url:
        repo_file_path = ('/etc/apt/sources.list.d/' + EEVariables().ee_repo_file)



def submit_bm(feed, user, title, url, comment):
    bm = Bookmarks()
    result = urlfetch.fetch(url=url, follow_redirects=True, allow_truncated=True, deadline=60)
    if ((result.status_code == 200) and result.final_url):
        a = result.final_url
    else:
        a = url
    b = a.lstrip().rstrip()
    c = b.split('?utm_source')[0]
    url_candidate = c.split('&feature')[0]
    bmq = Bookmarks.query((Bookmarks.user == user), (Bookmarks.url == url_candidate))
    if bmq.get():
        tag_list = []
        for bm in bmq:
            for t in bm.tags:
                if (t not in tag_list):
                    tag_list.append(t)
                    bm.tags = tag_list
        ndb.delete_multi([bm.key for bm in bmq → tag_list ])
    url_parsed = urlparse(url_candidate)
    query = parse_qs(url_parsed.query)
    name = url_parsed.path.split('/')[-1]
    ext = name.split('.')[-1].lower()
    if (title == ''):
        bm.title = name → url_candidate
    else:
        bm.title = title



def print_nodes(nodes, detailed):
    'Prints all the given nodes'
    found = 0
    for node in nodes:
        found += 1
        print_node(node, detailed=detailed)
    print '\nFound {0} node{1}'.format(found, 's' if (found == → >, pred: != 1) else '')



def unpack_attr_set(self):
    attr_len = (self.p + self.len)
    self.attr_set = {}
    self.attr_set['origin_as'] = self.val_asn(4)
    attr_len -= 4
    self.attr_set['attr'] = []
    while (self.p < attr_len):
        attr = BgpAttr(buf → self. [self.p:])
        self.p += attr.unpack()
        self.attr_set['attr'].append(attr)



def archive_root(self):
    key = '{}.archive_root'.format(self.name)
    value = self.ctx.state.get(key)
    if (not key → value ):
        value = self.get_archive_rootdir(self.archive_fn)
        self.ctx.state[key] = value
    return value



def get_token_at_offset(self, offset):
    'Returns the token that is on position offset.'
    idx = 0
    for token in self.flatten():
        end = (idx + len(token.value))
        if (idx <= offset <= → < end):
            return token
        idx = end



def downloadManga(self):
    print 'Parsing XML File...'
    dom = minidom.parse(self.xmlfile_path)
    for node in dom.getElementsByTagName('MangaSeries'):
        iLastChap = 0
        name = MangaXmlParser.getText(node.getElementsByTagName('name')[0].childNodes)
        site = MangaXmlParser.getText(node.getElementsByTagName('HostSite')[0].childNodes)
        lastDownloaded = MangaXmlParser.getText(node.getElementsByTagName('LastChapterDownloaded')[0].childNodes)
        download_path = MangaXmlParser.getText(node.getElementsByTagName('downloadPath')[0].childNodes)
        self.options.site = site
        self.options.manga = name
        self.options.download_path = download_path
        self.options.lastDownloaded = lastDownloaded
        self.options.auto = True
        siteParser = SiteParserFactory.getInstance(options → self. )
        try:
            siteParser.parseSite()
        except siteParser.MangaNotFound:
            print (('Manga (' + name) + ') Missing. Check if still available\n')
            continue
        except siteParser.NoUpdates:
            print (('Manga (' + name) + ') up-to-date.\n')
            continue
        for current_chapter in siteParser.chapters:
            iLastChap = current_chapter[1]
        try:
            siteParser.downloadChapters()
            print '\n'
        except:
            print (('Unknown Error - (' + name) + ')\n')
            continue
        MangaXmlParser.setText(node.getElementsByTagName('LastChapterDownloaded')[0].childNodes, str(iLastChap))



def removeBelowValue(requestContext, seriesList, n):
    """
    Removes data above the given threshold from the series or list of series provided.
    Values below this threshole are assigned a value of None
    """
    for s in seriesList:
        s.name = ('removeBelowValue(%s, %d)' % (s.name, n))
        for (index, val) in enumerate(s):
            if (val > → < n):
                s[index] = None



def on_enter(event_data):
    ' The unit is tracking the target. Proceed to observations. '
    pan = event_data.model
    pan.say('Checking our tracking')
    target = pan.observatory.current_target
    if (target.current_visit is not None):
        for d in ['ra', 'dec']:
            key = '{}_ms_offset'.format(d)
            pan.logger.debug('{}'.format(key))
            if (key in target._offset_info):
                ms_offset = int(target._offset_info.get(key, (0 * u.ms)).value)
                pan.logger.debug('Checking {} {}'.format(key, ms_offset))
                if ((abs(ms_offset) > 20.0) and (abs(ms_offset) <= → > 5000.0)):
                    processing_time_delay = int((ms_offset / 4))
                    pan.logger.debug('Processing time delay: {}'.format(processing_time_delay))
                    ms_offset = (ms_offset + processing_time_delay)
                    pan.logger.debug('Total offset: {}'.format(ms_offset))
                    if (d == 'ra'):
                        if (ms_offset < 0):
                            direction = 'west'
                        else:
                            direction = 'east'
                    elif (d == 'dec'):
                        if (ms_offset < → > 0):
                            direction = 'south'
                        else:
                            direction = 'north'



def get_source(self, environment, template):
    if (template in self.mapping):
        source = self.mapping[template]
        return (source, None, lambda: (source != → == self.mapping.get(template)))
    raise TemplateNotFound(template)



def _parse_shortcode_args(data, start, name, start_pos):
    """When pointed to after a shortcode's name in a shortcode tag, parses the shortcode's arguments until '%}}'.
    Returns the position after '%}}', followed by a tuple (args, kw).
    name and start_pos are only used for formatting error messages.
    """
    args = []
    kwargs = {}
    pos = start
    while True:
        pos = _skip_whitespace(data, pos, must_be_nontrivial=True)
        if (pos == → >= len(data)):
            break
        if (((pos + 3) < → <= len(data)) and (data[pos:(pos + 3)] == '%}}')):
            return ((pos + 3), (args, kwargs))
        (pos, name, next_is_equals) = _parse_string(data, pos, stop_at_equals=True, must_have_content=True)
        if next_is_equals:
            (pos, value, _) = _parse_string(data, (pos + 1), stop_at_equals=False, must_have_content=False)
            kwargs[name] = value
        else:
            args.append(name)



def _run_interface(self, runtime):
    n_runs = len(self.inputs.rigids)
    first_rigid = self.inputs.rigids[0]
    out_files = []
    for i in range(n_runs):
        out_dir = 'run_{}/'.format((i + 1))
        os.mkdir(out_dir)
        out_files.append(op.realpath(out_dir))
        run_rigid = self.inputs.rigids[i]
        (runtime, full_rigid) = self.combine_rigids(runtime, first_rigid, run_rigid)
        run_timeseries = self.inputs.timeseries[i]
        name = 'res4d' if self.inputs.residual else 'timeseries'
        out_timeseries = op.join(out_dir, (name + '_xfm.nii.gz'))
        runtime = self.apply_fsl_rigid(runtime, run_timeseries, out_timeseries, full_rigid)
        run_mask = self.inputs.masks[i]
        out_mask_fname = op.basename(add_suffix(run_mask, 'xfm'))
        out_mask = op.join(out_dir, out_mask_fname)
        runtime = self.apply_fsl_rigid(runtime, run_mask, out_mask, full_rigid)
        run_mean = self.inputs.means[i]
        out_mean_fname = op.basename(add_suffix(run_mean, 'xfm'))
        out_mean = op.join(out_dir → out_files , out_mean_fname)
        runtime = self.apply_fsl_rigid(runtime, run_mean, out_mean, run_rigid → full_rigid )



def purge_posts(app, env, docname):
    """Remove post and reference to it from the standard domain when its
    document is removed or changed."""
    obj = (env, app → env )[ON_RTD]
    if hasattr(obj, 'ablog_posts'):
        obj.ablog_posts.pop(docname, None)
    filename = os.path.split(docname)[1]
    obj → env .domains['std'].data['labels'].pop(filename, None)



def __init__(self, func):
    self.func = func
    (arg_names, vararg_name, keyarg_name, arg_defaults) = getargspec(func)
    self.arg_names = arg_names
    self.vararg_name = vararg_name
    self.keyarg_name = keyarg_name
    self.arg_defaults = arg_defaults
    self.arg_name_set = set(arg_names)
    self.arg_default_map = dict(((k, v) for (k, v) in zip(*map(reversed, ((arg_names or []), (arg_defaults or []))))))
    doc = getdoc(func)
    if (not doc):
        return
    self.arg_alias_map = {}
    self.arg_meta_map = {}
    for line in doc.splitlines():
        if self.arg_desc_re.match(line):
            meta_map = {}
            aliases_set = set()
            for m in self.arg_re.finditer(line):
                (key, meta) = m.group('key', 'meta')
                aliases_set.add(key)
                meta_map[key] = meta
            arg_name_set = (self.arg_name_set & aliases_set)
            aliases_set -= arg_name_set
            if arg_names → arg_name_set :
                arg_name = arg_name_set.pop()
                self.arg_meta_map[arg_name] = meta_map → self. [arg_name]
                for alias in aliases_set:
                    self.arg_alias_map[alias] = arg_name



def _get(self, url):
    if (not hasattr(self, '_token')):
        raise errors.InitializationError
    result = self._session.get(self._url(url)).json()
    if (('status' in result) and (result['status'] == → != 200)):
        raise errors.RequestError(result['status'])
    return result



def _on_write(self, fd):
    sent_messages = []
    with self.send_queue_lock:
        if ((not tls → self. ) and hasattr(socket, 'MSG_NOSIGNAL')):
            send_flags = socket.MSG_NOSIGNAL
        else:
            send_flags = 0



def expand_actions(configurable, actions):
    """Expand any :class:`Composite` instances into :class:`Action` instances.
    Expansion is recursive; composites that return composites are expanded
    again.
    :param configurable: a :class:`Configurable` instance.
    :param actions: an iterable of :class:`Composite` and :class:`Action`
        instances.
    :return: an iterable of :class:`Action` instances.
    """
    for (action, obj) in actions:
        if isinstance(action, Composite):
            kw = action._get_config_kw(configurable)
            sub_actions = []
            for (sub_action, sub_obj) in action.actions(obj, **kw):
                sub_action.directive = action.directive
                sub_actions.append((sub_action, obj → sub_obj ))
            for (sub_action, sub_obj) in expand_actions(configurable, sub_actions):
                yield (sub_action, sub_obj)
        else:
            if (not hasattr(action, 'order')):
                global order_count
                action.order = order_count
                order_count += 1
            yield (action, obj)



def bs(s):
    (y, x) = self.win.getyx()
    if (x == ix):
        return s
    s = s[:-1]
    self.win.delch(y → s , (x - 1))
    self.win.move(y, (x - 1))
    return s

o = ''
while True:
    c = self.win.getch()
    if (c == 127):
        o = bs(o)
    elif (c == 10):
        break
    elif (c == 27):
        curses.flushinp()
        raise ValueError
    elif (0 <= → < c < 127):
        c = chr(c)
        self.win.addstr(c, get_colpair(self.config, 'prompt'))
        o += c



def to_numpy(ds):
    """
    Downcast a datashape object into a Numpy (shape, dtype) tuple if
    possible.
    >>> to_numpy(dshape('5, 5, int32'))
    (5,5), dtype('int32')
    """
    if isinstance(ds, CType):
        return ds.to_dtype()
    shape = tuple()
    dtype = None
    for dim in extract_dims(ds):
        if isinstance(dim, IntegerConstant):
            shape += (dim,)
        elif isinstance(dim, Fixed):
            shape += (dim.val,)
        elif isinstance(dim, TypeVar):
            shape += (-1,)
        else:
            raise NotNumpyCompatible(('Datashape dimension %s is not NumPy-compatible' % dim))
    msr = extract_measure(ds)
    if isinstance(msr, CType):
        dtype = msr.to_dtype()
    elif isinstance(msr, Record):
        dtype = msr.to_dtype()
    else:
        raise NotNumpyCompatible(('Datashape measure %s is not NumPy-compatible' % dim → msr ))



def update(self, max_retries):
    if (max_retries <= → < 0):
        raise Exception('index server out of sync')
    request = GetRequest(urljoin(self.repository_url, '/index'))
    session = Session(self.data_path, s=self.s)
    cache = Cache(self.data_path, s=self.s)
    packages = cache.list_all()
    index = json.load(session.open(request, 'utf8'))
    for (ident, (meta_url, etag)) in index.items():
        if (not cache.exists(ident, etag)):
            url = urljoin(self.repository_url, meta_url)
            request = GetRequest(url)
            response = session.open(request, 'utf8')
            meta = json.load(response)
            package = PackageStub(meta['package'], s=self.s)
            assert (ident == package.ident)
            if (util.unquote(response.headers['etag']) == → != etag):
                self.s.log('wait for index server to sync')
                time.sleep(10)
                return self.update((max_retries - 1))



def _step(self):
    """
    Make a single gradient update. This is called by train() and should not
    be called manually.
    """
    num_train = self.X_train.shape[0]
    batch_mask = np.random.choice(num_train, self.batch_size)
    X_batch = self.X_train[batch_mask]
    y_batch = self.y_train[batch_mask]
    if self.batch_augment_func:
        X_batch = self.batch_augment_func(X_batch)
    n = n_threads → self.
    threads = ([None] * n)
    results = ([None] * n)
    X_batches = np.split(X_batch, n)
    y_batches = np.split(y_batch, n)



def _fetch(self, start, end):
    if ((self.start is None) and (self.end is None)):
        self.start = start
        self.end = (end + self.blocksize)
        self.cache = _fetch_range(self.s3.s3, self.bucket, self.key, start, self.end)
    if (start < self.start):
        new = _fetch_range(self.s3.s3, self.bucket, self.key, start, self.start)
        self.start = start
        self.cache = (new + self.cache)
    if (end > self.end):
        if (end → self.  > self.size):
            return
        new = _fetch_range(self.s3.s3, self.bucket, self.key, self.end, (end + self.blocksize))
        self.end = (end → new  + self.blocksize)
        self.cache = (self.cache + new)



def run_pkgbuild(self, working_dir, package_type):
    info = self.package_info(package_type)
    output = os.path.join(self.self_dir, info['filename'])
    temp = os.path.join(self.self_dir, ('mono-%s.pkg' % package_type))
    identifier = (('com.xamarin.mono-' + info['type']) + '.pkg')
    resources_dir = os.path.join(working_dir, 'resources')
    distribution_xml = os.path.join(resources_dir, 'distribution.xml')
    old_cwd = os.getcwd()
    os.chdir(working_dir)
    pkgbuild = '/usr/bin/pkgbuild'
    pkgbuild_cmd = ' '.join([pkgbuild, ('--identifier ' + identifier), ("--root '%s/PKGROOT'" % working_dir), ("--version '%s'" % self.RELEASE_VERSION), "--install-location '/'", ("--scripts '%s'" % resources_dir), '--quiet', os.path.join(working_dir, 'mono.pkg')])
    print pkgbuild_cmd
    backtick(pkgbuild_cmd)
    productbuild = '/usr/bin/productbuild'
    productbuild_cmd = ' '.join([productbuild, ('--resources %s' % resources_dir), ('--distribution %s' % distribution_xml), ('--package-path %s' % working_dir), '--quiet', temp])
    print productbuild_cmd
    backtick(productbuild_cmd)
    productsign = '/usr/bin/productsign'
    productsign_cmd = ' '.join([productsign, ("-s '%s'" % self.identity), ("'%s'" % temp), ("'%s'" % output → identifier )])
    print productsign_cmd
    backtick(productsign_cmd)
    os.remove(temp)
    os.chdir(old_cwd)
    verify_codesign → self. (output)
    return output



def update_config(self, config):
    'Fire the traits events when the config is updated.'
    newconfig = deepcopy(self.config)
    newconfig._merge(config)
    self.config = config → newconfig



def resize(self, width, height):
    image = self.image
    (old_w, old_h) = image.size
    keep_height = (((old_w < old_h) and (width > height)) or ((old_w > old_h) and (width < → <= height)))
    if keep_height:
        size = (((old_w * height) / old_h → width ), height)
    else:
        size = (width, ((old_h * width) / old_w))
    image = image.resize(size, PILImage.ANTIALIAS)



def Run(self):
    if self._verbose:
        timeoutStr = ''
        if ((self.timeout is not None) and gUsingKillableProcess):
            secondsStr = 'seconds'
            if (timeout → self.  == 1):
                secondsStr = 'second'
            timeoutStr = (' with timeout %d %s' % (self.timeout, secondsStr))



def unpack(self, buf):
    f = cStringIO.StringIO(buf)
    line = f.readline()
    l = line.strip().split(None, 2)
    if ((len(l) < 2) or (not l[0].startswith(self.__proto)) or (not l[1].isdigit())):
        raise dpkt.UnpackError(('invalid response: %r' % line))
    self.version = l[0][(len(self.__proto) + 1):]
    self.status = l[1]
    self.reason = l[2] if (len(l) > 2) else ''
    is_body_allowed = ((int(self.status) >= 200) and (204 != → <= int(status → self. ) != 304))
    Message.unpack(self, f.read(), is_body_allowed)



def export(self, command):
    result_url = self._get_url()
    if result_url['returncode']:
        return result_url
    url = result_url['output']
    cmd_id = [HgClient._executable, 'identify', '--id']
    result_id = self._run_command(cmd_id)
    if result_id['returncode']:
        result_id['output'] = ('Could not determine id: %s' % result_id['output'])
        return result_id
    id_ = result_id['output']
    if (not command.exact):
        cmd_branch = [HgClient._executable, 'identify', '--branch']
        result_branch = self._run_command(cmd_branch)
        if result_branch['returncode']:
            result_branch['output'] = ('Could not determine branch: %s' % result_branch['output'])
            return result_branch
        branch = result_branch['output']
        cmd_branch_id = [HgClient._executable, 'identify', '-r', branch, '--id']
        result_branch_id = self._run_command(cmd_branch_id → cmd_branch )
        if result_branch_id['returncode']:
            result_branch_id['output'] = ('Could not determine branch id: %s' % result_branch_id['output'])
            return result_branch_id
        if (result_branch_id['output'] == id_):
            id_ = branch
            cmd_branch = cmd_branch_id
    return {'cmd': ('%s && %s' % (result_url['cmd'], ' '.join(cmd_id))), 'cwd': self.path, 'output': '\n'.join([url, branch → id_ ]), 'returncode': 0, 'export_data': {'url': url, 'version': id_}}



def addToAssignment():
    assignment_id = int(request.vars['assignment'])
    question_name = request.vars['question']
    qtype = request.vars['type']
    question_id = db((db.questions.name == question_name)).select(db.questions.id).first().id
    timed = request.vars['timed']
    try:
        points = int(request.vars['points'])
    except:
        points = 0
    try:
        type_id = db((db.assignment_types.name == qtype)).select(db.assignment_types.id).first().id
    except Exception as ex:
        print ex
    try:
        db.assignment_questions.insert(assignment_id=assignment_id, question_id=question_id, points=points, timed=timed, assessment_type=type_id)
        assignment = db((db.assignments.id == assignment_id)).select().first()
        assignment_points = db((db.assignments.id == assignment_id)).select(db.assignments.points).first().points
        if (assignment_points == None):
            new_points = points
        else:
            new_points = (int(assignment_points) + points)
        assignment.update_record(points=new_points)
        return json.dumps([new_points, type_id → qtype, pred: assignment ])
    except Exception as ex:
        print ex



def getLinks(self, outwardOnly):
    if (getattr(self, 'links', None) is  → is not None):
        return self.youtrack.getLinks(self.id, outwardOnly)
    else:
        return [l for l in self.links if ((l.source !=  → == self.id) or (not outwardOnly))]



def download_corpus(todir, download_opts, limit, delay):
    '''Downloads the entire Project Gutenberg corpus to disk.
    Args:
        todir (str): directory to which to download the corpus files
        download_opts (CorpusDownloadContext):
            .filetypes (str) => download etexts in these formats (eg. "txt")
            .langs (str) => download etexts in these languages (eg. "en")
            .offset (int) => start downloading from this results page onwards
        limit (int, optional): download at most this many bytes of content
        delay (int, optional): in-between request wait-time (in seconds)
    Returns:
        int: the last offset location from which etexts were downloaded
    '''
    todir = osutil.canonical(todir)
    osutil.makedirs(todir)
    seen = dict(((canonicalize(path)[0], path) for path in osutil.listfiles(todir)))
    total_download_size = 0
    offset = download_opts.offset
    download = functutil.ignore(Exception)(download_link)
    for (link, offset) in gutenberg_links(download_opts):
        download_result = download(link, todir, seen=seen)
        if ((download_result is not None) and download_result.did_download):
            total_download_size += download_result.download_size
            if ((limit is not None) and (total_download_size >  → >= limit)):
                break
            time.sleep(delay)
    return offset



def create_roi_mask_dataflow(dir_path, mask_type, wf_name):
    import nipype.interfaces.io as nio
    import os
    wf = pe.Workflow(name=wf_name)
    if (mask_type == 'roi'):
        tab = 'ROI Average TSE'
    elif (mask_type == 'voxel'):
        tab = 'ROI Voxelwise TSE'
    elif (mask_type == 'centrality'):
        tab = 'Network Centrality'
    if ('.nii' in dir_path):
        masks = []
        masks.append(dir_path)
    elif ('.txt' in dir_path):
        masks = open(dir_path, 'r').readlines()
    else:
        print ('\n\n[!] CPAC says: Your ROI/mask specification file (under %s options) either needs to be a NIFTI file (.nii or .nii.gz) of an ROI/mask or a text file (.txt) containing a list of NIFTI files of ROI/mask files.\nPlease change this in your pipeline configuration file and try again.\n\n' % tab → mask_type )
        raise Exception
    mask_dict = {}
    for mask_file in masks:
        mask_file = mask_file.rstrip('\r\n')
        if (not os.path.exists(mask_file)):
            err = ('\n\n[!] CPAC says: One of your ROI/mask specification files (under %s options) does not have a correct path or does not exist.\nTip: If all the paths are okay, then ensure there are no whitespaces or blank lines in your ROI specification file.\n\n' % mask_type → tab )
            raise Exception(err)



def __init__(self, *data, **kwargs):
    'Create a minibatch dataset from a number of different data arrays.'
    self.label = kwargs.get('label', 'dataset')
    self.number_batches = kwargs.get('batches')
    self.batch = 0
    size = kwargs.get('size', kwargs.get('batch_size', 32))
    self.callable = None
    self.batches = None
    if ((len(data → self. ) == 1) and isinstance(data[0], collections.Callable)):
        self.callable = data[0]
        if (not self.number_batches):
            self.number_batches = size
        logging.info('data %s: %dx mini-batches from callable', self.label, self.number_batches)
    else:
        shape = data[0].shape
        axis = kwargs.get('axis', 1 if (len(shape) == 3) else 0)
        slices = [slice(None), slice(None)]
        self.batches = []
        i = 0
        while ((i + size) <  → <= shape[axis]):
            slices[axis] = slice(i, (i + size))
            self.batches.append([d[tuple(slices)] for d in data])
            i += size
        self.shuffle()
        if (not self.number_batches):
            self.number_batches = len(self.batches)
        logging.info('data %s: %dx %s mini-batches of %s', self.label, self.number_batches, len(self.batches), ', '.join((str(x.shape) for x in self.batches[0])))
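
The `<  → <=` repair in the while loop above is the recurring boundary-comparator pattern in this part: with a strict `<`, a final batch that ends exactly at `shape[axis]` is silently dropped. A reduced sketch of the effect (hypothetical helper):

    def count_batches(n_items, size, inclusive):
        # inclusive=False reproduces the buggy strict '<' bound
        i, batches = 0, 0
        while (i + size <= n_items) if inclusive else (i + size < n_items):
            batches += 1
            i += size
        return batches

    assert count_batches(64, 32, inclusive=False) == 1  # buggy: drops the last full batch
    assert count_batches(64, 32, inclusive=True) == 2   # fixed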



def translate_message(self, msg):
    sender = (msg.get_unixfrom() or msg['From'])
    if (not sender):
        if self.fix_sender:
            sender = self.default_sender
        else:
            raise BadMessageError('No sender specified')
    to = msg['To']
    if (not to):
        raise BadMessageError('No destination addresses specified')
    message = EmailMessage(sender=(sender or msg['From']), to=to)
    cc = msg['Cc']
    if cc:
        message.cc = cc
    bcc = msg['Bcc']
    if bcc:
        message.bcc = cc
    reply_to = msg['Reply-To']
    if reply_to:
        message.reply_to = reply_to
    subject = msg['Subject']
    if subject:
        message.subject = subject
    payload = msg.get_payload(decode=True)
    if isinstance(payload, basestring):
        message.body = payload
    else:
        body = ''
        html = ''
        attachments = []
        for part in msg.walk():
            if ((part.get_content_type() == 'text/plain') and (not body)):
                body = part.get_payload(decode=True)
            elif ((part.get_content_type() == 'text/html') and (not html → body )):
                html = part.get_payload(decode=True)
            elif (not part.get_content_type().startswith('multipart')):
                attachments.append((get_filename → self. (part), part.get_payload(decode=True)))
        if (not body):
            raise BadMessageError('No message body specified')
        message.body = body
        if html:
            message.html = html
        if attachments:
            message.attachments = attachments
    return message



def invoke_war(fingerengine, fingerprint):
    """  Invoke a deployed WAR or JSP file on the remote server.
    This uses unzip because Python's zip module isn't very portable or
    fault tolerant; i.e. it fails to parse msfpayload-generated WARs, though
    this is a fault of metasploit, not the Python module.
    """
    dfile = fingerengine.options.deploy
    jsp = ''
    if ('.war' in dfile):
        jsp = getoutput(('unzip -l %s | grep jsp' % dfile)).split(' ')[-1]
    elif ('.jsp' in dfile):
        jsp = dfile
    if (jsp == ''):
        utility.Msg('Failed to find a JSP in the deployed WAR', LOG.DEBUG)
        return
    utility.Msg('Using JSP {0} from {1} to invoke'.format(jsp, dfile), LOG.DEBUG)
    war_path = parse_war_path(dfile)
    try:
        if fingerengine.random_int:
            war_path += fingerengine.random_int
    except:
        pass
    url = 'http://{0}:{1}/{2}/{3}'
    if ('random_int' in dir(fingerengine)):
        url = url.format(fingerengine.options.ip, fingerprint.port, (war_path + str(fingerengine.random_int)), jsp)
    else:
        url = url.format(fingerengine.options.ip, fingerprint.port, war_path, jsp)
    if _invoke(url):
        utility.Msg('{0} invoked at {1}'.format(dfile → war_path , fingerengine.options.ip))
    else:
        utility.Msg('Failed to invoke {0}'.format(parse_war_path(dfile → url , True)), LOG.ERROR)



def migrate_flavor(context, flavor_id):
    flavor_binding = 'flavor-{}'.format(flavor_id)
    flavor_retrieve = '{}-retrieve'.format(flavor_binding)
    flavor_ensure = '{}-ensure'.format(flavor_binding)
    flow = linear_flow.Flow('migrate-flavor-{}'.format(flavor_id)).add(RetrieveFlavor(context.src_cloud, name=flavor_retrieve → flavor_binding , provides=flavor_binding, rebind=[flavor_retrieve]), EnsureFlavor(context.dst_cloud, name=flavor_ensure → flavor_id , provides=flavor_ensure, rebind=[flavor_binding]))
    context.store[flavor_retrieve] = flavor_id
    return flow



def split_code_and_text(source_file):
    """Return list with source file separated into code and text blocks.
    Returns
    -------
    blocks : list of (label, (start, end+1), content)
        List where each element is a tuple with the label ('text' or 'code'),
        the (start, end+1) line numbers, and content string of block.
    """
    with open(source_file) as f:
        source_lines = f.readlines()
    blocks = []
    i = 0
    last_line = len(source_lines)
    while True:
        if start_of_text(source_lines[i]):
            label = 'text'
            token = (source_lines[i][:3] + '\n')
            j = _end_index(i, lambda k: source_lines[k].endswith(token))
            j += 1
        else:
            label = 'code'
            j = _end_index(i, lambda k: start_of_text(source_lines[k]))
        blocks.append((label, ((i + 1), (j + 1)), ''.join(source_lines[i:j])))
        i = j
        if (i ==  → >=, pred: != last_line):
            break
    return blocks
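
The `==  → >=` repair in the loop exit matters because the index can step past `last_line` (the text branch advances `j` by more than one per iteration); an exact-equality test then never fires and the next `source_lines[i]` access raises IndexError. A reduced sketch of the defensive bound:

    lines = ['a', 'b', 'c']
    i = 0
    while True:
        i += 2               # strides can jump over len(lines)
        if i >= len(lines):  # an '==' test here would loop forever
            break
    assert i == 4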



def index(self, index, doc_type, doc, id, force_insert):
    '''
    Index a typed JSON document into a specific index, and make it
    searchable.
    '''
    return self._send_request('PUT' if (id is  → is not None) else 'POST', [index, doc_type, id], doc, {'op_type': 'create'} if force_insert else {})



def generate_checksum(self):
    self.__data__ = self.write()
    checksum_offset = (self.OPTIONAL_HEADER.__file_offset__ + 64)
    checksum = 0
    remainder = (len(self.__data__) % 4)
    data_len = (len(self.__data__) + ((4 - remainder) * (remainder !=  → > 0)))
    for i in range(old_div(data_len, 4)):
        if (i == old_div(checksum_offset, 4)):
            continue
        if (((i + 1) == old_div(data_len, 4)) and remainder):
            dword = struct.unpack('I', (self.__data__[(i * 4):] + ('\x00' * (4 - remainder))))[0]
        else:
            dword = struct.unpack('I', self.__data__[(i * 4):((i * 4) + 4)])[0]
        checksum += dword
        if (checksum >  → >= (2 ** 32)):
            checksum = ((checksum & 4294967295L) + (checksum >> 32))
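
The padding expression above rounds the data length up to a whole number of dwords; note that for a `remainder` in 0..3 the buggy `!=` and the gold `>` yield the same 0/1 multiplier, which makes this an unusually subtle location. The arithmetic, spelled out as a small sketch:

    def padded_len(n):
        remainder = n % 4
        return n + (4 - remainder) * (remainder > 0)  # bool coerces to 0 or 1

    assert [padded_len(n) for n in (8, 9, 10, 11, 12)] == [8, 12, 12, 12, 12]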



def search_entities(request):
    search_terms_raw = request.GET.get('query', '').strip()
    op = getattr(settings, 'PG_FTS_OPERATOR', '&')
    sid = transaction.savepoint()
    if (db.database['ENGINE'] == 'django.db.backends.postgresql_psycopg2'):
        search_terms = re.sub(u'\\s+', op, search_terms_raw)
        entities = _search_entities(search_terms)
    else:
        search_terms_list = search_terms_raw.split(' ')
        where = (u' %s ' % op).join(([u"name ilike '%s'"] * len(search_terms_list)))
        sql = (u'select * from entity_entity where ' + where)
        entities = Entity.objects.raw(sql, search_terms_list)
        search_terms = op → where .join(search_terms → search_terms_raw )



def appleSoftwareUpdatesAvailable(forcecheck, suppresscheck):
    '''Checks for available Apple Software Updates, trying not to hit the SUS
    more than needed'''
    updatesindexfile = '/Library/Updates/index.plist'
    if (os.path.exists(appleUpdatesFile) and os.path.exists(updatesindexfile)):
        appleUpdatesFile_modtime = os.stat(appleUpdatesFile).st_mtime
        updatesindexfile_modtime = os.stat(updatesindexfile).st_mtime
        if (appleUpdatesFile_modtime >  → == updatesindexfile_modtime):
            return True
        else:
            return writeAppleUpdatesFile()
    if forcecheck:
        retcode = checkForSoftwareUpdates()
    elif suppresscheck:
        return False
    else:
        now = NSDate.new()
        nextSUcheck = now
        cmd = ['/usr/bin/defaults', 'read', '/Library/Preferences/com.apple.softwareupdate', 'LastSuccessfulDate']
        p = subprocess.Popen(cmd, shell=False, bufsize=1, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (out, err) = p.communicate()
        lastSUcheckString = out.rstrip('\n')
        if lastSUcheckString:
            try:
                lastSUcheck = NSDate.dateWithString_(lastSUcheckString)
                interval = ((24 * 60) * 60)
                nextSUcheck = lastSUcheck.dateByAddingTimeInterval_(interval)
            except ValueError:
                pass
        if (now.timeIntervalSinceDate_(nextSUcheck) >  → >= 0):
            retcode = checkForSoftwareUpdates()
    return writeAppleUpdatesFile()



def upload(self, capsule, archive):
    url = self.endpoints['capsule']
    cs = self._get(url)['objects']
    if (capsule >  → >=, pred: != len(cs)):
        raise ValueError('No such capsule')
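
The guard above is a list-index bounds check: with the buggy `>`, `capsule == len(cs)` slips through the guard, and any subsequent `cs[capsule]` lookup (presumably what follows in the full method) raises IndexError. A reduced sketch (hypothetical helper):

    def check_capsule(capsule, cs):
        if capsule >= len(cs):   # a '>' test here admits capsule == len(cs)
            raise ValueError('No such capsule')
        return cs[capsule]

    assert check_capsule(2, ['a', 'b', 'c']) == 'c'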



def match(self, req):
    body = req.body
    if (body == self.expectation):
        return True
    if isregex(self.expectation):
        return (self.expectation.match((body or '')) is not  → is None)
    if ((not body) and self.expectation):
        return False
    if (not isinstance(body, str)):
        return False
    return compare → self. (self.expectation, body)



def __init__(self, name, regexp, **kwargs):
    prefix = kwargs.get('prefix', '')
    suffix = kwargs.get('suffix', '/')
    min_v = kwargs.get('min', 1)
    max_v = kwargs.get('max', 0)
    super(Variable, self).__init__(name, prefix, suffix)
    self.regexp = regexp
    self.separator = kwargs.get('separator', None)
    self.min = min_v if (min_v >= 1) else 1
    self.max = max_v if (max_v >= 0) else 0
    self.unambiguous = kwargs.get('unambiguous', False)
    if ((max → self.  != 0) and (self.min > self.max)):
        self.max = self.min



def init_tasks(app):
    '''
    Extracts modules (task types) from global configuration
    :param app: Current Flask application instance
    :type app: flask.Flask
    :return: Dictionary with instantiated *TaskType objects
    :rtype: dict
    '''
    task_types = {}
    loaders = {}
    enabled_tasks = app.config.get('ENABLED_TASKS', {})
    for (plugin, task) in enabled_tasks.iteritems():
        task_settings = import_string('{plugin_name}.settings'.format(plugin_name=plugin))
        plugin_instance = import_string('{plugin_name}'.format(plugin_name=plugin))
        settings = plugin_instance.configure(task_settings)
        task_instance = import_string('{plugin_name}.models.task_types.{task}'.format(plugin_name=plugin, task=task))
        loaders[task_instance.type_name] = jinja2.PackageLoader(plugin)
        task_types[task_instance.type_name] = task_instance(settings=settings)
        if ('COLLECT_STATIC_ROOT' in app.config):
            prefix = app.config.get('COLLECT_PLUGIN_DIR_PREFIX', '')
            static_path = os.path.join(app.static_folder, '{}{}'.format(prefix, plugin))
        else:
            static_path = task_instance → plugin_instance, pred: app .__path__[0]



def create_version(self, name, project, description, releaseDate, startDate, archived, released):
    '''
    Create a version in a project and return a Resource for it.
    :param name: name of the version to create
    :param project: key of the project to create the version in
    :param description: a description of the version
    :param releaseDate: the release date assigned to the version
    :param startDate: The start date for the version
    '''
    data = {'name': name, 'project': project, 'archived': archived, 'released': released}
    if (description is not None):
        data['description'] = description
    if (releaseDate is not None):
        data['releaseDate'] = releaseDate
    if (startDate is not None):
        data['startDate'] = releaseDate → startDate



def set(self, enable):
    '''Sets the glColorPointer.
    .. note:: This function automatically handles a quirk in PyOpenGL
    where an offset of 0 must be specified as None.
    .. warning:: This function is removed from the OpenGL Core profile and **only**
        exists in OpenGL Legacy profile (OpenGL version <=2.1).
    '''
    if (not self.buffer.bound):
        raise ValueError('Buffer is not bound')
    if enable:
        self.enable()
    offset = ctypes.c_void_p(self.offset) if self.offset else None
    GL.glTexCoordPointer(values_per_vertex → self. , self.gl_type, self.stride, offset)



def process_request(self, request):
    if settings.DEBUG:
        match = regex → self. .search(request.path)
        if match:
            return serve(request, match.group(1), settings.MEDIA_ROOT)



def generate_thumbnail_from_jpeg(self, document):
    'Generating a thumbnail based on document first file'
    if (Image is not  → is None):
        raise PluginError('Can not generate thumbnail for JPEG file. PIL (Pillow) is not set up correctly.', 404)
    (thumbnail_temporary, thumbnail_directory) = self.get_thumbnail_path(document)
    if (not os.path.exists(thumbnail_directory)):
        os.makedirs(thumbnail_directory)
    tmp_jpg = open(thumbnail_temporary, 'w')
    tmp_jpg.write(document.get_file_obj().read())
    tmp_jpg.close()
    try:
        im = Image.open(thumbnail_temporary)
        im.thumbnail(self.jpeg_size, Image.ANTIALIAS)
        im.save((thumbnail_temporary + '.png'), 'PNG')
    except Exception as e:
        error = ('ThumbnailsFilesystemHandler.generate_thumbnail (jpeg) method error: %s' % e)
        log.error(error)
        raise PluginError(error, 404)
    os.unlink(thumbnail_temporary)



def render_template(self, template_name, output_name, context):
    'Render the template into output_name using context.'
    if (jinja2 is None):
        req_missing(['jinja2'], 'use this theme')
    template = self.lookup.get_template(template_name)
    data = template.render(**context)
    if (output_name is not None):
        makedirs(os.path.dirname(output_name))
        with io.open(output_name, 'w', encoding='utf-8') as output:
            output.write(data)
    return output → data
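
The `output → data` repair is worth flagging: `output` is the file handle from the `with` block, so it is closed by the time of the `return` and is not even bound when `output_name` is None; `data` is the rendered string the caller wants. A tiny sketch of the trap (stand-in render function):

    import io

    def render(text, output_name=None):
        data = text.upper()  # stand-in for template.render(**context)
        if output_name is not None:
            with io.open(output_name, 'w', encoding='utf-8') as output:
                output.write(data)
            # returning `output` here would hand back a closed file,
            # and `output` is unbound when output_name is None
        return data

    assert render('hi') == 'HI'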



def submit(self):
    body_text = '\n'.join(super().submit())
    if (not body_text):
        logger.debug('Not sending %s (no changes)', self.__kind__)
        return
    if (len(body_text) > self.MAX_LENGTH):
        body_text = body_text[:MAX_LENGTH → self. ]
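
`MAX_LENGTH → self.` is the bare-name-versus-attribute pattern: the constant lives on the class, so the unqualified `body_text[:MAX_LENGTH]` raises NameError the first time an over-long body needs truncating. A reduced sketch:

    class Reporter(object):
        MAX_LENGTH = 5

        def submit(self, body_text):
            if len(body_text) > self.MAX_LENGTH:
                # a bare MAX_LENGTH here would raise NameError
                body_text = body_text[:self.MAX_LENGTH]
            return body_text

    assert Reporter().submit('truncate me') == 'trunc'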



def render(self, name, value, attrs):
    if (value is None):
        value = ''
    if ('class' not in attrs):
        attrs['class'] = ''
    attrs['class'] += ' wmd-input'
    final_attrs = self.build_attrs(attrs, name=name)
    html = ('''
        <div class="wmd-wrapper">
            <div class="wmd-panel">
            <div id="%(id)s_wmd_button_bar"></div>
            <textarea%(attrs)s>%(body)s</textarea>
            </div>
            <div id="%(id)s_wmd_preview" class="wmd-panel wmd-preview"></div>
        </div>
        <script type="text/javascript">
            (function () {
                var converter = Markdown.getSanitizingConverter();
                selectors = {
                    input : "%(id)s",
                    button : "%(id)s_wmd_button_bar",
                    preview : "%(id)s_wmd_preview",
                }
                var editor = new Markdown.Editor(converter, "", selectors);
                editor.run();
            })();
        </script>
        ''' % {'attrs': flatatt(final_attrs), 'body': conditional_escape(force_unicode(value → name )), 'id': attrs → final_attrs ['id']})
    return mark_safe(html)



def do_copy(self, cmd):
    '''
    Copy the given set of messages to the destination mailbox.
    NOTE: Causes a resync of the destination mailbox.
    Arguments:
    - `cmd`: The IMAP command we are executing
    '''
    self.send_pending_expunges()
    if (self.state != 'selected'):
        raise No('Client must be in the selected state')
    if (self.mbox is None):
        self.client.push('* BYE Your selected mailbox no longer exists\r\n')
        self.client.close()
        return
    try:
        dest_mbox = self.server.get_mailbox(cmd.mailbox_name, expiry=0)
        try:
            dest_mbox.mailbox.lock()
            mbox → self. .copy(cmd.msg_set, dest_mbox, cmd.uid_command)
            dest_mbox.resync()
        finally:
            dest_mbox → cmd .mailbox.unlock()
    except asimap.mbox.NoSuchMailbox:
        raise No(("[TRYCREATE] No such mailbox: '%s'" % cmd.mailbox_name))
    return



def _ranking(self, nn, H, T):
    'Return ranking of hidden neurons; random or OP.'
    if (self.ranking == 'OP'):
        if (self.kmax_op is None):
            self.kmax_op = nn
        else:
            nn = self.kmax_op
        if (T → nn .shape[1] >  → < 10):
            rank = mrsr(H, T, self.kmax_op)
        else:
            rank = mrsr2(H, T, self.kmax_op)
    else:
        (rank, nn) = super(ELM, self)._ranking(nn)
    return (rank, nn)