Example source code for Python log() calls

cassandra_provider.py (project: cassandra-fdw, author: rankactive)
def execute(self, quals, columns, sortkeys=None):
        if ISDEBUG:
            logger.log("building select statement... Quals: {0}, columns: {1}, sortkeys: {2}, allow filtering: {3}".format(quals, columns, sortkeys, self.allow_filtering))
        res = self.build_select_stmt(quals, columns, self.allow_filtering)
        if res is None:
            yield {}
            return
        stmt = res[0]
        binding_values = res[1]
        filtered_columns = res[2]
        if self.prepare_select_stmt:
            if stmt not in self.prepared_select_stmts:
                if ISDEBUG:
                    logger.log(u"preparing statement...")
                self.prepared_select_stmts[stmt] = self.session.prepare(stmt)
            elif ISDEBUG:
                    logger.log(u"statement already prepared")
        if ISDEBUG:
            logger.log(u"executing statement...")
            st = time.time()
        elif self.enable_trace:
            logger.log(u"executing statement '{0}'".format(stmt))
        if self.prepare_select_stmt:
            result = self.session.execute(self.prepared_select_stmts[stmt], binding_values)
        else:
            result = self.session.execute(SimpleStatement(stmt), binding_values)
        if ISDEBUG:
            logger.log(u"cursor got in {0} ms".format((time.time() - st) * 1000))
        for row in result:
            line = {}
            idx = 0
            for column_name in filtered_columns:
                value = row[idx]
                if self.columnsTypes[column_name].main_type == cassandra_types.cql_timestamp and value is not None:
                    line[column_name] = u"{0}+00:00".format(value)
                elif self.columnsTypes[column_name].main_type == cassandra_types.cql_time and value is not None:
                    line[column_name] = u"{0}+00:00".format(value)
                elif isinstance(value, tuple):
                    tuple_values = []
                    for t in value:
                        tuple_values.append(str(t))
                    line[column_name] = json.dumps(tuple_values)
                elif isinstance(value, OrderedMapSerializedKey):
                    dict_values = {}
                    for i in value:
                        dict_values[str(i)] = str(value[i])
                    line[column_name] = json.dumps(dict_values)
                else:
                    line[column_name] = value
                idx = idx + 1
            rowid_values = []
            for idcolumn in self.rowIdColumns:
                rowid_values.append(unicode(line[idcolumn]))
            line[self.ROWIDCOLUMN] = json.dumps(rowid_values)
            yield line
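
The tuple and map handling in the loop above can be illustrated on its own: CQL tuples become JSON lists of stringified elements, driver map types become JSON objects, and everything else passes through unchanged. A minimal sketch with hypothetical values (a plain dict stands in for the driver's OrderedMapSerializedKey):

import json

def convert_value(value):
    # CQL tuples are serialized as a JSON list of stringified elements
    if isinstance(value, tuple):
        return json.dumps([str(t) for t in value])
    # CQL maps (OrderedMapSerializedKey in the real driver) become a JSON object
    if isinstance(value, dict):
        return json.dumps(dict((str(k), str(v)) for k, v in value.items()))
    return value

print(convert_value((1, "a", 2.5)))       # ["1", "a", "2.5"]
print(convert_value({"k1": 1}))           # {"k1": "1"}
print(convert_value(42))                  # 42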

cassandra_provider.py (project: cassandra-fdw, author: rankactive)
def get_path_keys(self):
        output = []
        sorted_items = sorted(self.querableColumnsIdx.items(), key=operator.itemgetter(1))
        clusteting_key_columns = []
        partition_key_columns = []
        idx_columns = []
        regular_columns = []
        ptt = []
        for tp in sorted_items:
            k = tp[0]
            v = tp[1]
            if v >= self.PARTITION_KEY_QUERY_COST and v < self.CLUSTERING_KEY_QUERY_COST:
                partition_key_columns.append(k)
                ptt.append(k)
            if v < self.IDX_QUERY_COST and v >= self.CLUSTERING_KEY_QUERY_COST:
                clusteting_key_columns.append(k)
            elif v >= self.IDX_QUERY_COST and v < self.REGULAR_QUERY_COST:
                idx_columns.append(k)
            else:
                regular_columns.append(k)

        ckc_len = len(clusteting_key_columns)
        if ckc_len == 0:
            output.append((tuple(partition_key_columns), 1))
        else:
            i = 1
            output.append((tuple(partition_key_columns), self.CLUSTERING_KEY_QUERY_COST))
            for ckc in clusteting_key_columns:
                ptt.append(ckc)
                if i == ckc_len:
                    output.append((tuple(ptt), 1))
                else:
                    output.append((tuple(ptt), self.CLUSTERING_KEY_QUERY_COST - i))
                    if len(idx_columns) != 0:
                        output.append((tuple(ptt + idx_columns), self.IDX_QUERY_COST - i))
                i += 1

        for idx_col in idx_columns:
            output.append(((idx_col,), self.IDX_QUERY_COST))  # one-element tuple; tuple(idx_col) would split the column name into characters

        for t in sorted_items:
            output.append(((t[0],), self.REGULAR_QUERY_COST))  # wrap the single column name in a tuple

        output.append(((self.get_row_id_column(),), 1))  # row-id pseudo column as a one-element tuple

        if ISDEBUG:
            logger.log('path keys: {0}'.format(output))
        return output
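
Multicorn expects get_path_keys to return (column_tuple, expected_row_count) pairs, which the planner uses to prefer more selective access paths. A purely illustrative result for a hypothetical table with partition key id, clustering column ts and an indexed column email; the column names and numbers are made up, since the *_QUERY_COST constants are not part of this snippet:

# Hypothetical output shape only; nothing here is taken from a real schema.
example_path_keys = [
    (("id",), 2),               # partition key alone
    (("id", "ts"), 1),          # partition key plus full clustering key: most selective
    (("email",), 4),            # secondary-index column
    (("id",), 10),              # every column repeated at the regular-query cost
    (("ts",), 10),
    (("email",), 10),
    (("rowid_placeholder",), 1) # the synthetic row-id column from get_row_id_column()
]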

conn_manager.py (project: appetite, author: Bridgewater)
def run_single(self, command, ssh=None):
        """Runs a single cmd command on the remote host
        """

        if not ssh:
            if not self.create_ssh_channel():
                return {"rc": 1,
                        "stderror": "Error creating ssh channel",
                        "stdout": "",
                        "function": self.function_name}
            ssh = self.ssh

        reads = None
        cmd = command
        if isinstance(command, dict):
            cmd = command['cmd']
            reads = command['reads']

        rc = 0
        std_out = ""
        std_error = ""

        if not CREDS.DRY_RUN:
            # Dangerous, only use if commands are filtered/protected
            # Only commands either defined here or in the command.conf should
            # run here.
            if reads:
                # Only use invoke shell if needed
                channel = ssh.invoke_shell()  # nosec

                channel.settimeout(SESSION_SHELL_TIMEOUT)

                # Remove any ssh login messages
                send_command(channel, "")

                read_commands = []
                for param, value in reads.items():
                    read_commands.append("read -s %s" % param)
                    read_commands.append(value)

                # Don't want to log any read commands
                send_command(channel, read_commands)

                std_out, std_error, rc = send_command(channel, self._add_root(cmd))
            else:
                stdin, stdout, stderr = ssh.exec_command(self._add_root(cmd), get_pty=True, timeout=SESSION_TIMEOUT)  # nosec
                rc = stdout.channel.recv_exit_status()

                std_out = stdout.read()
                std_error = stderr.read()
                stdin.flush()

        return {"stdout": std_out,
                "stderror": std_error,
                "function": self.function_name,
                "rc": rc}


# Helper ssh function

rlmain.py (project: PyMal, author: cysinfo)
def __init__(self):
        self.startup_hook = None
        self.pre_input_hook = None
        self.completer = None
        self.completer_delims = " \t\n\"\\'`@$><=;|&{("
        self.console = console.Console()
        self.size = self.console.size()
        self.prompt_color = None
        self.command_color = None
        self.selection_color = self.console.saveattr<<4
        self.key_dispatch = {}
        self.previous_func = None
        self.first_prompt = True
        self.next_meta = False # True to force meta on next character
        self.tabstop = 4
        self.allow_ctrl_c=False
        self.ctrl_c_tap_time_interval=0.3
        self.debug=False

        self.begidx = 0
        self.endidx = 0

        # variables you can control with parse_and_bind
        self.show_all_if_ambiguous = 'off'
        self.mark_directories = 'on'
        self.bell_style = 'none'
        self.mark=-1
        self.l_buffer=lineobj.ReadLineTextBuffer("")
        self._history=history.LineHistory()

        # this code needs to follow l_buffer and history creation
        self.editingmodes=[mode(self) for mode in editingmodes]
        for mode in self.editingmodes:
            mode.init_editing_mode(None)
        self.mode=self.editingmodes[0]

        self.read_inputrc()
        log("\n".join(self.rl_settings_to_string()))

        #Paste settings    
        #assumes data on clipboard is path if shorter than 300 characters and doesn't contain \t or \n
        #and replace \ with / for easier use in ipython
        self.enable_ipython_paste_for_paths=True

        #automatically convert tabseparated data to list of lists or array constructors
        self.enable_ipython_paste_list_of_lists=True
        self.enable_win32_clipboard=True

        self.paste_line_buffer=[]

    #Below is for refactoring, raise errors when using old style attributes 
    #that should be refactored out

rlmain.py (project: shadowbroker-auto, author: wrfly)
def __init__(self):
        self.startup_hook = None
        self.pre_input_hook = None
        self.completer = None
        self.completer_delims = " \t\n\"\\'`@$><=;|&{("
        self.console = console.Console()
        self.size = self.console.size()
        self.prompt_color = None
        self.command_color = None
        self.selection_color = self.console.saveattr<<4
        self.key_dispatch = {}
        self.previous_func = None
        self.first_prompt = True
        self.next_meta = False # True to force meta on next character
        self.tabstop = 4
        self.allow_ctrl_c=False
        self.ctrl_c_tap_time_interval=0.3

        self.begidx = 0
        self.endidx = 0

        # variables you can control with parse_and_bind
        self.show_all_if_ambiguous = 'off'
        self.mark_directories = 'on'
        self.bell_style = 'none'
        self.mark=-1
        self.l_buffer=lineobj.ReadLineTextBuffer("")
        self._history=history.LineHistory()

        # this code needs to follow l_buffer and history creation
        self.editingmodes=[mode(self) for mode in editingmodes]
        for mode in self.editingmodes:
            mode.init_editing_mode(None)
        self.mode=self.editingmodes[0]

        self.read_inputrc()
        log("\n".join(self.rl_settings_to_string()))

        #Paste settings    
        #assumes data on clipboard is path if shorter than 300 characters and doesn't contain \t or \n
        #and replace \ with / for easier use in ipython
        self.enable_ipython_paste_for_paths=True

        #automatically convert tabseparated data to list of lists or array constructors
        self.enable_ipython_paste_list_of_lists=True
        self.enable_win32_clipboard=True

        self.paste_line_buffer=[]

    #Below is for refactoring, raise errors when using old style attributes 
    #that should be refactored out

agpredict.py (project: forecastVeg, author: JohnNay)
def matrix(self):

        """
        This function transforms the images into a single numpy array with dimensions
        pixels by observations.  If the image has 100 pixels for 1 year (23 observations)
        then this matrix should have dimensions 100 rows by 23 columns.  The matrix
        includes the quality mask dataset.  This matrix is not yet masked for quality control.
        """

        dataCount = self.subset.count('1')
        dataNames = sorted(glob.glob(self.fullPath + '/*.tif'))
        dataNames = dataNames[0:dataCount]
        subsetInt = [int(s) for s in self.subset.split() if s.isdigit()] 

        DC = np.empty(shape = (self.rows*self.columns*self.observations,0))  
        DCs = np.empty(shape = (self.rows*self.columns*self.observations, subsetInt.count(1)))  

        for i in range(dataCount):
            name = str(dataNames[i])
            dataList = sorted(glob.glob(self.fullPath + '/*' + name[-10:-4] + '.tif'))  
            bandDC = np.empty((0, 1))      
            for b in dataList:
                data = gdal.Open(str(b), GA_ReadOnly).ReadAsArray()
                vec = data.reshape((self.rows*self.columns, 1))
                bandDC = np.append(bandDC, vec, axis = 0)  
            DC = np.append(DC, bandDC, axis = 1) 
            del vec, bandDC, data

        #apply fill values    
        if self.dataset == 'MOD15A2.005' or self.dataset == 'MOD17A2.005':
            DC[DC>self.fillValue] = 9999.0                
        if self.dataset == 'MOD11A2.005':
            DC[:,0][DC[:,0] == self.fillValue] = 9999.0 #this should have fixed it!
        else:
            DC[DC == self.fillValue] = 9999.0 


        #scale dataset
        count = 0    
        for i in range(len(subsetInt)):
            if subsetInt[i] == 1:
                DCs[:,count] = np.multiply(DC[:,count], self.scale[i])
                count += 1
        DCs[DC == 9999.0] = 9999.0
        self.DC = DCs
        del DC

        #metadata function        
        with open(self.fullPath + '/' + 'metadata_' + self.dataset + '.txt', 'w') as f:
            f.write(' '.join(["self.%s = %s" % (k,v) for k,v in self.__dict__.iteritems()]))

        logger.log('SUCCESS', 'The %s data was transformed into an array with dimensions %d rows by %d columns.  No data value set to 9999.  A metadata file with object attributes was created.  To access the matrix, simply call object.DC' % (str(self.outformat), self.DC.shape[0], self.DC.shape[1]))

        tif = sorted(glob.glob(self.fullPath + '/*.tif'))
        for t in tif:
            os.remove(t)
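
The pixels-by-observations layout described in the docstring can be shown with a toy numpy array: each observation's raster is flattened into one column and the columns are stacked side by side. A simplified sketch with made-up dimensions, ignoring the band and quality-mask handling above (nothing here touches GDAL):

import numpy as np

rows, columns, observations = 10, 10, 23           # a 10 x 10 image observed 23 times
DC = np.empty((rows * columns, 0))
for obs in range(observations):
    data = np.random.rand(rows, columns)           # stand-in for one GeoTIFF read
    vec = data.reshape((rows * columns, 1))        # flatten pixels into a single column
    DC = np.append(DC, vec, axis=1)                # append as a new observation column

print(DC.shape)                                    # (100, 23): pixels x observations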

