def plot_counts(counts, gene_type):
"""Plot expression counts. Return a Figure object"""
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
fig = plt.figure(figsize=((50 + len(counts) * 5) / 25.4, 210/25.4))
matplotlib.rcParams.update({'font.size': 14})
ax = fig.gca()
ax.set_title('{} gene usage'.format(gene_type))
ax.set_xlabel('{} gene'.format(gene_type))
ax.set_ylabel('Count')
ax.set_xticks(np.arange(len(counts)) + 0.5)
ax.set_xticklabels(counts.index, rotation='vertical')
ax.grid(axis='x')
ax.set_xlim((-0.25, len(counts)))
ax.bar(np.arange(len(counts)), counts['count'])
fig.set_tight_layout(True)
return fig
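# Usage sketch (hypothetical data, not from the original source): plot_counts
# expects a pandas DataFrame whose index holds the gene names and which has a
# 'count' column, as implied by counts.index and counts['count'] above.
import pandas as pd
demo_counts = pd.DataFrame({'count': [120, 45, 78]}, index=['geneA', 'geneB', 'geneC'])
demo_fig = plot_counts(demo_counts, 'V')
demo_fig.savefig('v_gene_usage.png')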
# Python matplotlib.use() examples (source code)
def main():
"""
Start StochOPy Viewer window.
"""
import matplotlib
matplotlib.use("TkAgg")
from sys import platform as _platform
root = tk.Tk()
root.resizable(0, 0)
StochOGUI(root)
s = ttk.Style()
if _platform == "win32":
s.theme_use("vista")
elif _platform in [ "linux", "linux2" ]:
s.theme_use("alt")
elif _platform == "darwin":
s.theme_use("aqua")
root.mainloop()
def nice_labels(numbers):
suffixes = ['', 'K', 'M', 'G']
suff_len = []
## figure out which suffix gives us the shortest label length
for i, suff in enumerate( suffixes ):
test = [float(y)/(1000.0**i) for y in numbers]
labels = ["%d%s"% (int(y), suff) for y in test]
## make sure that in the new representation there are no
## degenerate cases
if len(set(labels)) == len(labels):
suff_len.append( (sum(map(len, labels)), i) )
## if we fail to find any satisfactory suffixes, just fall back to plain strings
if len(suff_len) == 0:
return list(map(str, numbers)), 0
else:
suff_len.sort()
i = suff_len[0][1]
labels = ["%d%s"% (int(float(y)/(1000.0**i)), suffixes[i]) for y in numbers]
return labels, i
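# Usage sketch (hypothetical values): nice_labels picks the shortest suffix
# that keeps every label distinct. For [1000, 2000, 3000] the 'K' suffix wins,
# so the call returns (['1K', '2K', '3K'], 1); if no suffix keeps the labels
# unique, the plain string representations and index 0 are returned instead.
demo_labels, demo_suffix_index = nice_labels([1000, 2000, 3000])
print(demo_labels, demo_suffix_index)  # ['1K', '2K', '3K'] 1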
def format(self, content: Optional[QqTag],
blanks_to_pars=True,
keep_end_pars=True) -> str:
"""
:param content: a QqTag or any iterable of QqTags
:param blanks_to_pars: if True, convert blank lines into paragraph breaks
:param keep_end_pars: if True, keep paragraph breaks at the end
:return: the text of the tag as a str
"""
if content is None:
return ""
out = []
for child in content:
if isinstance(child, str):
if blanks_to_pars:
out.append(self.blanks_to_pars(html_escape(
child, keep_end_pars)))
else:
out.append(html_escape(child))
else:
out.append(self.handle(child))
return "".join(out)
def url_for_chapter(self, index=None, label=None,
fromindex=None) -> str:
"""
Returns the URL for a chapter. Either the index or the label of
the target chapter has to be provided.
Optionally, fromindex can be provided; in that case the
function returns an empty string if the
target chapter coincides with the current one.
You can inherit from QqHTMLFormatter and override
url_for_chapter_by_index and url_for_chapter_by_label to
use e.g. Flask's url_for.
"""
assert index is not None or label is not None
if index is None:
index = self.label_to_chapter[label]
if fromindex is not None and fromindex == index:
# we are already on the right page
return ""
if label is None:
label = self.chapters[index].heading.find("label")
if not label:
return self.url_for_chapter_by_index(index)
return self.url_for_chapter_by_label(label.value)
def word_cloud(word_embedding_matrix, vocab, s, save_file='scatter.png'):
# numpy, pyplot and TSNE are presumably module-level imports in the original
# file; they are imported here so the snippet is self-contained.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
# vocab maps each word in s to its row index in word_embedding_matrix
words = [(i, vocab[i]) for i in s]
model = TSNE(n_components=2, random_state=0)
# Note that the following line might use a good chunk of RAM
tsne_embedding = model.fit_transform(word_embedding_matrix)
words_vectors = tsne_embedding[np.array([item[1] for item in words])]
plt.subplots_adjust(bottom = 0.1)
plt.scatter(
words_vectors[:, 0], words_vectors[:, 1], marker='o', cmap=plt.get_cmap('Spectral'))
for label, x, y in zip(s, words_vectors[:, 0], words_vectors[:, 1]):
plt.annotate(
label,
xy=(x, y), xytext=(-20, 20),
textcoords='offset points', ha='right', va='bottom',
fontsize=20,
# bbox=dict(boxstyle='round,pad=1.', fc='yellow', alpha=0.5),
arrowprops=dict(arrowstyle = '<-', connectionstyle='arc3,rad=0')
)
# save to the requested file before displaying
plt.savefig(save_file)
plt.show()
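# Usage sketch (hypothetical data): vocab maps each word to its row in
# word_embedding_matrix, and s lists the words to display.
import numpy as np
demo_rng = np.random.RandomState(0)
demo_vocab = {'w{}'.format(k): k for k in range(100)}  # toy vocabulary
demo_embeddings = demo_rng.rand(len(demo_vocab), 50)   # toy 50-d embeddings
word_cloud(demo_embeddings, demo_vocab, ['w0', 'w1', 'w2', 'w5'], save_file='word_scatter.png')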
def fix_query_reflength(sequence_length, queries, doubled):
"""
arguments:
<sequence_length> The reference FASTA length. When <doubled> is True it
should be 2x the actual reference length, since this
program takes a SAM file mapped against a concatenated
reference; otherwise it is doubled internally.
<queries> A list of SQL-type query strings, generated from argparse.
<doubled> Whether <sequence_length> already refers to the doubled
(concatenated) reference.
purpose:
This function takes in a list of queries to use for read filtering
for the redwood plot. It is often not advisable to plot all mapped reads,
since many of them are too small relative to the reference length. Also,
the point of a death star plot is to show continuity of a circular
reference, so short reads aren't very helpful there either.
Currently, this function only recognizes the keyword 'reflength', which it
replaces in place with the actual (un-doubled) reference length.
"""
if not doubled:
sequence_length = int(sequence_length * 2)
for i in range(len(queries)):
if 'reflength' in queries[i].split():
queries[i] = queries[i].replace('reflength', str(int(sequence_length/2)))
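# Usage sketch (hypothetical query): the 'reflength' keyword in each query is
# replaced in place with the un-doubled reference length.
demo_queries = ["aln_length >= reflength * 0.5"]
fix_query_reflength(30000, demo_queries, doubled=True)
print(demo_queries)  # ['aln_length >= 15000 * 0.5']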
def fast_run(args):
model = Model(args)
feed = {}
#feed[model.train_batch]=False
xx,ss,yy=model.inputs(args.input_path)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
tf.train.start_queue_runners(sess=sess)
xxx,sss,yyy=sess.run([xx,ss,yy])
#print(yyy)
#print(yyy[1])
print('len:',xxx.shape)
import matplotlib.cm as cm
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.figure(figsize=(16,4))
#plt.imshow()
plt.imshow(np.asarray(xxx[0]).reshape((36,90))+0.5, interpolation='nearest', aspect='auto', cmap=cm.jet)
plt.savefig("img.jpg")
plt.clf() ; plt.cla()
def plot_tsne(z_mu, classes, name):
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
model_tsne = TSNE(n_components=2, random_state=0)
z_states = z_mu.data.cpu().numpy()
z_embed = model_tsne.fit_transform(z_states)
classes = classes.data.cpu().numpy()
fig666 = plt.figure()
for ic in range(10):
ind_vec = np.zeros_like(classes)
ind_vec[:, ic] = 1
ind_class = classes[:, ic] == 1
color = plt.cm.Set1(ic)
plt.scatter(z_embed[ind_class, 0], z_embed[ind_class, 1], s=10, color=color)
plt.title("Latent Variable T-SNE per Class")
fig666.savefig('./vae_results/'+str(name)+'_embedding_'+str(ic)+'.png')
fig666.savefig('./vae_results/'+str(name)+'_embedding.png')
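# Usage sketch (hypothetical tensors): z_mu holds latent means and classes is
# a one-hot label matrix; both are torch tensors, as implied by .data.cpu().
# The function writes into ./vae_results/, so that directory is created first.
import os
import torch
os.makedirs('./vae_results', exist_ok=True)
demo_z_mu = torch.randn(500, 20)                            # 500 samples, 20-d latents
demo_classes = torch.eye(10)[torch.randint(0, 10, (500,))]  # one-hot class labels
plot_tsne(demo_z_mu, demo_classes, 'demo')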
def _set_matplotlib_default_backend():
"""
matplotlib will try to print to a display if one is available, but we don't
want to run it in interactive mode. We tried setting the backend to 'Agg'
before importing, but it was still resulting in issues, so we replace the
existing backend with 'agg' in the default matplotlibrc. This is a hack
until we can find a better solution.
"""
if _matplotlib_installed():
import matplotlib
matplotlib.use('Agg', force=True)
config = matplotlib.matplotlib_fname()
with file_transaction(config) as tx_out_file:
with open(config) as in_file, open(tx_out_file, "w") as out_file:
for line in in_file:
if line.split(":")[0].strip() == "backend":
out_file.write("backend: agg\n")
else:
out_file.write(line)
# Source: train_lstm_multivariate.py (project: TensorFlow-Time-Series-Examples, author: hzy46)
def __init__(self, num_units, num_features, dtype=tf.float32):
"""Initialize/configure the model object.
Note that we do not start graph building here. Rather, this object is a
configurable factory for TensorFlow graphs which are run by an Estimator.
Args:
num_units: The number of units in the model's LSTMCell.
num_features: The dimensionality of the time series (features per
timestep).
dtype: The floating point data type to use.
"""
super(_LSTMModel, self).__init__(
# Pre-register the metrics we'll be outputting (just a mean here).
train_output_names=["mean"],
predict_output_names=["mean"],
num_features=num_features,
dtype=dtype)
self._num_units = num_units
# Filled in by initialize_graph()
self._lstm_cell = None
self._lstm_cell_run = None
self._predict_from_lstm_output = None
def save_plot(niters, loss, args):
print('Saving training loss-iteration figure...')
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
name = 'Train-{}_hs-{}_lr-{}_bs-{}'.format(args.train_file, args.hs,
args.lr, args.batch_size)
plt.title(name)
plt.plot(niters, loss)
plt.xlabel('iteration')
plt.ylabel('loss')
plt.savefig(name + '.jpg')
print('{} saved!'.format(name + '.jpg'))
except ImportError:
print('matplotlib not installed and no figure is saved.')
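# Usage sketch (hypothetical values): save_plot only reads train_file, hs, lr
# and batch_size from args, so a simple namespace is enough for a demo.
from argparse import Namespace
demo_args = Namespace(train_file='toy.txt', hs=100, lr=0.025, batch_size=16)
save_plot([0, 100, 200], [2.31, 1.87, 1.42], demo_args)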
def redraw(self):
column_index1 = self.combo_box1.GetSelection()
column_index2 = self.combo_box2.GetSelection()
if column_index1 != wx.NOT_FOUND and column_index1 != 0 and \
column_index2 != wx.NOT_FOUND and column_index2 != 0:
# subtract one to remove the neutral selection index
column_index1 -= 1
column_index2 -= 1
df = self.df_list_ctrl.get_filtered_df()
# It looks like using pandas dataframe.plot causes something weird to
# crash in wx internally. Therefore we use plain axes.plot functionality.
# column_name1 = self.columns[column_index1]
# column_name2 = self.columns[column_index2]
# df.plot(kind='scatter', x=column_name1, y=column_name2)
if len(df) > 0:
self.axes.clear()
self.axes.plot(df.iloc[:, column_index1].values, df.iloc[:, column_index2].values, 'o', clip_on=False)
self.canvas.draw()
def saveAttention(input_sentence, attentions, outpath):
# Set up figure with colorbar
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
fig = plt.figure(figsize=(24,10), )
ax = fig.add_subplot(111)
cax = ax.matshow(attentions.cpu().numpy(), cmap='bone')
fig.colorbar(cax)
if input_sentence:
# Set up axes
ax.set_yticklabels([' '] + list(input_sentence) + [' '])
# Show label at every tick
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.tight_layout()
plt.savefig(outpath)
plt.close('all')
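# Usage sketch (hypothetical weights): attentions is expected to be a torch
# tensor, as implied by the .cpu().numpy() call above; the row/column counts
# here are arbitrary demo values.
import torch
demo_attentions = torch.rand(3, 5)
saveAttention(['a', 'toy', 'sentence'], demo_attentions, 'attention_toy.png')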
def __init__(self, gulp_size=1048576, core=-1):
"""
The block reads a 1d timeseries from its input ring and writes a
copy of it, cleaned of RFI, to its output ring.
@param[in] gulp_size How many bytes of the ring to
read at once.
@param[in] core Which OpenMP core to use for
this block. (-1 is any)
"""
super(KurtosisBlock, self).__init__()
self.gulp_size = gulp_size
self.core = core
self.output_header = {}
self.settings = {}
self.nchan = 1
self.dtype = np.uint8
def __init__(
self, bins, period=1e-3,
gulp_size=4096 * 256, dispersion_measure=0,
core=-1):
"""
@param[in] bins The total number of bins to fold into
@param[in] period Period to fold over (s)
@param[in] gulp_size How many bytes of the ring to
read at once.
@param[in] dispersion_measure DM of the desired
source (pc cm^-3)
@param[in] core Which OpenMP core to use for
this block. (-1 is any)
"""
super(FoldBlock, self).__init__()
self.bins = bins
self.gulp_size = gulp_size
self.period = period
self.dispersion_measure = dispersion_measure
self.core = core
self.data_settings = {}
def __init__(
self, ring, imagename,
core=-1, gulp_nframe=4096):
"""
@param[in] ring Ring containing a multichannel
timeseries
@param[in] imagename Filename to store the
waterfall image
@param[in] core Which OpenMP core to use for
this block. (-1 is any)
@param[in] gulp_nframe How many frames of the ring to
read at once.
"""
self.ring = ring
self.imagename = imagename
self.core = core
self.gulp_nframe = gulp_nframe
self.header = {}
def get_palette(self):
"""
Return a palette that is suitable for the data.
"""
# choose the "Paired" palette if the number of grouping factor
# levels is even and below 13, or the "Set3" palette otherwise:
if len(self._levels) == 0:
if len(self._groupby) == 1:
return sns.color_palette("Paired")[0]
else:
palette_name = "Paired"
elif len(self._levels[-1]) in (2, 4, 6):
palette_name = "Paired"
else:
# otherwise, use "Paired" if there are two grouping factors, or the
# sequential "RdPu" (red-to-purple) palette if not:
palette_name = "Paired" if len(self._groupby) == 2 else "RdPu"
return sns.color_palette(palette_name)
def optimiz(currencies, debug):
currencies = sorted(currencies)
if len(currencies) < 2 or len(currencies) > 10:
return {"error": "2 to 10 currencies"}
max_workers = 4 if sys.version_info[1] < 5 else None
executor = ThreadPoolExecutor(max_workers)
data = dict(future.result() for future in wait([executor.submit(get_ochl, cur) for cur in currencies]).done)
data = [data[cur] for cur in currencies]
errors = [x['error'] for x in data if 'error' in x]
if errors:
return {"error": "Currencies not found : " + str(errors)}
weights, m, s, a, b = markowitz_optimization(data, debug)
if debug:
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
plt.plot(s, m, 'o', markersize=1)
plt.plot(b, a, 'or')
fig.savefig("chalu.png")
result = dict()
for i, cur in enumerate(currencies):
result[cur] = weights[i]
return {"result": result}
def frame_similarity(frame1,frame2):
similarity = 1
if 'Type' in frame1:
if frame1['Type'] != frame2['Type']:
similarity = 0.0
if similarity == 1:
if 'PlaceMention' in frame1:
# if PlaceMention is normalized use simple string comparison
if not Levenshtein_arg:
if frame1['PlaceMention'] != frame2['PlaceMention']:
similarity = 0.0
else:
# PlaceMention is not normalized, so use the Levenshtein ratio
similarity = Levenshtein.ratio(frame1['PlaceMention'], frame2['PlaceMention'])
#print("similarity: ", similarity)
return similarity
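# Usage sketch (hypothetical frames): Levenshtein_arg is a module-level flag
# in the original script; it is defined here (False) so the sketch is
# self-contained and PlaceMention values are compared as plain strings.
Levenshtein_arg = False
demo_frame_a = {'Type': 'flood', 'PlaceMention': 'Jakarta'}
demo_frame_b = {'Type': 'flood', 'PlaceMention': 'Bandung'}
print(frame_similarity(demo_frame_a, demo_frame_b))  # 0.0: same Type, different place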
def ensure_pyplot(self):
"""
Ensures that pyplot has been imported into the embedded IPython shell.
Also, makes sure to set the backend appropriately if not set already.
"""
# We are here if the @figure pseudo decorator was used. Thus, it's
# possible that we could be here even if python_mplbackend were set to
# `None`. That's also strange and perhaps worthy of raising an
# exception, but for now, we just set the backend to 'agg'.
if not self._pyplot_imported:
if 'matplotlib.backends' not in sys.modules:
# Then ipython_matplotlib was set to None but there was a
# call to the @figure decorator (and ipython_execlines did
# not set a backend).
#raise Exception("No backend was set, but @figure was used!")
import matplotlib
matplotlib.use('agg')
# Always import pyplot into embedded shell.
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
self._pyplot_imported = True
# Source: test_yl.py (project: Automatic_Group_Photography_Enhancement, author: Yuliang-Zou)
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
default='VGGnet_test')
parser.add_argument('--model', dest='model', help='Model path',
default=' ')
parser.add_argument('--imdb', dest='imdb', default='voc_2007_test')
args = parser.parse_args()
return args
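# Usage sketch: parse_args() reads sys.argv, so a demo invocation sets it
# explicitly; argparse is presumably imported at module level in the original
# script and is imported here so the sketch stands on its own.
import argparse
import sys
sys.argv = ['demo.py', '--gpu', '0', '--net', 'VGGnet_test']
demo_args = parse_args()
print(demo_args.gpu_id, demo_args.demo_net)  # 0 VGGnet_test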
def plot_attention(attention_matrix: np.ndarray, source_tokens: List[str], target_tokens: List[str], filename: str):
"""
Uses matplotlib for creating a visualization of the attention matrix.
:param attention_matrix: The attention matrix.
:param source_tokens: A list of source tokens.
:param target_tokens: A list of target tokens.
:param filename: The file to which the attention visualization will be written.
"""
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
assert attention_matrix.shape[0] == len(target_tokens)
plt.imshow(attention_matrix.transpose(), interpolation="nearest", cmap="Greys")
plt.xlabel("target")
plt.ylabel("source")
plt.gca().set_xticks([i for i in range(0, len(target_tokens))])
plt.gca().set_yticks([i for i in range(0, len(source_tokens))])
plt.gca().set_xticklabels(target_tokens, rotation='vertical')
plt.gca().set_yticklabels(source_tokens)
plt.tight_layout()
plt.savefig(filename)
logger.info("Saved alignment visualization to " + filename)
def extract_thumbnail_number(text):
""" Pull out the thumbnail image number specified in the docstring. """
# check whether the user has specified a specific thumbnail image
pattr = re.compile(
r"^\s*#\s*sphinx_gallery_thumbnail_number\s*=\s*([0-9]+)\s*$",
flags=re.MULTILINE)
match = pattr.search(text)
if match is None:
# by default, use the first figure created
thumbnail_number = 1
else:
thumbnail_number = int(match.groups()[0])
return thumbnail_number
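# Usage sketch: the sphinx_gallery_thumbnail_number comment selects which
# figure becomes the thumbnail; without it the first figure is used. The re
# module is presumably imported at module level in the original file.
import re
demo_text = "# sphinx_gallery_thumbnail_number = 3\nimport matplotlib.pyplot as plt\n"
print(extract_thumbnail_number(demo_text))              # 3
print(extract_thumbnail_number("plt.plot([1, 2, 3])"))  # 1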
# Source: variational_autoencoder_deconv_mgpu.py (project: keras_experiments, author: avolkov1)
def parser_(desc):
parser = ap.ArgumentParser(description=desc)
parser.add_argument(
'--mgpu', action='store', nargs='?', type=int,
const=-1, # if mgpu is specified but value not provided then -1
# if mgpu is not specified then defaults to 0 - single gpu
# mgpu = 0 if getattr(args, 'mgpu', None) is None else args.mgpu
default=ap.SUPPRESS,
help='Run on multiple-GPUs using all available GPUs on a system.\n'
'If not passed does not use multiple GPU. If passed uses all GPUs.\n'
'Optionally specify a number to use that many GPUs. Another\n'
'approach is to specify CUDA_VISIBLE_DEVICES=0,1,... when calling\n'
'script and specify --mgpu to use this specified device list.\n'
'This option is only supported with TensorFlow backend.\n')
parser.add_argument('--epochs', type=int, default=5,
help='Number of epochs to run training for.')
args = parser.parse_args()
return args
def plot(self, filename):
r"""Save an image file of the transfer function.
This function loads up matplotlib, plots the transfer function and saves.
Parameters
----------
filename : string
The file to save out the plot as.
Examples
--------
>>> tf = TransferFunction( (-10.0, -5.0) )
>>> tf.add_gaussian(-9.0, 0.01, 1.0)
>>> tf.plot("sample.png")
"""
import matplotlib
matplotlib.use("Agg")
import pylab
pylab.clf()
pylab.plot(self.x, self.y, 'xk-')
pylab.xlim(*self.x_bounds)
pylab.ylim(0.0, 1.0)
pylab.savefig(filename)
def get_nearest_point(self, points):
"""
If there is more than one intersection point, use the nearest one as
the intersection point.
@param points: A list of points to be checked for the nearest one
@return: Returns the nearest Point
"""
if len(points) == 1:
Point = points[0]
else:
mindis = points[0].distance(self)
Point = points[0]
for i in range(1, len(points)):
curdis = points[i].distance(self)
if curdis < mindis:
mindis = curdis
Point = points[i]
return Point
def _prepare_projectors(params):
""" Helper for setting up the projectors for epochs browser """
import matplotlib.pyplot as plt
import matplotlib as mpl
epochs = params['epochs']
projs = params['projs']
if len(projs) > 0 and not epochs.proj:
ax_button = plt.subplot2grid((10, 15), (9, 14))
opt_button = mpl.widgets.Button(ax_button, 'Proj')
callback_option = partial(_toggle_options, params=params)
opt_button.on_clicked(callback_option)
params['opt_button'] = opt_button
params['ax_button'] = ax_button
# As here code is shared with plot_evoked, some extra steps:
# first the actual plot update function
params['plot_update_proj_callback'] = _plot_update_epochs_proj
# then the toggle handler
callback_proj = partial(_toggle_proj, params=params)
# store these for use by callbacks in the options figure
params['callback_proj'] = callback_proj
callback_proj('none')