def to_xaxis(self, value):
if self.axis_has_datelocator(self.axes.xaxis):
return date2num(value)
else:
return value
def __call__(self, x):
"""
Format a sequence of inputs
Parameters
----------
x : array
Input
    Returns
    -------
out : list
List of strings.
"""
# Formatter timezone
    if self.tz is None and len(x):
        tz = self.formatter.tz = x[0].tzinfo
        if not all(value.tzinfo == tz for value in x):
            msg = ("Dates have different time zones. "
                   "Chose `{}`, the time zone of the first date. "
                   "To use a different time zone, create a "
                   "formatter and pass the time zone.")
            warn(msg.format(tz.zone))
# The formatter is tied to axes and takes
# breaks in ordinal format.
x = [date2num(val) for val in x]
return _format(self.formatter, x)
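A quick sketch (not from the original project) of the conversion this formatter relies on: matplotlib's DateFormatter consumes ordinal floats, which is why the dates above are passed through date2num before formatting.

# Hedged usage sketch: DateFormatter formats matplotlib ordinals,
# so datetimes go through date2num first, as in __call__ above.
from datetime import datetime
from matplotlib.dates import DateFormatter, date2num

fmt = DateFormatter('%Y-%m-%d')
labels = [fmt(date2num(datetime(2020, 1, d))) for d in (1, 15)]
# labels == ['2020-01-01', '2020-01-15']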
def transform(x):
"""
Transform from date to a numerical format
"""
try:
x = date2num(x)
except AttributeError:
# numpy datetime64
        # This is not ideal because the operations do not
        # preserve the np.datetime64 type. We may need
        # a datetime64_trans.
x = [pd.Timestamp(item) for item in x]
x = date2num(x)
return x
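The except branch exists because older matplotlib releases could not convert raw numpy.datetime64 values; a small illustration of the workaround (a sketch, assuming pandas and numpy are available as in the snippet):

import numpy as np
import pandas as pd
from matplotlib.dates import date2num

# Older matplotlib raised AttributeError on raw datetime64 input;
# wrapping each value in pd.Timestamp sidesteps that.
dates64 = np.array(['2021-03-01', '2021-03-02'], dtype='datetime64[D]')
ordinals = date2num([pd.Timestamp(d) for d in dates64])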
def make_efficiency_date(
total_data,
avg_data,
f_name,
title=None,
x_label=None,
y_label=None,
x_ticks=None,
y_ticks=None):
fig = plt.figure()
if title is not None:
plt.title(title, fontsize=16)
    if x_label is not None:
        plt.xlabel(x_label)
    if y_label is not None:
        plt.ylabel(y_label)
v_date = []
v_val = []
for data in total_data:
dates = dt.date2num(datetime.datetime.strptime(data[0], '%H:%M'))
to_int = round(float(data[1]))
plt.plot_date(dates, data[1], color=plt.cm.brg(to_int))
for data in avg_data:
dates = dt.date2num(datetime.datetime.strptime(data[0], '%H:%M'))
v_date.append(dates)
v_val.append(data[1])
plt.plot_date(v_date, v_val, "^y-", label='Average')
plt.legend()
plt.savefig(f_name)
plt.close(fig)
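A hypothetical call, assuming each element of total_data and avg_data is a ('%H:%M' time string, numeric value) pair as the parsing above expects:

# Made-up data; file name and labels are illustrative only.
total = [('09:00', 0.42), ('09:05', 0.57), ('09:10', 0.35)]
avg = [('09:00', 0.45), ('09:05', 0.45), ('09:10', 0.45)]
make_efficiency_date(total, avg, 'efficiency.png',
                     title='Efficiency', x_label='Time', y_label='Ratio')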
test.py (project: Quantitative-Trading-System, author: carlche15)
def __init__(self, ax, x, y, ttype, ith=0, formatter=fmt):
try:
x = np.asarray(x, dtype='float')
except (TypeError, ValueError):
x = np.asarray(mdates.date2num(x), dtype='float')
y = np.asarray(y, dtype='float')
mask = ~(np.isnan(x) | np.isnan(y))
x = x[mask]
y = y[mask]
self._points = np.column_stack((x, y))
# All plots use the same pointer now
# if(ith==0):
    self.offsets = (-20, 20)
# else:
# self.offsets=(-20-10*ith,20+25*ith)
    self.type = ttype
    # Keep only y values within 3 standard deviations of the mean when
    # computing the y-to-x scale used to normalize KDTree distances.
    y = y[np.abs(y - y.mean()) <= 3 * y.std()]
    self.scale = x.ptp()
    self.scale = y.ptp() / self.scale if self.scale else 1
self.tree = spatial.cKDTree(self.scaled(self._points))
self.formatter = formatter
self.ax = ax
self.fig = ax.figure
self.ax.xaxis.set_label_position('top')
self.dot = ax.scatter(
[x.min()], [y.min()], s=130, color='green', alpha=0.7)
self.annotation = self.setup_annotation()
plt.connect('motion_notify_event', self)
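The class calls a scaled() helper that is not part of this snippet. A plausible minimal reconstruction, consistent with how self.scale is computed above (y rescaled so both axes contribute comparably to cKDTree distances):

def scaled(self, points):
    # Hypothetical reconstruction: scale the y coordinate so that
    # Euclidean distances in the cKDTree weight both axes equally.
    points = np.asarray(points, dtype='float')
    return points * (1, self.scale)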
def plot_states_and_var(data, hidden_states, cmap=None, columns=None, by='Activity'):
"""
Make a plot of the data and the states
Parameters
----------
data : pandas DataFrame
Data to plot
    hidden_states : iterable
        The hidden states corresponding to the time steps
columns : list, optional
Which columns to plot
by : str
The column to group on
"""
fig, ax = plt.subplots(figsize=(15, 5))
if columns is None:
columns = data.columns
df = data[columns].copy()
stateseq = np.array(hidden_states)
stateseq_norep, durations = rle(stateseq)
datamin, datamax = np.array(df).min(), np.array(df).max()
y = np.array(
[datamin, datamax])
maxstate = stateseq.max() + 1
x = np.hstack(([0], durations.cumsum()[:-1], [len(df.index) - 1]))
C = np.array(
[[float(state) / maxstate] for state in stateseq_norep]).transpose()
ax.set_xlim((min(x), max(x)))
if cmap is None:
num_states = max(hidden_states) + 1
colormap, cmap = get_color_map(num_states)
pc = ax.pcolorfast(x, y, C, vmin=0, vmax=1, alpha=0.3, cmap=cmap)
    plt.plot(df.values)  # df.as_matrix() is deprecated in newer pandas
locator = AutoDateLocator()
locator.create_dummy_axis()
num_index = pd.Index(df.index.map(date2num))
ticks_num = locator.tick_values(min(df.index), max(df.index))
ticks = [num_index.get_loc(t) for t in ticks_num]
plt.xticks(ticks, df.index.strftime('%H:%M')[ticks], rotation='vertical')
cb = plt.colorbar(pc)
cb.set_ticks(np.arange(1./(2*cmap.N), 1, 1./cmap.N))
cb.set_ticklabels(np.arange(0, cmap.N))
# Plot the activities
if by is not None:
actseq = np.array(data[by])
sca = ax.scatter(
np.arange(len(hidden_states)), #data.index,
np.ones_like(hidden_states) * datamax,
c=actseq,
edgecolors='none'
)
plt.show()
return fig, ax
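rle() is called above but not defined in this snippet; a minimal run-length encoding consistent with its usage (the value of each run, plus run durations) might look like:

def rle(stateseq):
    # Run-length encode: the value of each run and the run lengths.
    stateseq = np.asarray(stateseq)
    starts = np.concatenate(([0], np.flatnonzero(np.diff(stateseq)) + 1))
    durations = np.diff(np.concatenate((starts, [len(stateseq)])))
    return stateseq[starts], durations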
def plot_resid(d, savename='resfig1.png'):
"""
Plots the residual frequency after the first wipe using the TLE velocity.
"""
flim = [-2.e3, 2.e3]
t = d['tvec']
dates = [dt.datetime.fromtimestamp(ts) for ts in t]
datenums = md.date2num(dates)
xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
fig1 = plt.figure(figsize=(7, 9))
doppler_residual = sp.interpolate.interp1d(d['tvec'],d['dopfit'])
fvec = d["fvec"]
res0 = d["res0"]
res1 = d["res1"]
plt.subplot(211)
mesh = plt.pcolormesh(datenums, fvec, sp.transpose(10.*sp.log10(res0+1e-12)), vmin=-5, vmax=25)
plt.plot(datenums, (150.0/400.0)*doppler_residual(t), "r--", label="doppler resid")
ax = plt.gca()
ax.xaxis.set_major_formatter(xfmt)
plt.ylim(flim)
plt.subplots_adjust(bottom=0.2)
plt.xticks(rotation=25)
plt.xlabel("UTC")
plt.ylabel("Frequency (Hz)")
plt.title("Power ch0 (dB) %1.2f MHz"%(150.012))
plt.legend()
plt.colorbar(mesh, ax=ax)
# quicklook spectra of residuals spectra along with measured Doppler residual from second channel.
plt.subplot(212)
mesh = plt.pcolormesh(datenums, fvec, sp.transpose(10.*sp.log10(res1+1e-12)), vmin=-5, vmax=25)
plt.plot(datenums, doppler_residual(t), "r--", label="doppler resid")
ax = plt.gca()
ax.xaxis.set_major_formatter(xfmt)
plt.ylim(flim)
plt.xlabel("UTC")
plt.ylabel("Frequency (Hz)")
plt.title("Power ch1 (dB), %1.2f MHz"%(400.032))
plt.subplots_adjust(bottom=0.2)
plt.xticks(rotation=25)
plt.legend()
plt.colorbar(mesh, ax=ax)
plt.tight_layout()
print('Saving residual plots: '+savename)
plt.savefig(savename, dpi=300)
plt.close(fig1)
def parse_yahoo_historical_ochl(fh, adjusted=True, asobject=False):
"""Parse the historical data in file handle fh from yahoo finance.
Parameters
----------
adjusted : bool
If True (default) replace open, close, high, low prices with
their adjusted values. The adjustment is by a scale factor, S =
adjusted_close/close. Adjusted prices are actual prices
multiplied by S.
Volume is not adjusted as it is already backward split adjusted
by Yahoo. If you want to compute dollars traded, multiply volume
by the adjusted close, regardless of whether you choose adjusted
= True|False.
asobject : bool or None
If False (default for compatibility with earlier versions)
return a list of tuples containing
d, open, close, high, low, volume
If None (preferred alternative to False), return
a 2-D ndarray corresponding to the list of tuples.
Otherwise return a numpy recarray with
date, year, month, day, d, open, close, high, low,
volume, adjusted_close
        where d is a floating point representation of the date,
as returned by date2num, and date is a python standard
library datetime.date instance.
The name of this kwarg is a historical artifact. Formerly,
True returned a cbook Bunch
holding 1-D ndarrays. The behavior of a numpy recarray is
very similar to the Bunch.
"""
return _parse_yahoo_historical(fh, adjusted=adjusted, asobject=asobject,
ochl=True)
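A worked example of the scale-factor adjustment the docstring describes, with made-up prices:

# S = adjusted_close / close; each price is multiplied by S.
close, adjusted_close = 100.0, 50.0       # e.g. after a 2:1 split
S = adjusted_close / close                # 0.5
open_, high, low = 98.0, 101.0, 97.0
adj = [p * S for p in (open_, close, high, low)]  # [49.0, 50.0, 50.5, 48.5]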
def parse_yahoo_historical_ohlc(fh, adjusted=True, asobject=False):
"""Parse the historical data in file handle fh from yahoo finance.
Parameters
----------
adjusted : bool
If True (default) replace open, high, low, close prices with
their adjusted values. The adjustment is by a scale factor, S =
adjusted_close/close. Adjusted prices are actual prices
multiplied by S.
Volume is not adjusted as it is already backward split adjusted
by Yahoo. If you want to compute dollars traded, multiply volume
by the adjusted close, regardless of whether you choose adjusted
= True|False.
asobject : bool or None
If False (default for compatibility with earlier versions)
return a list of tuples containing
d, open, high, low, close, volume
If None (preferred alternative to False), return
a 2-D ndarray corresponding to the list of tuples.
Otherwise return a numpy recarray with
date, year, month, day, d, open, high, low, close,
volume, adjusted_close
        where d is a floating point representation of the date,
as returned by date2num, and date is a python standard
library datetime.date instance.
The name of this kwarg is a historical artifact. Formerly,
True returned a cbook Bunch
holding 1-D ndarrays. The behavior of a numpy recarray is
very similar to the Bunch.
"""
return _parse_yahoo_historical(fh, adjusted=adjusted, asobject=asobject,
ochl=False)
def time_crop(f_init_date, f_final_date, delta, f_time_array, data_array, multiple=False):
"""
Crop the data_array between f_init_date and f_final_date.
:param f_init_date: Float. Initial date
:param f_final_date: Float. Final date
:param delta: a datetime instance to step in dates
:param f_time_array: Float array. All dates of data_array
:param data_array: The data to be cropped. Its shape must be of the form (time, ...)
:param multiple: False, just one data_array. True a list of data_arrays.
:return: Cropped data and according datetime list.
"""
i_start = np.where(np.array(f_time_array) >= f_init_date)[0][0]
i_end = np.where(np.array(f_time_array) <= f_final_date + 23 / 24.)[0][-1]
# TODO: Refactor this while. You can transform delta and operate only on f_dates and then convert the entire list.
d_date = num2date(f_init_date).replace(minute=0)
d_Time = []
f_Time = []
while f_init_date <= f_final_date + 23 / 24.:
d_Time.append(d_date)
f_Time.append(date2num(d_date))
d_date = d_date + delta
f_init_date = date2num(d_date)
if multiple:
all_cropped_data = []
for data in data_array:
new_shape = [len(d_Time)]
new_shape.extend(list(data.shape[1:]))
new_shape = tuple(new_shape)
cropped_data = np.empty(new_shape)
cropped_data.fill(np.nan)
# TODO: Use find_nearest
cropped_data[np.in1d(f_Time, f_time_array[i_start:i_end + 1])] = data[i_start:i_end + 1]
all_cropped_data.append(cropped_data)
return all_cropped_data, d_Time
else:
new_shape = [len(d_Time)]
new_shape.extend(list(data_array.shape[1:]))
new_shape = tuple(new_shape)
cropped_data = np.empty(new_shape)
cropped_data.fill(np.nan)
cropped_data[np.in1d(f_Time, f_time_array[i_start:i_end + 1])] = data_array[i_start:i_end + 1]
return cropped_data, d_Time
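A hedged usage sketch; f_time_array and data_array are assumed to exist already, with dates stored as matplotlib ordinals (floats) as elsewhere in these snippets:

import datetime
from matplotlib.dates import date2num

start = date2num(datetime.datetime(2020, 1, 1))
end = date2num(datetime.datetime(2020, 1, 7))
cropped, d_times = time_crop(start, end, datetime.timedelta(hours=1),
                             f_time_array, data_array)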
def __generate_figure(self, user, user_name):
# Set up figure
fig = plt.figure(figsize=(8, 6), dpi=150)
fig.suptitle('{}\'s activity'.format(user_name), fontsize=20)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(212)
# Plot 24 hour participation data, accumulated over all time
t = [x for x in range(24)]
y = [user['average_day_cycle'][x] for x in t]
ax1.plot(t, y)
y = [user['recent_day_cycle'][x] for x in t]
ax1.plot(t, y)
y = [user['weekly_day_cycle'][x] for x in t]
ax1.plot(t, y)
ax1.set_xlim([0, 24])
ax1.grid()
ax1.set_title('Daily Activity')
ax1.set_xlabel('Hour (UTC)')
ax1.set_ylabel('Message Count per Hour')
ax1.legend(['Average', 'Last Day', 'Last Week'])
# Create pie chart of the most active channels
top5 = sorted(user['participation_per_channel'], key=user['participation_per_channel'].get, reverse=True)[:5]
labels = top5
sizes = [user['participation_per_channel'][x] for x in top5]
explode = [0] * len(top5)
explode[0] = 0.1
ax2.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True)
# Create overall activity
dates, values = zip(*sorted(user['participation_per_day'].items(), key=lambda dv: dv[0]))
dates = [datetime.fromtimestamp(float(x)) for x in dates]
dates = date2num(dates)
if len(values) > 80:
ax3.bar(dates, values, width=1)
else:
ax3.bar(dates, values)
ax3.xaxis_date()
ax3.set_title('Total Activity')
ax3.set_xlim([dates[0], dates[-1]])
ax3.set_ylabel('Message Count per Day')
ax3.grid()
spacing = 2
for label in ax3.xaxis.get_ticklabels()[::spacing]:
label.set_visible(False)
image_file_name = path.join(self.cache_dir, user_name + '.png')
fig.savefig(image_file_name)
return image_file_name
def plot_tick_range(tick_path, range_start, range_end):
    if not os.path.exists(tick_path):
        print(tick_path + ' file does not exist')
        quit()
date_cols = ['RateDateTime']
df = pd.read_csv(tick_path, usecols=['RateDateTime','RateBid','RateAsk'])
start_index = tfh.find_index_closest_date(range_start, tick_path)
end_index = tfh.find_index_closest_date(range_end, tick_path)
    # don't proceed if we didn't find the indices
if (start_index is None or end_index is None):
print('start_index or end_index was None')
quit()
ticks_s = df.iloc[start_index:end_index]
ticks = (ticks_s['RateAsk'] + ticks_s['RateBid']) / 2.0
dates_dt = [dt.datetime.strptime(str.split(x, '.')[0], '%Y-%m-%d %H:%M:%S') for x in ticks_s['RateDateTime'].values]
dates = mdates.date2num(dates_dt)
#fig = plt.figure()
#ax1 = plt.subplot2grid((1,1), (0,0))
plt.plot_date(dates, ticks, 'b-')
# candlestick_ohlc(ax1, ohlc, width=0.0004, colorup='#77d879', colordown='#db3f3f')
# for label in ax1.xaxis.get_ticklabels():
# label.set_rotation(45)
# ax1.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d %H:%M'))
# ax1.xaxis.set_major_locator(mticker.MaxNLocator(10))
# ax1.grid(True)
# plt.xlabel('Date')
# plt.ylabel('Price')
# plt.title(ohlc_path)
# plt.legend()
# plt.subplots_adjust(left=0.09, bottom=0.20, right=0.94, top=0.90, wspace=0.2, hspace=0)
#plt.show()
# plot_ohlc_range
def randomDate(dt1, dt2, N=1, tzinfo=False, sorted=False):
"""
    Return one (or many) random datetimes between two given dates, under the convention dt1 <= rand < dt2
Parameters
==========
    dt1 : datetime.datetime
        start date for the random date
    dt2 : datetime.datetime
        stop date for the random date
Other Parameters
================
    N : int (optional)
        the number of random dates to generate (default=1)
tzinfo : bool (optional)
maintain the tzinfo of the input datetimes (default=False)
sorted : bool (optional)
return the times sorted (default=False)
Returns
=======
    out : datetime.datetime or numpy.ndarray of datetime.datetime
        the randomly generated datetime(s)
Examples
========
"""
from matplotlib.dates import date2num, num2date
    if dt1.tzinfo != dt2.tzinfo:
        raise ValueError('tzinfo for the two input datetimes must match')
dt1n = date2num(dt1)
dt2n = date2num(dt2)
rnd_tn = np.random.uniform(dt1n, dt2n, size=N)
rnd_t = num2date(rnd_tn)
if not tzinfo:
tzinfo = None
else:
tzinfo = dt1.tzinfo
rnd_t = np.asarray([val.replace(tzinfo=tzinfo) for val in rnd_t])
if sorted:
rnd_t.sort()
return rnd_t
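A minimal usage example (the docstring's Examples section is empty upstream):

import datetime
dt1 = datetime.datetime(2020, 1, 1)
dt2 = datetime.datetime(2020, 12, 31)
times = randomDate(dt1, dt2, N=3, sorted=True)
# -> array of 3 random datetimes between dt1 and dt2, in ascending order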
def logspace(min, max, num, **kwargs):
"""
    Return log-spaced bins. Same as numpy.logspace, except min and max
    are the actual endpoint values rather than log10(min) and log10(max).
Parameters
==========
min : float
minimum value
max : float
maximum value
num : integer
number of log spaced bins
Other Parameters
================
    kwargs : dict
        additional keywords passed into numpy.logspace
Returns
=======
out : array
log-spaced bins from min to max in a numpy array
Notes
=====
This function works on both numbers and datetime objects
Examples
========
>>> import spacepy.toolbox as tb
>>> tb.logspace(1, 100, 5)
array([ 1. , 3.16227766, 10. , 31.6227766 , 100. ])
See Also
========
geomspace
linspace
"""
if isinstance(min, datetime.datetime):
from matplotlib.dates import date2num, num2date
ans = num2date(np.logspace(np.log10(date2num(min)), np.log10(date2num(max)), num, **kwargs))
ans = spt.no_tzinfo(ans)
return np.array(ans)
else:
return np.logspace(np.log10(min), np.log10(max), num, **kwargs)
def linspace(min, max, num, **kwargs):
"""
    Return linear-spaced bins. Same as numpy.linspace, except it also
    works with datetime objects and is faster.
Parameters
==========
min : float, datetime
minimum value
max : float, datetime
maximum value
num : integer
number of linear spaced bins
Other Parameters
================
    kwargs : dict
        additional keywords passed into numpy.linspace
Returns
=======
out : array
linear-spaced bins from min to max in a numpy array
Notes
=====
This function works on both numbers and datetime objects
Examples
========
>>> import spacepy.toolbox as tb
>>> tb.linspace(1, 10, 4)
array([ 1., 4., 7., 10.])
See Also
========
geomspace
logspace
"""
    # Unwrap 0-d numpy arrays into plain scalars
    if hasattr(min, 'shape') and min.shape == ():
        min = min.item()
    if hasattr(max, 'shape') and max.shape == ():
        max = max.item()
if isinstance(min, datetime.datetime):
from matplotlib.dates import date2num, num2date
ans = num2date(np.linspace(date2num(min), date2num(max), num, **kwargs))
ans = spt.no_tzinfo(ans)
return np.array(ans)
else:
return np.linspace(min, max, num, **kwargs)
converter.py (project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda, author: SignalMedia)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
if dmin > dmax:
dmax, dmin = dmin, dmax
# We need to cap at the endpoints of valid datetime
# TODO(wesm) unused?
# delta = relativedelta(dmax, dmin)
# try:
# start = dmin - delta
# except ValueError:
# start = _from_ordinal(1.0)
# try:
# stop = dmax + delta
# except ValueError:
# # The magic number!
# stop = _from_ordinal(3652059.9999999)
nmax, nmin = dates.date2num((dmax, dmin))
    num = (nmax - nmin) * 86400 * 1000  # view span in milliseconds
max_millis_ticks = 6
for interval in [1, 10, 50, 100, 200, 500]:
if num <= interval * (max_millis_ticks - 1):
self._interval = interval
break
else:
# We went through the whole loop without breaking, default to 1
self._interval = 1000.
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
if estimate > self.MAXTICKS * 2:
raise RuntimeError(('MillisecondLocator estimated to generate %d '
'ticks from %s to %s: exceeds Locator.MAXTICKS'
'* 2 (%d) ') %
(estimate, dmin, dmax, self.MAXTICKS * 2))
freq = '%dL' % self._get_interval()
tz = self.tz.tzname(None)
st = _from_ordinal(dates.date2num(dmin)) # strip tz
ed = _from_ordinal(dates.date2num(dmax))
all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).asobject
try:
if len(all_dates) > 0:
locs = self.raise_if_exceeds(dates.date2num(all_dates))
return locs
except Exception: # pragma: no cover
pass
lims = dates.date2num([dmin, dmax])
return lims
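The interval search above picks the smallest millisecond step from [1, 10, 50, 100, 200, 500] that yields at most max_millis_ticks - 1 = 5 intervals across the view; a worked check of that arithmetic:

span_ms = 300  # hypothetical view span in milliseconds
for interval in [1, 10, 50, 100, 200, 500]:
    if span_ms <= interval * 5:
        break
assert interval == 100  # a 300 ms span gets a tick every 100 ms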
def initializeLines(self, timestamp):
print "initializing %s" % self.name
anomalyRange = (0.0, 1.0)
self.dates = deque([timestamp] * WINDOW, maxlen=WINDOW)
self.convertedDates = deque(
[date2num(date) for date in self.dates], maxlen=WINDOW
)
self.value = deque([0.0] * WINDOW, maxlen=WINDOW)
self.rawValue = deque([0.0] * WINDOW, maxlen=WINDOW)
self.predicted = deque([0.0] * WINDOW, maxlen=WINDOW)
self.anomalyScore = deque([0.0] * WINDOW, maxlen=WINDOW)
self.anomalyLikelihood = deque([0.0] * WINDOW, maxlen=WINDOW)
actualPlot, = self._mainGraph.plot(self.dates, self.value)
self.actualLine = actualPlot
rawPlot, = self._mainGraph.plot(self.dates, self.rawValue)
self.rawLine = rawPlot
predictedPlot, = self._mainGraph.plot(self.dates, self.predicted)
self.predictedLine = predictedPlot
self._mainGraph.legend(tuple(['actual', 'raw', 'predicted']), loc=3)
anomalyScorePlot, = self._anomalyGraph.plot(
self.dates, self.anomalyScore, 'm'
)
anomalyScorePlot.axes.set_ylim(anomalyRange)
self.anomalyScoreLine = anomalyScorePlot
    anomalyLikelihoodPlot, = self._anomalyGraph.plot(
        self.dates, self.anomalyLikelihood, 'r'
    )
anomalyLikelihoodPlot.axes.set_ylim(anomalyRange)
self.anomalyLikelihoodLine = anomalyLikelihoodPlot
self._anomalyGraph.legend(
tuple(['anomaly score', 'anomaly likelihood']), loc=3
)
dateFormatter = DateFormatter('%H:%M:%S.%f')
self._mainGraph.xaxis.set_major_formatter(dateFormatter)
self._anomalyGraph.xaxis.set_major_formatter(dateFormatter)
self._mainGraph.relim()
self._mainGraph.autoscale_view(True, True, True)
self.linesInitialized = True
def write(self, timestamp, value, predicted, anomalyScore, rawValue):
# We need the first timestamp to initialize the lines at the right X value,
# so do that check first.
if not self.linesInitialized:
self.initializeLines(timestamp)
anomalyLikelihood = self.anomalyLikelihoodHelper.anomalyProbability(
value, anomalyScore, timestamp
)
self.dates.append(timestamp)
self.convertedDates.append(date2num(timestamp))
self.value.append(value)
self.rawValue.append(rawValue)
self.allValues.append(value)
self.allRawValues.append(rawValue)
self.predicted.append(predicted)
self.anomalyScore.append(anomalyScore)
self.anomalyLikelihood.append(anomalyLikelihood)
# Update main chart data
self.actualLine.set_xdata(self.convertedDates)
self.actualLine.set_ydata(self.value)
self.rawLine.set_xdata(self.convertedDates)
self.rawLine.set_ydata(self.rawValue)
self.predictedLine.set_xdata(self.convertedDates)
self.predictedLine.set_ydata(self.predicted)
# Update anomaly chart data
self.anomalyScoreLine.set_xdata(self.convertedDates)
self.anomalyScoreLine.set_ydata(self.anomalyScore)
self.anomalyLikelihoodLine.set_xdata(self.convertedDates)
self.anomalyLikelihoodLine.set_ydata(self.anomalyLikelihood)
# Remove previous highlighted regions
for poly in self._chartHighlights:
poly.remove()
self._chartHighlights = []
# weekends = extractWeekendHighlights(self.dates)
anomalies = extractAnomalyIndices(self.anomalyLikelihood)
# Highlight weekends in main chart
# self.highlightChart(weekends, self._mainGraph)
# Highlight anomalies in anomaly chart
self.highlightChart(anomalies, self._anomalyGraph)
maxValue = max(max(self.allValues), max(self.allRawValues))
self._mainGraph.relim()
self._mainGraph.axes.set_ylim(0, maxValue + (maxValue * 0.02))
self._mainGraph.relim()
self._mainGraph.autoscale_view(True, scaley=False)
self._anomalyGraph.relim()
self._anomalyGraph.autoscale_view(True, True, True)
plt.draw()
plt.pause(0.00000000001)