This article collects typical usage examples of the Python function statsmodels.tsa.seasonal.seasonal_decompose. If you have been wondering what exactly seasonal_decompose does, how to use it, or what real usage looks like, the curated code examples here may help.
Below are 20 code examples of the seasonal_decompose function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
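Before diving in, here is a minimal, self-contained sketch of typical usage on synthetic data. The monthly series below is invented for illustration; note also that the freq= argument used throughout the examples was renamed period= in statsmodels 0.11+:

import numpy as np
import pandas as pd
from statsmodels.tsa.seasonal import seasonal_decompose

# synthetic monthly series: linear trend + yearly seasonality + noise
idx = pd.date_range('2000-01-01', periods=48, freq='MS')
y = pd.Series(np.arange(48) + 10 * np.sin(2 * np.pi * np.arange(48) / 12)
              + np.random.randn(48), index=idx)

result = seasonal_decompose(y, model='additive', period=12)
print(result.trend.head())     # centered moving-average trend (NaN at the edges)
print(result.seasonal.head())  # repeating seasonal component
print(result.resid.head())     # remainder
result.plot()                  # four panels: observed, trend, seasonal, resid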
Example 1: test_pandas
def test_pandas(self):
res_add = seasonal_decompose(self.data, freq=4)
freq_override_data = self.data.copy()
freq_override_data.index = pd.date_range(start='1/1/1951', periods=len(freq_override_data), freq='A')  # the DatetimeIndex(start=...) constructor was removed from newer pandas
res_add_override = seasonal_decompose(freq_override_data, freq=4)
seasonal = [62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25, 62.46, 86.17,
-88.38, -60.25, 62.46, 86.17, -88.38, -60.25,
62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25]
trend = [np.nan, np.nan, 159.12, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 72.00, -9.25, -33.12, -36.75, 36.25,
103.00, 131.62, np.nan, np.nan]
random = [np.nan, np.nan, 78.254, 70.254, -36.710, -94.299, -6.371,
-62.246, 105.415, 103.576, 2.754, 1.254, 15.415, -10.299,
-33.246, -27.746, 46.165, -57.924, 28.004, -36.746,
-37.585, 151.826, -75.496, 86.254, -10.210, -194.049,
48.129, 11.004, -40.460, 143.201, np.nan, np.nan]
assert_almost_equal(res_add.seasonal.values.squeeze(), seasonal, 2)
assert_almost_equal(res_add.trend.values.squeeze(), trend, 2)
assert_almost_equal(res_add.resid.values.squeeze(), random, 3)
assert_almost_equal(res_add_override.seasonal.values.squeeze(), seasonal, 2)
assert_almost_equal(res_add_override.trend.values.squeeze(), trend, 2)
assert_almost_equal(res_add_override.resid.values.squeeze(), random, 3)
assert_equal(res_add.seasonal.index.values.squeeze(),
self.data.index.values)
res_mult = seasonal_decompose(np.abs(self.data), 'm', freq=4)
res_mult_override = seasonal_decompose(np.abs(freq_override_data), 'm', freq=4)
seasonal = [1.0815, 1.5538, 0.6716, 0.6931, 1.0815, 1.5538, 0.6716,
0.6931, 1.0815, 1.5538, 0.6716, 0.6931, 1.0815, 1.5538,
0.6716, 0.6931, 1.0815, 1.5538, 0.6716, 0.6931, 1.0815,
1.5538, 0.6716, 0.6931, 1.0815, 1.5538, 0.6716, 0.6931,
1.0815, 1.5538, 0.6716, 0.6931]
trend = [np.nan, np.nan, 171.62, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 107.25, 80.50, 79.12, 78.75, 116.50,
140.00, 157.38, np.nan, np.nan]
random = [np.nan, np.nan, 1.29263, 1.51360, 1.03223, 0.62226,
1.04771, 1.05139, 1.20124, 0.84080, 1.28182, 1.28752,
1.08043, 0.77172, 0.91697, 0.96191, 1.36441, 0.72986,
1.01171, 0.73956, 1.03566, 1.44556, 0.02677, 1.31843,
0.49390, 1.14688, 1.45582, 0.16101, 0.82555, 1.47633,
np.nan, np.nan]
assert_almost_equal(res_mult.seasonal.values.squeeze(), seasonal, 4)
assert_almost_equal(res_mult.trend.values.squeeze(), trend, 2)
assert_almost_equal(res_mult.resid.values.squeeze(), random, 4)
assert_almost_equal(res_mult_override.seasonal.values.squeeze(), seasonal, 4)
assert_almost_equal(res_mult_override.trend.values.squeeze(), trend, 2)
assert_almost_equal(res_mult_override.resid.values.squeeze(), random, 4)
assert_equal(res_mult.seasonal.index.values.squeeze(),
self.data.index.values)
Author: 5267 | Project: statsmodels | Lines: 56 | Source: test_seasonal.py
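The expected arrays in Example 1 encode the additive identity observed = trend + seasonal + resid wherever the trend is defined. A small sketch verifying that identity on invented quarterly data (not the test's dataset; period= is the newer spelling of freq=):

import numpy as np
import pandas as pd
from statsmodels.tsa.seasonal import seasonal_decompose

y = pd.Series(np.random.randn(32).cumsum(),
              index=pd.date_range('1951-01-01', periods=32, freq='QS'))
res = seasonal_decompose(y, period=4)
recon = res.trend + res.seasonal + res.resid   # NaN where the trend is NaN
assert np.allclose(recon.dropna(), y[recon.notna()])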
Example 2: test_pandas_nofreq
def test_pandas_nofreq(self):
# issue #3503
nobs = 100
dta = pd.Series([x % 3 for x in range(nobs)] + np.random.randn(nobs))
res_np = seasonal_decompose(dta.values, freq=3)
res = seasonal_decompose(dta, freq=3)
atol = 1e-8
rtol = 1e-10
assert_allclose(res.seasonal.values.squeeze(), res_np.seasonal,
atol=atol, rtol=rtol)
assert_allclose(res.trend.values.squeeze(), res_np.trend,
atol=atol, rtol=rtol)
assert_allclose(res.resid.values.squeeze(), res_np.resid,
atol=atol, rtol=rtol)
Author: ChadFulton | Project: statsmodels | Lines: 15 | Source: test_seasonal.py
Example 3: make_stationary
def make_stationary(self):
# remove trend and seasonality
    # for a positive trend, penalize higher values with log / square root / cube root etc.
    self.ts_log = np.log(self.df)
    # estimate or model the trend, then remove it from the series; different approaches:
    # aggregation: take monthly/weekly averages
    # smoothing: take a rolling average
    # polynomial fit: fit a regression model
    # Example 1: smoothing, via a rolling average
    # (pd.rolling_mean was removed from pandas; the .rolling accessor replaces it,
    # and the rolling mean should be taken on the logged series being differenced)
    moving_avg = self.ts_log.rolling(window=287).mean()
    ts_log_moving_avg_diff = self.ts_log - moving_avg
    ts_log_moving_avg_diff.dropna(inplace=True)
    # Example 2: exponentially weighted moving average (EWMA)
    # halflife plays the same role as the window: how many data points make up one cycle
    # (pd.ewma was removed from pandas; the .ewm accessor replaces it)
    expwighted_avg = self.ts_log.ewm(halflife=287).mean()
ts_log_ewma_diff = self.ts_log - expwighted_avg
# Example 3: differencing: take the difference of the observation at a particular instant
# with that at the previous instant
self.ts_log_diff = self.ts_log - self.ts_log.shift()
# Example 4: decomposing
# trend and seasonality are modeled separately and the remaining part of the series is returned
    # a pandas.DataFrame whose index has no inferable frequency may not work here,
    # so pass the underlying numpy values via dataframe.values
    decomposition = seasonal_decompose(self.ts_log.values, freq=288)
trend = decomposition.trend
seasonal = decomposition.seasonal
residual = decomposition.resid
Author: greatObelix | Project: datatoolbox | Lines: 31 | Source: timeseries.py
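Example 3 builds several detrended series but leaves the stationarity check itself implicit. A minimal sketch of such a check using the ADF test from statsmodels; the helper name and the 0.05 threshold are illustrative choices, not part of the original:

import numpy as np
from statsmodels.tsa.stattools import adfuller

def is_stationary(series, alpha=0.05):
    values = np.asarray(series, dtype=float)
    values = values[~np.isnan(values)]    # decomposition residuals have NaN edges
    stat, pvalue = adfuller(values)[:2]
    print('ADF statistic = %.3f, p-value = %.3f' % (stat, pvalue))
    return pvalue < alpha                 # small p-value: reject the unit root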
Example 4: decompose
def decompose(df,col,freq):
"To plot the decomposition graphs "
decomposed = seasonal_decompose(df[col].values, freq=freq)
pd.DataFrame(decomposed.observed).plot(figsize=(12,4), title = "Observed")
pd.DataFrame(decomposed.trend).plot(figsize=(12,4), title = "Trend")
pd.DataFrame(decomposed.seasonal).plot(figsize=(12,4), title = "Seasonal")
pd.DataFrame(decomposed.resid).plot(figsize=(12,4), title = "Residuals")
Author: tannishk | Project: data-profiling | Lines: 7 | Source: timeseries.py
Example 5: _create_grid_plot_of_trends
def _create_grid_plot_of_trends(df, X, col_list, filename):
width = 600
height = 400
color_palette = [ 'Black', 'Red', 'Purple', 'Green', 'Brown', 'Yellow', 'Cyan', 'Blue', 'Orange', 'Pink']
i = 0
#2 columns, so number of rows is total /2
row_index = 0
row_list = []
row = []
for col in col_list[1:]: #skip the date column
# create a new plot
s1 = figure(x_axis_type = 'datetime', width=width, plot_height=height, title=col + ' trend')
        # seasonal decompose to extract seasonal trends
decomposition = seasonal_decompose(np.array(df[col]), model='additive', freq=15)
s1.line(X, decomposition.trend, color=color_palette[i % len(color_palette)], alpha=0.5, line_width=2)
row.append(s1)
if len(row) == 2:
row_copy = copy.deepcopy(row)
row_list.append(row_copy)
row = []
i = 0
i += 1
# put all the plots in a grid layout
p = gridplot(row_list)
save(vplot(p), filename=filename, title='trends')
Author: aarora79 | Project: sitapt | Lines: 31 | Source: tsa.py
Example 6: decomp
def decomp(ts):
decomposition = seasonal_decompose(ts[Y_name])
fig = decomposition.plot()
plt.tight_layout()
fig.savefig('decomp.png', bbox_inches="tight")
trend = decomposition.trend
seasonal = decomposition.seasonal
resid = decomposition.resid
Author: mkgunasinghe | Project: examples | Lines: 8 | Source: timeseries.py
Example 7: seasonal_decompose
def seasonal_decompose(timeSeries, freq = 34):
# Seasonal decomposition using moving averages
decomposition = tsa_seasonal.seasonal_decompose(timeSeries, freq = freq)
trend = decomposition.trend
seasonal = decomposition.seasonal
residual = decomposition.resid
return [trend, seasonal, residual]
Author: manuwhs | Project: Trapyng | Lines: 8 | Source: VARMA.py
Example 8: test_interpolate_trend
def test_interpolate_trend(self):
x = np.arange(6)
trend = seasonal_decompose(x, freq=2).trend
assert_equal(trend[0], np.nan)
trend = seasonal_decompose(x, freq=2, extrapolate_trend=1).trend
assert_almost_equal(trend, x)
trend = seasonal_decompose(x, freq=2, extrapolate_trend='freq').trend
assert_almost_equal(trend, x)
# 2d case
x = np.tile(np.arange(6), (2, 1)).T
trend = seasonal_decompose(x, freq=2, extrapolate_trend=1).trend
assert_almost_equal(trend, x)
trend = seasonal_decompose(x, freq=2, extrapolate_trend='freq').trend
assert_almost_equal(trend, x)
Author: cong1989 | Project: statsmodels | Lines: 18 | Source: test_seasonal.py
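For context, extrapolate_trend fills in the NaN edges that the moving-average trend otherwise leaves behind, using a linear extrapolation of the nearest trend points. A standalone sketch (period= assumed as the newer spelling of freq=):

import numpy as np
from statsmodels.tsa.seasonal import seasonal_decompose

x = np.arange(12, dtype=float)
print(seasonal_decompose(x, period=2).trend)                            # NaN at both edges
print(seasonal_decompose(x, period=2, extrapolate_trend='freq').trend)  # edges filled in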
Example 9: decompose_pre
def decompose_pre(ts):
ts_log = np.log(ts)
decomposition = seasonal_decompose(ts_log.values, freq = 24)
# decomposition.plot()
# plt.show(block= False)
ts_log_decompose = ts_log
ts_log_decompose.plays = decomposition.resid
# print ts_log_decompose
ts_log_decompose.dropna(inplace = True)
stationarity_test(ts_log_decompose)
return ts_log_decompose
Author: pthaike | Project: comp | Lines: 11 | Source: process.py
Example 10: freq
def freq(df,col,max1):
"To find the required freq for the decompostion "
count = None
for i in range(1,max1):
        try:
            decomposed = seasonal_decompose(df[col].values, freq=i)
            # keep only the non-NaN residuals (single brackets; the original
            # double brackets were a fancy-indexing bug)
            decomposed.resid = decomposed.resid[~np.isnan(decomposed.resid)]
            print(decomposed.resid)
            x = np.array(decomposed.resid)
            z, p = stats.kstest(x, 'norm')
            if p < 0.055:
                print('It is not the required freq')
            else:
                print('It is the required freq')
                count = i
        except ValueError:
            pass
    decompose(df, col, count)  # plot with the frequency that passed the test
    return count
Author: tannishk | Project: data-profiling | Lines: 21 | Source: timeseries.py
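One caveat about Example 10: stats.kstest(x, 'norm') compares against a standard normal, so raw residuals with nonzero mean or non-unit variance will almost always be rejected. A sketch of the same frequency scan with the residuals standardized first (the function name best_freq and the 0.05 cutoff are illustrative; period= is the newer spelling of freq=):

import numpy as np
from scipy import stats
from statsmodels.tsa.seasonal import seasonal_decompose

def best_freq(values, max_freq):
    for f in range(2, max_freq):          # freq=1 is degenerate, start at 2
        resid = seasonal_decompose(values, period=f).resid
        resid = resid[~np.isnan(resid)]
        z = (resid - resid.mean()) / resid.std()
        _, p = stats.kstest(z, 'norm')
        if p >= 0.05:                     # residuals look Gaussian at this period
            return f
    return None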
Example 11: test_filt
def test_filt(self):
filt = np.array([1/8., 1/4., 1./4, 1/4., 1/8.])
res_add = seasonal_decompose(self.data.values, filt=filt, freq=4)
seasonal = [62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25, 62.46, 86.17,
-88.38, -60.25, 62.46, 86.17, -88.38, -60.25,
62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25]
trend = [np.nan, np.nan, 159.12, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 72.00, -9.25, -33.12, -36.75, 36.25,
103.00, 131.62, np.nan, np.nan]
random = [np.nan, np.nan, 78.254, 70.254, -36.710, -94.299, -6.371,
-62.246, 105.415, 103.576, 2.754, 1.254, 15.415, -10.299,
-33.246, -27.746, 46.165, -57.924, 28.004, -36.746,
-37.585, 151.826, -75.496, 86.254, -10.210, -194.049,
48.129, 11.004, -40.460, 143.201, np.nan, np.nan]
assert_almost_equal(res_add.seasonal, seasonal, 2)
assert_almost_equal(res_add.trend, trend, 2)
assert_almost_equal(res_add.resid, random, 3)
Author: 5267 | Project: statsmodels | Lines: 21 | Source: test_seasonal.py
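The filt used in Example 11, [1/8, 1/4, 1/4, 1/4, 1/8], is exactly the centered 2x4 moving-average filter that seasonal_decompose builds by default for an even period, which is why the expected values match the default decomposition. A sketch of that construction, based on the statsmodels source as I read it:

import numpy as np

def default_filt(period):
    if period % 2 == 0:   # even period: split the endpoint weights (2 x period MA)
        return np.array([0.5] + [1.0] * (period - 1) + [0.5]) / period
    return np.repeat(1.0 / period, period)   # odd period: plain moving average

print(default_filt(4))    # [0.125 0.25 0.25 0.25 0.125]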
Example 12: seasonal_decompose
# fontsize only controls the size of the axis tick labels
unq_rel_cnts1['distinct_freq'].loc[:].plot(figsize=(40,8), fontsize=30)
# #### Execute some Univariate Statistics
# In[17]:
unq_rel_cnts1['distinct_freq'].describe()
# In[18]:
decomposition = seasonal_decompose(unq_rel_cnts1['distinct_freq'].values,freq=24 )
fig = decomposition.plot()
fig.set_size_inches(15, 8)
# In[19]:
# Graph Autocorrelation and Partial Autocorrelation data
fig, axes = plt.subplots(1, 2, figsize=(15,4))
fig = sm.graphics.tsa.plot_acf(unq_rel_cnts1['distinct_freq'], lags=12, ax=axes[0])
fig = sm.graphics.tsa.plot_pacf(unq_rel_cnts1['distinct_freq'], lags=12, ax=axes[1])
Author: Ecoware | Project: Advanced_Analytics | Lines: 29 | Source: Relationship+ETL,+Modeling,+and+Summary+Stats+PROD+1.py
Example 13: test_ndarray
def test_ndarray(self):
res_add = seasonal_decompose(self.data.values, freq=4)
seasonal = [62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25, 62.46, 86.17,
-88.38, -60.25, 62.46, 86.17, -88.38, -60.25,
62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25]
trend = [np.nan, np.nan, 159.12, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 72.00, -9.25, -33.12, -36.75, 36.25,
103.00, 131.62, np.nan, np.nan]
random = [np.nan, np.nan, 78.254, 70.254, -36.710, -94.299, -6.371,
-62.246, 105.415, 103.576, 2.754, 1.254, 15.415, -10.299,
-33.246, -27.746, 46.165, -57.924, 28.004, -36.746,
-37.585, 151.826, -75.496, 86.254, -10.210, -194.049,
48.129, 11.004, -40.460, 143.201, np.nan, np.nan]
assert_almost_equal(res_add.seasonal, seasonal, 2)
assert_almost_equal(res_add.trend, trend, 2)
assert_almost_equal(res_add.resid, random, 3)
res_mult = seasonal_decompose(np.abs(self.data.values), 'm', freq=4)
seasonal = [1.0815, 1.5538, 0.6716, 0.6931, 1.0815, 1.5538, 0.6716,
0.6931, 1.0815, 1.5538, 0.6716, 0.6931, 1.0815, 1.5538,
0.6716, 0.6931, 1.0815, 1.5538, 0.6716, 0.6931, 1.0815,
1.5538, 0.6716, 0.6931, 1.0815, 1.5538, 0.6716, 0.6931,
1.0815, 1.5538, 0.6716, 0.6931]
trend = [np.nan, np.nan, 171.62, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 107.25, 80.50, 79.12, 78.75, 116.50,
140.00, 157.38, np.nan, np.nan]
random = [np.nan, np.nan, 1.29263, 1.51360, 1.03223, 0.62226,
1.04771, 1.05139, 1.20124, 0.84080, 1.28182, 1.28752,
1.08043, 0.77172, 0.91697, 0.96191, 1.36441, 0.72986,
1.01171, 0.73956, 1.03566, 1.44556, 0.02677, 1.31843,
0.49390, 1.14688, 1.45582, 0.16101, 0.82555, 1.47633,
np.nan, np.nan]
assert_almost_equal(res_mult.seasonal, seasonal, 4)
assert_almost_equal(res_mult.trend, trend, 2)
assert_almost_equal(res_mult.resid, random, 4)
# test odd
res_add = seasonal_decompose(self.data.values[:-1], freq=4)
seasonal = [68.18, 69.02, -82.66, -54.54, 68.18, 69.02, -82.66,
-54.54, 68.18, 69.02, -82.66, -54.54, 68.18, 69.02,
-82.66, -54.54, 68.18, 69.02, -82.66, -54.54, 68.18,
69.02, -82.66, -54.54, 68.18, 69.02, -82.66, -54.54,
68.18, 69.02, -82.66]
trend = [np.nan, np.nan, 159.12, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 72.00, -9.25, -33.12, -36.75, 36.25,
103.00, np.nan, np.nan]
random = [np.nan, np.nan, 72.538, 64.538, -42.426, -77.150,
-12.087, -67.962, 99.699, 120.725, -2.962, -4.462,
9.699, 6.850, -38.962, -33.462, 40.449, -40.775, 22.288,
-42.462, -43.301, 168.975, -81.212, 80.538, -15.926,
-176.900, 42.413, 5.288, -46.176, np.nan, np.nan]
assert_almost_equal(res_add.seasonal, seasonal, 2)
assert_almost_equal(res_add.trend, trend, 2)
assert_almost_equal(res_add.resid, random, 3)
Author: 5267 | Project: statsmodels | Lines: 64 | Source: test_seasonal.py
Example 14: test_one_sided_moving_average_in_stl_decompose
def test_one_sided_moving_average_in_stl_decompose(self):
res_add = seasonal_decompose(self.data.values, freq=4, two_sided=False)
seasonal = np.array([76.76, 90.03, -114.4, -52.4, 76.76, 90.03, -114.4,
-52.4, 76.76, 90.03, -114.4, -52.4, 76.76, 90.03,
-114.4, -52.4, 76.76, 90.03, -114.4, -52.4, 76.76,
90.03, -114.4, -52.4, 76.76, 90.03, -114.4, -52.4,
76.76, 90.03, -114.4, -52.4])
trend = np.array([np.nan, np.nan, np.nan, np.nan, 159.12, 204., 221.25,
245.12, 319.75, 451.5, 561.12, 619.25, 615.62, 548.,
462.12, 381.12, 316.62, 264., 228.38, 210.75, 188.38,
199., 207.12, 191., 166.88, 72., -9.25, -33.12,
-36.75, 36.25, 103., 131.62])
resid = np.array([np.nan, np.nan, np.nan, np.nan, 11.112, -57.031,
118.147, 136.272, 332.487, 267.469, 83.272, -77.853,
-152.388, -181.031, -152.728, -152.728, -56.388, -115.031,
14.022, -56.353, -33.138, 139.969, -89.728, -40.603,
-200.638, -303.031, 46.647, 72.522, 84.987, 234.719,
-33.603, 104.772])
assert_almost_equal(res_add.seasonal, seasonal, 2)
assert_almost_equal(res_add.trend, trend, 2)
assert_almost_equal(res_add.resid, resid, 3)
res_mult = seasonal_decompose(np.abs(self.data.values), 'm', freq=4, two_sided=False)
seasonal = np.array([1.1985, 1.5449, 0.5811, 0.6755, 1.1985, 1.5449, 0.5811,
0.6755, 1.1985, 1.5449, 0.5811, 0.6755, 1.1985, 1.5449,
0.5811, 0.6755, 1.1985, 1.5449, 0.5811, 0.6755, 1.1985,
1.5449, 0.5811, 0.6755, 1.1985, 1.5449, 0.5811, 0.6755,
1.1985, 1.5449, 0.5811, 0.6755])
trend = np.array([np.nan, np.nan, np.nan, np.nan, 171.625, 204.,
221.25, 245.125, 319.75, 451.5, 561.125, 619.25,
615.625, 548., 462.125, 381.125, 316.625, 264.,
228.375, 210.75, 188.375, 199., 207.125, 191.,
166.875, 107.25, 80.5, 79.125, 78.75, 116.5,
140., 157.375])
resid = np.array([np.nan, np.nan, np.nan, np.nan, 1.2008, 0.752, 1.75,
1.987, 1.9023, 1.1598, 1.6253, 1.169, 0.7319, 0.5398,
0.7261, 0.6837, 0.888, 0.586, 0.9645, 0.7165, 1.0276,
1.3954, 0.0249, 0.7596, 0.215, 0.851, 1.646, 0.2432,
1.3244, 2.0058, 0.5531, 1.7309])
assert_almost_equal(res_mult.seasonal, seasonal, 4)
assert_almost_equal(res_mult.trend, trend, 2)
assert_almost_equal(res_mult.resid, resid, 4)
# test odd
res_add = seasonal_decompose(self.data.values[:-1], freq=4, two_sided=False)
seasonal = np.array([81.21, 94.48, -109.95, -65.74, 81.21, 94.48, -109.95,
-65.74, 81.21, 94.48, -109.95, -65.74, 81.21, 94.48,
-109.95, -65.74, 81.21, 94.48, -109.95, -65.74, 81.21,
94.48, -109.95, -65.74, 81.21, 94.48, -109.95, -65.74,
81.21, 94.48, -109.95])
trend = [np.nan, np.nan, np.nan, np.nan, 159.12, 204., 221.25,
245.12, 319.75, 451.5, 561.12, 619.25, 615.62, 548.,
462.12, 381.12, 316.62, 264., 228.38, 210.75, 188.38,
199., 207.12, 191., 166.88, 72., -9.25, -33.12,
-36.75, 36.25, 103.]
random = [np.nan, np.nan, np.nan, np.nan, 6.663, -61.48,
113.699, 149.618, 328.038, 263.02, 78.824, -64.507,
-156.837, -185.48, -157.176, -139.382, -60.837, -119.48,
9.574, -43.007, -37.587, 135.52, -94.176, -27.257,
-205.087, -307.48, 42.199, 85.868, 80.538, 230.27, -38.051]
assert_almost_equal(res_add.seasonal, seasonal, 2)
assert_almost_equal(res_add.trend, trend, 2)
assert_almost_equal(res_add.resid, random, 3)
Author: 5267 | Project: statsmodels | Lines: 74 | Source: test_seasonal.py
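two_sided=False switches the trend estimate from a centered moving average to a causal one that uses only past observations, which is why the NaNs move from both edges to the first period values in Example 14. A quick sketch showing where the NaNs land (invented data; period= spelling assumed):

import numpy as np
from statsmodels.tsa.seasonal import seasonal_decompose

x = np.random.randn(32).cumsum()
centered = seasonal_decompose(x, period=4).trend
causal = seasonal_decompose(x, period=4, two_sided=False).trend
print(np.where(np.isnan(centered))[0])   # [ 0  1 30 31] - two at each edge
print(np.where(np.isnan(causal))[0])     # [0 1 2 3]     - leading values only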
Example 15: diagnostics
def diagnostics():
decomposition = seasonal_decompose(view_hour['distinct_freq_sum'].values,freq=24 )
fig = decomposition.plot()
fig.set_size_inches(50, 8)
Author: Ecoware | Project: Advanced_Analytics | Lines: 5 | Source: READ+GCS+-+Prophet+2.py
Example 16: open
from statsmodels.tsa.seasonal import seasonal_decompose
output = open('AIRMA_mars_tianchi_artist_plays_predict.csv','w')
for artist in artists:
    print(artist, len(daily_play[artist]))
y_data = daily_play[artist][-30:]
l = len(y_data)
dates_str = sm.tsa.datetools.date_range_str('2005m1',length=l)
dates_all = sm.tsa.datetools.dates_from_range('2005m1', length=l)
y = pd.Series(y_data, index=dates_all)
plt.plot(y)
decomposition = seasonal_decompose(y)
trend = decomposition.trend
seasonal = decomposition.seasonal
residual = decomposition.resid
y_decompose = residual
y_decompose.dropna(inplace=True)
test_stat(y_decompose)
# remove moving avg
moving_avg = y.rolling(window=12,center=False).mean()
y_moving_avg_diff = y - moving_avg
y_moving_avg_diff.dropna(inplace=True)
print "Stationarity for TS - moving avg:"
Author: LossyRedgement | Project: Tianchi-2016-music | Lines: 30 | Source: ARIMA.py
Example 17: is_stationary
ts_log_moving_avg_diff.dropna(inplace=True) # Pandas in action :p
# after the above, make sure the test statistic is less than the critical value.
# For this you can run is_stationary again:
# is_stationary(ts_log_moving_avg_diff, 12)
exp_weighted_avg = ts_log.ewm(halflife=12).mean()  # pd.ewma was removed from pandas
# Exponential weights make sure recent observations carry more importance
ts_log_ewma_diff = ts_log - exp_weighted_avg
# test_stationarity(ts_log_ewma_diff)
# On testing, this apparently has a lower test-statistic value and is hence
# the better (more stationary) series
from statsmodels.tsa.seasonal import seasonal_decompose
decomposition = seasonal_decompose(ts_log)
trend = decomposition.trend
seasonal = decomposition.seasonal
residual = decomposition.resid
plt.subplot(411)
plt.plot(ts_log, label="Original")
plt.legend(loc="best")
plt.subplot(412)
plt.plot(trend, label="Trend")
plt.legend(loc="best")
plt.subplot(413)
plt.plot(seasonal, label="Seasonality")
plt.legend(loc="best")
plt.subplot(414)
Author: PrieureDeSion | Project: Randoms | Lines: 31 | Source: main.py
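Incidentally, the four plt.subplot panels in Example 17 replicate what the result object can draw itself; a one-call alternative, reusing decomposition and plt from this example:

fig = decomposition.plot()   # observed, trend, seasonal, resid in one figure
fig.set_size_inches(10, 8)
plt.show()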
Example 18: acf
# acf is the autocorrelation function and pacf the partial ACF (works only for 1-d arrays)
# iloc is integer location, see the pandas docs
lag_corr = acf(stock_data['Logged First Difference'].iloc[1:])
lag_partial_corr = pacf(stock_data['Logged First Difference'].iloc[1:])
#fig, ax = plt.subplots (figsize = (16,12))
#ax.plot (lag_corr)
#pylab.show ()
# To extract trends and seasonal patterns for TS analysis
from statsmodels.tsa.seasonal import seasonal_decompose
# set the frequency value appropriately; for monthly seasonality set freq = 30
decomposition = seasonal_decompose(stock_data['Natural Log'], model='additive', freq=30)
#fig = decomposition.plot()
#pylab.show ()
# let's fit an ARIMA: keep the AR indicator as 1 and the rest as zero, i.e. (p, d, q) = (1, 0, 0)
#the snippet below does it for undifferenced series
#model = sm.tsa.ARIMA (stock_data ['Natural Log'].iloc[1:], order = (1,0,0))
#result = model.fit (disp = -1)
#stock_data ['Forecast'] = result.fittedvalues
#stock_data [['Natural Log', 'Forecast']].plot (figsize = (16,12))
#pylab.show ()
# trying a moving-average model, order (0, 0, 1)
model = sm.tsa.ARIMA(stock_data['Logged First Difference'].iloc[1:], order=(0, 0, 1))
results = model.fit(disp=-1)
Author: varun10221 | Project: ARIMA-model | Lines: 31 | Source: s_and_p.py
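Note that the sm.tsa.ARIMA class used in Example 18 has since been removed from statsmodels; in current releases the model lives in statsmodels.tsa.arima.model and fit() no longer takes a disp argument. A sketch of the equivalent call, reusing stock_data from this example:

from statsmodels.tsa.arima.model import ARIMA

model = ARIMA(stock_data['Logged First Difference'].iloc[1:], order=(0, 0, 1))
results = model.fit()
print(results.summary())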
Example 19: diagnostics
def diagnostics():
decomposition = seasonal_decompose(voltage_df['rel_counts'].values,freq=24 )
fig = decomposition.plot()
fig.set_size_inches(50, 8)
Author: Ecoware | Project: Advanced_Analytics | Lines: 5 | Source: JuniperETL,TimeSeries,RulesEng1.py
Example 20: _draw_multiple_line_plot
print('lag_partial_correlations')
print(lag_partial_correlations)
y = lag_partial_correlations
_draw_multiple_line_plot('lag_partial_correlations.html',
'lag_partial_correlations',
[X],
[y],
['navy'],
['lag_partial_correlations'],
[None],
[1],
'datetime', 'Date', 'lag_partial_correlations', y_start=-1, y_end=1)
decomposition = seasonal_decompose(np.array(df['https']), model='additive', freq=30)
_draw_decomposition_plot('decomposition.html', X, decomposition, 'seasonal decomposition', 'datetime', 'decomposition', width=600, height=400)
model = sm.tsa.ARIMA(np.array(df['https'].iloc[1:]), order=(2,0,0))
results = model.fit(disp=-1)
# predict the next 12 values
num_predictions = 12
predicted_dates = []
last_date = X[-1]
for i in range(num_predictions):
next_date = last_date + 30
predicted_dates.append(next_date)
last_date = next_date
#predicted_dates=np.array(['2015-10-17', '2015-12-19', '2016-03-19', '2016-06-19', '2016-09-19'], dtype=np.datetime64)
Author: aarora79 | Project: sitapt | Lines: 29 | Source: visualize.py
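The manual last_date + 30 loop in Example 20 relies on implicit datetime64 day arithmetic; a sketch of building the same future axis with pandas (assuming X holds datetime-like values at a roughly 30-day spacing, as in this example):

import pandas as pd

predicted_dates = pd.date_range(start=pd.Timestamp(X[-1]) + pd.Timedelta(days=30),
                                periods=12, freq='30D')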
Note: the statsmodels.tsa.seasonal.seasonal_decompose examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. Please consult each project's License before distributing or using the code, and do not repost without permission.