This article collects typical usage examples of the Python function statsmodels.tsa.tsatools.lagmat. If you have been wondering what exactly the lagmat function does, how to call it, or what real code that uses it looks like, the hand-picked examples here should help.
Twenty code examples of the lagmat function are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
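Before the examples, here is a minimal sketch of what lagmat itself returns. The toy array and the expected values in the comments are illustrations based on the documented trim and original options, not output taken from any of the projects below.

import numpy as np
from statsmodels.tsa.tsatools import lagmat

x = np.arange(1.0, 6.0)  # toy series [1, 2, 3, 4, 5]

# original='ex' returns only the lag columns; trim='both' keeps rows with a full set of lags.
lags_only = lagmat(x, maxlag=2, trim='both', original='ex')
# rows align with x[2:]: (2, 1), (3, 2), (4, 3)

# original='in' keeps the current value as column 0, followed by lag 1, lag 2, ...
with_level = lagmat(x, maxlag=2, trim='both', original='in')
# rows: (3, 2, 1), (4, 3, 2), (5, 4, 3)

# original='sep' returns the lag matrix and the contemporaneous values as two aligned arrays.
lagged, current = lagmat(x, maxlag=2, trim='both', original='sep')

# trim='forward' keeps one row per observation and zero-pads the first maxlag rows.
padded = lagmat(x, maxlag=2, trim='forward', original='ex')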
Example 1: _process_inputs
def _process_inputs(self, X, E=None, lengths=None):
if self.n_features == 1:
lagged = None
if lengths is None:
lagged = lagmat(X, maxlag=self.n_lags, trim='forward',
original='ex')
else:
lagged = np.zeros((len(X), self.n_lags))
for i, j in iter_from_X_lengths(X, lengths):
lagged[i:j, :] = lagmat(X[i:j], maxlag=self.n_lags,
trim='forward', original='ex')
return {'obs': X.reshape(-1,1),
'lagged': lagged.reshape(-1, self.n_features, self.n_lags)}
else:
lagged = None
lagged = np.zeros((X.shape[0], self.n_features, self.n_lags))
if lengths is None:
tem = lagmat(X, maxlag=self.n_lags, trim='forward',
original='ex')
for sample in range(X.shape[0]):
lagged[sample] = np.reshape(tem[sample], (self.n_features, self.n_lags), 'F')
else:
for i, j in iter_from_X_lengths(X, lengths):
lagged[i:j, :] = lagmat(X[i:j], maxlag=self.n_lags,
trim='forward', original='ex')
lagged = lagged.reshape(-1, self.n_features, self.n_lags)
return {'obs': X, 'lagged': lagged}
Developer: sarah-strauss, Project: autohmm, Lines: 31, Source: ar.py
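A small shape check of the univariate branch above, using made-up data: trim='forward' with original='ex' keeps one row per observation (zero-padded at the start), so the lag matrix can be reshaped to (n_samples, n_features, n_lags).

import numpy as np
from statsmodels.tsa.tsatools import lagmat

X = np.random.randn(100, 1)   # hypothetical univariate observations
n_lags = 3

lagged = lagmat(X, maxlag=n_lags, trim='forward', original='ex')
assert lagged.shape == (100, n_lags)    # one row per observation
lagged = lagged.reshape(-1, 1, n_lags)  # (n_samples, n_features, n_lags)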
Example 2: _fit_start_params_hr
def _fit_start_params_hr(self, order):
"""
Get starting parameters for fit.
Parameters
----------
order : iterable
(p,q,k) - AR lags, MA lags, and number of exogenous variables
including the constant.
Returns
-------
start_params : array
A first guess at the starting parameters.
Notes
-----
If necessary, fits an AR process with the laglength selected according
to best BIC. Obtain the residuals. Then fit an ARMA(p,q) model via
OLS using these residuals for a first approximation. Uses a separate
OLS regression to find the coefficients of exogenous variables.
References
----------
Hannan, E.J. and Rissanen, J. 1982. "Recursive estimation of mixed
autoregressive-moving average order." `Biometrika`. 69.1.
"""
p,q,k = order
start_params = zeros((p+q+k))
endog = self.endog.copy() # copy because overwritten
exog = self.exog
if k != 0:
ols_params = GLS(endog, exog).fit().params
start_params[:k] = ols_params
endog -= np.dot(exog, ols_params).squeeze()
if q != 0:
if p != 0:
armod = AR(endog).fit(ic='bic', trend='nc')
arcoefs_tmp = armod.params
p_tmp = armod.k_ar
resid = endog[p_tmp:] - np.dot(lagmat(endog, p_tmp,
trim='both'), arcoefs_tmp)
if p < p_tmp + q:
endog_start = p_tmp + q - p
resid_start = 0
else:
endog_start = 0
resid_start = p - p_tmp - q
lag_endog = lagmat(endog, p, 'both')[endog_start:]
lag_resid = lagmat(resid, q, 'both')[resid_start:]
# stack ar lags and resids
X = np.column_stack((lag_endog, lag_resid))
coefs = GLS(endog[max(p_tmp+q,p):], X).fit().params
start_params[k:k+p+q] = coefs
else:
start_params[k+p:k+p+q] = yule_walker(endog, order=q)[0]
if q==0 and p != 0:
arcoefs = yule_walker(endog, order=p)[0]
start_params[k:k+p] = arcoefs
return start_params
Developer: arokem, Project: statsmodels, Lines: 60, Source: arima_model.py
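The Hannan-Rissanen recipe described in the docstring can be reproduced in a few lines with lagmat and plain least squares. The series and the orders p=2, q=1, p_long=10 below are made up, and the row offsets follow the p < p_tmp + q branch above.

import numpy as np
from statsmodels.tsa.tsatools import lagmat

rng = np.random.default_rng(0)
y = rng.standard_normal(500).cumsum() * 0.1 + rng.standard_normal(500)  # toy series
p, q, p_long = 2, 1, 10

# Step 1: long AR fit by least squares to approximate the innovations.
X_ar = lagmat(y, p_long, trim='both')
ar_coefs, *_ = np.linalg.lstsq(X_ar, y[p_long:], rcond=None)
resid = y[p_long:] - X_ar @ ar_coefs

# Step 2: regress y on its own lags and on lagged residuals (rows aligned on y[p_long + q:]).
lag_y = lagmat(y, p, trim='both')[p_long + q - p:]
lag_e = lagmat(resid, q, trim='both')
X = np.column_stack((lag_y, lag_e))
start, *_ = np.linalg.lstsq(X, y[p_long + q:], rcond=None)  # first p entries: AR, last q: MA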
Example 3: _process_inputs
def _process_inputs(self, X, E=None, lengths=None):
# Makes sure inputs have correct shape, generates features
lagged = None
if lengths is None:
lagged = lagmat(X, maxlag=self.n_lags, trim='forward',
original='ex')
else:
lagged = np.zeros((len(X), self.n_lags))
for i, j in iter_from_X_lengths(X, lengths):
lagged[i:j, :] = lagmat(X[i:j], maxlag=self.n_lags,
trim='forward', original='ex')
inputs = {'obs': X.reshape(-1,1),
'lagged': lagged}
return inputs
Developer: jan-matthis, Project: autohmm, Lines: 15, Source: ar.py
Example 4: _init_model
def _init_model(self):
"""Should be called whenever the model is initialized or changed"""
self._reformat_lags()
self._check_specification()
nobs_orig = self._y.shape[0]
if self.constant:
reg_constant = ones((nobs_orig, 1), dtype=np.float64)
else:
reg_constant = ones((nobs_orig, 0), dtype=np.float64)
if self.lags is not None and nobs_orig > 0:
maxlag = np.max(self.lags)
lag_array = lagmat(self._y, maxlag)
reg_lags = empty((nobs_orig, self._lags.shape[1]), dtype=np.float64)
for i, lags in enumerate(self._lags.T):
reg_lags[:, i] = np.mean(lag_array[:, lags[0]:lags[1]], 1)
else:
reg_lags = empty((nobs_orig, 0), dtype=np.float64)
if self._x is not None:
reg_x = self._x
else:
reg_x = empty((nobs_orig, 0), dtype=np.float64)
self.regressors = np.hstack((reg_constant, reg_lags, reg_x))
first_obs, last_obs = self._indices
self.regressors = self.regressors[first_obs:last_obs, :]
self._y_adj = self._y[first_obs:last_obs]
Developer: rhodge1, Project: arch, Lines: 29, Source: mean.py
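The loop above averages blocks of columns of one lag matrix, which is how HAR-style regressors are built; a toy version with hypothetical daily/weekly/monthly lag windows:

import numpy as np
from statsmodels.tsa.tsatools import lagmat

y = np.random.randn(250)                 # made-up series
lag_windows = [(0, 1), (0, 5), (0, 22)]  # hypothetical HAR-style lag ranges

maxlag = max(hi for _, hi in lag_windows)
lag_array = lagmat(y, maxlag)            # default trim='forward': one row per observation
reg_lags = np.empty((y.shape[0], len(lag_windows)))
for i, (lo, hi) in enumerate(lag_windows):
    reg_lags[:, i] = np.mean(lag_array[:, lo:hi], 1)  # average of lags lo+1 .. hi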
Example 5: _estimate_df_regression
def _estimate_df_regression(y, trend, lags):
"""Helper function that estimates the core (A)DF regression
Parameters
----------
y : array-like, (nobs,)
The data for the lag selection
trend : str, {'nc','c','ct','ctt'}
The trend order
lags : int
The number of lags to include in the ADF regression
Returns
-------
ols_res : OLSResults
A results class object produced by OLS.fit()
Notes
-----
See statsmodels.regression.linear_model.OLS for details on the results
returned
"""
delta_y = diff(y)
rhs = lagmat(delta_y[:, None], lags, trim='both', original='in')
nobs = rhs.shape[0]
lhs = rhs[:, 0].copy() # lag-0 values are lhs, Is copy() necessary?
rhs[:, 0] = y[-nobs - 1:-1] # replace lag 0 with level of y
if trend != 'nc':
rhs = add_trend(rhs[:, :lags + 1], trend)
return OLS(lhs, rhs).fit()
Developer: VolosSoftware, Project: arch, Lines: 33, Source: unitroot.py
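The column-replacement trick above (overwrite the lag-0 difference with the lagged level of y) is the heart of the ADF design matrix. A self-contained sketch on a toy random walk, assuming the 'c' (constant-only) trend:

import numpy as np
from statsmodels.regression.linear_model import OLS
from statsmodels.tsa.tsatools import add_trend, lagmat

y = np.cumsum(np.random.randn(200))  # toy random walk
lags = 3

delta_y = np.diff(y)
rhs = lagmat(delta_y[:, None], lags, trim='both', original='in')
nobs = rhs.shape[0]
lhs = rhs[:, 0].copy()       # delta y_t
rhs[:, 0] = y[-nobs - 1:-1]  # replace the lag-0 column with y_{t-1}
res = OLS(lhs, add_trend(rhs[:, :lags + 1], 'c')).fit()
# res.tvalues[0] is the ADF t-statistic on y_{t-1}; its null distribution is non-standard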
Example 6: pacf_ols
def pacf_ols(x, nlags=40):
'''Calculate partial autocorrelations
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
Number of lags for which pacf is returned. Lag 0 is not returned.
Returns
-------
pacf : 1d array
partial autocorrelations, maxlag+1 elements
Notes
-----
This solves a separate OLS estimation for each desired lag.
'''
#TODO: add warnings for Yule-Walker
#NOTE: demeaning and not using a constant gave incorrect answers?
#JP: demeaning should have a better estimate of the constant
#maybe we can compare small sample properties with a MonteCarlo
xlags, x0 = lagmat(x, nlags, original='sep')
#xlags = sm.add_constant(lagmat(x, nlags), prepend=True)
xlags = add_constant(xlags)
pacf = [1.]
for k in range(1, nlags+1):
res = OLS(x0[k:], xlags[k:, :k+1]).fit()
#np.take(xlags[k:], range(1,k+1)+[-1],
pacf.append(res.params[-1])
return np.array(pacf)
Developer: Inoryy, Project: statsmodels, Lines: 33, Source: stattools.py
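A quick sanity check of the OLS-based partial autocorrelations: compare pacf_ols with the default Yule-Walker-based pacf on a made-up AR(1) series. This assumes a recent statsmodels where both are exported from statsmodels.tsa.stattools.

import numpy as np
from statsmodels.tsa.stattools import pacf, pacf_ols

rng = np.random.default_rng(1)
x = np.zeros(500)
for t in range(1, 500):  # toy AR(1) with coefficient 0.6
    x[t] = 0.6 * x[t - 1] + rng.standard_normal()

print(pacf_ols(x, nlags=5))  # OLS estimate, as in the function above
print(pacf(x, nlags=5))      # Yule-Walker estimate for comparison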
Example 7: load_
def load_(infile, nLags=1000):
from statsmodels.tsa.tsatools import lagmat
assert infile.endswith('.npy')
X, Y = np.load(infile)
X0 = lagmat(X, nLags, trim='both')
ind = len(X)-len(X0)
return X0, Y[ind:]
Developer: mobeets, Project: jonasASD, Lines: 7, Source: dio.py
Example 8: moment_ret
def moment_ret(self, theta_ret, theta_vol=None, uarg=None,
zlag=1, **kwargs):
"""Moment conditions (returns) for spectral GMM estimator.
Parameters
----------
theta_ret : (2, ) array
Vector of model parameters. [phi, price_ret]
theta_vol : (3, ) array
Vector of model parameters. [mean, rho, delta]
uarg : (nu, ) array
Grid to evaluate a and b functions
zlag : int
Number of lags to use for the instrument
Returns
-------
moment : (nobs, nmoms) array
Matrix of momcond restrictions
Raises
------
ValueError
"""
if uarg is None:
raise ValueError("uarg is missing!")
vollag, vol = lagmat(self.vol, maxlag=zlag,
original='sep', trim='both')
# Number of observations after truncation
nobs = vol.shape[0]
# Number of moments
nmoms = 2 * uarg.shape[0] * (zlag+1)
# Change class attribute with the current theta
param = ARGparams()
try:
param.update(theta_ret=theta_ret, theta_vol=theta_vol)
except ValueError:
return np.ones((nobs, nmoms))*1e10
# Must be (nobs, nu) array
try:
cfun = self.char_fun_ret(uarg, param)[zlag-1:]
except ValueError:
return np.ones((nobs, nmoms))*1e10
# Must be (nobs, nu) array
error = np.exp(-self.ret[zlag:, np.newaxis] * uarg) - cfun
# Instruments, (nobs, ninstr) array
instr = np.hstack([np.exp(-1j * vollag), np.ones((nobs, 1))])
# Must be (nobs, nmoms) array
moment = error[:, np.newaxis, :] * instr[:, :, np.newaxis]
moment = moment.reshape((nobs, nmoms//2))
# (nobs, 2 * ninstr)
moment = np.hstack([np.real(moment), np.imag(moment)])
return moment
Developer: khrapovs, Project: argamma, Lines: 57, Source: arg.py
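The original='sep' call above returns the lag matrix and the contemporaneous values as two aligned arrays, which is what makes the instrument construction a single hstack; a toy illustration with a made-up volatility series:

import numpy as np
from statsmodels.tsa.tsatools import lagmat

vol = np.abs(np.random.randn(300))  # stand-in for the volatility series
zlag = 2

vollag, vol_now = lagmat(vol, maxlag=zlag, original='sep', trim='both')
nobs = vol_now.shape[0]
instr = np.hstack([np.exp(-1j * vollag), np.ones((nobs, 1))])  # instruments, as in moment_ret
assert instr.shape == (nobs, zlag + 1)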
Example 9: __init__
def __init__(self, endog, k_regimes, order, trend='c', exog=None,
exog_tvtp=None, switching_ar=True, switching_trend=True,
switching_exog=False, switching_variance=False,
dates=None, freq=None, missing='none'):
# Properties
self.switching_ar = switching_ar
# Switching options
if self.switching_ar is True or self.switching_ar is False:
self.switching_ar = [self.switching_ar] * order
elif not len(self.switching_ar) == order:
raise ValueError('Invalid iterable passed to `switching_ar`.')
# Initialize the base model
super(MarkovAutoregression, self).__init__(
endog, k_regimes, trend=trend, exog=exog, order=order,
exog_tvtp=exog_tvtp, switching_trend=switching_trend,
switching_exog=switching_exog,
switching_variance=switching_variance, dates=dates, freq=freq,
missing=missing)
# Sanity checks
if self.nobs <= self.order:
raise ValueError('Must have more observations than the order of'
' the autoregression.')
# Autoregressive exog
self.exog_ar = lagmat(endog, self.order)[self.order:]
# Reshape other datasets
self.nobs -= self.order
self.orig_endog = self.endog
self.endog = self.endog[self.order:]
if self._k_exog > 0:
self.orig_exog = self.exog
self.exog = self.exog[self.order:]
# Reset the ModelData datasets
self.data.endog, self.data.exog = (
self.data._convert_endog_exog(self.endog, self.exog))
# Reset indexes, if provided
if self.data.row_labels is not None:
self.data._cache['row_labels'] = (
self.data.row_labels[self.order:])
if self._index is not None:
if self._index_generated:
self._index = self._index[:-self.order]
else:
self._index = self._index[self.order:]
# Parameters
self.parameters['autoregressive'] = self.switching_ar
# Cache an array for holding slices
self._predict_slices = [slice(None, None, None)] * (self.order + 1)
Developer: bert9bert, Project: statsmodels, Lines: 57, Source: markov_autoregression.py
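The exog_ar construction above relies on the default trim='forward', so slicing off the first `order` rows aligns the lag matrix with the trimmed endog; a tiny numeric illustration with a made-up series:

import numpy as np
from statsmodels.tsa.tsatools import lagmat

endog = np.arange(1.0, 8.0)  # toy series 1..7
order = 2

exog_ar = lagmat(endog, order)[order:]  # drop the zero-padded rows
endog_trimmed = endog[order:]
# endog_trimmed: 3, 4, 5, 6, 7
# exog_ar rows:  (2, 1), (3, 2), (4, 3), (5, 4), (6, 5)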
Example 10: _df_select_lags
def _df_select_lags(y, trend, max_lags, method):
"""
Helper method to determine the best lag length in DF-like regressions
Parameters
----------
y : array-like, (nobs,)
The data for the lag selection exercise
trend : str, {'nc','c','ct','ctt'}
The trend order
max_lags : int
The maximum number of lags to check. This setting affects all
estimation since the sample is adjusted by max_lags when
fitting the models
method : str, {'AIC','BIC','t-stat'}
The method to use when estimating the model
Returns
-------
best_ic : float
The information criteria at the selected lag
best_lag : int
The selected lag
all_res : list
List of OLS results from fitting max_lag + 1 models
Notes
-----
See statsmodels.tsa.tsatools._autolag for details. If max_lags is None, the
default value of 12 * (nobs/100)**(1/4) is used.
"""
nobs = y.shape[0]
delta_y = diff(y)
if max_lags is None:
max_lags = int(ceil(12. * power(nobs / 100., 1 / 4.)))
rhs = lagmat(delta_y[:, None], max_lags, trim='both', original='in')
nobs = rhs.shape[0]
rhs[:, 0] = y[-nobs - 1:-1] # replace 0 with level of y
lhs = delta_y[-nobs:]
if trend != 'nc':
full_rhs = add_trend(rhs, trend, prepend=True)
else:
full_rhs = rhs
start_lag = full_rhs.shape[1] - rhs.shape[1] + 1
ic_best, best_lag, all_res = _autolag(OLS, lhs, full_rhs, start_lag,
max_lags, method, regresults=True)
# To get the correct number of lags, subtract the start_lag since
# lags 0,1,...,start_lag-1 were not actual lags, but other variables
best_lag -= start_lag
return ic_best, best_lag, all_res
Developer: VolosSoftware, Project: arch, Lines: 54, Source: unitroot.py
Example 11: _stackX
def _stackX(self, k_ar, trend):
"""
Private method to build the RHS matrix for estimation.
Columns are trend terms then lags.
"""
endog = self.endog
X = lagmat(endog, maxlag=k_ar, trim='both')
k_trend = util.get_trendorder(trend)
if k_trend:
X = add_trend(X, prepend=True, trend=trend)
self.k_trend = k_trend
return X
Developer: 0ceangypsy, Project: statsmodels, Lines: 13, Source: ar_model.py
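Outside the class, the same right-hand-side matrix can be built directly; a minimal sketch with made-up data and a constant-only trend:

import numpy as np
from statsmodels.tsa.tsatools import add_trend, lagmat

endog = np.random.randn(50)  # made-up series
k_ar = 3

X = lagmat(endog, maxlag=k_ar, trim='both')  # columns: y_{t-1}, y_{t-2}, y_{t-3}
X = add_trend(X, prepend=True, trend='c')    # trend terms first, then the lags
y = endog[k_ar:]                             # aligned left-hand side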
Example 12: _df_select_lags
def _df_select_lags(y, trend, max_lags, method):
"""
Helper method to determine the best lag length in DF-like regressions
Parameters
----------
y : array
The data for the lag selection exercise
trend : {'nc','c','ct','ctt'}
The trend order
max_lags : int
The maximum number of lags to check. This setting affects all
estimation since the sample is adjusted by max_lags when
fitting the models
method : {'AIC','BIC','t-stat'}
The method to use when estimating the model
Returns
-------
best_ic : float
The information criteria at the selected lag
best_lag : int
The selected lag
Notes
-----
If max_lags is None, the default value of 12 * (nobs/100)**(1/4) is used.
"""
nobs = y.shape[0]
delta_y = diff(y)
if max_lags is None:
max_lags = int(ceil(12. * power(nobs / 100., 1 / 4.)))
rhs = lagmat(delta_y[:, None], max_lags, trim='both', original='in')
nobs = rhs.shape[0]
rhs[:, 0] = y[-nobs - 1:-1] # replace 0 with level of y
lhs = delta_y[-nobs:]
if trend != 'nc':
full_rhs = add_trend(rhs, trend, prepend=True)
else:
full_rhs = rhs
start_lag = full_rhs.shape[1] - rhs.shape[1] + 1
ic_best, best_lag = _autolag_ols(lhs, full_rhs, start_lag, max_lags, method)
return ic_best, best_lag
Developer: esvhd, Project: arch, Lines: 48, Source: unitroot.py
Example 13: fit
def fit(self, nlags):
'''estimate parameters using ols
Parameters
----------
nlags : integer
number of lags to include in regression, same for all variables
Returns
-------
None, but attaches
arhat : array (nlags, nvar, nvar)
full lag polynomial array
arlhs : array (nlags-1, nvar, nvar)
reduced lag polynomial for left hand side
other statistics as returned by linalg.lstsq : need to be completed
This currently assumes all parameters are estimated without restrictions.
In this case SUR is identical to OLS
estimation results are attached to the class instance
'''
self.nlags = nlags # without current period
nvars = self.nvars
#TODO: ar2s looks like a module variable, bug?
#lmat = lagmat(ar2s, nlags, trim='both', original='in')
lmat = lagmat(self.y, nlags, trim='both', original='in')
self.yred = lmat[:,:nvars]
self.xred = lmat[:,nvars:]
res = np.linalg.lstsq(self.xred, self.yred, rcond=-1)
self.estresults = res
self.arlhs = res[0].reshape(nlags, nvars, nvars)
self.arhat = ar2full(self.arlhs)
self.rss = res[1]
self.xredrank = res[2]
Developer: bashtage, Project: statsmodels, Lines: 40, Source: varma_process.py
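With original='in', a single lagmat call yields both the left-hand side (current values) and the regressors (stacked lags) by column slicing, which is what fit() exploits above; a toy three-variable sketch:

import numpy as np
from statsmodels.tsa.tsatools import lagmat

y = np.random.randn(200, 3)  # made-up 3-variable system
nlags = 2
nvars = y.shape[1]

lmat = lagmat(y, nlags, trim='both', original='in')
yred = lmat[:, :nvars]   # current values of the 3 variables
xred = lmat[:, nvars:]   # lag-1 and lag-2 blocks
coefs, *_ = np.linalg.lstsq(xred, yred, rcond=None)
arlhs = coefs.reshape(nlags, nvars, nvars)  # reduced lag polynomial, as attached by fit()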
Example 14: _em_autoregressive
def _em_autoregressive(self, result, betas, tmp=None):
"""
EM step for autoregressive coefficients and variances
"""
if tmp is None:
tmp = np.sqrt(result.smoothed_marginal_probabilities)
resid = np.zeros((self.k_regimes, self.nobs + self.order))
resid[:] = self.orig_endog
if self._k_exog > 0:
for i in range(self.k_regimes):
resid[i] -= np.dot(self.orig_exog, betas[i])
# The difference between this and `_em_exog` is that here we have a
# different endog and exog for each regime
coeffs = np.zeros((self.k_regimes,) + (self.order,))
variance = np.zeros((self.k_regimes,))
exog = np.zeros((self.nobs, self.order))
for i in range(self.k_regimes):
endog = resid[i, self.order:]
exog = lagmat(resid[i], self.order)[self.order:]
tmp_endog = tmp[i] * endog
tmp_exog = tmp[i][:, None] * exog
coeffs[i] = np.dot(np.linalg.pinv(tmp_exog), tmp_endog)
if self.switching_variance:
tmp_resid = endog - np.dot(exog, coeffs[i])
variance[i] = (np.sum(
tmp_resid**2 * result.smoothed_marginal_probabilities[i]) /
np.sum(result.smoothed_marginal_probabilities[i]))
else:
tmp_resid = tmp_endog - np.dot(tmp_exog, coeffs[i])
variance[i] = np.sum(tmp_resid**2)
# Variances
if not self.switching_variance:
variance = variance.sum() / self.nobs
return coeffs, variance
Developer: bert9bert, Project: statsmodels, Lines: 40, Source: markov_autoregression.py
Example 15: fnn
def fnn(data, maxm):
"""
Compute the embedding dimension of a time series data to build the phase space using the false neighbors criterion
data--> time series
maxm--> maximmum embeding dimension
"""
RT=15.0
AT=2
sigmay=np.std(data, ddof=1)
nyr=len(data)
m=maxm
EM=lagmat(data, maxlag=m-1)
EEM=np.asarray([EM[j,:] for j in range(m-1, EM.shape[0])])
embedm=maxm
for k in range(AT,EEM.shape[1]+1):
fnn1=[]
fnn2=[]
Ma=EEM[:,range(k)]
D=dist(Ma)
for i in range(1,EEM.shape[0]-m-k):
#print D.shape
#print(D[i,range(i-1)])
d=D[i,:]
pdnz=np.where(d>0)
dnz=d[pdnz]
Rm=np.min(dnz)
l=np.where(d==Rm)
l=l[0]
l=l[len(l)-1]
if l+m+k-1<nyr:
fnn1.append(np.abs(data[i+m+k-1]-data[l+m+k-1])/Rm)
fnn2.append(np.abs(data[i+m+k-1]-data[l+m+k-1])/sigmay)
Ind1=np.where(np.asarray(fnn1)>RT)
Ind2=np.where(np.asarray(fnn2)>AT)
if len(Ind1[0])/float(len(fnn1))<0.1 and len(Ind2[0])/float(len(fnn2))<0.1:
embedm=k
break
return embedm
Developer: johntanz, Project: ROP, Lines: 38, Source: Corr_Dim.py
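Here lagmat acts as a delay-embedding builder: each kept row is a vector of past values of the series. A minimal sketch of that step with a made-up signal:

import numpy as np
from statsmodels.tsa.tsatools import lagmat

data = np.sin(np.linspace(0, 20 * np.pi, 400))  # toy signal
m = 5                                           # candidate embedding dimension

EM = lagmat(data, maxlag=m - 1)  # columns: data lagged 1 .. m-1 (zero-padded at the start)
EEM = EM[m - 1:]                 # keep only rows with a full set of lags
# each row of EEM is the delay vector [x_{t-1}, ..., x_{t-m+1}] scanned by the FNN test above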
Example 16: cointegration_johansen
def cointegration_johansen(input_df, lag=1):
"""
For axis: -1 means no deterministic part, 0 means constant term, 1 means constant plus time-trend,
> 1 means higher order polynomial.
:param input_df: the input vectors as a pandas.DataFrame instance
:param lag: number of lagged difference terms used when computing the estimator
:return: returns test statistics data
"""
count_samples, count_dimensions = input_df.shape
input_df = detrend(input_df, type='constant', axis=0)
diff_input_df = numpy.diff(input_df, 1, axis=0)
z = tsatools.lagmat(diff_input_df, lag)
z = z[lag:]
z = detrend(z, type='constant', axis=0)
diff_input_df = diff_input_df[lag:]
diff_input_df = detrend(diff_input_df, type='constant', axis=0)
r0t = residuals(diff_input_df, z)
lx = input_df[:-lag]
lx = lx[1:]
diff_input_df = detrend(lx, type='constant', axis=0)
rkt = residuals(diff_input_df, z)
if rkt is None:
return None
skk = numpy.dot(rkt.T, rkt) / rkt.shape[0]
sk0 = numpy.dot(rkt.T, r0t) / rkt.shape[0]
s00 = numpy.dot(r0t.T, r0t) / r0t.shape[0]
sig = numpy.dot(sk0, numpy.dot(linalg.inv(s00), sk0.T))
eigenvalues, eigenvectors = linalg.eig(numpy.dot(linalg.inv(skk), sig))
# normalizing the eigenvectors such that (du'skk*du) = I
temp = linalg.inv(linalg.cholesky(numpy.dot(eigenvectors.T, numpy.dot(skk, eigenvectors))))
dt = numpy.dot(eigenvectors, temp)
# sorting eigenvalues and vectors
order_decreasing = numpy.flipud(numpy.argsort(eigenvalues))
sorted_eigenvalues = eigenvalues[order_decreasing]
sorted_eigenvectors = dt[:, order_decreasing]
# computing the trace and max eigenvalue statistics
trace_statistics = numpy.zeros(count_dimensions)
eigenvalue_statistics = numpy.zeros(count_dimensions)
critical_values_max_eigenvalue = numpy.zeros((count_dimensions, 3))
critical_values_trace = numpy.zeros((count_dimensions, 3))
iota = numpy.ones(count_dimensions)
t, junk = rkt.shape
for i in range(0, count_dimensions):
tmp = numpy.log(iota - sorted_eigenvalues)[i:]
trace_statistics[i] = -t * numpy.sum(tmp, 0)
eigenvalue_statistics[i] = -t * numpy.log(1 - sorted_eigenvalues[i])
critical_values_max_eigenvalue[i, :] = get_critical_values_max_eigenvalue(count_dimensions - i, time_polynomial_order=0)
critical_values_trace[i, :] = get_critical_values_trace(count_dimensions - i, time_polynomial_order=0)
order_decreasing[i] = i
result = dict()
result['rkt'] = rkt
result['r0t'] = r0t
result['eigenvalues'] = sorted_eigenvalues
result['eigenvectors'] = sorted_eigenvectors
result['trace_statistic'] = trace_statistics # likelihood ratio trace statistic
result['eigenvalue_statistics'] = eigenvalue_statistics # maximum eigenvalue statistic
result['critical_values_trace'] = critical_values_trace
result['critical_values_max_eigenvalue'] = critical_values_max_eigenvalue
result['order_decreasing'] = order_decreasing # indices of eigenvalues in decreasing order
return result
Developer: danbob123, Project: cointeg, Lines: 67, Source: cointeg.py
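As a cross-check, recent statsmodels ships its own Johansen implementation, statsmodels.tsa.vector_ar.vecm.coint_johansen, which reports the same trace and maximum-eigenvalue statistics; a hedged usage sketch on two toy series that share a stochastic trend:

import numpy as np
from statsmodels.tsa.vector_ar.vecm import coint_johansen

rng = np.random.default_rng(4)
y1 = np.cumsum(rng.standard_normal(400))  # random walk
y2 = y1 + rng.standard_normal(400)        # cointegrated with y1

res = coint_johansen(np.column_stack([y1, y2]), det_order=0, k_ar_diff=1)
print(res.lr1)  # trace statistics
print(res.lr2)  # maximum-eigenvalue statistics
print(res.cvt)  # 90%/95%/99% critical values for the trace test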
Example 17: acorr_lm
def acorr_lm(x, maxlag=None, autolag='AIC', store=False, regresults=False):
'''Lagrange Multiplier tests for autocorrelation
This is a generic Lagrange Multiplier test for autocorrelation. I don't
have a reference for it, but it returns Engle's ARCH test if x is the
squared residual array. A variation on it with additional exogenous
variables is the Breusch-Godfrey autocorrelation test.
Parameters
----------
resid : ndarray, (nobs,)
residuals from an estimation, or time series
maxlag : int
highest lag to use
autolag : None or string
If None, then a fixed number of lags given by maxlag is used.
store : bool
If true then the intermediate results are also returned
Returns
-------
lm : float
Lagrange multiplier test statistic
lmpval : float
p-value for Lagrange multiplier test
fval : float
fstatistic for F test, alternative version of the same test based on
F test for the parameter restriction
fpval : float
pvalue for F test
resstore : instance (optional)
a class instance that holds intermediate results. Only returned if
store=True
See Also
--------
het_arch
acorr_breusch_godfrey
acorr_ljung_box
'''
if regresults:
store = True
x = np.asarray(x)
nobs = x.shape[0]
if maxlag is None:
#for adf from Greene referencing Schwert 1989
maxlag = int(np.ceil(12. * np.power(nobs/100., 1/4.)))#nobs//4 #TODO: check default, or do AIC/BIC
xdiff = np.diff(x)
#
xdall = lagmat(x[:,None], maxlag, trim='both')
nobs = xdall.shape[0]
xdall = np.c_[np.ones((nobs,1)), xdall]
xshort = x[-nobs:]
if store: resstore = ResultsStore()
if autolag:
#search for lag length with highest information criteria
#Note: I use the same number of observations to have comparable IC
results = {}
for mlag in range(1, maxlag+1):
results[mlag] = OLS(xshort, xdall[:,:mlag+1]).fit()
if autolag.lower() == 'aic':
bestic, icbestlag = min((v.aic,k) for k,v in iteritems(results))
elif autolag.lower() == 'bic':
icbest, icbestlag = min((v.bic,k) for k,v in iteritems(results))
else:
raise ValueError("autolag can only be None, 'AIC' or 'BIC'")
#rerun ols with best ic
xdall = lagmat(x[:,None], icbestlag, trim='both')
nobs = xdall.shape[0]
xdall = np.c_[np.ones((nobs,1)), xdall]
xshort = x[-nobs:]
usedlag = icbestlag
if regresults:
resstore.results = results
else:
usedlag = maxlag
resols = OLS(xshort, xdall[:,:usedlag+1]).fit()
fval = resols.fvalue
fpval = resols.f_pvalue
lm = nobs * resols.rsquared
lmpval = stats.chi2.sf(lm, usedlag)
# Note: degrees of freedom for LM test is nvars minus constant = usedlags
#return fval, fpval, lm, lmpval
if store:
resstore.resols = resols
resstore.usedlag = usedlag
return lm, lmpval, fval, fpval, resstore
else:
return lm, lmpval, fval, fpval
Developer: bashtage, Project: statsmodels, Lines: 100, Source: diagnostic.py
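A short usage sketch with made-up residuals; as the docstring notes, running the test on squared residuals gives Engle's ARCH test. Keyword defaults have shifted across statsmodels versions, so only the positional form is used here.

import numpy as np
from statsmodels.stats.diagnostic import acorr_lm

rng = np.random.default_rng(5)
resid = rng.standard_t(df=5, size=500)          # stand-in for regression residuals
lm, lm_pval, fval, f_pval = acorr_lm(resid**2)  # Engle's ARCH test on squared residuals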
Example 18: adfuller
#......... part of the code is omitted here .........
References
----------
.. [1] W. Green. "Econometric Analysis," 5th ed., Pearson, 2003.
.. [2] Hamilton, J.D. "Time Series Analysis". Princeton, 1994.
.. [3] MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for
unit-root and cointegration tests. `Journal of Business and Economic
Statistics` 12, 167-76.
.. [4] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests." Queen's
University, Dept of Economics, Working Papers. Available at
http://ideas.repec.org/p/qed/wpaper/1227.html
"""
if regresults:
store = True
trenddict = {None: 'nc', 0: 'c', 1: 'ct', 2: 'ctt'}
if regression is None or isinstance(regression, (int, long)):
regression = trenddict[regression]
regression = regression.lower()
if regression not in ['c', 'nc', 'ct', 'ctt']:
raise ValueError("regression option %s not understood") % regression
x = np.asarray(x)
nobs = x.shape[0]
if maxlag is None:
#from Greene referencing Schwert 1989
maxlag = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
xdiff = np.diff(x)
xdall = lagmat(xdiff[:, None], maxlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
if store:
resstore = ResultsStore()
if autolag:
if regression != 'nc':
fullRHS = add_trend(xdall, regression, prepend=True)
else:
fullRHS = xdall
startlag = fullRHS.shape[1] - xdall.shape[1] + 1 # 1 for level # pylint: disable=E1103
#search for lag length with smallest information criteria
#Note: use the same number of observations to have comparable IC
#aic and bic: smaller is better
if not regresults:
icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag)
else:
icbest, bestlag, alres = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag,
regresults=regresults)
resstore.autolag_results = alres
bestlag -= startlag # convert to lag not column index
#rerun ols with best autolag
xdall = lagmat(xdiff[:, None], bestlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
Developer: Inoryy, Project: statsmodels, Lines: 67, Source: stattools.py
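Only part of adfuller is shown above; in practice it is called directly. A minimal usage sketch on a made-up random walk:

import numpy as np
from statsmodels.tsa.stattools import adfuller

y = np.cumsum(np.random.randn(500))  # toy random walk (has a unit root)
adf_stat, pvalue, usedlag, nobs, crit_values, icbest = adfuller(y, regression='c', autolag='AIC')
# a large p-value is consistent with failing to reject the unit-root null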
Example 19: acorr_breusch_godfrey
def acorr_breusch_godfrey(results, nlags=None, store=False):
'''Breusch Godfrey Lagrange Multiplier tests for residual autocorrelation
Parameters
----------
results : Result instance
Estimation results for which the residuals are tested for serial
correlation
nlags : int
Number of lags to include in the auxiliary regression. (nlags is
highest lag)
store : bool
If store is true, then an additional class instance that contains
intermediate results is returned.
Returns
-------
lm : float
Lagrange multiplier test statistic
lmpval : float
p-value for Lagrange multiplier test
fval : float
fstatistic for F test, alternative version of the same test based on
F test for the parameter restriction
fpval : float
pvalue for F test
resstore : instance (optional)
a class instance that holds intermediate results. Only returned if
store=True
Notes
-----
BG adds lags of residual to exog in the design matrix for the auxiliary
regression with residuals as endog,
see Greene 12.7.1.
References
----------
Greene Econometrics, 5th edition
'''
x = np.asarray(results.resid)
exog_old = results.model.exog
nobs = x.shape[0]
if nlags is None:
#for adf from Greene referencing Schwert 1989
nlags = np.trunc(12. * np.power(nobs/100., 1/4.))#nobs//4 #TODO: check default, or do AIC/BIC
nlags = int(nlags)
x = np.concatenate((np.zeros(nlags), x))
#xdiff = np.diff(x)
#
xdall = lagmat(x[:,None], nlags, trim='both')
nobs = xdall.shape[0]
xdall = np.c_[np.ones((nobs,1)), xdall]
xshort = x[-nobs:]
exog = np.column_stack((exog_old, xdall))
k_vars = exog.shape[1]
if store: resstore = ResultsStore()
resols = OLS(xshort, exog).fit()
ft = resols.f_test(np.eye(nlags, k_vars, k_vars - nlags))
fval = ft.fvalue
fpval = ft.pvalue
fval = np.squeeze(fval)[()] #TODO: fix this in ContrastResults
fpval = np.squeeze(fpval)[()]
lm = nobs * resols.rsquared
lmpval = stats.chi2.sf(lm, nlags)
# Note: degrees of freedom for LM test is nvars minus constant = usedlags
#return fval, fpval, lm, lmpval
if store:
resstore.resols = resols
resstore.usedlag = nlags
return lm, lmpval, fval, fpval, resstore
else:
return lm, lmpval, fval, fpval
Developer: bashtage, Project: statsmodels, Lines: 80, Source: diagnostic.py
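A short usage sketch with a made-up regression, assuming the standard statsmodels API for fitting OLS:

import numpy as np
import statsmodels.api as sm
from statsmodels.stats.diagnostic import acorr_breusch_godfrey

rng = np.random.default_rng(2)
x = rng.standard_normal(200)
y = 1.0 + 0.5 * x + rng.standard_normal(200)

res = sm.OLS(y, sm.add_constant(x)).fit()
lm, lm_pval, fval, f_pval = acorr_breusch_godfrey(res, nlags=4)
# small p-values would point to serial correlation up to lag 4 in the residuals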
Example 20: curv
Trans_M2 = curv(delt_M2,P_M2,r)
Trans_O1 = curv(delt_O1,P_O1,r)
tf=time.clock()
print '...Done!',tf-t0, 'seconds'
t0=time.clock()
###########################################################################
# Calculate BP Response Function
###########################################################################
ti=time.clock() # measure time of calculation
print 'Calculating BP Response function...',
t0=time.clock()
# create lag matrix for regression
bpmat = tools.lagmat(dbp, lag, original='in')
etmat = tools.lagmat(ddl, lag, original='in')
#lamat combines lag matrices of bp and et
lamat = numpy.column_stack([bpmat,etmat])
#for i in range(len(etmat)):
# lagmat.append(bpmat[i]+etmat[i])
#transpose matrix to determine required length
#run least squared regression
sqrd = numpy.linalg.lstsq(bpmat,dwl)
#determine lag coefficients of the lag matrix lamat
sqrdlag = numpy.linalg.lstsq(lamat,dwl)
wlls = sqrd[0]
#lagls return the coefficients of the least squares of lamat
lagls = sqrdlag[0]
Developer: inkenbrandt, Project: Earth_Tides, Lines: 30, Source: Simple_File_Reader_v2.py
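The fragment above depends on variables (dbp, ddl, dwl, lag) defined earlier in the script; a self-contained, hedged reconstruction of the regression step with made-up series (tools above is statsmodels.tsa.tsatools):

import numpy as np
from statsmodels.tsa import tsatools

rng = np.random.default_rng(3)
dbp = rng.standard_normal(500)                    # stand-in for barometric-pressure changes
ddl = rng.standard_normal(500)                    # stand-in for earth-tide changes
dwl = 0.4 * dbp + 0.1 * rng.standard_normal(500)  # stand-in for water-level changes
lag = 12

bpmat = tsatools.lagmat(dbp, lag, original='in')  # current value plus `lag` lags
etmat = tsatools.lagmat(ddl, lag, original='in')
lamat = np.column_stack([bpmat, etmat])
wlls = np.linalg.lstsq(bpmat, dwl, rcond=None)[0]   # BP-only response coefficients
lagls = np.linalg.lstsq(lamat, dwl, rcond=None)[0]  # joint BP + ET response coefficients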
Note: the statsmodels.tsa.tsatools.lagmat examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and redistribution and use are subject to each project's License. Do not reproduce without permission.