
Python time_series.TimeSeries Class Code Examples


This article collects typical usage examples of the Python class pySPACE.resources.data_types.time_series.TimeSeries. If you are wondering what the TimeSeries class does, how it is used, or what working examples look like, the curated class code examples below may help.



A total of 20 code examples of the TimeSeries class are shown below, sorted by popularity by default.
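
Before the individual examples, here is a minimal sketch of the two usage patterns that recur throughout them: constructing a TimeSeries from a numpy array together with channel names and a sampling frequency, and deriving a new object with TimeSeries.replace_data(), which keeps the meta data of an existing object while swapping in a new data array. The channel names and array contents below are invented purely for illustration, and only calls that also appear in the examples are used; treat this as an assumption-based sketch rather than an authoritative reference for the pySPACE API.

import numpy
from pySPACE.resources.data_types.time_series import TimeSeries

# Build a TimeSeries from a (samples x channels) array, a list of channel
# names and a sampling frequency in Hz; start/end times are given in ms.
# (Channel names and data are hypothetical, chosen only for this sketch.)
data = numpy.random.random((128, 3))
ts = TimeSeries(input_array=data,
                channel_names=["C3", "C4", "Cz"],
                sampling_frequency=64,
                start_time=0.0, end_time=2000.0,
                name="synthetic example window")
ts.specs = {'Nice_Parameter': 1}
ts.generate_meta()   # automatically generate key and tag meta data

# The pattern most examples rely on: TimeSeries.replace_data() returns a
# new TimeSeries that carries the given data array but inherits the meta
# information (channel names, timing, specs) of the first argument.
scaled = TimeSeries.replace_data(ts, 2.0 * ts.view(numpy.ndarray))
assert scaled.channel_names == ts.channel_names
assert scaled.sampling_frequency == ts.sampling_frequency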

Example 1: generate_affine_backtransformation

    def generate_affine_backtransformation(self):
        """ Generate synthetic examples and test them to determine transformation

        This is the key method!
        """
        if type(self.example) == FeatureVector:
            testsample = FeatureVector.replace_data(
                self.example, numpy.zeros(self.example.shape))
            self.offset = numpy.longdouble(self._execute(testsample))
            self.trafo = FeatureVector.replace_data(
                self.example, numpy.zeros(self.example.shape))
            for j in range(len(self.example.feature_names)):
                testsample = FeatureVector.replace_data(
                    self.example,
                    numpy.zeros(self.example.shape))
                testsample[0][j] = 1.0
                self.trafo[0][j] = \
                    numpy.longdouble(self._execute(testsample) - self.offset)
        elif type(self.example) == TimeSeries:
            testsample = TimeSeries.replace_data(
                self.example, numpy.zeros(self.example.shape))
            self.offset = numpy.longdouble(numpy.squeeze(
                self._execute(testsample)))
            self.trafo = TimeSeries.replace_data(
                self.example, numpy.zeros(self.example.shape))
            for i in range(self.example.shape[0]):
                for j in range(self.example.shape[1]):
                    testsample = TimeSeries.replace_data(
                        self.example, numpy.zeros_like(self.example))
                    testsample[i][j] = 1.0
                    self.trafo[i][j] = \
                        numpy.longdouble(numpy.squeeze(self._execute(testsample))
                                       - self.offset)
Author: pyspace, Project: pyspace, Lines: 33, Source: flow_node.py


Example 2: _execute

 def _execute(self, data):
     """ Apply the windowing to the given data and return the result """        
     #Create a window of the correct length for the given data
     if self.num_of_samples is None:
         self.num_of_samples = data.shape[0]
         self.create_window_array()
          
     data_array=data.view(numpy.ndarray)
     #Do the actual windowing
     # TODO: check if windowed_data = (self.window_array.T * data) works also???
     windowed_data = (self.window_array * data_array.T).T
     
     # Skip trailing zeros
     if self.window_has_zeros and self.reduce_window:
         windowed_data = windowed_data[
             range(self.window_not_equal_zero[0],
                   self.window_not_equal_zero[-1] + 1), :]
     
         result_time_series = TimeSeries.replace_data(data, windowed_data)
         
         # Adjust start and end time when chopping was done
         result_time_series.start_time = data.start_time + \
             self.window_not_equal_zero[0] * 1000.0 / data.sampling_frequency
         result_time_series.end_time = \
             data.end_time - (data.shape[0] - self.window_not_equal_zero[-1]
                              - 1) * 1000.0 / data.sampling_frequency
     else:
         result_time_series = TimeSeries.replace_data(data, windowed_data)
                 
     return result_time_series
Author: AlexanderFabisch, Project: pyspace, Lines: 30, Source: window_func.py


Example 3: next

    def next(self, debug=False):
        """Return next labeled window when used in iterator context."""
        while len(self.cur_extract_windows) == 0:
            # fetch the next block from data_client
            if debug:
                print "reading next block"
            self._readnextblock()
            self._extract_windows_cur_block()
            if debug:
                print "  buffermarkers", self.buffermarkers
                print "  current block", self.samplebuf.get()[self.prebuflen][1, :]
                # print "  current extracted windows ", self.cur_extract_windows

        (windef_name, current_window, class_, start_time, end_time, markers_cur_win) = self.cur_extract_windows.pop(0)

        # TODO: Replace this by a decorator or something similar
        current_window = numpy.atleast_2d(current_window.transpose())
        current_window = TimeSeries(
            input_array=current_window,
            channel_names=self.data_client.channelNames,
            sampling_frequency=self.data_client.dSamplingInterval,
            start_time=start_time,
            end_time=end_time,
            name="Window extracted @ %d ms, length %d ms, class %s" % (start_time, end_time - start_time, class_),
            marker_name=markers_cur_win,
        )

        current_window.generate_meta()
        current_window.specs["sampling_frequency"] = self.data_client.dSamplingInterval
        current_window.specs["wdef_name"] = windef_name
        self.nwindow += 1

        # return (ndsamplewin, ndmarkerwin)
        return (current_window, class_)
Author: Crespo911, Project: pyspace, Lines: 34, Source: windower.py


Example 4: setUp

 def setUp(self):
     """Create some example data """
     # Create some TimeSeries:
     self.x1 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12,
                     marker_name='S4', name='Name_text ending with Standard',
                     start_time=1000.0, end_time=1004.0)
     
     self.x1.specs={'Nice_Parameter': 1, 'Less_Nice_Param': '2'}
     self.x1.generate_meta() #automatically generate key and tag
                     
     self.x2 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12,
                     marker_name='S4', start_time=2000.0, end_time=2004.0, 
                     name='Name_text ending with Standard')
     
     #manually generate key and tag
     import uuid
     self.x2_key=uuid.uuid4()
     self.x2.key=self.x2_key
     self.x2.tag='Tag of x2'
     self.x2.specs={'Nice_Parameter': 1, 'Less_Nice_Param': '2'}
                      
     self.x3 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12,
                     marker_name='S4', start_time=3000.0, end_time=3004.0)
     
     self.x3.specs={'Nice_Parameter': 1, 'Less_Nice_Param': '2'}
     self.x3.generate_meta()
     
     self.x4 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12,marker_name='S4')
     
     self.x4.specs={'Nice_Parameter': 1, 'Less_Nice_Param': '2'}
     
     self.x5 = TimeSeries([1,2], ['a','b'], 12)
     self.x5.inherit_meta_from(self.x2)
     
     self.x6 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12)
     
     self.x6.specs={'Nice_Parameter': 11, 'Less_Nice_Param': '21'}
     self.x6.generate_meta()
     #save information
     self.x6_key=self.x6.key
     
     self.x6.inherit_meta_from(self.x2)
     
     self.some_nice_dict = {'guido': 4127, 'irv': 4127, 'jack': 4098}
     
     self.x6.add_to_history(self.x5, self.some_nice_dict)
     
     # Create some FeatureVectors:
     self.f1 = FeatureVector([1,2,3,4,5,6],['a','b','c','d','e','f'])
     
     self.f1.specs={'NiceParam':1,'LessNiceParam':2}
     
     self.f2 = FeatureVector([1,2,3,4,5,6],['a','b','c','d','e','f'], tag = 'Tag of f2')
     
     self.f2.specs={'NiceParam':1,'LessNiceParam':2}
     
     self.f3 = FeatureVector([1,2], ['a','b'])
     self.f3.inherit_meta_from(self.x2)
     self.f3.add_to_history(self.x5)
Author: AlexanderFabisch, Project: pyspace, Lines: 59, Source: test_base.py


Example 5: _execute

    def _execute(self, data):
        """ Perform a shift and normalization according
        (whole_data - mean(specific_samples)) / std(specific_samples)
        """
        if self.devariance:
            # code copy from LocalStandardizationNode
            std = numpy.std(data[self.subset],axis=0)
            std = check_zero_division(self, std, tolerance=10**-15, data_ts=data)

            return TimeSeries.replace_data(data,
                        (data-numpy.mean(data[self.subset], axis=0)) / std)
        else:
            return TimeSeries.replace_data(data, \
                        data-numpy.mean(data[self.subset], axis=0))
Author: AlexanderFabisch, Project: pyspace, Lines: 14, Source: normalization.py


Example 6: merge_time_series

    def merge_time_series(self, input_collection):
        """ Merges all timeseries of the input_collection to one big timeseries """
        # Retriev the time series from the input_collection
        input_timeseries = input_collection.get_data(0,0,'test')
        # Get the data from the first timeseries
        output_data = input_timeseries[0][0]
        skiped_range = output_data.start_time

        # Change the end time of the first timeseries to that of the last
        # timeseries inside the input_collection
        input_timeseries[0][0].end_time = input_timeseries[-1][0].end_time
        # For all the remaining timeseries

        for ts in input_timeseries[1:]:
            # Concatenate the data...
            output_data = numpy.vstack((output_data,ts[0]))
            # ... and add the marker to the first timeseries
            if(len(ts[0].marker_name) > 0):
                for k in ts[0].marker_name:
                    if(not input_timeseries[0][0].marker_name.has_key(k)):
                        input_timeseries[0][0].marker_name[k] = []
                    for time in ts[0].marker_name[k]:
                        input_timeseries[0][0].marker_name[k].append(time+ts[0].start_time - skiped_range)
        # Use the meta information from the first timeseries e.g. marker start/end_time
        # and create a new timeseries with the concatenated data
        merged_time_series = TimeSeries.replace_data(input_timeseries[0][0],output_data)
        # Change the name of the merged_time_series
        merged_time_series.name = "%s, length %d ms, %s" % (merged_time_series.name.split(',')[0], \
                                                            (len(merged_time_series)*1000.0)/merged_time_series.sampling_frequency,\
                                                            merged_time_series.name.split(',')[-1])
        
        return merged_time_series
Author: Crespo911, Project: pyspace, Lines: 32, Source: time_series_sink.py


Example 7: _execute

 def _execute(self, x):
     """
     f' = (f(x+h)-f(x))
     """
     if self.datapoints == None:
         self.datapoints = len(x)
     
     #create new channel names
     new_names = []
     for channel in range(len(x.channel_names)):
         new_names.append("%s'" %  (x.channel_names[channel]))
     #Derive the f' d2 from data x
     timeSeries = []
     for datapoint in range(self.datapoints):
         temp = []
         if((datapoint+1)<self.datapoints):
             for channel in range(len(x.channel_names)):
                 temp.append(x[datapoint+1][channel]-x[datapoint][channel])#*8*sampling_frequency
             timeSeries.append(temp)
     #padding with zeros if the original length of the time series has to remain equal
     if self.keep_number_of_samples:
         temp = []
         for i in range(len(x.channel_names)):
             temp.append(0)
         timeSeries.append(temp)
     #Create a new time_series with the new data and channel names
     result_time_series = TimeSeries.replace_data(x, numpy.array(timeSeries))
     result_time_series.channel_names = new_names
     #if necessary adjust the length of the time series
     if not self.keep_number_of_samples:
         result_time_series.end_time -= 1
     
     return result_time_series
Author: AlexanderFabisch, Project: pyspace, Lines: 33, Source: differentiation.py


Example 8: _execute

    def _execute(self, x):
        """ Compute the energy of the given signal x using the TKEO """
        #Determine the indices of the channels which will be filtered
        #Done only once...
        if(self.selected_channel_indices == None):
            self.selected_channels = self.selected_channels \
            if self.selected_channels != None else x.channel_names
            self.selected_channel_indices = [x.channel_names.index(channel_name) \
                                            for channel_name in self.selected_channels]
            self.old_data = numpy.zeros((2,len(self.selected_channel_indices)))

        filtered_data = numpy.zeros(x.shape)
        channel_counter = -1
        for channel_index in self.selected_channel_indices:
            channel_counter += 1
            for i in range(len(x)):
                if i==0:
                    filtered_data[i][channel_index] = math.pow(self.old_data[1][channel_counter],2) - (self.old_data[0][channel_counter] * x[0][channel_index])
                elif i==1:
                    filtered_data[i][channel_index] = math.pow(x[0][channel_index],2) - (self.old_data[1][channel_counter] * x[1][channel_index])
                else:
                    filtered_data[i][channel_index] = math.pow(x[i-1][channel_index],2) - (x[i-2][channel_index] * x[i][channel_index])
            self.old_data[0][channel_counter] = x[-2][channel_index]
            self.old_data[1][channel_counter] = x[-1][channel_index]
        result_time_series = TimeSeries.replace_data(x, filtered_data)

        return result_time_series
Author: AlexanderFabisch, Project: pyspace, Lines: 27, Source: filtering.py


Example 9: _prepare_FV

    def _prepare_FV(self, data):
        """ Convert FeatureVector into TimeSeries and use it for plotting.

        .. note:: This function is not yet working as it should be.
                  Work in progress.
                  Commit due to LRP-Demo (DLR Review)
        """
        # visualization of the transformation or of the history data
        if self.current_trafo_TS is None:
            transformation_list = self.get_previous_transformations(data)
            transformation_list.reverse() #first element is previous node

            for elem in transformation_list:
                if self.use_FN and elem[3]=="feature normalization":
                    # visualize Feature normalization scaling as feature vector
                    FN_FV = FeatureVector(numpy.atleast_2d(elem[0]),
                                      feature_names = elem[2])
                    self.current_trafo_TS = type_conversion.FeatureVector2TimeSeriesNode()._execute(FN_FV)
                    self.current_trafo_TS.reorder(sorted(self.current_trafo_TS.channel_names))
                    break


                # visualize the spatial filter as a time series,
                # where the time axis is the channel number or the virtual
                # channel name
                if self.use_SF and elem[3]=="spatial filter":
                    new_channel_names = elem[2]
                    SF_trafo = elem[0]
                    self.current_trafo_TS = TimeSeries(SF_trafo.T,
                                channel_names = new_channel_names,
                                sampling_frequency = 1)
                    self.current_trafo_TS.reorder(sorted(self.current_trafo_TS.channel_names))
                    break
        
        return self.current_trafo_TS
Author: MMKrell, Project: pyspace, Lines: 35, Source: base.py


Example 10: setUp

 def setUp(self):
     self.test_data = numpy.zeros((128, 3))
     self.test_data[:,1] = numpy.ones(128)
     self.test_data[:,2] = numpy.random.random(128)
     
     self.test_time_series = TimeSeries(self.test_data, ["A","B", "C"], 64,
                                        start_time = 0, end_time = 2000)
Author: Crespo911, Project: pyspace, Lines: 7, Source: test_window_func.py


Example 11: _execute

    def _execute(self, data):
        # Initialize the ringbuffers and variables one for each channel
        if(self.ringbuffer == None):
            self.width /= 1000.0
            self.width = int(self.width * data.sampling_frequency)
            self.nChannels = len(data.channel_names)
            self.ringbuffer = numpy.zeros((self.width,self.nChannels),dtype=numpy.double)
            self.variables = numpy.zeros((2,self.nChannels),dtype=numpy.double)
            self.index = numpy.zeros(self.nChannels,'i')

        # Convert the input data to double
        x = data.view(numpy.ndarray).astype(numpy.double)
        # Initialize the result data array
        filtered_data = numpy.zeros(x.shape)
        # Lists which are passed to the standardization
        # TODO: make self
        processing_filtered_data = None
        processing_ringbuffer = None
        processing_variables = None
        processing_index = None
        if(self.standardization):
            for channel_index in range(self.nChannels):
                # Copy the different data to the processing lists
                processing_filtered_data = numpy.array(filtered_data[:,channel_index],'d')
                processing_ringbuffer = numpy.array(self.ringbuffer[:,channel_index],'d')
                processing_variables = numpy.array(self.variables[:,channel_index],'d')
                processing_index = int(self.index[channel_index])
                if self.var_tools:
                    # Perform the standardization
                    # The module vt (variance_tools) is implemented in C, using Boost to wrap the code for Python
                    # The module is located in trunk/library/variance_tools and has to be compiled
                    self.index[channel_index] = vt.standardization(processing_filtered_data, numpy.array(x[:,channel_index],'d'), processing_ringbuffer, processing_variables, self.width, processing_index)
                else:
                    self.index[channel_index] = self.standardisation(processing_filtered_data, numpy.array(x[:,channel_index],'d'), processing_ringbuffer, processing_variables, self.width, processing_index)
                # Copy the processing lists back to the local variables
                filtered_data[:,channel_index] = processing_filtered_data
                self.ringbuffer[:,channel_index] = processing_ringbuffer
                self.variables[:,channel_index] = processing_variables
        else:
            for channel_index in range(self.nChannels):
                # Copy the different data to the processing lists
                processing_filtered_data = numpy.array(filtered_data[:,channel_index],'d')
                processing_ringbuffer = numpy.array(self.ringbuffer[:,channel_index],'d')
                processing_variables = numpy.array(self.variables[:,channel_index],'d')
                processing_index = int(self.index[channel_index])
                if self.var_tools:
                    # Perform the filtering with the variance
                    # The module vt (variance_tools) is implemented in C, using Boost to wrap the code for Python
                    # The module is located in trunk/library/variance_tools and has to be compiled
                    self.index[channel_index] = vt.filter(processing_filtered_data, numpy.array(x[:,channel_index],'d'), processing_ringbuffer, processing_variables, self.width, processing_index)
                else:
                    self.index[channel_index] = self.variance(processing_filtered_data, numpy.array(x[:,channel_index],'d'), processing_ringbuffer, processing_variables, self.width, processing_index)
                # Copy the processing lists back to the local variables
                filtered_data[:,channel_index] = processing_filtered_data
                self.ringbuffer[:,channel_index] = processing_ringbuffer
                self.variables[:,channel_index] = processing_variables
        # Return the result
        result_time_series = TimeSeries.replace_data(data, filtered_data)
        return result_time_series
Author: Crespo911, Project: pyspace, Lines: 59, Source: filtering.py


Example 12: testInheritAndAddStuff

 def testInheritAndAddStuff(self):
     """test inheritance of meta data from other objects"""
     # Inherit
     self.assertEqual(self.x5.tag, self.x2.tag)
     self.assertEqual(self.x5.key, self.x2.key)
     
     self.assertEqual(self.f3.tag, self.x2.tag)
     self.assertEqual(self.f3.key, self.x2.key)
     
     #Inherit
     
     #suppress warning of BaseData type and cast data back to numpy
     hist_x6=self.x6.history[0].view(numpy.ndarray)
     data_x5=self.x5.view(numpy.ndarray)
     
     # history
     self.assertEqual((hist_x6==data_x5).all(),True)
     self.assertEqual(self.x6.history[0].key,self.x5.key)
     self.assertEqual(self.x6.history[0].tag,self.x5.tag)
     self.assertEqual(self.x6.history[0].specs['node_specs'],self.some_nice_dict)
     
     hist_f3=self.f3.history[0].view(numpy.ndarray)
     
     self.assertEqual((hist_f3==data_x5).all(),True)
     self.assertEqual(self.f3.history[0].key,self.x5.key)
     self.assertEqual(self.f3.history[0].tag,self.x5.tag)
     
     #if key (and tag) were already set, these original values
     #have to be kept
     # 
     self.assertEqual(self.x6.key, self.x6_key)
     self.assertEqual(self.x6.tag, self.x2.tag)
     
     self.x6.inherit_meta_from(self.f3) #should not change tag and key
     
     self.assertEqual(self.x6.key, self.x6_key)
     self.assertEqual(self.x6.tag, self.x2.tag)
     
     #testing multiple histories
     x7 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12,marker_name='S4')
     x7.add_to_history(self.x1)
     x7.add_to_history(self.x2)
     x7.add_to_history(self.x3)
     x7.add_to_history(self.x4)
     x7.add_to_history(self.x5)
     x7.add_to_history(self.x6)
     x7.add_to_history(self.x1)
     
     self.assertEqual(len(x7.history),7)
     self.assertEqual(x7.history[0].key,x7.history[6].key)
     self.assertEqual(x7.history[5].history,[])        
Author: AlexanderFabisch, Project: pyspace, Lines: 51, Source: test_base.py


Example 13: _execute

 def _execute(self, data):
     """ Subsample the given data and return a new time series """
     if self.new_len == 0 :
         self.new_len = int(round(self.target_frequency*len(data)/(1.0*data.sampling_frequency)))
     if not self.mirror:
         downsampled_time_series = \
             TimeSeries.replace_data(data, 
                                     scipy.signal.resample(data, self.new_len,
                                                         t=None, axis=0,
                                                         window=self.window))
     else:
         downsampled_time_series = \
             TimeSeries.replace_data(data, 
                                     scipy.signal.resample(numpy.vstack((data,numpy.flipud(data))), self.new_len*2,
                                                         t=None, axis=0,
                                                         window=self.window)[:self.new_len])
     downsampled_time_series.sampling_frequency = self.target_frequency
     return downsampled_time_series
Author: MMKrell, Project: pyspace, Lines: 18, Source: subsampling.py


Example 14: _execute

 def _execute(self, data):
     """ Apply the cast """
     #Cast the data to the configured datatype
     self._log("Cast data")
     casted_data = data.astype(self.datatype)
         
     result_time_series = TimeSeries.replace_data(data, casted_data)
     
     return result_time_series
Author: schevalier, Project: pyspace, Lines: 9, Source: type_conversion.py


Example 15: SimpleDifferentiationFeature

class SimpleDifferentiationFeature(unittest.TestCase):

    def setUp(self):
        self.channel_names = ['a', 'b', 'c', 'd', 'e', 'f']
        self.x1 = TimeSeries(
            [[1, 2, 3, 4, 5, 6], [6, 5, 3, 1, 7, 7]], self.channel_names, 120)

    def test_sd_feature(self):
        sd_node = SimpleDifferentiationFeatureNode()
        features = sd_node.execute(self.x1)
        for f in range(features.shape[1]):
            channel = features.feature_names[f][4]
            index = self.channel_names.index(channel)
            self.assertEqual(
                features.view(
                    numpy.ndarray)[0][f],
                self.x1.view(
                    numpy.ndarray)[1][index] -
                self.x1.view(
                    numpy.ndarray)[0][index])
Author: Crespo911, Project: pyspace, Lines: 20, Source: test_time_domain_features.py


Example 16: _execute

    def _execute(self, data):
        """
        Apply the scaling to the given data x
        and return a new time series.
        """
        x = data.view(numpy.ndarray)

        x.clip(self.min_threshold, self.max_threshold, out = x)

        result_time_series = TimeSeries.replace_data(data, x)

        return result_time_series
Author: AlexanderFabisch, Project: pyspace, Lines: 12, Source: clip.py


Example 17: _execute

    def _execute(self, data):
        """ Reorder the memory. """

        # exchange data of time series object to correctly ordered data
        buffer = numpy.array(data, order='F')

        if self.convert_type and numpy.dtype('float64') != buffer.dtype:
            buffer = buffer.astype(numpy.float)
        
        data = TimeSeries.replace_data(data,buffer)
        
        return data
Author: AlexanderFabisch, Project: pyspace, Lines: 12, Source: reorder_memory.py


Example 18: _execute

    def _execute(self, data):
        # First check if all channels actually appear in the data

        # Determine the indices of the channels that are the basis for the 
        # average reference.
        if not self.inverse:
            if self.avg_channels == None:
                self.avg_channels = data.channel_names
            channel_indices = [data.channel_names.index(channel_name) 
                                for channel_name in self.avg_channels]
        else:
            channel_indices = [data.channel_names.index(channel_name)
                               for channel_name in data.channel_names
                               if channel_name not in self.avg_channels]

        not_found_channels = \
            [channel_name for channel_name in self.avg_channels 
                     if channel_name not in data.channel_names]
        if not not_found_channels == []:
            warnings.warn("Couldn't find selected channel(s): %s. Ignoring." % 
                            not_found_channels, Warning)
                    
        if self.old_ref is None:
            self.old_ref = 'avg'
        
        # Compute the actual data of the reference channel. This is the sum of all 
        # channels divided by (the number of channels +1).
        ref_chen = -numpy.sum(data[:, channel_indices], axis=1)/(data.shape[1]+1)
        ref_chen = numpy.atleast_2d(ref_chen).T
        # Reference all electrodes against average
        avg_referenced_data = data + ref_chen
        
        # Add average as new channel to the signal if enabled
        if self.keep_average:
            avg_referenced_data = numpy.hstack((avg_referenced_data, ref_chen))
            channel_names = data.channel_names + [self.old_ref]
            result_time_series = TimeSeries(avg_referenced_data, 
                                            channel_names,
                                            data.sampling_frequency, 
                                            data.start_time, data.end_time,
                                            data.name, data.marker_name)
        else:
            result_time_series = TimeSeries.replace_data(data, 
                                                            avg_referenced_data)
        
        return result_time_series
Author: MMKrell, Project: pyspace, Lines: 46, Source: rereferencing.py


Example 19: _execute

    def _execute(self, x):
        """ Executes the preprocessing on the given data vector x"""
        #Number of retained channels
        num_channels = numpy.size(x,1)
        if(self.below_threshold == None):
            # When the node is called for the first time initialize all parameters/variables
            self.width_AT = int((self.width_AT*x.sampling_frequency)/1000.)
            
            #Convert the time from ms to samples
            self.time_below_threshold = int((self.time_below_threshold*x.sampling_frequency)/1000.)
            
            #Create and prefill the array which indicates how long a signal was below the threshold
            self.below_threshold = numpy.zeros(num_channels)
            self.below_threshold.fill(self.time_below_threshold+1)
            
            #Create the ringbuffer and the variables list for the adaptive threshold 
            self.ringbuffer_AT=numpy.zeros((self.width_AT,num_channels))
            self.variables_AT=numpy.zeros((4,num_channels))
        data=x.view(numpy.ndarray)
        #Create the array for the thresholded data
        threshold_data = numpy.zeros(data.shape)
        #For each sample of each retained channel
        for i in range(num_channels):
            data_index = 0
            for sample in data[:,i]:
                #calculate the adaptive threshold
                value = self.adaptive_threshold(sample, i)
                #if the actual sample exceeds the threshold...
                if(sample >= value):
                    #and the resting time was observed
                    if(self.below_threshold[i] > self.time_below_threshold):
                        #store a 1 indicating an onset
                        threshold_data[data_index][i] = 1
                    #reset the resting time counter
                    self.below_threshold[i] = 0
                #increase the time the signal was below the threshold
                else:
                    self.below_threshold[i] += 1
                data_index += 1

        #return the thresholded data
        result_time_series = TimeSeries.replace_data(x, threshold_data)
        return result_time_series
Author: schevalier, Project: pyspace, Lines: 43, Source: adaptive_threshold_classifier.py


Example 20: _train

    def _train(self, data):
        """ Check which channels have constant values.

        The training data is considered and the invalid channel names
        are removed. The first data entry is saved and the starting
        assumption is that all channels have constant values. When a value
        different from the first data entry for a respective channel is found,
        that channel is removed from the list of channels that have constant
        values.
        """
        # copy the first data value
        if self.data_values is None:
            # copy the first entry
            self.data_values = TimeSeries.replace_data(data, data.get_data()[0])
            # invalidate all the channels in the beginning
            self.selected_channel_names = copy.deepcopy(data.channel_names)

        for channel in self.selected_channel_names:
            if (data.get_channel(channel) != self.data_values.get_channel(channel)[0]).any():
                self.selected_channel_names.remove(channel)
Author: Crespo911, Project: pyspace, Lines: 20, Source: channel_selection.py



Note: The pySPACE.resources.data_types.time_series.TimeSeries class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub/MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use are subject to the corresponding project licenses. Do not reproduce without permission.

