Welcome to OStack Knowledge Sharing Community for programmer and developer-Open, Learning and Share
Welcome To Ask or Share your Answers For Others

Categories

0 votes
243 views
in Technique[技术] by (71.8m points)

python - fit_generator produces two verbose progress outputs at the end of each epoch

I am using a mixup generator like this:

import threading

import numpy as np
from tensorflow.keras.utils import Sequence

class MixupGenerator(Sequence):
    """Keras ``Sequence`` that yields mixup-augmented training batches.

    Each output batch is built by mixing pairs of source samples (and their
    labels) with weights drawn from a Beta(alpha, alpha) distribution, as in
    "mixup: Beyond Empirical Risk Minimization" (Zhang et al., 2018).
    Every output batch of ``batch_size`` samples therefore consumes
    ``batch_size * 2`` source samples.

    Implements the full ``Sequence`` protocol (``__len__``/``__getitem__``/
    ``on_epoch_end``) so Keras can determine the epoch length itself; the
    original version only provided a ``__call__`` generator, which forces the
    caller to guess ``steps_per_epoch`` — a mismatch there is what produces
    the duplicated per-epoch progress output the asker reports.
    """

    def __init__(self, x_train, y_train, batch_size=32, alpha=0.2, shuffle=True):
        # x_train: array of shape (N, h, w, c) — __data_generation unpacks
        # a 4-tuple shape.  y_train: label array, or a list of label arrays
        # for multi-output models.
        self.X_train = x_train
        self.y_train = y_train
        self.batch_size = batch_size
        self.alpha = alpha
        self.shuffle = shuffle
        self.sample_num = len(x_train)
        # BUGFIX: `threading` was used here without being imported,
        # raising NameError at construction time (now imported at file top).
        # The lock guards index regeneration when the generator form is
        # shared across worker threads.
        self.lock = threading.Lock()
        # Fixed index order for the Sequence protocol; reshuffled per epoch.
        self.indexes = self.__get_exploration_order()

    def __len__(self):
        # Batches per epoch: each mixup batch consumes batch_size * 2
        # source samples (two per output sample).  Remainder is dropped,
        # matching the original __call__ behavior.
        return int(self.sample_num // (self.batch_size * 2))

    def __getitem__(self, index):
        # Sequence protocol: return the `index`-th mixed batch.
        span = self.batch_size * 2
        batch_ids = self.indexes[index * span:(index + 1) * span]
        return self.__data_generation(batch_ids)

    def on_epoch_end(self):
        # Called by Keras between epochs; reshuffles when requested.
        self.indexes = self.__get_exploration_order()

    def __iter__(self):
        # BUGFIX: the original returned `self`, but the class defines no
        # __next__, so iter(gen) produced a non-iterator and any `for`
        # loop over it raised TypeError.  Delegate to the generator form.
        return self()

    def __call__(self):
        # Infinite batch generator kept for fit_generator-style usage.
        # BUGFIX: the original acquired self.lock once around the
        # `while True`, holding it for the generator's entire lifetime
        # and deadlocking any second consumer.  Hold it only while
        # regenerating the shared index order.
        while True:
            with self.lock:
                indexes = self.__get_exploration_order()
            itr_num = int(len(indexes) // (self.batch_size * 2))
            for i in range(itr_num):
                span = self.batch_size * 2
                batch_ids = indexes[i * span:(i + 1) * span]
                yield self.__data_generation(batch_ids)

    def __get_exploration_order(self):
        """Return the sample indices for one epoch, shuffled if enabled."""
        indexes = np.arange(self.sample_num)

        if self.shuffle:
            np.random.shuffle(indexes)

        return indexes

    def __data_generation(self, batch_ids):
        """Mix the 2*batch_size samples addressed by ``batch_ids`` into one batch.

        Returns ``(X, y)`` where each output sample is a convex combination
        of two source samples with a per-sample Beta(alpha, alpha) weight.
        """
        _, h, w, c = self.X_train.shape
        # One mixing coefficient per output sample; broadcast shapes differ
        # for images (rank 4) and labels (rank 2).
        l = np.random.beta(self.alpha, self.alpha, self.batch_size)
        X_l = l.reshape(self.batch_size, 1, 1, 1)
        y_l = l.reshape(self.batch_size, 1)

        # First half of the ids pairs with the second half.
        X1 = self.X_train[batch_ids[:self.batch_size]]
        X2 = self.X_train[batch_ids[self.batch_size:]]

        X = X1 * X_l + X2 * (1.0 - X_l)

        if isinstance(self.y_train, list):
            # Multi-output model: mix each label array with the same weights.
            y = []

            for y_train_ in self.y_train:
                y1 = y_train_[batch_ids[:self.batch_size]]
                y2 = y_train_[batch_ids[self.batch_size:]]
                y.append(y1 * y_l + y2 * (1.0 - y_l))
        else:
            y1 = self.y_train[batch_ids[:self.batch_size]]
            y2 = self.y_train[batch_ids[self.batch_size:]]
            y = y1 * y_l + y2 * (1.0 - y_l)

        return X, y

I have 13965 samples during training and 2970 during testing. I call the fit like:

# NOTE(review): Model.fit_generator is deprecated in TF2 — model.fit accepts
# generators and Sequences directly.
# NOTE(review): when train_datagen is a keras.utils.Sequence, steps_per_epoch
# should normally be omitted (Keras uses len(sequence)).  Supplying a step
# count that disagrees with what the generator actually yields is a known
# cause of the duplicated per-epoch progress output — presumably the issue
# here, since each mixup batch consumes batch_size * 2 samples; verify
# against the generator's batch accounting.
history = model.fit_generator(train_datagen,
                                  validation_data=(val_x, val_y), epochs=epochs,
                                  steps_per_epoch=np.ceil((x.shape[0] - 1) / config.batch_size),
                                  callbacks=callbacks,
                                  verbose=tr_verbose)

being the batch_size = 32

The verbose output is quite strange — is it caused by the non-integer result of dividing the number of samples per epoch by the batch size?

Epoch 49/500 436/437 [============================>.] - ETA: 0s - loss: 0.1408 - categorical_accuracy: 0.8295Epoch 1/500 2968/437 [===========================================================================================================================================================================================================] - 6s 2ms/sample - loss: 0.2304 - categorical_accuracy: 0.5162 437/437 [==============================] - 131s 299ms/step - loss: 0.1409 - categorical_accuracy: 0.8294 - val_loss: 0.2510 - val_categorical_accuracy: 0.5162


与恶龙缠斗过久,自身亦成为恶龙;凝视深渊过久,深渊将回以凝视…
Welcome To Ask or Share your Answers For Others

1 Answer

0 votes
by (71.8m points)
等待大神答复

与恶龙缠斗过久,自身亦成为恶龙;凝视深渊过久,深渊将回以凝视…
Welcome to OStack Knowledge Sharing Community for programmer and developer-Open, Learning and Share
Click Here to Ask a Question

2.1m questions

2.1m answers

60 comments

56.8k users

...