
machine learning - Accuracy reported by caffe and pycaffe is different

Below is the train.prototxt file used to fine-tune a pretrained model.

    name: "TempWLDNET"
    layer {
      name: "data"
      type: "ImageData"
      top: "data"
      top: "label"
      include {
        phase: TRAIN
      }
      transform_param {
        mirror: true
        crop_size: 224 
        mean_file: "mean.binaryproto"
      }
      image_data_param {
        source: "train.txt"
        batch_size: 25
        new_height: 256 
        new_width: 256 
      }
    }
    layer {
      name: "data"
      type: "ImageData"
      top: "data"
      top: "label"
      include {
        phase: TEST
      }
      transform_param {
        mirror: false
        crop_size: 224 
        mean_file: "painmean.binaryproto"
      }
      image_data_param {
        source: "test.txt"
        batch_size: 25
        new_height: 256 
        new_width: 256 
      }
    }
    layer {
      name: "conv1"
      type: "Convolution"
      bottom: "data"
      top: "conv1"
      param {
        lr_mult: 1
        decay_mult: 1
      }
      param {
        lr_mult: 2
        decay_mult: 0
      }
      convolution_param {
        num_output: 96
        kernel_size: 7
        stride: 2
        weight_filler {
          type: "gaussian"
          std: 0.01
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
    }
    layer {
      name: "relu1"
      type: "ReLU"
      bottom: "conv1"
      top: "conv1"
    }
    layer {
      name: "norm1"
      type: "LRN"
      bottom: "conv1"
      top: "norm1"
      lrn_param {
        local_size: 5
        alpha: 0.0005
        beta: 0.75
      }
    }
    layer {
      name: "pool1"
      type: "Pooling"
      bottom: "norm1"
      top: "pool1"
      pooling_param {
        pool: MAX
        kernel_size: 3
        stride: 3
      }
    }
    layer {
      name: "conv2"
      type: "Convolution"
      bottom: "pool1"
      top: "conv2"
      param {
        lr_mult: 1
        decay_mult: 1
      }
      param {
        lr_mult: 2
        decay_mult: 0
      }
      convolution_param {
        num_output: 256
        pad: 2
        kernel_size: 5
        weight_filler {
          type: "gaussian"
          std: 0.01
        }
        bias_filler {
          type: "constant"
          value: 1
        }
      }
    }
    layer {
      name: "relu2"
      type: "ReLU"
      bottom: "conv2"
      top: "conv2"
    }
    layer {
      name: "pool2"
      type: "Pooling"
      bottom: "conv2"
      top: "pool2"
      pooling_param {
        pool: MAX
        kernel_size: 2
        stride: 2
      }
    }
    layer {
      name: "conv3"
      type: "Convolution"
      bottom: "pool2"
      top: "conv3"
      param {
        lr_mult: 1
        decay_mult: 1
      }
      param {
        lr_mult: 2
        decay_mult: 0
      }
      convolution_param {
        num_output: 512
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "gaussian"
          std: 0.01
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
    }
    layer {
      name: "relu3"
      type: "ReLU"
      bottom: "conv3"
      top: "conv3"
    }
    layer {
      name: "conv4"
      type: "Convolution"
      bottom: "conv3"
      top: "conv4"
      param {
        lr_mult: 1
        decay_mult: 1
      }
      param {
        lr_mult: 2
        decay_mult: 0
      }
      convolution_param {
        num_output: 512
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "gaussian"
          std: 0.01
        }
        bias_filler {
          type: "constant"
          value: 1
        }
      }
    }
    layer {
      name: "relu4"
      type: "ReLU"
      bottom: "conv4"
      top: "conv4"
    }
    layer {
      name: "conv5"
      type: "Convolution"
      bottom: "conv4"
      top: "conv5"
      param {
        lr_mult: 1
        decay_mult: 1
      }
      param {
        lr_mult: 2
        decay_mult: 0
      }
      convolution_param {
        num_output: 512
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "gaussian"
          std: 0.01
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
    }
    layer {
      name: "relu5"
      type: "ReLU"
      bottom: "conv5"
      top: "conv5"
    }
    layer {
      name: "pool5"
      type: "Pooling"
      bottom: "conv5"
      top: "pool5"
      pooling_param {
        pool: MAX
        kernel_size: 3
        stride: 3
      }
    }
    layer {
      name: "fc6"
      type: "InnerProduct"
      bottom: "pool5"
      top: "fc6"
      param {
        lr_mult: 1
        decay_mult: 1
      }
      param {
        lr_mult: 2
        decay_mult: 0
      }
      inner_product_param {
        num_output: 4048
        weight_filler {
          type: "gaussian"
          std: 0.005
        }
        bias_filler {
          type: "constant"
          value: 1
        }
      }
    }
    layer {
      name: "relu6"
      type: "ReLU"
      bottom: "fc6"
      top: "fc6"
    }
    layer {
      name: "drop6"
      type: "Dropout"
      bottom: "fc6"
      top: "fc6"
      dropout_param {
        dropout_ratio: 0.5
      }
    }
    layer {
      name: "fc7"
      type: "InnerProduct"
      bottom: "fc6"
      top: "fc7"
      # Note that lr_mult can be set to 0 to disable any fine-tuning of this, and any other, layer
      param {
        lr_mult: 1
        decay_mult: 1
      }
      param {
        lr_mult: 2
        decay_mult: 0
      }
      inner_product_param {
        num_output: 4048
        weight_filler {
          type: "gaussian"
          std: 0.005
        }
        bias_filler {
          type: "constant"
          value: 1
        }
      }
    }
    layer {
      name: "relu7"
      type: "ReLU"
      bottom: "fc7"
      top: "fc7"
    }
    layer {
      name: "drop7"
      type: "Dropout"
      bottom: "fc7"
      top: "fc7"
      dropout_param {
        dropout_ratio: 0.5
      }
    }
    layer {
      name: "fc8_temp"
      type: "InnerProduct"
      bottom: "fc7"
      top: "fc8_temp"
      # lr_mult is set to higher than for other layers, because this layer is starting from random while the others are already trained
      param {
        lr_mult: 10
        decay_mult: 1
      }
      param {
        lr_mult: 20
        decay_mult: 0
      }
      inner_product_param {
        num_output: 16
        weight_filler {
          type: "gaussian"
          std: 0.01
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
    }
    layer {
      name: "accuracy"
      type: "Accuracy"
      bottom: "fc8_temp"
      bottom: "label"
      top: "accuracy"
      include {
        phase: TEST
      }
    }
    layer {
      name: "loss"
      type: "SoftmaxWithLoss"
      bottom: "fc8_temp"
      bottom: "label"
      top: "loss"
    }

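For context, a fine-tuning run with this prototxt can be launched from pycaffe roughly as follows (a minimal sketch: the solver and pretrained weights file names are placeholders, and the solver.prototxt is assumed to point at the train.prototxt above):

    # Minimal fine-tuning sketch (file names are placeholders, not actual paths).
    import caffe

    caffe.set_mode_gpu()
    solver = caffe.SGDSolver('solver.prototxt')    # solver whose net references train.prototxt
    solver.net.copy_from('pretrained.caffemodel')  # copy weights for the layers that already exist
    solver.solve()                                 # fc8_temp starts from random (higher lr_mult)
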
Using the above prototxt file, the accuracy reported for the test set at the end of training is 92%. For more details, please see How to evaluate the accuracy and loss of a trained model is good or not in caffe?

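As a cross-check, the same 92% figure should be reproducible from pycaffe by re-running the TEST phase of train.prototxt on a snapshot, since that uses exactly the same ImageData preprocessing as training-time testing (a rough sketch; the snapshot name and the number of test batches are placeholders):

    # Re-run the TEST phase of train.prototxt and average the 'accuracy' blob
    # over the test batches (sketch; adjust the paths and the batch count).
    import caffe

    caffe.set_mode_gpu()
    net = caffe.Net('train.prototxt', 'snapshot_iter_13000.caffemodel', caffe.TEST)

    num_batches = 40          # placeholder: roughly len(test.txt) / batch_size (25)
    acc = 0.0
    for _ in range(num_batches):
        acc += net.forward()['accuracy']
    print('TEST-phase accuracy: %.4f' % (acc / num_batches))
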
I took the model snapshot at the end of 13000 iterations and, using the python script below, tried to construct the confusion matrix. The accuracy reported is 74%.

    #!/usr/bin/python
    # -*- coding: utf-8 -*-

    import sys
    import caffe
    import numpy as np
    import argparse
    from collections import defaultdict

    TRAIN_DATA_ROOT='/Images/test/'

    if __name__ == "__main__":
            parser = argparse.ArgumentParser()
            parser.add_argument('--proto', type=str, required=True)
            parser.add_argument('--model', type=str, required=True)
            parser.add_argument('--meanfile', type=str, required=True)
            parser.add_argument('--labelfile', type=str, required=True)
            args = parser.parse_args()

            proto_data = open(args.meanfile, 'rb').read()
            a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)
            mean  = caffe.io.blobproto_to_array(a)[0]


            caffe.set_mode_gpu()

            count = 0
            correct = 0
            matrix = defaultdict(int) # (real,pred) -> int
            labels_set = set()

            net = caffe.Net(args.proto, args.model, caffe.TEST)
            # load input and configure preprocessing    
            transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
            transformer.set_mean('data', mean)
            transformer.set_transpose('data', (2,0,1))
            transformer.set_channel_swap('data', (2,1,0))
            transformer.set_raw_scale('data', 1)


            #note we can change the batch size on-the-fly
            #since we classify only one image, we change the batch size from 25 to 1
            net.blobs['data'].reshape(1,3,224,224)

            #load the image in the data layer
            f = open(args.labelfile, "r")
            for line in f.readlines():
                    parts = line.split()
                    example_image = parts[0]
                    label = int(parts[1])
                    im = caffe.io.load_image(TRAIN_DATA_ROOT + example_image)
                    print(im.shape)
                    net.blobs['data'].data[...] = transformer.preprocess('data', im)
                    out = net.forward()
                    plabel = int(out['prob'][0].argmax(axis=0))
                    count += 1
                    iscorrect = label == plabel
                    correct += (1 if iscorrect else 0)
                    matrix[(label, plabel)] += 1
                    labels_set.update([label, plabel])
                    if not iscorrect:
                            print("\nError: expected %i but predicted %i"
                                  % (label, plabel))

                    sys.stdout.write("\rAccuracy: %.1f%%" % (100. * correct / count))
                    sys.stdout.flush()

            print(", %i/%i corrects" % (correct, count))

            print ("")
            print ("Confusion matrix:")
            print ("(r , p) | count")
            for l in labels_set:
                    for pl in labels_set:
                            print ("(%i , %i) | %i" % (l, pl, matrix[(l,pl)])) 

I am using the deploy.prototxt below:

    name: "CaffeNet"
    input: "data"
    input_shape {
      dim: 1
      dim: 3
      dim: 224
      dim: 224
    }
    layers {
      name: "conv1"
      type: CONVOLUTION
      bottom: "data"
      top: "conv1"

        blobs_lr: 1
        weight_decay: 1

        blobs_lr: 2
        weight_decay: 0


      convolution_param {
        num_output: 96
        kernel_size: 7
        stride: 2
        weight_filler {
          type: "gaussian"
          std: 0.01
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
    }
    layers {
      name: "relu1"
      type: RELU
      bottom: "conv1"
      top: "conv1"
    }
    layers {
      name: "norm1"
      type: LRN
      bottom: "conv1"
      top: "norm1"
      lrn_param {
        local_size: 5
        alpha: 0.0005
        beta: 0.75
      }
    }
    layers {
      name: "pool1"
      type: POOLING
      bo


1 Answer


There are differences between your validation step (TEST phase) and the python code you are running:

  1. You are using a different mean file for train and test (!): for phase: TRAIN you are using mean_file: "mean.binaryproto", while for phase: TEST you are using mean_file: "painmean.binaryproto". Your python evaluation code uses the training mean file and not the validation one.
    It is not a good practice to have different settings for train/validation.

  2. Your input images have new_height: 256 and crop_size: 224. These settings mean caffe reads the image, scales it to 256x256 and then crops the center to size 224x224. Your python code seems to only scale the input to 224x224 without cropping: you are feeding your net different inputs (a sketch applying this fix, together with the mean-file fix from point 1, follows this list).

  3. Please verify that you do not have any other differences between your training prototxt and deploy prototxt.
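
A rough sketch of how points 1 and 2 can be folded into the python evaluation loop, assuming the mean file is on Caffe's usual 0-255 scale (the file names, the placeholder image path and the 'prob' output name follow the question's script and may need adjusting):

    # Sketch: align the python preprocessing with the TEST phase of train.prototxt
    # (resize to 256x256, subtract the TEST mean, then center-crop to 224x224).
    import caffe

    blob = caffe.io.caffe_pb2.BlobProto.FromString(open('painmean.binaryproto', 'rb').read())
    mean = caffe.io.blobproto_to_array(blob)[0]           # TEST-phase mean, 3x256x256

    net = caffe.Net('deploy.prototxt', 'snapshot_iter_13000.caffemodel', caffe.TEST)
    net.blobs['data'].reshape(1, 3, 224, 224)

    # The transformer works on the full 256x256 image; preprocess() resizes the
    # loaded image to 256x256, mimicking new_height/new_width of the ImageData layer.
    transformer = caffe.io.Transformer({'data': (1, 3, 256, 256)})
    transformer.set_transpose('data', (2, 0, 1))
    transformer.set_channel_swap('data', (2, 1, 0))
    transformer.set_raw_scale('data', 255)                # assumption: load_image gives [0,1], mean is 0-255
    transformer.set_mean('data', mean)

    im = caffe.io.load_image('/Images/test/example.jpg')  # placeholder image path
    data_256 = transformer.preprocess('data', im)         # 3x256x256, mean-subtracted
    off = (256 - 224) // 2                                # center crop, as the TEST ImageData layer does
    net.blobs['data'].data[...] = data_256[:, off:off + 224, off:off + 224]
    plabel = int(net.forward()['prob'][0].argmax())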

