
nn_quantizer.py in the ML-examples cmsisnn-cifar10 example fails with KeyError: 'accuracy'

I am running the cmsisnn-cifar10 example from ML-examples. While converting the trained Caffe model to CMSIS-NN with the command below:

python nn_quantizer.py --model models/cifar10_m7_train_test.prototxt --weights models/cifar10_m7_iter_300000.caffemodel.h5 --save models/cifar10_m7.pkl

I get the following error message:

Traceback (most recent call last):
  File "nn_quantizer.py", line 614, in <module>
    my_model.get_graph_connectivity()
  File "nn_quantizer.py", line 231, in get_graph_connectivity
    num_branch = len(self.bottom_blob[current_layer])
KeyError: 'accuracy'

Kindly provide any reference for solving the above error.
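
For reference, a minimal reconstruction of the failure mode, based only on what the traceback shows (this is not the actual nn_quantizer.py code): get_graph_connectivity looks up each layer it visits in self.bottom_blob, and the KeyError means no entry for 'accuracy' was created when the graph was built from the prototxt.

# Hypothetical reconstruction of the traceback's failure mode; the exact
# shape of the map is an assumption, not nn_quantizer.py's real structure.
bottom_blob = {
    "conv1": ["data"],   # layer -> bottom blobs, filled in while parsing the prototxt
    "pool1": ["conv1"],
    # ... no entry was ever created for "accuracy"
}
current_layer = "accuracy"
num_branch = len(bottom_blob[current_layer])  # raises KeyError: 'accuracy'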

  • Hey, could you tell me what went wrong in your prototxt? I'm having the same issue, but I can't find anything wrong with mine. My full prototxt is below, followed by a quick connectivity check.

    name: "VGGNet_xavier"
    layer {
      name: "data"
      type: "Data"
      include {
        phase: TRAIN
      }
      transform_param {
        crop_size: 224
        mean_value: 104
        mean_value: 117
        mean_value: 123
        mirror: true
      }
      data_param {
        source: "/home/sumant/Project/ML-examples/cmsisnn-cifar10/data_set/train_lmdb"
        batch_size: 128
        backend: LMDB
      }
      top: "data"
      top: "label"
    }
    layer {
      name: "data"
      type: "Data"
      include {
        phase: TEST
      }
      transform_param {
        crop_size: 224
        mean_value: 104
        mean_value: 117
        mean_value: 123
        mirror: false
      }
      data_param {
        source: "/home/sumant/Project/ML-examples/cmsisnn-cifar10/data_set/val_lmdb"
        batch_size: 32
        backend: LMDB
      }
      top: "data"
      top: "label"
    }
    layer {
      name: "conv1_1"
      type: "Convolution"
      bottom: "data"
      top: "conv1_1"
      convolution_param {
        num_output: 64
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "xavier"
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
      quantization_param {
        bw_layer_in: 16
        bw_layer_out: 16
        bw_params: 8
        fl_layer_in: 8
        fl_layer_out: 6
        fl_params: 7
      }
    }
    layer {
      name: "relu1_1"
      type: "ReLU"
      bottom: "conv1_1"
      top: "conv1_1"
    }
    layer {
      name: "conv1_2"
      type: "Convolution"
      bottom: "conv1_1"
      top: "conv1_2"
      convolution_param {
        num_output: 64
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "xavier"
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
      quantization_param {
        bw_layer_in: 16
        bw_layer_out: 16
        bw_params: 8
        fl_layer_in: 6
        fl_layer_out: 3
        fl_params: 8
      }
    }
    layer {
      name: "relu1_2"
      type: "ReLU"
      bottom: "conv1_2"
      top: "conv1_2"
    }
    layer {
      name: "pool1"
      type: "Pooling"
      bottom: "conv1_2"
      top: "pool1"
      pooling_param {
        pool: MAX
        kernel_size: 2
        stride: 2
      }
    }
    layer {
      name: "conv2_1"
      type: "Convolution"
      bottom: "pool1"
      top: "conv2_1"
      convolution_param {
        num_output: 128
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "xavier"
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
      quantization_param {
        bw_layer_in: 16
        bw_layer_out: 16
        bw_params: 8
        fl_layer_in: 3
        fl_layer_out: 2
        fl_params: 8
      }
    }
    layer {
      name: "relu2_1"
      type: "ReLU"
      bottom: "conv2_1"
      top: "conv2_1"
    }
    layer {
      name: "conv2_2"
      type: "Convolution"
      bottom: "conv2_1"
      top: "conv2_2"
      convolution_param {
        num_output: 128
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "xavier"
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
      quantization_param {
        bw_layer_in: 16
        bw_layer_out: 16
        bw_params: 8
        fl_layer_in: 2
        fl_layer_out: 2
        fl_params: 8
      }
    }
    layer {
      name: "relu2_2"
      type: "ReLU"
      bottom: "conv2_2"
      top: "conv2_2"
    }
    layer {
      name: "pool2"
      type: "Pooling"
      bottom: "conv2_2"
      top: "pool2"
      pooling_param {
        pool: MAX
        kernel_size: 2
        stride: 2
      }
    }
    layer {
      name: "conv3_1"
      type: "Convolution"
      bottom: "pool2"
      top: "conv3_1"
      convolution_param {
        num_output: 256
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "xavier"
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
      quantization_param {
        bw_layer_in: 16
        bw_layer_out: 16
        bw_params: 8
        fl_layer_in: 2
        fl_layer_out: 1
        fl_params: 7
      }
    }
    layer {
      name: "relu3_1"
      type: "ReLU"
      bottom: "conv3_1"
      top: "conv3_1"
    }
    layer {
      name: "conv3_2"
      type: "Convolution"
      bottom: "conv3_1"
      top: "conv3_2"
      convolution_param {
        num_output: 256
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "xavier"
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
      quantization_param {
        bw_layer_in: 16
        bw_layer_out: 16
        bw_params: 8
        fl_layer_in: 1
        fl_layer_out: 1
        fl_params: 8
      }
    }
    layer {
      name: "relu3_2"
      type: "ReLU"
      bottom: "conv3_2"
      top: "conv3_2"
    }
    layer {
      name: "conv3_3"
      type: "Convolution"
      bottom: "conv3_2"
      top: "conv3_3"
      convolution_param {
        num_output: 256
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "xavier"
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
      quantization_param {
        bw_layer_in: 16
        bw_layer_out: 16
        bw_params: 8
        fl_layer_in: 1
        fl_layer_out: 1
        fl_params: 8
      }
    }
    layer {
      name: "relu3_3"
      type: "ReLU"
      bottom: "conv3_3"
      top: "conv3_3"
    }
    layer {
      name: "pool3"
      type: "Pooling"
      bottom: "conv3_3"
      top: "pool3"
      pooling_param {
        pool: MAX
        kernel_size: 2
        stride: 2
      }
    }
    layer {
      name: "conv4_1"
      type: "Convolution"
      bottom: "pool3"
      top: "conv4_1"
      convolution_param {
        num_output: 512
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "xavier"
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
      quantization_param {
        bw_layer_in: 16
        bw_layer_out: 16
        bw_params: 8
        fl_layer_in: 1
        fl_layer_out: 1
        fl_params: 8
      }
    }
    layer {
      name: "relu4_1"
      type: "ReLU"
      bottom: "conv4_1"
      top: "conv4_1"
    }
    layer {
      name: "conv4_2"
      type: "Convolution"
      bottom: "conv4_1"
      top: "conv4_2"
      convolution_param {
        num_output: 512
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "xavier"
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
      quantization_param {
        bw_layer_in: 16
        bw_layer_out: 16
        bw_params: 8
        fl_layer_in: 1
        fl_layer_out: 2
        fl_params: 8
      }
    }
    layer {
      name: "relu4_2"
      type: "ReLU"
      bottom: "conv4_2"
      top: "conv4_2"
    }
    layer {
      name: "conv4_3"
      type: "Convolution"
      bottom: "conv4_2"
      top: "conv4_3"
      convolution_param {
        num_output: 512
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "xavier"
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
      quantization_param {
        bw_layer_in: 16
        bw_layer_out: 16
        bw_params: 8
        fl_layer_in: 2
        fl_layer_out: 3
        fl_params: 8
      }
    }
    layer {
      name: "relu4_3"
      type: "ReLU"
      bottom: "conv4_3"
      top: "conv4_3"
    }
    layer {
      name: "pool4"
      type: "Pooling"
      bottom: "conv4_3"
      top: "pool4"
      pooling_param {
        pool: MAX
        kernel_size: 2
        stride: 2
      }
    }
    layer {
      name: "conv5_1"
      type: "Convolution"
      bottom: "pool4"
      top: "conv5_1"
      convolution_param {
        num_output: 512
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "xavier"
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
      quantization_param {
        bw_layer_in: 16
        bw_layer_out: 16
        bw_params: 8
        fl_layer_in: 3
        fl_layer_out: 4
        fl_params: 9
      }
    }
    layer {
      name: "relu5_1"
      type: "ReLU"
      bottom: "conv5_1"
      top: "conv5_1"
    }
    layer {
      name: "conv5_2"
      type: "Convolution"
      bottom: "conv5_1"
      top: "conv5_2"
      convolution_param {
        num_output: 512
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "xavier"
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
      quantization_param {
        bw_layer_in: 16
        bw_layer_out: 16
        bw_params: 8
        fl_layer_in: 4
        fl_layer_out: 5
        fl_params: 9
      }
    }
    layer {
      name: "relu5_2"
      type: "ReLU"
      bottom: "conv5_2"
      top: "conv5_2"
    }
    layer {
      name: "conv5_3"
      type: "Convolution"
      bottom: "conv5_2"
      top: "conv5_3"
      convolution_param {
        num_output: 512
        pad: 1
        kernel_size: 3
        weight_filler {
          type: "xavier"
        }
        bias_filler {
          type: "constant"
          value: 0
        }
      }
      quantization_param {
        bw_layer_in: 16
        bw_layer_out: 16
        bw_params: 8
        fl_layer_in: 5
        fl_layer_out: 6
        fl_params: 8
      }
    }
    layer {
      name: "relu5_3"
      type: "ReLU"
      bottom: "conv5_3"
      top: "conv5_3"
    }
    layer {
      name: "pool5"
      type: "Pooling"
      bottom: "conv5_3"
      top: "pool5"
      pooling_param {
        pool: MAX
        kernel_size: 2
        stride: 2
      }
    }
    layer {
      name: "fc6"
      type: "InnerProduct"
      bottom: "pool5"
      top: "fc6"
      inner_product_param {
        num_output: 4096
        weight_filler {
          type: "xavier"
        }
        bias_filler {
          type: "constant"
          value: 0.1
        }
      }
      quantization_param {
        bw_layer_in: 16
        bw_layer_out: 16
        bw_params: 4
        fl_layer_in: 6
        fl_layer_out: 8
        fl_params: 8
      }
    }
    layer {
      name: "relu6"
      type: "ReLU"
      bottom: "fc6"
      top: "fc6"
    }
    layer {
      name: "drop6"
      type: "Dropout"
      bottom: "fc6"
      top: "fc6"
      dropout_param {
        dropout_ratio: 0.5
      }
    }
    layer {
      name: "fc7"
      type: "InnerProduct"
      bottom: "fc6"
      top: "fc7"
      inner_product_param {
        num_output: 4096
        weight_filler {
          type: "xavier"
        }
        bias_filler {
          type: "constant"
          value: 0.1
        }
      }
      quantization_param {
        bw_layer_in: 16
        bw_layer_out: 16
        bw_params: 4
        fl_layer_in: 8
        fl_layer_out: 10
        fl_params: 7
      }
    }
    layer {
      name: "relu7"
      type: "ReLU"
      bottom: "fc7"
      top: "fc7"
    }
    layer {
      name: "drop7"
      type: "Dropout"
      bottom: "fc7"
      top: "fc7"
      dropout_param {
        dropout_ratio: 0.5
      }
    }
    layer {
      name: "fc8"
      type: "InnerProduct"
      bottom: "fc7"
      top: "fc8"
      inner_product_param {
        num_output: 1000
        weight_filler {
          type: "xavier"
        }
        bias_filler {
          type: "constant"
          value: 0.1
        }
      }
      quantization_param {
        bw_layer_in: 16
        bw_layer_out: 16
        bw_params: 4
        fl_layer_in: 10
        fl_layer_out: 10
        fl_params: 7
      }
    }
    layer {
      bottom: "fc8"
      top: "prob"
      name: "prob"
      type: "Softmax"
    }
    layer {
      name: "accuracy/top1"
      type: "Accuracy"
      bottom: "fc8"
      bottom: "label"
      top: "accuracy@1"
      include: { phase: TEST }
      accuracy_param {
        top_k: 1
      }
    }
    layer {
      name: "accuracy/top5"
      type: "Accuracy"
      bottom: "fc8"
      bottom: "label"
      top: "accuracy@5"
      include: { phase: TEST }
      accuracy_param {
        top_k: 5
      }
    }
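
    Judging from the traceback alone, get_graph_connectivity walks the layer graph and counts branches via self.bottom_blob, so anything unusual in the connectivity (blobs that fan out to several consumers, or tops that nothing consumes) is a reasonable place to look. In this prototxt, fc8 fans out to three layers (prob, accuracy/top1, accuracy/top5), and the tops prob, accuracy@1 and accuracy@5 are never consumed by any later layer. Below is a rough diagnostic sketch that reports branching and dangling blobs; it assumes pycaffe and its generated caffe_pb2 bindings are installed, and the file path is a placeholder:

    # Sketch: flag blobs that fan out to several layers or are never consumed.
    # Assumes pycaffe (caffe.proto.caffe_pb2) is available; the path is a placeholder.
    from google.protobuf import text_format
    from caffe.proto import caffe_pb2

    net = caffe_pb2.NetParameter()
    with open("models/my_train_test.prototxt") as f:
        text_format.Merge(f.read(), net)

    producers = {}  # blob name -> layer that (last) writes it
    consumers = {}  # blob name -> layers that read it (in-place layers skipped)
    for layer in net.layer:
        inplace = set(layer.bottom) & set(layer.top)  # e.g. ReLU with bottom == top
        for b in layer.bottom:
            if b not in inplace:
                consumers.setdefault(b, []).append(layer.name)
        for t in layer.top:
            producers[t] = layer.name

    for blob, who in consumers.items():
        if len(who) > 1:
            print("blob '%s' branches to %s" % (blob, who))
    for blob, origin in producers.items():
        if blob not in consumers:
            print("blob '%s' (top of layer '%s') is never consumed" % (blob, origin))

    On the prototxt above, this flags fc8 (and label) as branching, and prob, accuracy@1 and accuracy@5 as dangling, which at least shows where this net differs from a plain linear chain.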