
Prediction using TensorFlow's InceptionV3 with TF-Slim

I trained TensorFlow's InceptionV3 model on my own dataset. Training produced checkpoint files and a graph (.meta). I want to use these files to classify the labels of new images.

inception_v3 code

def inception_v3(inputs,
                 dropout_keep_prob=0.8,
                 num_classes=1000,
                 is_training=True,
                 restore_logits=True,
                 scope=''):
  """Latest Inception from http://arxiv.org/abs/1512.00567.

  "Rethinking the Inception Architecture for Computer Vision"
  Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
  Zbigniew Wojna

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    dropout_keep_prob: dropout keep_prob.
    num_classes: number of predicted classes.
    is_training: whether is training or not.
    restore_logits: whether or not the logits layers should be restored.
      Useful for fine-tuning a model with different num_classes.
    scope: Optional scope for name_scope.

  Returns:
    a list containing 'logits', 'aux_logits' Tensors.
  """
  # end_points will collect relevant activations for external use, for example
  # summaries or losses.
  end_points = {}
  with tf.name_scope(scope, 'inception_v3', [inputs]):
    with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
                          is_training=is_training):
      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                            stride=1, padding='VALID'):
        # 299 x 299 x 3
        end_points['conv0'] = ops.conv2d(inputs, 32, [3, 3], stride=2,
                                         scope='conv0')
        # 149 x 149 x 32
        end_points['conv1'] = ops.conv2d(end_points['conv0'], 32, [3, 3],
                                         scope='conv1')
        # 147 x 147 x 32
        end_points['conv2'] = ops.conv2d(end_points['conv1'], 64, [3, 3],
                                         padding='SAME', scope='conv2')
        # 147 x 147 x 64
        end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
                                           stride=2, scope='pool1')
        # 73 x 73 x 64
        end_points['conv3'] = ops.conv2d(end_points['pool1'], 80, [1, 1],
                                         scope='conv3')
        # 73 x 73 x 80.
        end_points['conv4'] = ops.conv2d(end_points['conv3'], 192, [3, 3],
                                         scope='conv4')
        # 71 x 71 x 192.
        end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
                                           stride=2, scope='pool2')
        # 35 x 35 x 192.
        net = end_points['pool2']
      # Inception blocks
      with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
                            stride=1, padding='SAME'):
        # mixed: 35 x 35 x 256.
        with tf.variable_scope('mixed_35x35x256a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
          net = tf.concat([branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
          end_points['mixed_35x35x256a'] = net
        # mixed_1: 35 x 35 x 288.
        with tf.variable_scope('mixed_35x35x288a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
          net = tf.concat([branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
          end_points['mixed_35x35x288a'] = net
        # mixed_2: 35 x 35 x 288.
        with tf.variable_scope('mixed_35x35x288b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 64, [1, 1])
          with tf.variable_scope('branch5x5'):
            branch5x5 = ops.conv2d(net, 48, [1, 1])
            branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
          net = tf.concat([branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
          end_points['mixed_35x35x288b'] = net
        # mixed_3: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768a'):
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [3, 3], stride=2, padding='VALID')
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 64, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3],
                                      stride=2, padding='VALID')
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
          net = tf.concat([branch3x3, branch3x3dbl, branch_pool], 3)
          end_points['mixed_17x17x768a'] = net
        # mixed4: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 128, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 128, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
          end_points['mixed_17x17x768b'] = net
        # mixed_5: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768c'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 160, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 160, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
          end_points['mixed_17x17x768c'] = net
        # mixed_6: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768d'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 160, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 160, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
          end_points['mixed_17x17x768d'] = net
        # mixed_7: 17 x 17 x 768.
        with tf.variable_scope('mixed_17x17x768e'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 192, [1, 1])
          with tf.variable_scope('branch7x7'):
            branch7x7 = ops.conv2d(net, 192, [1, 1])
            branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
            branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
          with tf.variable_scope('branch7x7dbl'):
            branch7x7dbl = ops.conv2d(net, 192, [1, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
            branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
          end_points['mixed_17x17x768e'] = net
        # Auxiliary Head logits
        aux_logits = tf.identity(end_points['mixed_17x17x768e'])
        with tf.variable_scope('aux_logits'):
          aux_logits = ops.avg_pool(aux_logits, [5, 5], stride=3,
                                    padding='VALID')
          aux_logits = ops.conv2d(aux_logits, 128, [1, 1], scope='proj')
          # Shape of feature map before the final layer.
          shape = aux_logits.get_shape()
          aux_logits = ops.conv2d(aux_logits, 768, shape[1:3], stddev=0.01,
                                  padding='VALID')
          aux_logits = ops.flatten(aux_logits)
          aux_logits = ops.fc(aux_logits, num_classes, activation=None,
                              stddev=0.001, restore=restore_logits)
          end_points['aux_logits'] = aux_logits
        # mixed_8: 8 x 8 x 1280.
        # Note that the scope below is not changed to not void previous
        # checkpoints.
        # (TODO) Fix the scope when appropriate.
        with tf.variable_scope('mixed_17x17x1280a'):
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 192, [1, 1])
            branch3x3 = ops.conv2d(branch3x3, 320, [3, 3], stride=2,
                                   padding='VALID')
          with tf.variable_scope('branch7x7x3'):
            branch7x7x3 = ops.conv2d(net, 192, [1, 1])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
            branch7x7x3 = ops.conv2d(branch7x7x3, 192, [3, 3],
                                     stride=2, padding='VALID')
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
          net = tf.concat([branch3x3, branch7x7x3, branch_pool], 3)
          end_points['mixed_17x17x1280a'] = net
        # mixed_9: 8 x 8 x 2048.
        with tf.variable_scope('mixed_8x8x2048a'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 320, [1, 1])
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [1, 1])
            branch3x3 = tf.concat([ops.conv2d(branch3x3, 384, [1, 3]),
                                   ops.conv2d(branch3x3, 384, [3, 1])], 3)
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 448, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
            branch3x3dbl = tf.concat([ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                      ops.conv2d(branch3x3dbl, 384, [3, 1])], 3)
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch3x3, branch3x3dbl, branch_pool], 3)
          end_points['mixed_8x8x2048a'] = net
        # mixed_10: 8 x 8 x 2048.
        with tf.variable_scope('mixed_8x8x2048b'):
          with tf.variable_scope('branch1x1'):
            branch1x1 = ops.conv2d(net, 320, [1, 1])
          with tf.variable_scope('branch3x3'):
            branch3x3 = ops.conv2d(net, 384, [1, 1])
            branch3x3 = tf.concat([ops.conv2d(branch3x3, 384, [1, 3]),
                                   ops.conv2d(branch3x3, 384, [3, 1])], 3)
          with tf.variable_scope('branch3x3dbl'):
            branch3x3dbl = ops.conv2d(net, 448, [1, 1])
            branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
            branch3x3dbl = tf.concat([ops.conv2d(branch3x3dbl, 384, [1, 3]),
                                      ops.conv2d(branch3x3dbl, 384, [3, 1])], 3)
          with tf.variable_scope('branch_pool'):
            branch_pool = ops.avg_pool(net, [3, 3])
            branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
          net = tf.concat([branch1x1, branch3x3, branch3x3dbl, branch_pool], 3)
          end_points['mixed_8x8x2048b'] = net
        # Final pooling and prediction
        with tf.variable_scope('logits'):
          shape = net.get_shape()
          net = ops.avg_pool(net, shape[1:3], padding='VALID', scope='pool')
          # 1 x 1 x 2048
          net = ops.dropout(net, dropout_keep_prob, scope='dropout')
          net = ops.flatten(net, scope='flatten')
          # 2048
          logits = ops.fc(net, num_classes, activation=None, scope='logits',
                          restore=restore_logits)
          # 1000
          end_points['logits'] = logits
          end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
        return logits, end_points

Prediction code

config = tf.ConfigProto(allow_soft_placement=True)
saver = tf.train.import_meta_graph('path/to/meta/graph')
graph = tf.get_default_graph()
with tf.Session(config=config, graph=graph) as sess:
    print(graph)
    saver.restore(sess, '/path/to/chpk/')
    init_op = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())
    sess.run(init_op)
    print('Restored checkpoint file and graph')
    tens = image_preprocessing(tf.read_file('/serving/13_left.jpeg'))
    with slim.arg_scope(inception_arg_scope()):
        logits = inception_v3(tf.expand_dims(tens, 0),
                              num_classes=6,
                              is_training=False)
    prob = tf.nn.softmax(logits)
    sess.run(prob)
So far, this gives me the following error:

FailedPreconditionError (see above for traceback): Attempting to use uninitialized value mixed_17x17x768d/branch7x7dbl/Conv_3/weights_2
     [[Node: mixed_17x17x768d/branch7x7dbl/Conv_3/weights_2/read = Identity[T=DT_FLOAT, _class=["loc:@mixed_17x17x768d/branch7x7dbl/Conv_3/weights_2"], _device="/job:localhost/replica:0/task:0/cpu:0"](mixed_17x17x768d/branch7x7dbl/Conv_3/weights_2)]]

I am fairly new to TensorFlow, so I know I am doing something wrong, but I just can't figure out what. Any help here would be much appreciated. Thanks in advance :)
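(For reference: this error typically appears because inception_v3() is called after the checkpoint has been restored, so the call creates a second, fresh set of variables, hence the "_2" suffix in the variable name, and nothing ever initializes or restores them. A minimal sketch of the more common pattern, reusing image_preprocessing and inception_arg_scope from the snippet above and assuming the variable names created by inception_v3() match those stored in the checkpoint; if training added a "tower_0/" prefix, the Saver would need an explicit var_list mapping:)

import tensorflow as tf

# Build the inference graph first, then restore the checkpoint into the
# variables that this build step created.
tens = image_preprocessing(tf.read_file('/serving/13_left.jpeg'))
with slim.arg_scope(inception_arg_scope()):
    logits, end_points = inception_v3(tf.expand_dims(tens, 0),
                                      num_classes=6,
                                      is_training=False)
prob = tf.nn.softmax(logits)

saver = tf.train.Saver()  # covers the variables created by the build above
with tf.Session() as sess:
    saver.restore(sess, '/path/to/chpk/')  # no extra init_op needed afterwards
    print(sess.run(prob))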

EDIT 1

I restarted my session and reloaded the graph and weights into it. I then froze the graph and saved it as frozen_model.pb. The following is my code for that:

import argparse

import tensorflow as tf
from tensorflow.python.framework import graph_util


def freeze_graph(model_folder):
    # We retrieve our checkpoint fullpath
    checkpoint = tf.train.get_checkpoint_state(model_folder)
    input_checkpoint = checkpoint.model_checkpoint_path

    # We specify the full filename of our frozen graph
    absolute_model_folder = "/".join(input_checkpoint.split('/')[:-1])
    output_graph = absolute_model_folder + "/frozen_model.pb"

    # Before exporting our graph, we need to specify our output node.
    # This is how TF decides which part of the graph it has to keep and which part it can dump.
    output_node_names = "tower_0/logits/predictions"

    # We clear devices to allow TensorFlow to control on which device it will load operations
    clear_devices = True

    # We import the meta graph and retrieve a Saver
    saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)

    # We retrieve the protobuf graph definition
    graph = tf.get_default_graph()
    input_graph_def = graph.as_graph_def()

    # We start a session and restore the graph weights
    with tf.Session() as sess:
        saver.restore(sess, input_checkpoint)
        for op in sess.graph.get_operations():
            print(op.name)

        # We use a built-in TF helper to export variables to constants
        output_graph_def = graph_util.convert_variables_to_constants(
            sess,              # The session is used to retrieve the weights
            input_graph_def,   # The graph_def is used to retrieve the nodes
            output_node_names.split(","))  # The output node names are used to select the useful nodes

        # Finally we serialize and dump the output graph to the filesystem
        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())
        print("%d ops in the final graph." % len(output_graph_def.node))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_folder", type=str, help="Model folder to export")
    args = parser.parse_args()
    freeze_graph(args.model_folder)

I then load the frozen graph in a new session; the code for that is below. My node names look like this:

prefix/batch_processing/batch_join/fifo_queue
prefix/batch_processing/batch_join/n
prefix/batch_processing/batch_join
prefix/batch_processing/Reshape/shape
prefix/batch_processing/Reshape
.
.
.
prefix/logits/logits/weights
prefix/logits/logits/weights/read
prefix/logits/logits/biases
prefix/logits/logits/biases/read
prefix/tower_0/logits/logits/xw_plus_b/MatMul
prefix/tower_0/logits/logits/xw_plus_b
prefix/tower_0/logits/predictions

I use the input node and the last node (predictions) to classify a new image. My code for this is the following:

import argparse

import tensorflow as tf


def load_graph(frozen_graph_filename):
    # We load the protobuf file from the disk and parse it to retrieve the
    # unserialized graph_def
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    # Then, we can use again a convenient built-in function to import a graph_def into the
    # current default Graph
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(
            graph_def,
            input_map=None,
            return_elements=None,
            name="prefix",
            op_dict=None,
            producer_op_list=None
        )
    return graph


#graph = load_graph('/serving/frozen_model.pb')


if __name__ == '__main__':
    import scipy.misc
    # Let's allow the user to pass the filename as an argument
    parser = argparse.ArgumentParser()
    parser.add_argument("--frozen_model_filename", default="/serving/frozen_model.pb", type=str,
                        help="Frozen model file to import")
    args = parser.parse_args()

    # We use our "load_graph" function
    graph = load_graph(args.frozen_model_filename)

    # We can verify that we can access the list of operations in the graph
    for op in graph.get_operations():
        print(op.name)
        # prefix/Placeholder/inputs_placeholder
        # ...
        # prefix/Accuracy/predictions

    # We access the input and output nodes
    x = graph.get_tensor_by_name('prefix/batch_processing/batch_join/fifo_queue:0')
    y = graph.get_tensor_by_name('prefix/tower_0/logits/predictions:0')
    image_data = tf.gfile.FastGFile('/serving/13_left.jpeg', 'rb').read()

    # We launch a Session
    with tf.Session(graph=graph) as sess:
        # Note: we didn't initialize/restore anything, everything is stored in the graph_def
        y_out = sess.run(y, feed_dict={
            x: image_data})  # < 45

        print(y_out)  # [[ False ]] Yay, it works!

This gives me the following error:

Traceback (most recent call last):
  File "predict_3.py", line 183, in <module>
    x: image_data}) # < 45
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 767, in run
    run_metadata_ptr)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 929, in _run
    subfeed_dtype = subfeed_t.dtype.as_numpy_dtype
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/dtypes.py", line 138, in as_numpy_dtype
    return _TF_TO_NP[self._type_enum]
KeyError: 20

I am not sure what is going wrong now; my nodes look correct to me. Any help with this would be much appreciated.
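(For reference, the feed target itself appears to be the problem here: prefix/batch_processing/batch_join/fifo_queue:0 is the queue's handle, not an image tensor, and in TF 1.x its dtype enum, 20, is the resource/handle type, which has no NumPy equivalent — that is what surfaces as KeyError: 20. A quick sketch for checking this, using the node names printed above:)

# Inspect the dtype/shape of the tensor you are about to feed; a queue handle
# cannot accept raw JPEG bytes, while an image-batch tensor can.
x = graph.get_tensor_by_name('prefix/batch_processing/batch_join/fifo_queue:0')
print(x.dtype)  # resource/handle dtype -> not feedable with image data
x = graph.get_tensor_by_name('prefix/batch_processing/Reshape:0')
print(x.dtype, x.get_shape())  # the tensor fed in the answer below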

Which TensorFlow version are you running? – Neal

I am running TF v1.0. –

Answer


Finally, I figured out how to solve this. The graph has to be frozen to generate a .pb file, which makes the nodes reusable for prediction. The following is the code to do that:

import argparse

import tensorflow as tf
from tensorflow.python.framework import graph_util


def freeze_graph(model_folder):
    # We retrieve our checkpoint fullpath
    checkpoint = tf.train.get_checkpoint_state(model_folder)
    input_checkpoint = checkpoint.model_checkpoint_path

    # We specify the full filename of our frozen graph
    absolute_model_folder = "/".join(input_checkpoint.split('/')[:-1])
    output_graph = absolute_model_folder + "/frozen_model.pb"

    # Before exporting our graph, we need to specify our output node.
    # This is how TF decides which part of the graph it has to keep and which part it can dump.
    # NOTE: this variable is plural, because you can have multiple output nodes
    output_node_names = "tower_0/logits/predictions"

    # We clear devices to allow TensorFlow to control on which device it will load operations
    clear_devices = True

    # We import the meta graph and retrieve a Saver
    saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)

    # We retrieve the protobuf graph definition
    graph = tf.get_default_graph()
    input_graph_def = graph.as_graph_def()

    # We start a session and restore the graph weights
    with tf.Session() as sess:
        saver.restore(sess, input_checkpoint)
        for op in sess.graph.get_operations():
            print(op.name)

        # We use a built-in TF helper to export variables to constants
        output_graph_def = graph_util.convert_variables_to_constants(
            sess,              # The session is used to retrieve the weights
            input_graph_def,   # The graph_def is used to retrieve the nodes
            output_node_names.split(","))  # The output node names are used to select the useful nodes

        # Finally we serialize and dump the output graph to the filesystem
        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())
        print("%d ops in the final graph." % len(output_graph_def.node))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_folder", type=str, help="Model folder to export")
    args = parser.parse_args()

    freeze_graph(args.model_folder)

This generates the protocol buffer (frozen_model.pb). After that, we can load the graph. The following is my code for this:

def load_graph(frozen_graph_filename):
    # We load the protobuf file from the disk and parse it to retrieve the
    # unserialized graph_def
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    # Then, we can use again a convenient built-in function to import a graph_def into the
    # current default Graph
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(
            graph_def,
            input_map=None,
            return_elements=None,
            name="prefix",
            op_dict=None,
            producer_op_list=None
        )
    return graph

With the graph loaded, we can run the prediction as follows:

if __name__ == '__main__':
    # Let's allow the user to pass the filename as an argument
    parser = argparse.ArgumentParser()
    parser.add_argument("--frozen_model_filename", default="/serving/frozen_model.pb", type=str,
                        help="Frozen model file to import")
    parser.add_argument("--image_name", type=str, help="Image to test")
    args = parser.parse_args()

    # Create test batch
    image_data = create_test_batch(args.image_name)

    # We use our "load_graph" function
    graph = load_graph(args.frozen_model_filename)

    # We can verify that we can access the list of operations in the graph
    #for op in graph.get_operations():
    #    print(op.name)
    #    # prefix/Placeholder/inputs_placeholder
    #    # ...
    #    # prefix/Accuracy/predictions

    # We access the input and output nodes
    x = graph.get_tensor_by_name('prefix/batch_processing/Reshape:0')  # Input tensor
    y = graph.get_tensor_by_name('prefix/tower_0/logits/predictions:0')  # Output tensor

    # We launch a Session
    with tf.Session(graph=graph) as sess:
        # Note: we didn't initialize/restore anything, everything is stored in the graph_def
        y_out = sess.run(y, feed_dict={
            x: image_data})  # < 45
        print(y_out)

My input node (x) expects a batch of 64 images of size 299x299x3, so I hacked my way around this by writing the test image 64 times to build an input batch. I did that in the following way:

import cv2
import numpy as np


def create_test_batch(input_image):
    data = []
    img = cv2.imread(input_image)  # Read the test image (BGR)
    img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)  # Convert the BGR image to YUV
    # equalize the histogram of the Y channel
    img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])
    # convert the YUV image back to BGR format
    img_output = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
    img_resize = cv2.resize(img_output, (299, 299))  # Resize to the 299x299 expected by the InceptionV3 model
    for i in range(0, 64):
        data.append(img_resize)  # Create a batch of 64 copies of the image
        #data.append(np.resize((ndimage.imread('/serving/' + input_image)), (299, 299, 3)))
    print(np.shape(data))
    return data

If there is a better way to solve the input batch problem, I would be grateful for an answer.
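(One possible simplification, a sketch assuming NumPy is available and the frozen graph really does require a fixed batch of 64: tile the single preprocessed image into a batch in one call instead of appending it in a Python loop.)

import numpy as np

# img_resize is the (299, 299, 3) image produced in create_test_batch above.
batch = np.tile(img_resize[np.newaxis, ...], (64, 1, 1, 1))
print(batch.shape)  # (64, 299, 299, 3)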