
Error with a pretrained VGG model in Keras

I followed this to load and run a pretrained VGG model. However, when I tried to extract the feature maps of a hidden layer and reproduce the results of the "extract an arbitrary feature map" section here, I got the following error:

File "VGG_Keras.py", line 98, in <module> 
    plt.imshow(features[0][13]) 
IndexError: index 13 is out of bounds for axis 0 with size 1 

How can I fix this? Here is my code:

#!/usr/bin/python 

import matplotlib.pyplot as plt 
import theano 
from scipy import misc 
from PIL import Image 
import PIL.ImageOps 
from keras.models import Sequential 
from keras.layers.core import Flatten, Dense, Dropout 
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D 
from keras.optimizers import SGD 
import numpy as np 
from keras import backend as K 

def get_features(model, layer, X_batch): 
    get_features = K.function([model.layers[0].input, K.learning_phase()], [model.layers[layer].output,]) 
    features = get_features([X_batch,0]) 
    return features 

def VGG_16(weights_path=None): 
    model = Sequential() 
    model.add(ZeroPadding2D((1,1),input_shape=(3,224,224))) 
    model.add(Convolution2D(64, 3, 3, activation='relu')) 
    model.add(ZeroPadding2D((1,1))) 
    model.add(Convolution2D(64, 3, 3, activation='relu')) 
    model.add(MaxPooling2D((2,2), strides=(2,2))) 

    model.add(ZeroPadding2D((1,1))) 
    model.add(Convolution2D(128, 3, 3, activation='relu')) 
    model.add(ZeroPadding2D((1,1))) 
    model.add(Convolution2D(128, 3, 3, activation='relu')) 
    model.add(MaxPooling2D((2,2), strides=(2,2))) 

    model.add(ZeroPadding2D((1,1))) 
    model.add(Convolution2D(256, 3, 3, activation='relu')) 
    model.add(ZeroPadding2D((1,1))) 
    model.add(Convolution2D(256, 3, 3, activation='relu')) 
    model.add(ZeroPadding2D((1,1))) 
    model.add(Convolution2D(256, 3, 3, activation='relu')) 
    model.add(MaxPooling2D((2,2), strides=(2,2))) 

    model.add(ZeroPadding2D((1,1))) 
    model.add(Convolution2D(512, 3, 3, activation='relu')) 
    model.add(ZeroPadding2D((1,1))) 
    model.add(Convolution2D(512, 3, 3, activation='relu')) 
    model.add(ZeroPadding2D((1,1))) 
    model.add(Convolution2D(512, 3, 3, activation='relu')) 
    model.add(MaxPooling2D((2,2), strides=(2,2))) 

    model.add(ZeroPadding2D((1,1))) 
    model.add(Convolution2D(512, 3, 3, activation='relu')) 
    model.add(ZeroPadding2D((1,1))) 
    model.add(Convolution2D(512, 3, 3, activation='relu')) 
    model.add(ZeroPadding2D((1,1))) 
    model.add(Convolution2D(512, 3, 3, activation='relu')) 
    model.add(MaxPooling2D((2,2), strides=(2,2))) 

    model.add(Flatten()) 
    model.add(Dense(4096, activation='relu')) 
    model.add(Dropout(0.5)) 
    model.add(Dense(4096, activation='relu')) 
    model.add(Dropout(0.5)) 
    model.add(Dense(1000, activation='softmax')) 

    if weights_path: 
     model.load_weights("/home/srilatha/Desktop/Research_intern/vgg16_weights.h5") 

    return model 

if __name__ == "__main__": 
    #f="/home/srilatha/Desktop/Research_intern/Data_sets/Data_set_2/FGNET/male/007A23.JPG" 
    f="/home/srilatha/Desktop/Research_intern/Data_sets/Cropped_data_set/1/7.JPG" 
    image = Image.open(f) 
    new_width = 224 
    new_height = 224 
    im = image.resize((new_width, new_height), Image.ANTIALIAS) 
    im=np.array(im) 
    im=np.tile(im[:,:,None],(1,1,3)) 
    #imRGB = np.repeat(im[:, :, np.newaxis], 3, axis=2) 
    print(im) 
    #print(type(im)) 
    im = im.transpose((2,0,1)) 
    im = np.expand_dims(im, axis=0) 


    # Test pretrained model 
    model = VGG_16('/home/srilatha/Desktop/Research_intern/vgg16_weights.h5') 
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True) 
    model.compile(optimizer=sgd, loss='categorical_crossentropy') 
    out = model.predict(im) 
    #get_feature = theano.function([model.layers[0].input], model.layers[3].get_output(train=False), allow_input_downcast=False) 
    #feat = get_feature(im) 
    #get_activations = theano.function([model.layers[0].input], model.layers[1].get_output(train=False), allow_input_downcast=True) 
    #activations = get_activations(model, 1, im) 
    #plt.imshow(activations) 
    #plt.imshow(im) 
    features=get_features(model,15,im) 
    plt.imshow(features[0][13]) 
    #out = model.predict(im) 
    #plt.plot(out.ravel()) 
    #plt.show() 
    print np.argmax(out) 

This is the full script that produces the error above.

Answer


First of all, next time please post a cleaned-up version of your code so that others can help you more easily.

Second, modify the function so that you can debug it:

def get_features(model, layer, X_batch): 
    # Print the layer object and its output shape before building the function 
    print model.layers[layer] 
    print model.layers[layer].output_shape 
    get_features = K.function([model.layers[0].input, K.learning_phase()], [model.layers[layer].output,]) 
    features = get_features([X_batch, 0]) 
    # K.function returns a list of output arrays, so inspect its type and the shape of its first element 
    print type(features) 
    print features[0].shape 
    return features 

You will find that features is actually a list:

  1. The output of K.function, i.e. the result of evaluating [model.layers[layer].output,], is a list, so the returned features is a list with a single element.
  2. features[0] has shape (1, 256, 56, 56), i.e. (batch_size, channels, height, width).
  3. features[0][0] is the set of feature maps for the first image in the batch, so its shape matches model.layers[layer].output_shape without the batch dimension.
  4. I believe what you are looking for is features[0][0][13].
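Putting it together, the plotting call should index a channel inside the first (and only) element of that list, not the list itself. A minimal sketch of the fix, keeping layer index 15 and channel 13 from your script:

features = get_features(model, 15, im) 
feature_map = features[0][0][13]   # 56x56 activation map of channel 13 for the first image in the batch 
plt.imshow(feature_map) 
plt.show() 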