cifar10 autoencoder with error: You must feed a value for placeholder tensor 'x' with dtype float

I am trying a privacy autoencoder on CIFAR-10, based on the CNN tutorial by Magnus Erik Hvass Pedersen (https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/06_CIFAR-10.ipynb). Unfortunately, when computing the global sensitivity for the loss function, I get an error at this line:

lap_noise1=np.random.laplace(loc=0.0, scale=(sen[0]/epsilon), size=(tf.shape(x)[0], img_size_cropped*img_size_cropped*num_channels)) 

with the error message:

InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'x' with dtype float 

Here is my code:

import matplotlib.pyplot as plt 
import tensorflow as tf 
import numpy as np 
from sklearn.metrics import confusion_matrix 
import time 
from datetime import timedelta 
import math 
import os 
import cifar10 
from cifar10 import img_size, num_channels, num_classes 

gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.03) 
session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) 

cifar10.maybe_download_and_extract() 
class_names = cifar10.load_class_names() 

images_train, cls_train, labels_train = cifar10.load_training_data() 
images_test, cls_test, labels_test = cifar10.load_test_data() 
print("Size of:") 
print("- Training-set:\t\t{}".format(len(images_train))) 
print("- Test-set:\t\t{}".format(len(images_test))) 
img_size_cropped = 24 

x = tf.placeholder(tf.float32, shape=[None, img_size, img_size, num_channels], name='x') 
y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true') 
y_true_cls = tf.argmax(y_true, dimension=1) 

def pre_process_image(image, training): 
    # This function takes a single image as input, 
    # and a boolean whether to build the training or testing graph. 

    if training: 
     # For training, add the following to the TensorFlow graph. 

     # Randomly crop the input image. 
     image = tf.random_crop(image, size=[img_size_cropped, img_size_cropped, num_channels]) 

     # Randomly flip the image horizontally. 
     image = tf.image.random_flip_left_right(image) 

     # Randomly adjust hue, contrast and saturation. 
     image = tf.image.random_hue(image, max_delta=0.05) 
     image = tf.image.random_contrast(image, lower=0.3, upper=1.0) 
     image = tf.image.random_brightness(image, max_delta=0.2) 
     image = tf.image.random_saturation(image, lower=0.0, upper=2.0) 

     # Some of these functions may overflow and result in pixel 
     # values beyond the [0, 1] range. It is unclear from the 
     # documentation of TensorFlow 0.10.0rc0 whether this is 
     # intended. A simple solution is to limit the range. 

     # Limit the image pixels between [0, 1] in case of overflow. 
     image = tf.minimum(image, 1.0) 
     image = tf.maximum(image, 0.0) 
    else: 
     # For testing, add the following to the TensorFlow graph.

     # Crop the input image around the centre so it is the same 
     # size as images that are randomly cropped during training. 
     image = tf.image.resize_image_with_crop_or_pad(image, 
                 target_height=img_size_cropped, 
                 target_width=img_size_cropped) 
    return image 

def pre_process(images, training): 
    # Use TensorFlow to loop over all the input images and call 
    # the function above which takes a single image as input. 
    images = tf.map_fn(lambda image: pre_process_image(image, training), images) 

    return images 


#define the first privacy autoencoder parameters 
n_input=img_size_cropped*img_size_cropped*num_channels #24*24*3 
n_hidden_1=math.floor(img_size_cropped*img_size_cropped*num_channels/2) 
weights={ 
    'encoder_h1':tf.Variable(tf.truncated_normal([n_input,n_hidden_1], stddev=1/192.0)), 
    'decoder_h1':tf.Variable(tf.truncated_normal([n_hidden_1,n_input], stddev=1/192.0)), 
} 
biases={ 
    'encoder_b1':tf.Variable(tf.random_normal([n_hidden_1])), 
    'decoder_b1':tf.Variable(tf.random_normal([n_input])), 
}  
epsilon = 0.0005 
train_batch_size = 128 
batch_size = 128       

def inference(images): 
    xs=tf.reshape(images,[tf.shape(x)[0], img_size_cropped*img_size_cropped*num_channels]) 
    #encoder 
    h=tf.add(tf.matmul(xs, weights['encoder_h1']), biases['encoder_b1']) 
    encoder_layer_1=tf.nn.sigmoid(h) 

    #decoder 
    x_tilde=tf.add(tf.matmul(encoder_layer_1,weights['decoder_h1']), biases['decoder_b1']) 
    decoder_layer_1=tf.nn.sigmoid(x_tilde) 

    #encoder_layer_1 for next dPA use, decoder_layer_1 for decoder_output, x_tilde for objective function(loss_restruction()) 
    return encoder_layer_1, decoder_layer_1, x_tilde 

def loss_restruction(images,decoder_layer_1, x_tilde):  
    xs=tf.reshape(images,[tf.shape(x)[0], img_size_cropped*img_size_cropped*num_channels]) 
    y_pred=decoder_layer_1 
    f1=tf.multiply(xs,math.log(2)) #shape=(128, 1728) 
    f2=tf.multiply((tf.ones_like(xs)-xs),math.log(2)) #shape=(128,1728) 
    # define polynomial coefficients to which the Laplace noise is added
    coefficient_1=tf.add(f1,f2) 
    coefficient_2=tf.add(f1,f2)/1 
    coefficient_3=tf.add(f1,f2)/(2*1) 

    #compute global sensitivity to generate laplacian noise 
    global_sensitivity=2.0*tf.reduce_max(
     tf.reduce_sum(tf.add(tf.add(tf.abs(coefficient_1), tf.abs(coefficient_2)),tf.abs(coefficient_3)),axis=1, keep_dims=True),reduction_indices=0) #[1].*2 
    sen=session.run(global_sensitivity) 
    lap_noise1=np.random.laplace(loc=0.0, scale=(sen[0]/epsilon), size=(tf.shape(x)[0], img_size_cropped*img_size_cropped*num_channels)) 
    lap_noise2=np.random.laplace(loc=0.0, scale=(sen[0]/epsilon), size=(tf.shape(x)[0], img_size_cropped*img_size_cropped*num_channels)) 
    lap_noise3=np.random.laplace(loc=0.0, scale=(sen[0]/epsilon), size=(tf.shape(x)[0], img_size_cropped*img_size_cropped*num_channels)) 

    # reconstruction function with Laplace noise, used as the loss function
    loss=tf.reduce_mean(tf.reduce_sum(tf.add(
     tf.add(tf.add(coefficient_1,lap_noise1), tf.multiply(tf.add(coefficient_2,lap_noise2), x_tilde)), 
     tf.multiply(tf.add(coefficient_3,lap_noise3), tf.square(x_tilde))),1)) 

    return y_pred, loss 

''' 
    Helper-function for creating Main Processing 
    The following helper-function creates the main part of the privacy autoencoder. 
    '''  
def main_network(images, training): 
    encoder_layer_1, decoder_layer_1, x_tilde=inference(images) 

    if training: 
     y_pred, loss=loss_restruction(images, decoder_layer_1, x_tilde) 
    else: 
     y_pred=decoder_layer_1 
     loss=tf.constant([0]) 

    return y_pred, loss 


def create_network(training): 
    with tf.variable_scope('network', reuse=not training): 
     # Just rename the input placeholder variable for convenience. 
     images = x 

     # Create TensorFlow graph for pre-processing. 
     images = pre_process(images=images, training=training) 

     # Create TensorFlow graph for the main processing. 
     y_pred, loss = main_network(images=images, training=training) 
    return y_pred, loss 


global_step = tf.Variable(initial_value=0, name='global_step', trainable=False)  
_, loss = create_network(training=True) 
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss, global_step=global_step) 
y_pred, _ = create_network(training=False) 

saver = tf.train.Saver() 
save_dir = 'checkpoints/' 
if not os.path.exists(save_dir): 
    os.makedirs(save_dir) 
save_path = os.path.join(save_dir, 'cifar10_privacy_autoencoder') 
try: 
    print("Trying to restore last checkpoint ...") 
    last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=save_dir) 
    saver.restore(session, save_path=last_chk_path) 
    print("Restored checkpoint from:", last_chk_path) 
except: 
    print("Failed to restore checkpoint. Initializing variables instead.") 
    session.run(tf.global_variables_initializer()) 


''' 
    Function for selecting a random batch of images from the training-set. 
    ''' 
def random_batch(): 
    # Number of images in the training-set. 
    num_images = len(images_train) 

    # Create a random index. 
    idx = np.random.choice(num_images, 
          size=train_batch_size, 
          replace=False) 

    # Use the random index to select random images and labels. 
    x_batch = images_train[idx, :, :, :] 
    y_batch = labels_train[idx, :] 

    return x_batch, y_batch 


def optimize(num_iterations): 
    # Start-time used for printing time-usage below. 
    start_time = time.time() 

    for i in range(num_iterations): 

     x_batch, y_true_batch = random_batch() 
     feed_dict_train = {x: x_batch, 
          y_true: y_true_batch} 

     i_global, _, cost = session.run([global_step, optimizer, loss], 
            feed_dict=feed_dict_train) 

     # Print status to screen every 100 iterations (and last). 
     if (i_global % 100 == 0) or (i == num_iterations - 1): 
      # Print status. 
      msg = "Global Step: {0:>6}, Training Batch Cost: {1:>6.1%}" 
      print(msg.format(i_global, cost)) 

     # Save a checkpoint to disk every 1000 iterations (and last). 
     if (i_global % 1000 == 0) or (i == num_iterations - 1): 
      saver.save(session, 
         save_path=save_path, 
         global_step=global_step) 

      print("Saved checkpoint.") 

    # Ending time. 
    end_time = time.time() 

    # Difference between start and end-times. 
    time_dif = end_time - start_time 

    # Print the time-usage. 
    print("Time usage: " + str(timedelta(seconds=int(round(time_dif))))) 
    print("Optimization Finished!") 

optimize(num_iterations=2000) 

That is my code. The detailed error message is as follows:

Loading data: /tmp/cifar10_data\cifar-10-batches-py/data_batch_2 
Loading data: /tmp/cifar10_data\cifar-10-batches-py/data_batch_3 
Loading data: /tmp/cifar10_data\cifar-10-batches-py/data_batch_4 
Loading data: /tmp/cifar10_data\cifar-10-batches-py/data_batch_5 
Loading data: /tmp/cifar10_data\cifar-10-batches-py/test_batch 
Size of: 
- Training-set:  50000 
- Test-set:  10000 
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "BestSplits" device_type: "CPU"') for unknown op: BestSplits 
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "CountExtremelyRandomStats" device_type: "CPU"') for unknown op: CountExtremelyRandomStats 
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "FinishedNodes" device_type: "CPU"') for unknown op: FinishedNodes 
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "GrowTree" device_type: "CPU"') for unknown op: GrowTree 
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "ReinterpretStringToFloat" device_type: "CPU"') for unknown op: ReinterpretStringToFloat 
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "SampleInputs" device_type: "CPU"') for unknown op: SampleInputs 
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "ScatterAddNdim" device_type: "CPU"') for unknown op: ScatterAddNdim 
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "TopNInsert" device_type: "CPU"') for unknown op: TopNInsert 
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "TopNRemove" device_type: "CPU"') for unknown op: TopNRemove 
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "TreePredictions" device_type: "CPU"') for unknown op: TreePredictions 
E c:\tf_jenkins\home\workspace\release-win\device\cpu\os\windows\tensorflow\core\framework\op_kernel.cc:943] OpKernel ('op: "UpdateFertileSlots" device_type: "CPU"') for unknown op: UpdateFertileSlots 
Traceback (most recent call last): 
    File "C:\Users\Lee Janice\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1022, in _do_call 
    return fn(*args) 
    File "C:\Users\Lee Janice\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1004, in _run_fn 
    status, run_metadata) 
    File "C:\Users\Lee Janice\AppData\Local\Programs\Python\Python35\lib\contextlib.py", line 66, in __exit__ 
    next(self.gen) 
    File "C:\Users\Lee Janice\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\framework\errors_impl.py", line 466, in raise_exception_on_not_ok_status 
    pywrap_tensorflow.TF_GetCode(status)) 
tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'x' with dtype float 
    [[Node: x = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]] 

During handling of the above exception, another exception occurred: 

Traceback (most recent call last): 
    File "autoencoder.py", line 158, in <module> 
    _, loss = create_network(training=True) 
    File "autoencoder.py", line 153, in create_network 
    y_pred, loss = main_network(images=images, training=training) 
    File "autoencoder.py", line 136, in main_network 
    y_pred, loss=loss_restruction(images, decoder_layer_1, x_tilde) 
    File "autoencoder.py", line 120, in loss_restruction 
    sen=session.run(global_sensitivity) 
    File "C:\Users\Lee Janice\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 767, in run 
    run_metadata_ptr) 
    File "C:\Users\Lee Janice\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 965, in _run 
    feed_dict_string, options, run_metadata) 
    File "C:\Users\Lee Janice\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1015, in _do_run 
    target_list, options, run_metadata) 
    File "C:\Users\Lee Janice\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1035, in _do_call 
    raise type(e)(node_def, op, message) 
tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'x' with dtype float 
    [[Node: x = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]] 

Caused by op 'x', defined at: 
    File "autoencoder.py", line 25, in <module> 
    x = tf.placeholder(tf.float32, shape=[None, img_size, img_size, num_channels], name='x') 
    File "C:\Users\Lee Janice\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\ops\array_ops.py", line 1502, in placeholder 
    name=name) 
    File "C:\Users\Lee Janice\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\ops\gen_array_ops.py", line 2149, in _placeholder 
    name=name) 
    File "C:\Users\Lee Janice\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 763, in apply_op 
    op_def=op_def) 
    File "C:\Users\Lee Janice\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\framework\ops.py", line 2327, in create_op 
    original_op=self._default_original_op, op_def=op_def) 
    File "C:\Users\Lee Janice\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\framework\ops.py", line 1226, in __init__ 
    self._traceback = _extract_stack() 

InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'x' with dtype float 
    [[Node: x = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]] 

I tried adding session.run(tf.local_variables_initializer()) inside the loss_restruction() function, but that did not work. I think the error occurs because lap_noise1 uses sen[0] and tf.shape(x)[0]. How can I fix this part?

Answer


The problem is this line, where you ask for global_sensitivity:

sen=session.run(global_sensitivity)

global_sensitivity depends on your input placeholder x, so you must feed a value for x when you evaluate it, which is exactly what the error message states.
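
A minimal self-contained sketch of what the error means (the names below are made up for illustration; s plays the role of global_sensitivity), which would silence the error but, as explained below, is still not the right fix:

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 3], name='x')
s = tf.reduce_max(tf.reduce_sum(tf.abs(x), axis=1))   # depends on x, like global_sensitivity

with tf.Session() as sess:
    # sess.run(s)                                     # would fail: placeholder 'x' is not fed
    print(sess.run(s, feed_dict={x: np.random.rand(2, 3)}))   # works: x is fed a float value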

  • First build the graph, then create the session:

    I see a few issues with your code. You create the session first and then build the graph. I am not sure this is harmful in itself, but the usual pattern is to build the graph completely first and then launch it in a session (see the sketch after this list).

  • Even if it is still possible to build the graph while a session already exists, and you did not get that error message, your loss_restruction(images, decoder_layer_1, x_tilde) function would not work as intended. In it you download an intermediate result to the CPU, do some computation with it, and then use the result again in the graph to compute the loss. However, loss_restruction is called only once, namely when the graph is built. Consequently the statement sen = session.run(global_sensitivity) would also be executed only once, i.e. the value of sen would effectively be fixed at graph-construction time and would not change during optimization. It is therefore questionable whether training the model would work at all, because backpropagation would not compute the gradients you expect.
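
A rough, self-contained sketch of that ordering (all names, shapes and values below are made up for illustration; it is not a drop-in fix for the code above):

import numpy as np
import tensorflow as tf

# 1) Build the graph first: placeholders, variables and ops only.
x_in = tf.placeholder(tf.float32, shape=[None, 4], name='x_in')
w = tf.Variable(tf.truncated_normal([4, 2], stddev=0.1))
y_out = tf.matmul(x_in, w)

# 2) Only then create the session, initialize variables and run ops,
#    feeding every placeholder the requested op depends on.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(y_out, feed_dict={x_in: np.random.rand(3, 4).astype(np.float32)}))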

My suggestion would be to rewrite your code so that you can omit the sen = session.run(global_sensitivity) statement entirely. What you would have to do for that is implement these three lines in TensorFlow instead of NumPy:

lap_noise1=np.random.laplace(loc=0.0, scale=(sen[0]/epsilon), size=(tf.shape(x)[0], img_size_cropped*img_size_cropped*num_channels)) 
lap_noise2=np.random.laplace(loc=0.0, scale=(sen[0]/epsilon), size=(tf.shape(x)[0], img_size_cropped*img_size_cropped*num_channels)) 
lap_noise3=np.random.laplace(loc=0.0, scale=(sen[0]/epsilon), size=(tf.shape(x)[0], img_size_cropped*img_size_cropped*num_channels)) 

The Laplace distribution seems to be implemented in TensorFlow, check here.
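
A rough sketch of what that could look like, assuming tf.contrib.distributions.Laplace with loc/scale arguments is available in your TensorFlow version (the scale below is a plain constant standing in for sen[0]/epsilon, since the real sensitivity would itself have to stay a tensor):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 24 * 24 * 3], name='x')
scale = tf.constant(1.0)            # hypothetical stand-in for global_sensitivity / epsilon
noise_shape = tf.shape(x)           # dynamic [batch_size, n_features], resolved at run time

# Draw the Laplace noise inside the graph, so nothing has to be
# evaluated with session.run while the graph is still being built.
laplace = tf.contrib.distributions.Laplace(loc=0.0, scale=scale)
lap_noise1 = laplace.sample(noise_shape)
lap_noise2 = laplace.sample(noise_shape)
lap_noise3 = laplace.sample(noise_shape)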


Thank you for your answer. I fixed the syntax errors in my code, but when I try to use a TensorFlow function to generate lap_noise I still have problems because of a logic error of mine. –


What do you mean by a logic error? – kaufmanu


The loss is printed as NaN%. Maybe I wrote something wrong; I will check my code again. Thank you! Another thing I am wondering about is what to do when no such function can be found in the TensorFlow API. How can I solve that kind of problem then? –
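
On that last point, a minimal sketch of one common workaround, building the missing sampler from primitives via the inverse CDF of a uniform sample (only standard ops such as tf.random_uniform are assumed here):

import tensorflow as tf

def laplace_noise(shape, loc=0.0, scale=1.0):
    # Inverse-CDF transform: if u ~ Uniform(-0.5, 0.5), then
    # loc - scale * sign(u) * ln(1 - 2*|u|) ~ Laplace(loc, scale).
    u = tf.random_uniform(shape, minval=-0.5, maxval=0.5)
    return loc - scale * tf.sign(u) * tf.log(1.0 - 2.0 * tf.abs(u))

x = tf.placeholder(tf.float32, shape=[None, 1728], name='x')
noise = laplace_noise(tf.shape(x), scale=2.0)   # same dynamic shape as x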
