
Tensorflow TypeError

I am trying to train a NN using the code from here: https://github.com/RobRomijnders/LSTM_tsc

After making many changes to adapt it to my data, I get this error:

TypeError: Fetch argument None has invalid type <class 'NoneType'>

As with previous errors, I searched for an answer and found this: Tensorflow TypeError on session.run arguments/output

but I don't think it applies to my code (at least I can't find the problem in it):

import numpy as np 
import os 
os.environ['TF_CPP_MIN_LOG_LEVEL']='3' 
import tensorflow as tf 
tf.logging.set_verbosity(tf.logging.ERROR) 
import matplotlib.pyplot as plt 
import sys 
sys.path.append('E:\\Descargas\\recommendersystems\\train') 
from tsc_model import Model,sample_batch,load_data,check_test 


config = {}        #Put all configuration information into the dict 
config['num_layers'] = 3    #number of layers of stacked RNN's 
config['hidden_size'] = 120    #memory cells in a layer 
config['max_grad_norm'] = 5    #maximum gradient norm during training 
config['batch_size'] = batch_size = 30 
config['learning_rate'] = .005 
config['num_classes'] = 2 

max_iterations = 3000 
dropout = 0.8 
ratio = np.array([0.8,0.9]) #Ratios where to split the training and validation set 


direc = 'E:\\Descargas\\recommendersystems\\ets_challenge\\train\\train3.csv' 
X_train,X_val,X_test,y_train,y_val,y_test = load_data(direc,ratio) 
N,sl = X_train.shape 
config['sl'] = sl = X_train.shape[1] 
config['num_classes'] = num_classes = len(np.unique(y_train)) 

# Collect the costs in a numpy fashion 
epochs = np.floor(batch_size*max_iterations/N) 
print('Train %.0f samples in approximately %d epochs' %(N,epochs)) 
perf_collect = np.zeros((4,int(np.floor(max_iterations /100)))) 

#Instantiate a model 
model = Model(config) 




sess = tf.Session() #Depending on your use, do not forget to close the session 
writer = tf.summary.FileWriter("/home/rob/Dropbox/ml_projects/LSTM/log_tb", sess.graph) #writer for Tensorboard 
sess.run(model.init_op) 

step = 0 
cost_train_ma = -np.log(1/float(num_classes)+1e-9) #Moving average training cost 
acc_train_ma = 0.0 
try:
    for i in range(max_iterations):
        X_batch, y_batch = sample_batch(X_train, y_train, batch_size)

        #Next line does the actual training
        cost_train, acc_train, _ = sess.run([model.cost, model.accuracy, model.train_op], feed_dict={model.input: X_batch, model.labels: y_batch, model.keep_prob: dropout})
        cost_train_ma = cost_train_ma*0.99 + cost_train*0.01
        acc_train_ma = acc_train_ma*0.99 + acc_train*0.01
        if i % 100 == 0:
            #Evaluate training performance
            perf_collect[0, step] = cost_train
            perf_collect[1, step] = acc_train

            #Evaluate validation performance
            X_batch, y_batch = sample_batch(X_val, y_val, batch_size)
            cost_val, summ, acc_val = sess.run([model.cost, model.merged, model.accuracy], feed_dict={model.input: X_batch, model.labels: y_batch, model.keep_prob: 1.0})
            perf_collect[1, step] = cost_val
            perf_collect[2, step] = acc_val
            print('At %5.0f/%5.0f: COST %5.3f/%5.3f(%5.3f) -- Acc %5.3f/%5.3f(%5.3f)' % (i, max_iterations, cost_train, cost_val, cost_train_ma, acc_train, acc_val, acc_train_ma))

            #Write information to TensorBoard
            writer.add_summary(summ, i)
            writer.flush()

            step += 1
except KeyboardInterrupt:
    #Pressing ctrl-c will end training. This try-except ensures we still plot the performance
    pass

acc_test,cost_test = check_test(model,sess,X_test,y_test) 
epoch = float(i)*batch_size/N 
print('After training %.1f epochs, test accuracy is %5.3f and test cost is %5.3f'%(epoch,acc_test,cost_test)) 


plt.plot(perf_collect[0],label='Train') 
plt.plot(perf_collect[1],label = 'Valid') 
plt.plot(perf_collect[2],label = 'Valid accuracy') 
plt.axis([0, step, 0, np.max(perf_collect)]) 
plt.legend() 
plt.show() 

And this is the error traceback:

TypeError         Traceback (most recent call last) 
<ipython-input-1-200bd46aa85d> in <module>() 
    64  #Evaluate validation performance 
    65  X_batch, y_batch = sample_batch(X_val,y_val,batch_size) 
---> 66  cost_val, summ,acc_val = sess.run([model.cost,model.merged,model.accuracy],feed_dict = {model.input: X_batch, model.labels: y_batch, model.keep_prob:1.0}) 
    67  perf_collect[1,step] = cost_val 
    68  perf_collect[2,step] = acc_val 

\lib\site-packages\tensorflow\python\client\session.py in run(self, fetches, feed_dict, options, run_metadata) 
    765  try: 
    766  result = self._run(None, fetches, feed_dict, options_ptr, 
--> 767       run_metadata_ptr) 
    768  if run_metadata: 
    769   proto_data = tf_session.TF_GetBuffer(run_metadata_ptr) 

\lib\site-packages\tensorflow\python\client\session.py in _run(self, handle, fetches, feed_dict, options, run_metadata) 
    950 
    951  # Create a fetch handler to take care of the structure of fetches. 
--> 952  fetch_handler = _FetchHandler(self._graph, fetches, feed_dict_string) 
    953 
    954  # Run request and get response. 

\lib\site-packages\tensorflow\python\client\session.py in __init__(self, graph, fetches, feeds) 
    406  """ 
    407  with graph.as_default(): 
--> 408  self._fetch_mapper = _FetchMapper.for_fetch(fetches) 
    409  self._fetches = [] 
    410  self._targets = [] 

\lib\site-packages\tensorflow\python\client\session.py in for_fetch(fetch) 
    228  elif isinstance(fetch, (list, tuple)): 
    229  # NOTE(touts): This is also the code path for namedtuples. 
--> 230  return _ListFetchMapper(fetch) 
    231  elif isinstance(fetch, dict): 
    232  return _DictFetchMapper(fetch) 

\lib\site-packages\tensorflow\python\client\session.py in __init__(self, fetches) 
    335  """ 
    336  self._fetch_type = type(fetches) 
--> 337  self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches] 
    338  self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers) 
    339 

\lib\site-packages\tensorflow\python\client\session.py in <listcomp>(.0) 
    335  """ 
    336  self._fetch_type = type(fetches) 
--> 337  self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches] 
    338  self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers) 
    339 

\lib\site-packages\tensorflow\python\client\session.py in for_fetch(fetch) 
    225  if fetch is None: 
    226  raise TypeError('Fetch argument %r has invalid type %r' % 
--> 227      (fetch, type(fetch))) 
    228  elif isinstance(fetch, (list, tuple)): 
    229  # NOTE(touts): This is also the code path for namedtuples. 

TypeError: Fetch argument None has invalid type <class 'NoneType'> 

Any help would be appreciated!

Answer


From the error message, it appears that one of model.cost, model.merged, or model.accuracy is None. Looking at the original source, all of these attributes of the Model object (self.cost, self.merged, and self.accuracy) are assigned inside Model.__init__().

It is likely that one of the code changes you made to fit your data altered the control flow of that constructor, so that one of those assignments no longer happens. I would suggest first checking which of the attributes is None, and then stepping through Model.__init__() to figure out why it is not being set; a sketch of the check is below.
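For example, a minimal diagnostic sketch (assuming `model` is the Model instance built in the code above) that prints which fetch attribute is None before you call sess.run:

# Assumption: `model` is the Model instance from the question's code.
# Print which of the fetched attributes is None, so you know which
# assignment in Model.__init__() is being skipped.
for name in ('cost', 'merged', 'accuracy'):
    if getattr(model, name, None) is None:
        print('model.%s is None -- check where it is assigned in Model.__init__()' % name)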
