1

GRU 셀과 함께 RNN을 사용하여 텍스트를 만들려고하지만 텍스트를 생성하기 위해 텍스트에서 RNN 함수를 호출하면 오류가 발생합니다.Tensorflow GRU 변수가 이미 있음 ValueError

# -*- coding: UTF-8 -*- 
import tensorflow as tf 
import os 
import numpy as np 
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' 


wordp = "" 
with open("test.txt", encoding = "UTF-8") as file: 
    reader = file.readlines() 
    for row in reader: 
     wordp += row + " " 

words = set(wordp.split()) 

dictionary = {i: j for i, j in enumerate(words)} 
reverse = {j: i for i, j in enumerate(words)} 
training_epochs = 1 
learning_rate = .001 
input_dim = len(words) 
output_dim = 1 
num_units = 300 
num_units1 = 300 
output_prob = .5 
train = [] 

for line in reader: 
    line = line.split() 
    for i in range(len(line)): 
     x = line[i] 
     line[i] = np.zeros(len(dictionary)) 
     line[i][reverse[x]] = 1 
    train.append(line) 

def emb(param): 
    train = [] 
    global dictionary 
    global reverse 
    for i in range(len(param)): 
     x = param[i] 
     param[i] = np.zeros(len(dictionary)) 
     param[i][reverse[x]] = 1 
    return np.asarray(param, dtype = np.float32) 

def RNN(x, weights, biases): 
    cells = [] 

    LSTM = tf.contrib.rnn.GRUCell(num_units, activation=tf.nn.relu) 
    LSTM = tf.contrib.rnn.DropoutWrapper(LSTM, output_keep_prob=output_prob) 
    cells.append(LSTM) 
    LSTM1 = tf.contrib.rnn.GRUCell(num_units1, activation=tf.nn.relu) 
    LSTM1 = tf.contrib.rnn.DropoutWrapper(LSTM1, output_keep_prob=output_prob) 
    cells.append(LSTM1) 
    stack = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True) 
    lists = tf.split(x, num_or_size_splits = 3, axis = 0) 


    outputs, states = tf.nn.static_rnn(stack, lists, dtype = tf.float32) 
    return tf.matmul(outputs[-1], weights)+biases 

weights = tf.Variable(tf.random_normal([num_units,input_dim])) 
biases = tf.Variable(tf.random_normal([input_dim])) 
x_vals = tf.placeholder(tf.float32, shape = [None, input_dim]) 
y_vals = tf.placeholder(tf.float32, shape = [input_dim]) 
pred = RNN(x_vals,weights,biases) 
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y_vals)) 
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy) 
init = tf.global_variables_initializer() 

with tf.Session() as sess: 
    sess.run(init) 
    for j in range(training_epochs): 
     accu = 0 
     c = 0 
     count = 0 
     for line in train: 
      i = 0 
      while i+4 < len(line): 
       _, acc, cost = sess.run([optimizer, accuracy, cross_entropy], feed_dict={x_vals: line[i:i+3], y_vals: line[i+3]}) 
       i+=1 
       accu += acc 
       count += 1 
       c+= cost 
     print("Accuracy") 
     print(accu/count) 
     print("Cost") 
     print(c/count) 

    new = ["badly", "broken", "big"] 
    while len(" ".join(new)) <= 140: 
     var = RNN(emb(new[-3:]), weights, biases) 
     sess.run(var) 
     new.append(dictionary[var.index(max(var))]) 

교육 부분은 잘 작동하지만 내가 두 번째 시간 동안 RNN 함수를 호출 할 때 문제가 온다 : 다음과 같이

Traceback (most recent call last): 
    File "trump.py", line 93, in <module> 
    var = RNN(emb(new[-3:]), weights, biases) 
    File "trump.py", line 58, in RNN 
    outputs, states = tf.nn.static_rnn(stack, lists, dtype = tf.float32) 
    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\ops\rnn.py", line 1212, in static_rnn 
    (output, state) = call_cell() 
    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\ops\rnn.py", line 1199, in <lambda> 
    call_cell = lambda: cell(input_, state) 
    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py", line 180, in __call__ 
    return super(RNNCell, self).__call__(inputs, state) 
    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\layers\base.py", line 441, in __call__ 
    outputs = self.call(inputs, *args, **kwargs) 
    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py", line 916, in call 
    cur_inp, new_state = cell(cur_inp, cur_state) 
    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py", line 752, in __call__ 
    output, new_state = self._cell(inputs, state, scope) 
    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py", line 180, in __call__ 
    return super(RNNCell, self).__call__(inputs, state) 
    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\layers\base.py", line 441, in __call__ 
    outputs = self.call(inputs, *args, **kwargs) 
    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py", line 295, in call 
    self._kernel_initializer)) 
    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py", line 1017, in _linear 
    initializer=kernel_initializer) 
    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 1065, in get_variabl 
e 
    use_resource=use_resource, custom_getter=custom_getter) 
    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 962, in get_variable 

    use_resource=use_resource, custom_getter=custom_getter) 
    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 360, in get_variable 

    validate_shape=validate_shape, use_resource=use_resource) 
    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 1405, in wrapped_cus 
tom_getter 
    *args, **kwargs) 
    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py", line 183, in _rnn_get_vari 
able 
    variable = getter(*args, **kwargs) 
    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py", line 183, in _rnn_get_vari 
able 
    variable = getter(*args, **kwargs) 
    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 352, in _true_getter 

    use_resource=use_resource) 
    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 664, in _get_single_ 
variable 
    name, "".join(traceback.format_list(tb)))) 
ValueError: Variable rnn/multi_rnn_cell/cell_0/gru_cell/gates/kernel already exists, disallowed. Did you mean to set reuse=True 
in VarScope? Originally defined at: 

    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 1269, in __init__ 
    self._traceback = _extract_stack() 
    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 2506, in create_op 
    original_op=self._default_original_op, op_def=op_def) 
    File "C:\Users\Abhinav Kumar\Miniconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 767, in apply_ 
op 
    op_def=op_def) 

RNN 기능을 시작하는 코드이다. 다른 변수 범위에서 RNN 함수 안팎에서 다양한 변수를 넣으려고했지만 GRU 하위 변수가 초기화되지 않았다는 오류가 발생합니다.

도움 주셔서 감사합니다.

답변

0

코드에서 몇 가지 문제가 있습니다

  • accuracy 정의되지 않은

  • RNN그래프를 생성하는 기능입니다 (난 당신이 스크립트의 중간 버전을 게시 가정) (LSTM, LSTM1 등이 노드입니다).

    정상적으로 교육을 위해 작동하지만 테스트 시간의 두 번째 호출은 그래프에 새 노드를 생성하므로이 오류 메시지가 표시됩니다.

내가 RNN 기능과 같은 인라인 한 :

weights = tf.Variable(tf.random_normal([num_units,input_dim])) 
biases = tf.Variable(tf.random_normal([input_dim])) 
x_vals = tf.placeholder(tf.float32, shape = [None, input_dim]) 
y_vals = tf.placeholder(tf.float32, shape = [input_dim]) 

cells = [] 
LSTM = tf.contrib.rnn.GRUCell(num_units, activation=tf.nn.relu) 
LSTM = tf.contrib.rnn.DropoutWrapper(LSTM, output_keep_prob=output_prob) 
cells.append(LSTM) 
LSTM1 = tf.contrib.rnn.GRUCell(num_units1, activation=tf.nn.relu) 
LSTM1 = tf.contrib.rnn.DropoutWrapper(LSTM1, output_keep_prob=output_prob) 
cells.append(LSTM1) 
stack = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True) 
lists = tf.split(x_vals, num_or_size_splits = 3, axis = 0) 
outputs, states = tf.nn.static_rnn(stack, lists, dtype = tf.float32) 
pred = tf.matmul(outputs[-1], weights)+biases 

tf.argmax(pred, dimension=1)에 시험 대상을 변경 :

while len(" ".join(new)) <= 140: 
    test_input = emb(new[-3:]) 
    max_word = tf.argmax(pred, dimension=1)[0] 
    word_index = sess.run(max_word, feed_dict={x_vals: test_input}) 
    print dictionary[word_index] 

과 나를 위해 일부 단어를 생성합니다. 나는 커다란 훈련 파일이 없기 때문에 출력물은 나에게는 의미가 없지만 잘하면 당신의 경우에는 더 좋을 것이다.

관련 문제