Edit History

  • Edit by nowp
    Date: 2021.04.23

NaN appears after running a simple logistic regression example in TensorFlow v1


    import tensorflow.compat.v1 as tf
    tf.disable_v2_behavior()
    import numpy as np
    
    x_data = [[25,10,3],[29,6,4],[0,1,1],[28,2,0],[12,14,1],[5,13,3],[28,1,4],[20,0,3],[5,2,0],[3,0,1],[2,6,3],[20,2,2],[7,15,4],[27,14,2],[18,8,0],[1,12,3],[21,5,4],[19,12,2],[2,5,3],[17,0,4],[5,5,0],[15,3,3],[25,7,4],[26,3,3],[14,12,1],[0,11,0],[9,13,2],[6,6,3],[17,15,2],[19,13,0]] 
    y_data = [[0,1,0],[0,0,1],[0,0,1],[0,0,1],[0,1,0],[0,1,0],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,1,0],[1,0,0],[0,0,1],[0,0,1],[0,0,1],[1,0,0],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,1,0],[0,0,1],[0,1,0],[0,0,1],[1,0,0],[0,1,0]] 
    
    X=tf.placeholder(tf.float32,[None,3]) 
    Y=tf.placeholder(tf.float32,[None,3])
    nb_classes = 3 
    
    W=tf.Variable(tf.random_normal([3, nb_classes]), name = 'weight')
    b=tf.Variable(tf.random_normal([nb_classes]), name = 'bias')
    
    hypothesis = tf.nn.softmax(tf.matmul(X,W) + b)
    
    cost = tf.reduce_mean( - tf.reduce_sum(Y * tf.log(hypothesis) + (1-Y) * tf.log(1-hypothesis)))
    
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
    train = optimizer.minimize(cost)
    
    #-----------------------------------------------------------------------#
    
    xdata_new = [[1,11,7],[1,3,4],[1,1,0],[1,1,0]]
    
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    
    for step in range(2001):
        _, cost_val = sess.run([train, cost], feed_dict={X: x_data, Y: y_data})

        if step % 100 == 0:
            print(step, cost_val)
        sess.run(hypothesis, feed_dict={X: x_data})

        a = sess.run(hypothesis, feed_dict={X: xdata_new})
    print(a, sess.run(tf.arg_max(a, 1)))
    

    I built the model as above, but once I fed in more data the cost comes out as NaN. I've only tried adjusting the learning rate, and that didn't fix it. Is there a way to get the cost down to a 0.xxxx value?
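
    For context, the likely culprit is the hand-written cost: the (1-Y) * tf.log(1-hypothesis) term evaluates log(0) as soon as any softmax output saturates at exactly 1, which the large raw inputs (values up to 29) make easy, and log(0) = -inf turns the cost into NaN. Below is a minimal sketch of a numerically stable variant, assuming that is the cause; it passes the raw logits to TensorFlow's built-in softmax cross-entropy instead of taking the log of the softmax output by hand (the name logits is new here, the rest matches the question's code).

    import tensorflow.compat.v1 as tf
    tf.disable_v2_behavior()

    X = tf.placeholder(tf.float32, [None, 3])
    Y = tf.placeholder(tf.float32, [None, 3])
    nb_classes = 3

    W = tf.Variable(tf.random_normal([3, nb_classes]), name='weight')
    b = tf.Variable(tf.random_normal([nb_classes]), name='bias')

    # Keep the raw logits; softmax_cross_entropy_with_logits_v2 applies
    # softmax internally in a numerically stable way, so log(0) never occurs.
    logits = tf.matmul(X, W) + b
    hypothesis = tf.nn.softmax(logits)  # still available for predictions

    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=logits))
    train = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(cost)

    If the inputs stay large, scaling x_data (for example dividing each column by its maximum) also keeps the logits from saturating and makes it easier for the cost to fall into the 0.xxxx range at this learning rate.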

  • Edit by unknown user
    Date: 2021.04.22

    A question about TensorFlow.


    import tensorflow.compat.v1 as tf
    tf.disable_v2_behavior()
    import numpy as np

    x_data = [[25,10,3],[29,6,4],[0,1,1],[28,2,0],[12,14,1],[5,13,3],[28,1,4],[20,0,3],[5,2,0],[3,0,1],[2,6,3],[20,2,2],[7,15,4],[27,14,2],[18,8,0],[1,12,3],[21,5,4],[19,12,2],[2,5,3],[17,0,4],[5,5,0],[15,3,3],[25,7,4],[26,3,3],[14,12,1],[0,11,0],[9,13,2],[6,6,3],[17,15,2],[19,13,0]]
    y_data = [[0,1,0],[0,0,1],[0,0,1],[0,0,1],[0,1,0],[0,1,0],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,1,0],[1,0,0],[0,0,1],[0,0,1],[0,0,1],[1,0,0],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,1,0],[0,0,1],[0,1,0],[0,0,1],[1,0,0],[0,1,0]]

    X=tf.placeholder(tf.float32,[None,3])
    Y=tf.placeholder(tf.float32,[None,3])
    nb_classes = 3

    W=tf.Variable(tf.random_normal([3, nb_classes]), name = 'weight')
    b=tf.Variable(tf.random_normal([nb_classes]), name = 'bias')

    hypothesis = tf.nn.softmax(tf.matmul(X,W) + b)

    cost = tf.reduce_mean( - tf.reduce_sum(Y * tf.log(hypothesis) + (1-Y) * tf.log(1-hypothesis)))

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
    train = optimizer.minimize(cost)

    #-----------------------------------------------------------------------#

    xdata_new = [[1,11,7],[1,3,4],[1,1,0],[1,1,0]]

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    for step in range(2001):
        _, cost_val = sess.run([train, cost], feed_dict={X: x_data, Y: y_data})

        if step % 100 == 0:
            print(step, cost_val)
        sess.run(hypothesis, feed_dict={X: x_data})

        a = sess.run(hypothesis, feed_dict={X: xdata_new})
    print(a, sess.run(tf.arg_max(a, 1)))

    I built the model as above, but when I put in a lot of data, NaN shows up. I've only tried adjusting the learning rate and it didn't solve it. Could I get the cost to come out as a 0.xxxx value?
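
    An alternative quick fix, as a sketch against the snippet above: keep the hand-written cost but clip the softmax output away from 0 and 1 so the logs stay finite. The epsilon value is an arbitrary choice.

    # hypothesis and Y as defined in the snippet above.
    # Clipping keeps every probability inside (eps, 1 - eps),
    # so tf.log never receives 0 and the cost cannot become NaN.
    eps = 1e-7
    clipped = tf.clip_by_value(hypothesis, eps, 1.0 - eps)
    cost = tf.reduce_mean(-tf.reduce_sum(
        Y * tf.log(clipped) + (1 - Y) * tf.log(1 - clipped), axis=1))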