Basic model algorithms: KMeans in TensorFlow

Running TensorFlow's KMeans algorithm on MNIST. The training images are grouped into k = 25 clusters by cosine distance, each centroid is then labeled with the most frequent digit among the samples assigned to it, and that centroid-to-label mapping is used to classify the test set.

The code is shown below:

from __future__ import print_function

import os
import numpy as np
import tensorflow as tf
from tensorflow.contrib.factorization import KMeans

# Ignore all GPUs; k-means does not benefit from them.
os.environ["CUDA_VISIBLE_DEVICES"] = ""

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
full_data_x = mnist.train.images

# Parameters
num_steps = 50      # total number of training steps
batch_size = 1024   # batch size
k = 25              # number of clusters
num_classes = 10    # the 10 digits
num_features = 784  # each image is 28x28 pixels => 784 features

# Input images
X = tf.placeholder(tf.float32, shape=[None, num_features])
# Labels (used to assign a label to each centroid, and for testing)
Y = tf.placeholder(tf.float32, shape=[None, num_classes])

# K-Means parameters
kmeans = KMeans(inputs=X, num_clusters=k, distance_metric='cosine',
                use_mini_batch=True)

# Build the KMeans graph
training_graph = kmeans.training_graph()

if len(training_graph) > 6:  # TensorFlow 1.4+
    (all_scores, cluster_idx, scores, cluster_centers_initialized,
     cluster_centers_var, init_op, train_op) = training_graph
else:
    (all_scores, cluster_idx, scores, cluster_centers_initialized,
     init_op, train_op) = training_graph

cluster_idx = cluster_idx[0]  # fix for cluster_idx being a tuple
avg_distance = tf.reduce_mean(scores)

# Initialize the variables (i.e. assign their default values)
init_vars = tf.global_variables_initializer()

# Start a TensorFlow session
sess = tf.Session()

# Run the initializers
sess.run(init_vars, feed_dict={X: full_data_x})
sess.run(init_op, feed_dict={X: full_data_x})

# Training
for i in range(1, num_steps + 1):
    _, d, idx = sess.run([train_op, avg_distance, cluster_idx],
                         feed_dict={X: full_data_x})
    if i % 10 == 0 or i == 1:
        print("Step %i, Avg Distance: %f" % (i, d))

# Assign a label to each centroid.
# Count the number of samples of each label per centroid, using the nearest
# centroid of each training sample (given by 'idx').
counts = np.zeros(shape=(k, num_classes))
for i in range(len(idx)):
    counts[idx[i]] += mnist.train.labels[i]
# Assign the most frequent label to each centroid
labels_map = [np.argmax(c) for c in counts]
labels_map = tf.convert_to_tensor(labels_map)

# Evaluation ops
# Lookup: centroid_id -> label
cluster_label = tf.nn.embedding_lookup(labels_map, cluster_idx)
# Compute accuracy
correct_prediction = tf.equal(cluster_label, tf.cast(tf.argmax(Y, 1), tf.int32))
accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Test the model
test_x, test_y = mnist.test.images, mnist.test.labels
print("Test Accuracy:", sess.run(accuracy_op, feed_dict={X: test_x, Y: test_y}))
 

Output:

Step 1, Avg Distance: 0.341471
Step 10, Avg Distance: 0.221609
Step 20, Avg Distance: 0.220328
Step 30, Avg Distance: 0.219776
Step 40, Avg Distance: 0.219419
Step 50, Avg Distance: 0.219154
Test Accuracy: 0.7127
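Roughly 71% test accuracy is expected for this setup: 25 cosine-distance clusters are shared among 10 digit classes, so each centroid's majority label is only a coarse classifier (increasing k typically improves accuracy). Note also that tf.contrib.factorization no longer exists in TensorFlow 2.x. As a rough modern equivalent, here is a minimal sketch using scikit-learn, where L2-normalizing the pixel vectors makes Euclidean k-means approximate the cosine metric; this is an approximation under those assumptions, not the exact algorithm above:

import numpy as np
import tensorflow as tf
from sklearn.cluster import KMeans
from sklearn.preprocessing import normalize

# Load MNIST via Keras (the tensorflow.examples tutorial loader is gone in TF 2.x).
(train_x, train_y), (test_x, test_y) = tf.keras.datasets.mnist.load_data()
train_x = normalize(train_x.reshape(-1, 784).astype(np.float32))  # L2-normalize
test_x = normalize(test_x.reshape(-1, 784).astype(np.float32))

k = 25
km = KMeans(n_clusters=k, n_init=10, random_state=0).fit(train_x)

# Majority-vote label per cluster, mirroring the counts/labels_map step above.
labels_map = np.zeros(k, dtype=np.int64)
for c in range(k):
    members = train_y[km.labels_ == c]
    if members.size:
        labels_map[c] = np.bincount(members).argmax()

pred = labels_map[km.predict(test_x)]
print("Test Accuracy:", (pred == test_y).mean())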
