You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

TextClassificationTrain.cs 8.8 kB

6 years ago
6 years ago
6 years ago
6 years ago
6 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202
  1. using System;
  2. using System.Collections;
  3. using System.Collections.Generic;
  4. using System.IO;
  5. using System.Linq;
  6. using System.Text;
  7. using NumSharp;
  8. using Tensorflow;
  9. using Tensorflow.Keras.Engine;
  10. using TensorFlowNET.Examples.Text.cnn_models;
  11. using TensorFlowNET.Examples.TextClassification;
  12. using TensorFlowNET.Examples.Utility;
  13. using static Tensorflow.Python;
  14. namespace TensorFlowNET.Examples.CnnTextClassification
  15. {
  16. /// <summary>
  17. /// https://github.com/dongjun-Lee/text-classification-models-tf
  18. /// </summary>
  19. public class TextClassificationTrain : IExample
  20. {
  21. public int Priority => 100;
  22. public bool Enabled { get; set; } = false;
  23. public string Name => "Text Classification";
  24. public int? DataLimit = null;
  25. public bool ImportGraph { get; set; } = true;
  26. private string dataDir = "text_classification";
  27. private string dataFileName = "dbpedia_csv.tar.gz";
  28. public string model_name = "vd_cnn"; // word_cnn | char_cnn | vd_cnn | word_rnn | att_rnn | rcnn
  29. private const int CHAR_MAX_LEN = 1014;
  30. private const int NUM_CLASS = 2;
  31. private const int BATCH_SIZE = 64;
  32. private const int NUM_EPOCHS = 10;
  33. protected float loss_value = 0;
  34. public bool Run()
  35. {
  36. PrepareData();
  37. var graph = tf.Graph().as_default();
  38. return with(tf.Session(graph), sess =>
  39. {
  40. if (ImportGraph)
  41. return RunWithImportedGraph(sess, graph);
  42. else
  43. return RunWithBuiltGraph(sess, graph);
  44. });
  45. }
  46. protected virtual bool RunWithImportedGraph(Session sess, Graph graph)
  47. {
  48. Console.WriteLine("Building dataset...");
  49. var (x, y, alphabet_size) = DataHelpers.build_char_dataset("train", model_name, CHAR_MAX_LEN, DataLimit);
  50. Console.WriteLine("\tDONE");
  51. var (train_x, valid_x, train_y, valid_y) = train_test_split(x, y, test_size: 0.15f);
  52. Console.WriteLine("Import graph...");
  53. var meta_file = model_name + ".meta";
  54. tf.train.import_meta_graph(Path.Join("graph", meta_file));
  55. Console.WriteLine("\tDONE");
  56. // definitely necessary, otherwize will get the exception of "use uninitialized variable"
  57. sess.run(tf.global_variables_initializer());
  58. var train_batches = batch_iter(train_x, train_y, BATCH_SIZE, NUM_EPOCHS);
  59. var num_batches_per_epoch = (len(train_x) - 1); // BATCH_SIZE + 1
  60. double max_accuracy = 0;
  61. Tensor is_training = graph.get_operation_by_name("is_training");
  62. Tensor model_x = graph.get_operation_by_name("x");
  63. Tensor model_y = graph.get_operation_by_name("y");
  64. Tensor loss = graph.get_operation_by_name("loss/loss");
  65. //var optimizer_nodes = graph._nodes_by_name.Keys.Where(key => key.Contains("optimizer")).ToArray();
  66. Tensor optimizer = graph.get_operation_by_name("loss/optimizer");
  67. Tensor global_step = graph.get_operation_by_name("global_step");
  68. Tensor accuracy = graph.get_operation_by_name("accuracy/accuracy");
  69. int i = 0;
  70. foreach (var (x_batch, y_batch) in train_batches)
  71. {
  72. i++;
  73. Console.WriteLine("Training on batch " + i);
  74. var train_feed_dict = new Hashtable
  75. {
  76. [model_x] = x_batch,
  77. [model_y] = y_batch,
  78. [is_training] = true,
  79. };
  80. // original python:
  81. //_, step, loss = sess.run([model.optimizer, model.global_step, model.loss], feed_dict = train_feed_dict)
  82. var result = sess.run(new Tensor[] { optimizer, global_step, loss }, train_feed_dict);
  83. // exception here, loss value seems like a float[]
  84. //loss_value = result[2];
  85. var step = result[1];
  86. if (step % 10 == 0)
  87. Console.WriteLine($"Step {step} loss: {result[2]}");
  88. if (step % 100 == 0)
  89. {
  90. continue;
  91. // # Test accuracy with validation data for each epoch.
  92. var valid_batches = batch_iter(valid_x, valid_y, BATCH_SIZE, 1);
  93. var (sum_accuracy, cnt) = (0, 0);
  94. foreach (var (valid_x_batch, valid_y_batch) in valid_batches)
  95. {
  96. // valid_feed_dict = {
  97. // model.x: valid_x_batch,
  98. // model.y: valid_y_batch,
  99. // model.is_training: False
  100. // }
  101. // accuracy = sess.run(model.accuracy, feed_dict = valid_feed_dict)
  102. // sum_accuracy += accuracy
  103. // cnt += 1
  104. }
  105. // valid_accuracy = sum_accuracy / cnt
  106. // print("\nValidation Accuracy = {1}\n".format(step // num_batches_per_epoch, sum_accuracy / cnt))
  107. // # Save model
  108. // if valid_accuracy > max_accuracy:
  109. // max_accuracy = valid_accuracy
  110. // saver.save(sess, "{0}/{1}.ckpt".format(args.model, args.model), global_step = step)
  111. // print("Model is saved.\n")
  112. }
  113. }
  114. return false;
  115. }
  116. protected virtual bool RunWithBuiltGraph(Session session, Graph graph)
  117. {
  118. Console.WriteLine("Building dataset...");
  119. var (x, y, alphabet_size) = DataHelpers.build_char_dataset("train", model_name, CHAR_MAX_LEN, DataLimit);
  120. var (train_x, valid_x, train_y, valid_y) = train_test_split(x, y, test_size: 0.15f);
  121. ITextClassificationModel model = null;
  122. switch (model_name) // word_cnn | char_cnn | vd_cnn | word_rnn | att_rnn | rcnn
  123. {
  124. case "word_cnn":
  125. case "char_cnn":
  126. case "word_rnn":
  127. case "att_rnn":
  128. case "rcnn":
  129. throw new NotImplementedException();
  130. break;
  131. case "vd_cnn":
  132. model = new VdCnn(alphabet_size, CHAR_MAX_LEN, NUM_CLASS);
  133. break;
  134. }
  135. // todo train the model
  136. return false;
  137. }
  138. // TODO: this originally is an SKLearn utility function. it randomizes train and test which we don't do here
  139. private (NDArray, NDArray, NDArray, NDArray) train_test_split(NDArray x, NDArray y, float test_size = 0.3f)
  140. {
  141. Console.WriteLine("Splitting in Training and Testing data...");
  142. int len = x.shape[0];
  143. //int classes = y.Data<int>().Distinct().Count();
  144. //int samples = len / classes;
  145. int train_size = (int)Math.Round(len * (1 - test_size));
  146. var train_x = x[new Slice(stop:train_size), new Slice()];
  147. var valid_x = x[new Slice(start: train_size+1), new Slice()];
  148. var train_y = y[new Slice(stop: train_size)];
  149. var valid_y = y[new Slice(start: train_size + 1)];
  150. Console.WriteLine("\tDONE");
  151. return (train_x, valid_x, train_y, valid_y);
  152. }
  153. private IEnumerable<(NDArray, NDArray)> batch_iter(NDArray inputs, NDArray outputs, int batch_size, int num_epochs)
  154. {
  155. var num_batches_per_epoch = (len(inputs) - 1); // batch_size + 1
  156. foreach (var epoch in range(num_epochs))
  157. {
  158. foreach (var batch_num in range(num_batches_per_epoch))
  159. {
  160. var start_index = batch_num * batch_size;
  161. var end_index = Math.Min((batch_num + 1) * batch_size, len(inputs));
  162. yield return (inputs[new Slice(start_index, end_index)], outputs[new Slice(start_index,end_index)]);
  163. }
  164. }
  165. }
  166. public void PrepareData()
  167. {
  168. string url = "https://github.com/le-scientifique/torchDatasets/raw/master/dbpedia_csv.tar.gz";
  169. Web.Download(url, dataDir, dataFileName);
  170. Compress.ExtractTGZ(Path.Join(dataDir, dataFileName), dataDir);
  171. if (ImportGraph)
  172. {
  173. // download graph meta data
  174. var meta_file = model_name + ".meta";
  175. url = "https://raw.githubusercontent.com/SciSharp/TensorFlow.NET/master/graph/" + meta_file;
  176. Web.Download(url, "graph", meta_file);
  177. }
  178. }
  179. }
  180. }

tensorflow框架的.NET版本,提供了丰富的特性和API,可以借此很方便地在.NET平台下搭建深度学习训练与推理流程。