
Example-fsharp.md

Linear Regression in `Eager` mode:

```fsharp
#r "nuget: TensorFlow.Net"
#r "nuget: TensorFlow.Keras"
#r "nuget: SciSharp.TensorFlow.Redist"

open Tensorflow
open Tensorflow.NumPy
open type Tensorflow.Binding
open type Tensorflow.KerasApi

let tf = New<tensorflow>()
tf.enable_eager_execution()

// Parameters
let training_steps = 1000
let learning_rate = 0.01f
let display_step = 100

// Sample data
let train_X =
    np.array(3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f,
             7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f)
let train_Y =
    np.array(1.7f, 2.76f, 2.09f, 3.19f, 1.694f, 1.573f, 3.366f, 2.596f, 2.53f, 1.221f,
             2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f)
let n_samples = train_X.shape.[0]

// Fixed initial values so the demo is reproducible.
let W = tf.Variable(-0.06f, name = "weight")
let b = tf.Variable(-0.73f, name = "bias")
let optimizer = keras.optimizers.SGD(learning_rate)

// Run training for the given number of steps.
for step = 1 to (training_steps + 1) do
    // Run the optimization to update W and b values.
    // Wrap the computation inside a GradientTape for automatic differentiation.
    use g = tf.GradientTape()
    // Linear regression (Wx + b).
    let pred = W * train_X + b
    // Mean square error.
    let loss = tf.reduce_sum(tf.pow(pred - train_Y, 2)) / (2 * n_samples)

    // Stop recording and compute the gradients of the loss w.r.t. W and b.
    let gradients = g.gradient(loss, struct (W, b))

    // Update W and b following the gradients.
    optimizer.apply_gradients(zip(gradients, struct (W, b)))

    if (step % display_step) = 0 then
        let pred = W * train_X + b
        let loss = tf.reduce_sum(tf.pow(pred - train_Y, 2)) / (2 * n_samples)
        printfn $"step: {step}, loss: {loss.numpy()}, W: {W.numpy()}, b: {b.numpy()}"
```
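
As a sanity check on what the training loop should converge to, the plain-F# sketch below (illustrative only, no TensorFlow dependency; the `xs`, `ys`, `covXY`, and `varX` bindings are my own names, not part of the example above) computes the closed-form ordinary-least-squares slope and intercept for the same sample data. Gradient descent on the mean-square-error loss should drive `W` and `b` toward these values.

```fsharp
// Closed-form least-squares fit for the same sample data:
// slope = cov(X, Y) / var(X), intercept = mean(Y) - slope * mean(X).
let xs = [ 3.3; 4.4; 5.5; 6.71; 6.93; 4.168; 9.779; 6.182; 7.59; 2.167;
           7.042; 10.791; 5.313; 7.997; 5.654; 9.27; 3.1 ]
let ys = [ 1.7; 2.76; 2.09; 3.19; 1.694; 1.573; 3.366; 2.596; 2.53; 1.221;
           2.827; 3.465; 1.65; 2.904; 2.42; 2.94; 1.3 ]

let meanX = List.average xs
let meanY = List.average ys

// Covariance of X and Y, and variance of X (both unnormalized; the factor cancels).
let covXY = List.sum (List.map2 (fun x y -> (x - meanX) * (y - meanY)) xs ys)
let varX  = List.sum (List.map (fun x -> (x - meanX) * (x - meanX)) xs)

let slope = covXY / varX
let intercept = meanY - slope * meanX

printfn $"closed-form W: {slope}, b: {intercept}"
```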