You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

hetu_ncf.py 1.9 kB

4 years ago
1234567891011121314151617181920212223242526272829303132333435363738394041424344454647
  1. import hetu as ht
  2. from hetu import init
  3. import numpy as np
  4. def neural_mf(user_input, item_input, y_, num_users, num_items):
  5. embed_dim = 8
  6. layers = [64, 32, 16, 8]
  7. learning_rate = 0.01
  8. User_Embedding = init.random_normal(
  9. (num_users, embed_dim + layers[0] // 2), stddev=0.01, name="user_embed", ctx=ht.cpu(0))
  10. Item_Embedding = init.random_normal(
  11. (num_items, embed_dim + layers[0] // 2), stddev=0.01, name="item_embed", ctx=ht.cpu(0))
  12. user_latent = ht.embedding_lookup_op(
  13. User_Embedding, user_input, ctx=ht.cpu(0))
  14. item_latent = ht.embedding_lookup_op(
  15. Item_Embedding, item_input, ctx=ht.cpu(0))
  16. mf_user_latent = ht.slice_op(user_latent, (0, 0), (-1, embed_dim))
  17. mlp_user_latent = ht.slice_op(user_latent, (0, embed_dim), (-1, -1))
  18. mf_item_latent = ht.slice_op(item_latent, (0, 0), (-1, embed_dim))
  19. mlp_item_latent = ht.slice_op(item_latent, (0, embed_dim), (-1, -1))
  20. W1 = init.random_normal((layers[0], layers[1]), stddev=0.1, name='W1')
  21. W2 = init.random_normal((layers[1], layers[2]), stddev=0.1, name='W2')
  22. W3 = init.random_normal((layers[2], layers[3]), stddev=0.1, name='W3')
  23. W4 = init.random_normal((embed_dim + layers[3], 1), stddev=0.1, name='W4')
  24. mf_vector = ht.mul_op(mf_user_latent, mf_item_latent)
  25. mlp_vector = ht.concat_op(mlp_user_latent, mlp_item_latent, axis=1)
  26. fc1 = ht.matmul_op(mlp_vector, W1)
  27. relu1 = ht.relu_op(fc1)
  28. fc2 = ht.matmul_op(relu1, W2)
  29. relu2 = ht.relu_op(fc2)
  30. fc3 = ht.matmul_op(relu2, W3)
  31. relu3 = ht.relu_op(fc3)
  32. concat_vector = ht.concat_op(mf_vector, relu3, axis=1)
  33. y = ht.matmul_op(concat_vector, W4)
  34. y = ht.sigmoid_op(y)
  35. loss = ht.binarycrossentropy_op(y, y_)
  36. loss = ht.reduce_mean_op(loss, [0])
  37. opt = ht.optim.SGDOptimizer(learning_rate=learning_rate)
  38. train_op = opt.minimize(loss)
  39. return loss, y, train_op