
SegmentationFree.prototxt 4.9 kB

# Input blob shape: N x C x H x W = 1 x 3 x 160 x 40
input: "data"
input_dim: 1
input_dim: 3
input_dim: 160
input_dim: 40

# Block 0: 3x3 conv (32 filters) -> BatchNorm -> Scale -> ReLU -> 2x2 max pool
layer {
  name: "conv0"
  type: "Convolution"
  bottom: "data"
  top: "conv0"
  convolution_param {
    num_output: 32
    bias_term: true
    pad_h: 1
    pad_w: 1
    kernel_h: 3
    kernel_w: 3
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "bn0"
  type: "BatchNorm"
  bottom: "conv0"
  top: "bn0"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "bn0_scale"
  type: "Scale"
  bottom: "bn0"
  top: "bn0"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu0"
  type: "ReLU"
  bottom: "bn0"
  top: "bn0"
}
layer {
  name: "pool0"
  type: "Pooling"
  bottom: "bn0"
  top: "pool0"
  pooling_param {
    pool: MAX
    kernel_h: 2
    kernel_w: 2
    stride_h: 2
    stride_w: 2
    pad_h: 0
    pad_w: 0
  }
}

# Block 1: 3x3 conv (64 filters) -> BatchNorm -> Scale -> ReLU -> 2x2 max pool
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "pool0"
  top: "conv1"
  convolution_param {
    num_output: 64
    bias_term: true
    pad_h: 1
    pad_w: 1
    kernel_h: 3
    kernel_w: 3
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "bn1"
  type: "BatchNorm"
  bottom: "conv1"
  top: "bn1"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "bn1_scale"
  type: "Scale"
  bottom: "bn1"
  top: "bn1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "bn1"
  top: "bn1"
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "bn1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_h: 2
    kernel_w: 2
    stride_h: 2
    stride_w: 2
    pad_h: 0
    pad_w: 0
  }
}

# Block 2: 3x3 conv (128 filters) -> BatchNorm -> Scale -> ReLU -> 2x2 max pool
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  convolution_param {
    num_output: 128
    bias_term: true
    pad_h: 1
    pad_w: 1
    kernel_h: 3
    kernel_w: 3
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "bn2"
  type: "BatchNorm"
  bottom: "conv2"
  top: "bn2"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "bn2_scale"
  type: "Scale"
  bottom: "bn2"
  top: "bn2"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "bn2"
  top: "bn2"
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "bn2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_h: 2
    kernel_w: 2
    stride_h: 2
    stride_w: 2
    pad_h: 0
    pad_w: 0
  }
}

# Asymmetric 1x5 convolution (512 filters), no padding
layer {
  name: "conv_512_15"
  type: "Convolution"
  bottom: "pool2"
  top: "conv_512_15"
  convolution_param {
    num_output: 512
    bias_term: true
    pad_h: 0
    pad_w: 0
    kernel_h: 1
    kernel_w: 5
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "batch_normalization_1"
  type: "BatchNorm"
  bottom: "conv_512_15"
  top: "batch_normalization_1"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_1_scale"
  type: "Scale"
  bottom: "batch_normalization_1"
  top: "batch_normalization_1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "activation_1"
  type: "ReLU"
  bottom: "batch_normalization_1"
  top: "batch_normalization_1"
}

# Asymmetric 5x1 convolution (512 filters), no padding
layer {
  name: "conv_512_51"
  type: "Convolution"
  bottom: "batch_normalization_1"
  top: "conv_512_51"
  convolution_param {
    num_output: 512
    bias_term: true
    pad_h: 0
    pad_w: 0
    kernel_h: 5
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "batch_normalization_2"
  type: "BatchNorm"
  bottom: "conv_512_51"
  top: "batch_normalization_2"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_2_scale"
  type: "Scale"
  bottom: "batch_normalization_2"
  top: "batch_normalization_2"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "activation_2"
  type: "ReLU"
  bottom: "batch_normalization_2"
  top: "batch_normalization_2"
}

# 1x1 convolution expanding to 1024 channels
layer {
  name: "conv_1024_11"
  type: "Convolution"
  bottom: "batch_normalization_2"
  top: "conv_1024_11"
  convolution_param {
    num_output: 1024
    bias_term: true
    pad_h: 0
    pad_w: 0
    kernel_h: 1
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}
layer {
  name: "batch_normalization_3"
  type: "BatchNorm"
  bottom: "conv_1024_11"
  top: "batch_normalization_3"
  batch_norm_param {
    moving_average_fraction: 0.99
    eps: 0.001
  }
}
layer {
  name: "batch_normalization_3_scale"
  type: "Scale"
  bottom: "batch_normalization_3"
  top: "batch_normalization_3"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "activation_3"
  type: "ReLU"
  bottom: "batch_normalization_3"
  top: "batch_normalization_3"
}

# 1x1 convolution producing 84 class scores per spatial position
layer {
  name: "conv_class_11"
  type: "Convolution"
  bottom: "batch_normalization_3"
  top: "conv_class_11"
  convolution_param {
    num_output: 84
    bias_term: true
    pad_h: 0
    pad_w: 0
    kernel_h: 1
    kernel_w: 1
    stride_h: 1
    stride_w: 1
  }
}

# Softmax over the 84 class channels at each position
layer {
  name: "prob"
  type: "Softmax"
  bottom: "conv_class_11"
  top: "prob"
}