logo资料库

BP神经网络算法的java实现.doc

第1页 / 共12页
第2页 / 共12页
第3页 / 共12页
第4页 / 共12页
第5页 / 共12页
第6页 / 共12页
第7页 / 共12页
第8页 / 共12页
资料共12页,剩余部分请下载后查看
public ArrayList arWeight=new ArrayList(); public Node() { activation = 0; error = 0; threshold = 0; amultinum = 1; public double activation; public double threshold; public double weights[]; public double detweightslast[]; public double detthresholdlast; public double error; public int numOfweights; public double amultinum; 1. package ann; 2. 3. 4. public class Node implements java.io.Serializable,Cloneable { 5. 6. 7. 8. 9. 10. 11. 12. 13. 14. 15. 16. 17. 18. 19. 20. 21. 22. 23. 24. 25. 26. 27. 28. 29. 30. 31. 32. 33. 34. 35. 36. 37. 38. 39. 40. 41. 42. 43. 44. amultinum = 1; numOfweights = numOfweights0; weights = new double[numOfweights]; detweightslast = new double[numOfweights]; detthresholdlast = 0; error = 0; int i; for (i = 0; i numOfweights; i++) { amultinum = 1; numOfweights = numOfweights0; activation = act; threshold = thr; weights = new double[numOfweights]; detweightslast = new double[numOfweights]; weights[i] = (2 * Math.random() - 1) * amultinum; detweightslast[i] = 0; } threshold = (2 * Math.random() - 1) * amultinum; public Node(double act, double thr, int numOfweights0) { public Node(int numOfweights0) { } }
} public void setWeight(ArrayList weight){ weights = new double[weight.size()]; for(int i=0;i
// --- Nnetwork fields and constructors (class header lies outside this
// excerpt). Reconstructed from a garbled extraction: '<' was stripped from
// every loop condition and 'connecttype0' was split by a page break. ---

/** Learning-rate factor for the output layer. NOTE(review): negative because
 *  the output error is computed as (activation - desired) — confirm sign
 *  convention against the error-calculation methods. */
public double LO = -10;
/** Target outputs for the pattern currently being trained. */
public double desiredOutputs[];
/** Momentum coefficient applied to the previous deltas. */
public double a = 0.2;
/** Connection-topology selector; stored here, semantics defined elsewhere. */
public int connecttype;
public double total_error, total_error_one_circle_all;
public double error_compared_to_tolerance;
/** Accumulated squared error per output node over one training circle. */
double total_error_one_circle[];
public int trainnum;

/** No-arg constructor for serialisation / deferred initialisation. */
public Nnetwork() {
}

/**
 * Builds the network topology.
 *
 * @param inp          number of input nodes
 * @param hide         node count for each hidden layer
 * @param outp         number of output nodes
 * @param hidenum      number of hidden layers
 * @param connecttype0 connection type flag (stored; used elsewhere)
 */
public Nnetwork(int inp, int hide[], int outp, int hidenum, int connecttype0) {
    connecttype = connecttype0;
    int i, j;
    n_input = inp;
    n_output = outp;
    total_error_one_circle = new double[outp];
    desiredOutputs = new double[outp];
    output = new Layer(n_output);
    for (i = 0; i < n_output; i++) {
        // output nodes have no outgoing connections
        output.nodes[i] = new Node(0);
    }
    n_layer = hidenum;
    hidelayer = new Layer[n_layer];
    // Build hidden layers back-to-front so each node knows its fan-out
    // (number of nodes in the layer that follows it).
    for (i = n_layer - 1; i >= 0; i--) {
        hidelayer[i] = new Layer(hide[i]);
        for (j = 0; j < hidelayer[i].N_NODES; j++) {
            if (i == n_layer - 1) {
                hidelayer[i].nodes[j] = new Node(outp);
            } else {
                hidelayer[i].nodes[j] = new Node(hidelayer[i + 1].N_NODES);
            }
        }
    }
    input = new Layer(n_input);
    for (i = 0; i < n_input; i++) {
        input.nodes[i] = new Node(hidelayer[0].N_NODES);
    }
}
56. 57. 58. 59. 60. 61. 62. 63. 64. 65. 66. 67. 68. 69. 70. 71. 72. 73. 74. 75. 76. 77. 78. 79. 80. 81. 82. 83. 84. 85. 86. 87. 88. 89. 90. 91. 92. 93. 94. 95. 96. void FirstTimeSettings() { for (int i = 0; i n_layer; i++) { int j; for (j = 0; j hidelayer[i].N_NODES; j++) { hidelayer[i].nodes[j].threshold = (2 * Math.random() - 1) * hidelayer[i].nodes[j].amultinum; } } for (int i = 0; i n_output; i++) { output.nodes[i].threshold = (2 * Math.random() - 1) * output.nodes[i].amultinum; } } void BeforeTraining(double inp[], double outp[]) { int i; for (i = 0; i n_input; i++) { input.nodes[i].activation = inp[i]; } for (i = 0; i n_output; i++) { desiredOutputs[i] = outp[i]; } } public void Calc_Activation(double result[]) { int i, j, ci; for (i = 0; i n_layer; i++) { if (i == 0) { for (j = 0; j hidelayer[i].N_NODES; j++) { hidelayer[i].nodes[j].activation = 0; for (ci = 0; ci n_input; ci++) { hidelayer[i].nodes[j].activation += input.nodes[ci].activation * input.nodes[ci].weights[j]; } hidelayer[i].nodes[j].activation += hidelayer[i].nodes[j].threshol d; hidelayer[i].nodes[j].activation = activefun(hidelayer[i].nodes[j]. activation); } } else { for (j = 0; j hidelayer[i].N_NODES; j++) { hidelayer[i].nodes[j].activation = 0; for (ci = 0; ci hidelayer[i - 1].N_NODES; ci++) {
97. 98. 99. hidelayer[i].nodes[j].activation += hidelayer[i -1].nodes[ci].ac tivation * hidelayer[i - 1].nodes[ci].weights[j]; } hidelayer[i].nodes[j].activation += hidelayer[i].nodes[j].threshol d; 100. hidelayer[i].nodes[j].activation = activefun(hidelayer[i].nodes[j] .activation); 101. 102. 103. 104. 105. 106. 107. } } } for (j = 0; j output.N_NODES; j++) { output.nodes[j].activation = 0; for (ci = 0; ci hidelayer[n_layer - 1].N_NODES; ci++) { output.nodes[j].activation += hidelayer[n_layer -1].nodes[ci].activ ation * hidelayer[n_layer -1].nodes[ci].weights[j]; 108. 109. 110. 111. 112. 113. 114. 115. 116. 117. 118. 119. } output.nodes[j].activation += output.nodes[j].threshold; output.nodes[j].activation = activefun(output.nodes[j].activation); } for (i = 0; i n_output; i++) { result[i] = output.nodes[i].activation; } } void Calc_error_output() { for (int x = 0; x n_output; x++) //output.nodes[x].error = output.nodes[x].activation * (1 - output.node s[x].activation ) * (desiredOutputs[x] - output.nodes[x].activation ); 120. 121. [x]); { } output.nodes[x].error += (output.nodes[x].activation - desiredOutputs output.nodes[x].error *= difactivefun(output.nodes[x].activation); 122. 123. 124. 125. 126. 127. 128. 129. 130. 131. 132. 133. } void Calc_error_hidden() { int j, i; for (j = 0; j hidelayer[n_layer - 1].N_NODES; j++) { for (int x = 0; x n_output; x++) { hidelayer[n_layer - 1].nodes[j].error += hidelayer[n_layer - 1].nodes[j].weights[x] * output.nodes[x].error; } hidelayer[n_layer -
134. 135. 136. 137. 138. 139. 140. 141. 142. 143. 144. 145. 146. 147. 148. 149. 150. 151. 152. 153. 154. 155. 156. 157. 158. 159. 160. 161. 162. 163. 164. 165. 166. 167. 168. 169. 170. 171. 172. 173. 174. 175. 176. 1].nodes[j].error *= difactivefun(hidelayer[n_layer - 1].nodes[j].activation); } for (i = n_layer - 2; i >= 0; i--) { for (j = 0; j hidelayer[i].N_NODES; j++) { for (int x = 0; x hidelayer[i + 1].N_NODES; x++) { hidelayer[i].nodes[j].error += hidelayer[i].nodes[j].weights[x] * hidelayer[i + 1].nodes[x].error; } hidelayer[i].nodes[j].error *= difactivefun(hidelayer[i].nodes[j].activation); } } } void Calc_new_Thresholds() { int i; // computing the thresholds for next itration for hidden layer for (i = 0; i n_layer; i++) { for (int x = 0; x hidelayer[i].N_NODES; x++) { double det = a * hidelayer[i].nodes[x].detthresholdlast + hidelayer[i].nodes[x].error * LH; hidelayer[i].nodes[x].detthresholdlast = det; hidelayer[i].nodes[x].threshold += det; } } for (int y = 0; y output.N_NODES; y++) { double det = a * output.nodes[y].detthresholdlast + output.nodes[y].error * LO; output.nodes[y].detthresholdlast = det; output.nodes[y].threshold += det; } } void Calc_new_weights_in_hidden() { int i, j; double temp = 0.0f; for (j = 0; j hidelayer[n_layer - 1].N_NODES; j++) { temp = hidelayer[n_layer - 1].nodes[j].activation * LO;
n_output; y++) { double det = a * hidelayer[n_layer - 1].nodes[j].detweightslast[y] temp * output.nodes[y].error; hidelayer[n_layer - 1].nodes[j].detweightslast[y] = det; hidelayer[n_layer - 1].nodes[j].weights[y] += det; for (i = 0; i n_layer - 1; i++) { for (j = 0; j hidelayer[i].N_NODES; j++) { temp = hidelayer[i].nodes[j].activation * LH; for (int y = 0; y hidelayer[i + 1].N_NODES; y++) { double det = a * hidelayer[i].nodes[j].detweightslast[y] + temp * hidelayer[i + 1].nodes[y].error; hidelayer[i].nodes[j].detweightslast[y] = det; hidelayer[i].nodes[j].weights[y] += det; } } } } } for (int y = 0; y 177. 178. + 179. 180. 181. 182. 183. 184. 185. 186. 187. 188. 189. 190. 191. 192. 193. 194. 195. 196. 197. 198. 199. 200. 201. 202. 203. 204. 205. 206. 207. 208. 209. 210. 211. 212. 213. 214. 215. 216. 217. 218. [x].activation - desiredOutputs[x]); 219. void Calc_new_weights_in_input() { continue; } } } } double Calc_total_error_in_pattern() { double temp = 0.0; for (int x = 0; x n_output; x++) { int j; double temp = 0.0f; for (j = 0; j input.N_NODES; j++) { temp = input.nodes[j].activation * LH; for (int y = 0; y hidelayer[0].N_NODES; y++) { double det = a * input.nodes[j].detweightslast[y] + temp * hidelayer[0].nodes[y].error; input.nodes[j].detweightslast[y] = det; input.nodes[j].weights[y] += det;
220. 221. 222. temp += Math.pow((output.nodes[x].activation - desiredOutputs[x]), 2); total_error_one_circle[x] += Math.pow((output.nodes[x].activation - d esiredOutputs[x]), 2); 223. 224. 225. 226. 227. 228. 229. 230. 231. 232. 233. 234. 235. 236. 237. 238. 239. 240. 241. 242. 243. 244. 245. 246. 247. 248. 249. 250. 251. 252. 253. 254. 255. 256. 257. 258. 259. } total_error = temp; total_error_one_circle_all += total_error; return temp; } void reset_error() { for (int i = 0; i n_input; i++) { input.nodes[i].error = 0; } for (int i = 0; i n_output; i++) { output.nodes[i].error = 0; } for (int i = 0; i n_layer; i++) { for (int j = 0; j hidelayer[i].N_NODES; j++) { hidelayer[i].nodes[j].error = 0; } } } void reset_total_error() { total_error_one_circle_all = 0; for (int x = 0; x n_output; x++) { total_error_one_circle[x] = 0; } } void Training_for_one_pattern(double result[]) { Calc_Activation(result); Calc_error_output(); Calc_error_hidden(); Calc_new_Thresholds(); Calc_new_weights_in_hidden(); Calc_new_weights_in_input(); } public void Training(double inputs[][], double outputs[][], int num, bool ean ifresort) { 260.
分享到:
收藏