实验四 BP神经网络模拟sin函数Word下载.docx
- 文档编号:18405289
- 上传时间:2022-12-16
- 格式:DOCX
- 页数:14
- 大小:113.27KB
实验四 BP神经网络模拟sin函数Word下载.docx
《实验四 BP神经网络模拟sin函数Word下载.docx》由会员分享,可在线阅读,更多相关《实验四 BP神经网络模拟sin函数Word下载.docx(14页珍藏版)》请在冰豆网上搜索。
输入层:
单元数量:
$d$（另含一个输出恒为 1 的偏置单元；与代码中 `num_in + 1` 对应）
单元 $i$ 的输出:
$x_i$，即直接输出网络输入
单元 $i$ 的激活函数:
线性函数 $f(x)=x$
隐层:
单元 $j$ 的输入:
$net_j=\sum_{i=1}^{d+1} w_{ij}\,x_i$（含偏置项）
单元 $j$ 的输出:
$y_j=\sigma(net_j)$
单元 $j$ 的激活函数:
非线性函数（Sigmoid）：$\sigma(x)=\dfrac{1}{1+e^{-x}}$
输出层:
单元数量:
$c$
单元 $k$ 的输入:
$net_k=\sum_{j=1}^{n_H+1} w_{jk}\,y_j$（含偏置项）
单元 $k$ 的输出:
$z_k=\sigma(net_k)$
单元 $k$ 的激活函数:
Sigmoid 函数 $\sigma(x)=\dfrac{1}{1+e^{-x}}$
（注：原文公式为图片，提取时丢失；以上公式依据后文代码实现复原，请以原文档为准。）
3、程序设计
1、编译环境
使用 C# 语言编写，编译器为 VS2008，运行环境为 Windows + .NET Framework 3.5。
2、程序运行流程
运行程序后,单击“训练”按钮,开始训练,训练完成后可以看到函数图像与各权值,输入x的值可以查看预测值与准确值。
3、类设计
神经网络类的定义:
/// <summary>
/// A simple three-layer feed-forward neural network
/// (input -> hidden -> output) with one bias node per
/// non-output layer and sigmoid activations.
/// NOTE(review): reconstructed from extraction-garbled text; loop
/// bounds were inferred from the surviving fragments — confirm
/// against the original document.
/// </summary>
class NeuralNetwork
{
    #region Instance Fields
    // private fields
    private int num_in;                 // number of input nodes (excluding bias)
    private int num_hid;                // number of hidden nodes (excluding bias)
    private int num_out;                // number of output nodes
    private double[,] i_to_h_wts;       // weights input(+bias) -> hidden
    private double[,] h_to_o_wts;       // weights hidden(+bias) -> output
    private double[] inputs;            // input activations, last slot is bias (1.0)
    private double[] hidden;            // hidden activations, last slot is bias (1.0)
    private double[] outputs;           // output activations
    private double learningRate = 0.3;  // eta, used by the trainer
    private Random gen = new Random();
    #endregion

    #region Constructor
    /// <summary>
    /// Creates a new NeuralNetwork, using the parameters provided.
    /// </summary>
    /// <param name="num_in">Number of input nodes</param>
    /// <param name="num_hid">Number of hidden nodes</param>
    /// <param name="num_out">Number of output nodes</param>
    public NeuralNetwork(int num_in, int num_hid, int num_out)
    {
        this.num_in = num_in;
        this.num_hid = num_hid;
        this.num_out = num_out;

        // "+1" makes room for the bias node of each source layer.
        i_to_h_wts = new double[num_in + 1, num_hid];
        h_to_o_wts = new double[num_hid + 1, num_out];
        inputs = new double[num_in + 1];
        hidden = new double[num_hid + 1];
        outputs = new double[num_out];
    }
    #endregion

    #region Initialization of random weights
    /// <summary>
    /// Randomly initialise all the network weights.
    /// Need to start with some weights. This method sets up the
    /// input-to-hidden and hidden-to-output weights with random values.
    /// </summary>
    public void initialiseNetwork()
    {
        // Set the input value for the bias nodes.
        inputs[num_in] = 1.0;
        hidden[num_hid] = 1.0;

        // Set weights between input & hidden nodes.
        for (int i = 0; i < num_in + 1; i++)
        {
            for (int j = 0; j < num_hid; j++)
            {
                // Set random weights between -2 & 2.
                i_to_h_wts[i, j] = (gen.NextDouble() * 4) - 2;
            }
        }

        // Set weights between hidden & output nodes.
        for (int i = 0; i < num_hid + 1; i++)
        {
            for (int j = 0; j < num_out; j++)
            {
                h_to_o_wts[i, j] = (gen.NextDouble() * 4) - 2;
            }
        }
    }
    #endregion

    #region Pass forward
    /// <summary>
    /// Does a complete pass through within the network, using the
    /// applied_inputs parameter. The pass through is done from the
    /// input to hidden, and hidden to output layers.
    /// </summary>
    /// <param name="applied_inputs">
    /// A double[] array which holds input values, which are then
    /// presented to the network's input layer.
    /// </param>
    public void pass_forward(double[] applied_inputs)
    {
        // Load a set of inputs into our current inputs
        // (the bias slot at index num_in is left at 1.0).
        for (int i = 0; i < num_in; i++)
        {
            inputs[i] = applied_inputs[i];
        }

        // Forward to hidden nodes, and calculate activations in hidden layer.
        for (int i = 0; i < num_hid; i++)
        {
            double sum = 0.0;
            for (int j = 0; j < num_in + 1; j++)
            {
                sum += inputs[j] * i_to_h_wts[j, i];
            }
            hidden[i] = ActivationFunction.Sigmoid(sum);
        }

        // Forward to output nodes, and calculate activations in output layer.
        for (int i = 0; i < num_out; i++)
        {
            double sum = 0.0;
            for (int j = 0; j < num_hid + 1; j++)
            {
                sum += hidden[j] * h_to_o_wts[j, i];
            }
            // Pass the sum through the activation function, Sigmoid in this
            // case, which allows for backward differentiation.
            outputs[i] = ActivationFunction.Sigmoid(sum);
        }
    }
    #endregion

    #region Public Properties
    /// <summary>Gets/sets the number of input nodes for the NeuralNetwork.</summary>
    public int NumberOfInputs
    {
        get { return num_in; }
        set { num_in = value; }
    }

    /// <summary>Gets/sets the number of hidden nodes for the NeuralNetwork.</summary>
    public int NumberOfHidden
    {
        get { return num_hid; }
        set { num_hid = value; }
    }

    /// <summary>Gets/sets the number of output nodes for the NeuralNetwork.</summary>
    public int NumberOfOutputs
    {
        get { return num_out; }
        set { num_out = value; }
    }

    /// <summary>Gets/sets the input-to-hidden weights for the NeuralNetwork.</summary>
    public double[,] InputToHiddenWeights
    {
        get { return i_to_h_wts; }
        set { i_to_h_wts = value; }
    }

    /// <summary>Gets/sets the hidden-to-output weights for the NeuralNetwork.</summary>
    public double[,] HiddenToOutputWeights
    {
        get { return h_to_o_wts; }
        set { h_to_o_wts = value; }
    }

    /// <summary>Gets/sets the input values for the NeuralNetwork.</summary>
    public double[] Inputs
    {
        get { return inputs; }
        set { inputs = value; }
    }

    /// <summary>Gets/sets the hidden values for the NeuralNetwork.</summary>
    public double[] Hidden
    {
        get { return hidden; }
        set { hidden = value; }
    }

    /// <summary>Gets/sets the outputs values for the NeuralNetwork.</summary>
    public double[] Outputs
    {
        get { return outputs; }
        set { outputs = value; }
    }

    /// <summary>Gets/sets the LearningRate (eta) value for the NeuralNetwork.</summary>
    public double LearningRate
    {
        get { return learningRate; }
        set { learningRate = value; }
    }
    #endregion
}
训练过程:
/// <summary>
/// Trains the network: initialises random weights, then repeatedly
/// presents every sample of the training set, doing one forward pass
/// followed by one backpropagation weight update per sample.
/// NOTE(review): the epoch loop header was lost in extraction; it is
/// reconstructed from the surviving `training_times` fragment.
/// </summary>
public void Train()
{
    _nn.initialiseNetwork();

    // Run the whole training set training_times epochs.
    for (int epoch = 0; epoch < training_times; epoch++)
    {
        foreach (TrainSet trainSet in _trainSets)
        {
            double[] inputs = new double[] { trainSet.Input };
            double[] outputs = new double[] { trainSet.Output };
            _nn.pass_forward(inputs);
            train_network(outputs);
        }
    }
}
权值变更函数:
/// <summary>
/// One backpropagation step: computes the delta values for the output
/// and hidden layers (using the sigmoid derivative o * (1 - o)), then
/// updates the hidden-to-output and input-to-hidden weights by
/// eta * delta * activation.
/// NOTE(review): loop headers reconstructed from the fragmentary
/// bounds left by extraction; despite the original comment mentioning
/// momentum, no momentum term appears in the visible update rule.
/// </summary>
/// <param name="outputs">The target output values for the current sample.</param>
private void train_network(double[] outputs)
{
    double[] delta_hidden = new double[_nn.NumberOfHidden + 1];
    double[] delta_outputs = new double[_nn.NumberOfOutputs];

    // Get the delta value for the output layer:
    // sigmoid'(o) * (target - actual).
    for (int i = 0; i < _nn.NumberOfOutputs; i++)
    {
        delta_outputs[i] =
            _nn.Outputs[i] * (1.0 - _nn.Outputs[i]) * (outputs[i] - _nn.Outputs[i]);
    }

    // Get the delta value for the hidden layer by back-propagating the
    // output deltas through the hidden-to-output weights.
    for (int i = 0; i < _nn.NumberOfHidden + 1; i++)
    {
        double error = 0.0;
        for (int j = 0; j < _nn.NumberOfOutputs; j++)
        {
            error += _nn.HiddenToOutputWeights[i, j] * delta_outputs[j];
        }
        delta_hidden[i] = _nn.Hidden[i] * (1.0 - _nn.Hidden[i]) * error;
    }

    // Now update the weights between hidden & output layer.
    for (int i = 0; i < _nn.NumberOfOutputs; i++)
    {
        for (int j = 0; j < _nn.NumberOfHidden + 1; j++)
        {
            _nn.HiddenToOutputWeights[j, i] +=
                _nn.LearningRate * delta_outputs[i] * _nn.Hidden[j];
        }
    }

    // Now update the weights between input & hidden layer.
    for (int i = 0; i < _nn.NumberOfHidden; i++)
    {
        for (int j = 0; j < _nn.NumberOfInputs + 1; j++)
        {
            _nn.InputToHiddenWeights[j, i] +=
                _nn.LearningRate * delta_hidden[i] * _nn.Inputs[j];
        }
    }
}
激活函数(sigmoid函数):
/// <summary>
/// Logistic sigmoid activation function: 1 / (1 + e^-x).
/// Maps any real input into the open interval (0, 1).
/// </summary>
/// <param name="x">The weighted input sum of a unit.</param>
/// <returns>The sigmoid of <paramref name="x"/>.</returns>
public static double Sigmoid(double x)
{
    // Math.Exp(-x) is the direct, idiomatic equivalent of the
    // original Math.Pow(Math.E, -x).
    return 1.0 / (1.0 + Math.Exp(-x));
}
建立训练集和输入层,隐层,输出层分别为1,4,1的神经网络实例:
// Build the training set: setSize samples spread evenly over (0, 2*pi].
// sin's range [-1, 1] is mapped into [0, 1] via sin(x)/2 + 0.5 so the
// target fits the sigmoid output; the GUI undoes this mapping later.
// NOTE(review): the loop header was lost in extraction and is
// reconstructed from the surviving `setSize` fragment.
TrainSet[] set = new TrainSet[setSize];
for (int i = 0; i < setSize; i++)
{
    double n = Math.PI * 2 / setSize;
    set[i] = new TrainSet(n * (i + 1), Math.Sin(n * (i + 1)) / 2 + 0.5);
}
// Network with 1 input, 4 hidden and 1 output unit.
NeuralNetwork network = new NeuralNetwork(1, 4, 1);
绘制拟合后的图形:
// Plot the fitted curve: query the trained network at the same
// sample points and draw one 1x1 pixel per point.  x is scaled by 40
// pixels/radian; y maps network output [0, 1] onto [100, 0] (screen
// y grows downward).
// NOTE(review): the loop header was lost in extraction and is
// reconstructed from the surviving `setSize`-based expressions.
Graphics g;
g = panel1.CreateGraphics();
g.Clear(panel1.BackColor);
double pi = Math.PI * 2 / setSize;
for (int i = 0; i < setSize; i++)
{
    g.FillRectangle(
        brush,
        (float)(pi * (i + 1)) * 40,
        100 - (float)(_nntrainer.GetOutPut(new double[] { pi * (i + 1) })[0]) * 100,
        1,
        1);
}
输出任意角度的拟合值和正弦值:
// Show the network's prediction and the true sine for a user-entered x.
double _result = _nntrainer.GetOutPut(new double[] { Double.Parse(x_value.Text) })[0];
// Undo the training-time mapping sin(x)/2 + 0.5 to recover sin(x).
result.Text = ((_result - 0.5) * 2).ToString();
sinx.Text = Math.Sin(Double.Parse(x_value.Text)).ToString();
4、程序运行截图
1、程序界面
2、训练结果
3、对比预测值与准确值
- 配套讲稿:
如PPT文件的首页显示word图标,表示该PPT已包含配套word讲稿。双击word图标可打开word文档。
- 特殊限制:
部分文档作品中含有的国旗、国徽等图片,仅作为作品整体效果示例展示,禁止商用。设计者仅对作品中独创性部分享有著作权。
- 关 键 词:
- 实验四 BP神经网络模拟sin函数 实验 BP 神经网络 模拟 sin 函数
![提示](https://static.bdocx.com/images/bang_tan.gif)