clear all;clc
rng(6);
epochs = 80; %30
mdl = 'MODELO';
stoptrainingcriteria = "AverageReward";
stoptrainingvalue = 2000000;
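% Load the Simulink model that contains the RL Agent block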
load_system(mdl);
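% Observation specification: a single scalar observation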
numObs = 1;
obsInfo = rlNumericSpec([numObs 1]);
obsInfo.Name = 'observations';
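% Action specification: scalar action 'alfa', bounded between 1 and 1000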
ActionInfo = rlNumericSpec([1 1], ...
    LowerLimit=1, ...
    UpperLimit=1000);
ActionInfo.Name = 'alfa';
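% Create the Simulink environment and attach the custom reset function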
blk = [mdl,'/RL Agent'];
env = rlSimulinkEnv(mdl,blk,obsInfo,ActionInfo);
env.ResetFcn = @(in) resetfunction(in, mdl);
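% Create a default DDPG agent with 32 hidden units per fully connected layer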
initOpts = rlAgentInitializationOptions('NumHiddenUnit',32); %32
agent = rlDDPGAgent(obsInfo, ActionInfo, initOpts);
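% Agent options: sample time, Ornstein-Uhlenbeck exploration noise, and n-step look-ahead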
agent.SampleTime = 1;% -1
agent.AgentOptions.NoiseOptions.MeanAttractionConstant = 1/30;% 1/30
agent.AgentOptions.NoiseOptions.StandardDeviation = 41; % 41
agent.AgentOptions.NoiseOptions.StandardDeviationDecayRate = 0.00001;% 0
agent.AgentOptions.NumStepsToLookAhead = 32; % 32
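% Optimizer settings for the critic and the actor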
agent.AgentOptions.CriticOptimizerOptions.LearnRate = 1e-03;
agent.AgentOptions.CriticOptimizerOptions.GradientThreshold = 1;
agent.AgentOptions.ActorOptimizerOptions.LearnRate = 1e-04;
agent.AgentOptions.ActorOptimizerOptions.GradientThreshold = 1;
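% Training options: run for at most 'epochs' episodes, or stop early once the average reward reaches stoptrainingvalue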
opt = rlTrainingOptions(...
    'MaxEpisodes', epochs,...
    'MaxStepsPerEpisode', 1000,... % 1000
    'StopTrainingCriteria', stoptrainingcriteria,...
    'StopTrainingValue', stoptrainingvalue,...
    'Verbose', true,...
    'Plots', "training-progress");
trainResults = train(agent,env,opt);
generatePolicyFunction(agent);
Here is the code I use to create the graph:
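% Get the greedy (noise-free) policy and the exploration (noisy) policy from the trained agent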
policy1 = getGreedyPolicy(agent);
policy2 = getExplorationPolicy(agent);
x_values = 0:0.1:120;
actions1 = zeros(length(x_values), 1);
actions2 = zeros(length(x_values), 1);
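% Evaluate both policies over a sweep of observation values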
for i = 1:length(x_values)
    actions1(i) = cell2mat(getAction(policy1, {x_values(i)}));
    actions2(i) = cell2mat(getAction(policy2, {x_values(i)}));
end
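% Overlay the exploration policy and the greedy policy (thicker line)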
hold on
plot(x_values, actions2);
plot(x_values, actions1, 'LineWidth', 2);
hold off
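If you also want axis labels and a legend to tell the two curves apart (the label text below is just a suggestion, not part of the original script), you can add after the plot:
xlabel('Observation');
ylabel('alfa');
legend('Exploration policy', 'Greedy policy');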