Unrecognized function or variable ‘rlContinuousDeterministicActorRepresentation’
Hello, This is my code
%clc
%clear all
rng(0)
% Load MATPOWER case
mpc = loadcase(‘case118’);
% Create the environment
env = createOpfEnv();
import matlab.rl.*
% Define SAC agent
actorNetwork = [
featureInputLayer(obsInfo.Dimension(1))
fullyConnectedLayer(64)
reluLayer
fullyConnectedLayer(64)
reluLayer
fullyConnectedLayer(actInfo.Dimension(1))
];
criticNetwork = [
featureInputLayer([obsInfo.Dimension(1) + actInfo.Dimension(1)])
fullyConnectedLayer(64)
reluLayer
fullyConnectedLayer(64)
reluLayer
fullyConnectedLayer(1)
];
actorOptions = rlRepresentationOptions(‘Optimizer’,’adam’,’LearnRate’,1e-4);
criticOptions = rlRepresentationOptions(‘Optimizer’,’adam’,’LearnRate’,1e-4);
actor = rlContinuousDeterministicActorRepresentation(actorNetwork, obsInfo, actInfo, ‘Observation’, {‘observations’}, ‘Action’, {‘actions’}, actorOptions);
critic = rlQValueRepresentation(criticNetwork, obsInfo, actInfo, ‘Observation’, {‘observations’}, ‘Action’, {‘actions’}, criticOptions);
agentOptions = rlSACAgentOptions(‘SampleTime’, 1, ‘TargetSmoothFactor’, 1e-3, ‘TargetUpdateFrequency’, 1, ‘ExperienceBufferLength’, 1e6);
agent = rlSACAgent(actor, critic, agentOptions);
% Training options
trainOpts = rlTrainingOptions(…
‘MaxEpisodes’, 1000, …
‘MaxStepsPerEpisode’, 100, …
‘StopTrainingCriteria’, ‘AverageReward’, …
‘StopTrainingValue’, -100, …
‘ScoreAveragingWindowLength’, 10, …
‘Verbose’, true, …
‘Plots’, ‘training-progress’);
% Train the agent
train(agent, env, trainOpts);
and when i tried running i got the error "Error in DRL_code (line 32)
actor = rlContinuousDeterministicActorRepresentation(actorNetwork, obsInfo, actInfo, ‘Observation’, {‘observations’}, ‘Action’, {‘actions’}, actorOptions);"
I am using MATLAB 2024AHello, This is my code
%clc
%clear all
rng(0)
% Load MATPOWER case
mpc = loadcase(‘case118’);
% Create the environment
env = createOpfEnv();
import matlab.rl.*
% Define SAC agent
actorNetwork = [
featureInputLayer(obsInfo.Dimension(1))
fullyConnectedLayer(64)
reluLayer
fullyConnectedLayer(64)
reluLayer
fullyConnectedLayer(actInfo.Dimension(1))
];
criticNetwork = [
featureInputLayer([obsInfo.Dimension(1) + actInfo.Dimension(1)])
fullyConnectedLayer(64)
reluLayer
fullyConnectedLayer(64)
reluLayer
fullyConnectedLayer(1)
];
actorOptions = rlRepresentationOptions(‘Optimizer’,’adam’,’LearnRate’,1e-4);
criticOptions = rlRepresentationOptions(‘Optimizer’,’adam’,’LearnRate’,1e-4);
actor = rlContinuousDeterministicActorRepresentation(actorNetwork, obsInfo, actInfo, ‘Observation’, {‘observations’}, ‘Action’, {‘actions’}, actorOptions);
critic = rlQValueRepresentation(criticNetwork, obsInfo, actInfo, ‘Observation’, {‘observations’}, ‘Action’, {‘actions’}, criticOptions);
agentOptions = rlSACAgentOptions(‘SampleTime’, 1, ‘TargetSmoothFactor’, 1e-3, ‘TargetUpdateFrequency’, 1, ‘ExperienceBufferLength’, 1e6);
agent = rlSACAgent(actor, critic, agentOptions);
% Training options
trainOpts = rlTrainingOptions(…
‘MaxEpisodes’, 1000, …
‘MaxStepsPerEpisode’, 100, …
‘StopTrainingCriteria’, ‘AverageReward’, …
‘StopTrainingValue’, -100, …
‘ScoreAveragingWindowLength’, 10, …
‘Verbose’, true, …
‘Plots’, ‘training-progress’);
% Train the agent
train(agent, env, trainOpts);
and when i tried running i got the error "Error in DRL_code (line 32)
actor = rlContinuousDeterministicActorRepresentation(actorNetwork, obsInfo, actInfo, ‘Observation’, {‘observations’}, ‘Action’, {‘actions’}, actorOptions);"
I am using MATLAB 2024A Hello, This is my code
%clc
%clear all
rng(0)
% Load MATPOWER case
mpc = loadcase(‘case118’);
% Create the environment
env = createOpfEnv();
import matlab.rl.*
% Define SAC agent
actorNetwork = [
featureInputLayer(obsInfo.Dimension(1))
fullyConnectedLayer(64)
reluLayer
fullyConnectedLayer(64)
reluLayer
fullyConnectedLayer(actInfo.Dimension(1))
];
criticNetwork = [
featureInputLayer([obsInfo.Dimension(1) + actInfo.Dimension(1)])
fullyConnectedLayer(64)
reluLayer
fullyConnectedLayer(64)
reluLayer
fullyConnectedLayer(1)
];
actorOptions = rlRepresentationOptions(‘Optimizer’,’adam’,’LearnRate’,1e-4);
criticOptions = rlRepresentationOptions(‘Optimizer’,’adam’,’LearnRate’,1e-4);
actor = rlContinuousDeterministicActorRepresentation(actorNetwork, obsInfo, actInfo, ‘Observation’, {‘observations’}, ‘Action’, {‘actions’}, actorOptions);
critic = rlQValueRepresentation(criticNetwork, obsInfo, actInfo, ‘Observation’, {‘observations’}, ‘Action’, {‘actions’}, criticOptions);
agentOptions = rlSACAgentOptions(‘SampleTime’, 1, ‘TargetSmoothFactor’, 1e-3, ‘TargetUpdateFrequency’, 1, ‘ExperienceBufferLength’, 1e6);
agent = rlSACAgent(actor, critic, agentOptions);
% Training options
trainOpts = rlTrainingOptions(…
‘MaxEpisodes’, 1000, …
‘MaxStepsPerEpisode’, 100, …
‘StopTrainingCriteria’, ‘AverageReward’, …
‘StopTrainingValue’, -100, …
‘ScoreAveragingWindowLength’, 10, …
‘Verbose’, true, …
‘Plots’, ‘training-progress’);
% Train the agent
train(agent, env, trainOpts);
and when i tried running i got the error "Error in DRL_code (line 32)
actor = rlContinuousDeterministicActorRepresentation(actorNetwork, obsInfo, actInfo, ‘Observation’, {‘observations’}, ‘Action’, {‘actions’}, actorOptions);"
I am using MATLAB 2024A reinforcement learning MATLAB Answers — New Questions