path: root/rl/gym_demo/carl_pole.py
blob: 146c337f9800c66f1efc7c9369d5a2bb176d2b3a

import gym
import numpy as np
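
# Hand-coded agent for the Gym MountainCar-v0 task: picks actions with a
# fixed rule on the car's position and velocity instead of learning a policy.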

class BespokeAgent:
    def __init__(self, env):
        pass

    def decide(self, observation):
        position, velocity = observation
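        # Hand-tuned, position-dependent velocity band: push right (action 2)
        # while the velocity stays inside the band, otherwise push left
        # (action 0) to build up momentum on the opposite slope.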
        lb = min(-0.09*(position + 0.25) ** 2 + 0.03, 0.3*(position + 0.9)**4 - 0.008)
        ub = -0.07*(position + 0.38) ** 2 + 0.07
        if lb < velocity < ub:
            action = 2
        else:
            action = 0
        # print('observation: {}, lb: {}, ub: {} => action: {}'.format(observation, lb, ub, action))
        return action

    def learn(self, *args):
        # The hand-coded agent does not learn; this stub keeps the interface
        # compatible with play(..., train=True).
        pass


def play(i, agent, env, render=True, train=False):
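    # Run a single episode: render each step if requested, optionally feed
    # transitions to agent.learn(), and return the episode index together
    # with its total reward.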
    episode_reward = 0
    observation = env.reset()
    while True:
        if render:
            env.render()
        action = agent.decide(observation)
        next_observation, reward, done, _ = env.step(action)
        episode_reward += reward
        if train:
            agent.learn(observation, action, reward, done)
        if done:
            break
        observation = next_observation
    print(i, episode_reward)
    return i, episode_reward


if __name__ == '__main__':
    env = gym.make('MountainCar-v0')
    agent = BespokeAgent(env)
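    # Run 100 rendered episodes with the hand-coded agent and collect
    # (episode index, total reward) pairs.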
    rewards = [play(i, agent, env) for i in range(100)]
    print(rewards)
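    # Average reward across the 100 episodes (uses the numpy import above).
    print('average episode reward:', np.mean([r for _, r in rewards]))
    # Close the environment (and any render window) once, after all episodes,
    # rather than tearing it down inside every episode.
    env.close()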