
Reinforcement Learning Assignment 1.1

이게될까 2024. 3. 25. 13:37

The assignment is to implement TD and compare it with the MDP (dynamic programming) solution.

For now this is just the code; I'll finish it up after Wednesday's class.
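
Before the code, the update rule itself: TD(0) nudges the value of the current state toward the one-step bootstrapped target,

V(s) ← V(s) + α·(r + γ·V(s') − V(s)),

which is exactly the data[x][y] update line inside the training loop below.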

import random
import numpy as np

class GridWorld():
    # 4x4 grid; state is (x, y) = (row, column); start (0, 0), terminal (3, 3)
    def __init__(self):
        self.x = 0
        self.y = 0

    def step(self, a):
        # actions: 0 = left, 1 = up, 2 = right, 3 = down
        if a == 0:
            self.move_left()
        elif a == 1:
            self.move_up()
        elif a == 2:
            self.move_right()
        elif a == 3:
            self.move_down()

        reward = -1  # every transition costs -1 until the episode ends
        done = self.is_done()
        return (self.x, self.y), reward, done

    def move_right(self):
        self.y += 1
        if self.y > 3:
            self.y = 3

    def move_left(self):
        self.y -= 1
        if self.y < 0:
            self.y = 0

    def move_up(self):
        self.x -= 1
        if self.x < 0:
            self.x = 0
            
    def move_down(self):
        self.x += 1
        if self.x > 3:
            self.x = 3

    def is_done(self):
        # episode ends at the bottom-right corner (3, 3)
        return self.x == 3 and self.y == 3

    def get_state(self):
        return (self.x, self.y)

    def reset(self):
        self.x = 0
        self.y = 0
        return (self.x, self.y)

class Agent():
    # uniform random policy: each of the four actions with probability 0.25
    def __init__(self):
        pass

    def select_action(self):
        coin = random.random()
        if coin < 0.25:
            action = 0
        elif coin < 0.5:
            action = 1
        elif coin < 0.75:
            action = 2
        else:
            action = 3
        return action
            
            
def main():
    # TD(0) policy evaluation of the uniform random policy
    env = GridWorld()
    agent = Agent()
    data = [[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]  # value table V[x][y]
    gamma = 1.0
    alpha = 0.01

    for k in range(50000):  # 50000 episodes
        done = False
        while not done:
            x, y = env.get_state()
            action = agent.select_action()
            (x_prime, y_prime), reward, done = env.step(action)
            # TD(0): pull V(s) toward the bootstrapped target r + gamma * V(s')
            data[x][y] = data[x][y] + alpha*(reward+gamma*data[x_prime][y_prime]-data[x][y])
        env.reset()

    for row in data:
        print(row)

if __name__ == '__main__':
    main()
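
For the MDP half of the comparison, iterative policy evaluation on the same grid gives the exact answer the TD run should approach: sweep the Bellman expectation equation for the uniform random policy until the table stops changing. This is just my sketch of that baseline (the name policy_evaluation and the tolerance theta are my own choices, not from the assignment handout):

# Dynamic-programming baseline: iterative policy evaluation on the same 4x4 grid.
# Uniform random policy, reward -1 per step, in-place sweeps until convergence.
def policy_evaluation(gamma=1.0, theta=1e-8):
    V = [[0.0] * 4 for _ in range(4)]
    while True:
        delta = 0.0
        for x in range(4):
            for y in range(4):
                if (x, y) == (3, 3):  # terminal state stays at value 0
                    continue
                # successor states for left / up / right / down, clipped at the walls
                nexts = [(x, max(y - 1, 0)), (max(x - 1, 0), y),
                         (x, min(y + 1, 3)), (min(x + 1, 3), y)]
                v_new = sum(0.25 * (-1 + gamma * V[nx][ny]) for nx, ny in nexts)
                delta = max(delta, abs(v_new - V[x][y]))
                V[x][y] = v_new
        if delta < theta:
            break
    return V

for row in policy_evaluation():
    print([round(v, 2) for v in row])

Printing this table next to the TD output is the side-by-side the assignment asks for.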
# n-step TD: run one full episode, then update every visited state with its
# n-step return, bootstrapping from V when the episode has not terminated
# within n steps of that state.
def n_step_td_update(env, agent, data, n, alpha, gamma):
    states = [env.reset()]
    rewards = []  # rewards[t] is the reward received after leaving states[t]
    done = False
    while not done:
        action = agent.select_action()
        next_state, reward, done = env.step(action)
        states.append(next_state)
        rewards.append(reward)

    T = len(rewards)  # episode length
    for t in range(T):
        # discounted sum of the (up to) n rewards that follow states[t]
        G = sum(gamma ** i * rewards[t + i] for i in range(min(n, T - t)))
        if t + n < T:  # non-terminal n steps later -> bootstrap from V
            bx, by = states[t + n]
            G += gamma ** n * data[bx][by]
        x, y = states[t]
        data[x][y] += alpha * (G - data[x][y])
# main() switched over to n-step TD
def main():
    env = GridWorld()
    agent = Agent()
    data = [[0,0,0,0] for _ in range(4)]
    gamma = 1.0
    alpha = 0.01
    n = 4  # use 4-step TD as an example

    for k in range(50000):
        n_step_td_update(env, agent, data, n, alpha, gamma)

    for row in data:
        print(row)
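
Note that this second listing never calls main(), so running the file as-is prints nothing. A guard like the one below (my addition, reusing GridWorld and Agent from the first listing) runs it, and sweeping over n is an easy sanity check, since n = 1 should reproduce the TD(0) table from above:

# My addition: entry point plus a small sweep over n; n = 1 is plain TD(0).
if __name__ == '__main__':
    for n in (1, 2, 4, 8):
        env = GridWorld()
        agent = Agent()
        data = [[0.0] * 4 for _ in range(4)]
        for _ in range(50000):
            n_step_td_update(env, agent, data, n, alpha=0.01, gamma=1.0)
        print('n =', n)
        for row in data:
            print([round(v, 2) for v in row])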