Facebook
From asdasd, 2 Weeks ago, written in Python.
Embed
Download Paste or View Raw
Hits: 144
  1. import cv2
  2. import numpy as np
  3. import pyautogui
  4. import pygetwindow as gw
  5. import pytesseract
  6. import torch
  7. import torch.nn as nn
  8. import torch.nn.functional as F
  9. import torch.optim as optim
  10. import random
  11. import time
  12. import ctypes
  13. from ctypes import wintypes
  14.  
# Absolute path to the Tesseract OCR binary (default Windows install location).
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"

# Win32 OpenProcess access rights required by ReadProcessMemory below.
PROCESS_VM_READ = 0x0010
PROCESS_QUERY_INFORMATION = 0x0400
  19.  
  20. def read_memory(process_id, address):
  21.     process_handle = ctypes.windll.kernel32.OpenProcess(PROCESS_VM_READ | PROCESS_QUERY_INFORMATION, False, process_id)
  22.     if not process_handle:
  23.         raise Exception("Cannot open process: {}".format(process_id))
  24.     value = ctypes.c_int()
  25.     bytes_read = wintypes.SIZE()
  26.     ctypes.windll.kernel32.ReadProcessMemory(process_handle, ctypes.c_void_p(address), ctypes.byref(value), ctypes.sizeof(value), ctypes.byref(bytes_read))
  27.     ctypes.windll.kernel32.CloseHandle(process_handle)
  28.     return value.value
  29.  
  30. class Net(nn.Module):
  31.     def __init__(self):
  32.         super(Net, self).__init__()
  33.         self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
  34.         self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
  35.         self.conv2_drop = nn.Dropout2d()
  36.         self.fc1 = nn.Linear(6480, 50)
  37.         self.fc2 = nn.Linear(50, 5)  # Actions: w, a, s, d, space
  38.  
  39.     def forward(self, x):
  40.         x = F.relu(F.max_pool2d(self.conv1(x), 2))
  41.         x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
  42.         x = x.view(-1, 6480)
  43.         x = F.relu(self.fc1(x))
  44.         x = self.fc2(x)
  45.         return F.log_softmax(x, dim=1)
  46.  
  47. def get_game_image(game_window):
  48.     screenshot = pyautogui.screenshot(region=(game_window.left, game_window.top, game_window.width, game_window.height))
  49.     screenshot = np.array(screenshot)
  50.     screenshot = cv2.cvtColor(screenshot, cv2.COLOR_BGR2GRAY)
  51.     return cv2.resize(screenshot, (84, 84))
  52.  
  53. def perform_action(action_index, game_window):
  54.     actions = ["w", "a", "s", "d", "space"]
  55.     action = actions[action_index]
  56.     print(f"Performing action: {action}")
  57.     if action == "space":
  58.         x = random.randint(game_window.left, game_window.left + game_window.width)
  59.         y = random.randint(game_window.top, game_window.top + game_window.height)
  60.         pyautogui.moveTo(x, y)
  61.         pyautogui.keyDown(action)
  62.         time.sleep(0.1)
  63.         pyautogui.keyUp(action)
  64.     else:
  65.         pyautogui.keyDown(action)
  66.         time.sleep(0.1)
  67.         pyautogui.keyUp(action)
  68.     return action
  69.  
  70. def train(model, device, game_window, optimizer, previous_score, previous_hp):
  71.     model.train()
  72.     current_score = previous_score
  73.     current_hp = previous_hp
  74.     for i in range(10000):
  75.         image = get_game_image(game_window)
  76.         image_tensor = torch.from_numpy(image).float().unsqueeze(0).unsqueeze(0).to(device)
  77.         optimizer.zero_grad()
  78.         output = model(image_tensor)
  79.         action_index = output.max(1)[1].view(1, 1).item()
  80.         action = perform_action(action_index, game_window)
  81.         time.sleep(0.1)
  82.         new_hp = read_memory(process_id, hp_address)
  83.         new_score = read_memory(process_id, score_address)
  84.         print(f"Previous HP: {previous_hp}, Current HP: {new_hp}")
  85.         print(f"Previous Score: {previous_score}, Current Score: {new_score}")
  86.         hp_diff = new_hp - previous_hp
  87.         score_diff = new_score - previous_score
  88.         reward = hp_diff + score_diff
  89.         print(f"HP Diff: {hp_diff}, Score Diff: {score_diff}, Reward: {reward}")
  90.         loss = -torch.tensor([reward], dtype=torch.float, device=device) * output[0][action_index]
  91.         loss.backward()
  92.         optimizer.step()
  93.         previous_hp = new_hp
  94.         previous_score = new_score
  95.         print(f"Loss: {loss.item()}")
  96.  
  97. def main():
  98.     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
  99.     model = Net().to(device)
  100.     optimizer = optim.Adam(model.parameters(), lr=0.001)
  101.     game_window = gw.getWindowsWithTitle("Yet Another Zombie Defense 2")[0]
  102.     game_window.activate()
  103.     initial_image = get_game_image(game_window)
  104.     initial_hp = read_memory(process_id, hp_address)
  105.     initial_score = read_memory(process_id, score_address)
  106.     train(model, device, game_window, optimizer, initial_score, initial_hp)
  107.  
# Script entry point.
if __name__ == "__main__":
    main()