Facebook
From asdf, 2 Weeks ago, written in Plain Text.
Embed
Download Paste or View Raw
Hits: 133
  1. import cv2
  2. import numpy as np
  3. import pyautogui
  4. import pygetwindow as gw
  5. import pytesseract
  6. import torch
  7. import torch.nn as nn
  8. import torch.nn.functional as F
  9. import torch.optim as optim
  10. import random
  11. import time
  12. import ctypes
  13. import ctypes
  14. from ctypes import wintypes
  15.  
# Path to the Tesseract OCR binary used by pytesseract (default Windows install location).
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"

# Win32 process-access rights passed to OpenProcess (values from winnt.h).
PROCESS_VM_READ = 0x0010  # required for ReadProcessMemory
PROCESS_QUERY_INFORMATION = 0x0400  # required to query process information
  20.  
  21. def read_memory(process_id, address):
  22.     # Uzyskaj uchwyt do procesu z odpowiednimi uprawnieniami
  23.     process_handle = ctypes.windll.kernel32.OpenProcess(PROCESS_VM_READ | PROCESS_QUERY_INFORMATION, False, process_id)
  24.  
  25.     if not process_handle:
  26.         raise Exception("Nie można otworzyć procesu: {}".format(process_id))
  27.  
  28.     # Przygotuj buffor, do którego zostanie wczytana wartość
  29.     value = ctypes.c_int()
  30.     bytes_read = wintypes.SIZE()
  31.  
  32.     # Przeczytaj pamięć
  33.     ctypes.windll.kernel32.ReadProcessMemory(process_handle, ctypes.c_void_p(address), ctypes.byref(value), ctypes.sizeof(value), ctypes.byref(bytes_read))
  34.  
  35.     # Zamknij uchwyt do procesu
  36.     ctypes.windll.kernel32.CloseHandle(process_handle)
  37.  
  38.     return value.value
  39.  
  40.  
  41.  
  42. class Net(nn.Module):
  43.     def __init__(self):
  44.         super(Net, self).__init__()
  45.         self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
  46.         self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
  47.         self.conv2_drop = nn.Dropout2d()
  48.         self.fc1 = nn.Linear(6480, 50)
  49.         self.fc2 = nn.Linear(50, 5)  # 5 akcji: w, a, s, d, space
  50.  
  51.     def forward(self, x):
  52.         x = F.relu(F.max_pool2d(self.conv1(x), 2))
  53.         x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
  54.         x = x.view(-1, 6480)
  55.         x = F.relu(self.fc1(x))
  56.         x = self.fc2(x)
  57.         return F.log_softmax(x, dim=1)
  58.  
  59.  
  60. def get_game_image(game_window):
  61.     screenshot = pyautogui.screenshot(region=(game_window.left, game_window.top, game_window.width, game_window.height))
  62.     screenshot = np.array(screenshot)
  63.     screenshot = cv2.cvtColor(screenshot, cv2.COLOR_BGR2GRAY)
  64.     return cv2.resize(screenshot, (84, 84))
  65.  
  66. def get_score_region(screenshot):
  67.     # Assuming the score is located at the top right corner of the screen
  68.     # We can try to give some padding to make sure we are only capturing the score
  69.     score_height = 40
  70.     score_width = 150
  71.     top_padding = 10
  72.     right_padding = 10
  73.    
  74.     y = top_padding
  75.     x = screenshot.shape[1] - score_width - right_padding
  76.     score_region = screenshot[y:y + score_height, x:x + score_width]
  77.    
  78.     return score_region
  79.  
  80. def get_score(screenshot):
  81.     score_region = get_score_region(screenshot)
  82.     if score_region.size == 0:
  83.         print("Score region is empty!")
  84.         return 0
  85.     # We already have a grayscale image so we don't need to convert it again
  86.     thresholded = cv2.threshold(score_region, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
  87.     score_text = pytesseract.image_to_string(thresholded, config='--psm 6')
  88.     score_digits = ''.join(filter(str.isdigit, score_text))
  89.     return int(score_digits) if score_digits else 0
  90.  
  91. def perform_action(action_index, game_window):
  92.     actions = ["w", "a", "s", "d", "space"]
  93.     action = actions[action_index]
  94.     print(f"Performing action: {action}")
  95.  
  96.     if action == "space":
  97.         x = random.randint(game_window.left, game_window.left + game_window.width)
  98.         y = random.randint(game_window.top, game_window.top + game_window.height)
  99.         pyautogui.moveTo(x, y)
  100.         #pyautogui.click()
  101.         pyautogui.keyDown(action)
  102.         time.sleep (0.1)
  103.         pyautogui.keyUp(action)
  104.     else:
  105.         pyautogui.keyDown(action)
  106.         time.sleep (0.1)
  107.         pyautogui.keyUp(action)
  108.     return action
  109.  
  110.  
# Example configuration — CHANGE process_id to the real game process and the
# addresses below to the correct memory addresses for your build.
process_id = 2616  # PID of the game process
hp_address = 0x0437E9DC  # memory address of the HP value
score_address = 0x040351A4  # memory address of the score value (original comment said HP — copy/paste slip)
  115.  
  116. def train(model, device, game_window, optimizer, previous_score, previous_hp):
  117.     model.train()
  118.     current_score = previous_score
  119.     current_hp = previous_hp
  120.     for i in range(10000):  # liczba epok treningowych
  121.         time.sleep(0.1)
  122.         image = get_game_image(game_window)
  123.         image_tensor = (
  124.             torch.from_numpy(image).float().unsqueeze(0).unsqueeze(0).to(device)
  125.         )
  126.         optimizer.zero_grad()
  127.         output = model(image_tensor)
  128.        
  129.         new_hp = read_memory(process_id, hp_address)
  130.         new_score = read_memory(process_id, score_address)  
  131.        
  132.         if new_hp <= 6:
  133.             time.sleep(2)
  134.             pyautogui.keyDown("space")
  135.             time.sleep (0.05)
  136.             pyautogui.keyUp("space")
  137.          acti 1).item()
  138.          acti game_window)
  139.        
  140.         #new_image = get_game_image(game_window)
  141.        
  142.  
  143.         print(f"Previous HP: {previous_hp}, Current HP: {new_hp}")
  144.         print(f"Previous Score: {previous_score}, Current Score: {new_score}")
  145.        
  146.         # Oblicz nagrodę i stratę
  147.         #          100          130 = -30
  148.         hp_diff = new_hp - previous_hp
  149.         score_diff = new_score - previous_score;
  150.         reward = 0;
  151.        
  152.         if new_hp <= 6:
  153.             reward-=15;
  154.         else:
  155.             if hp_diff >= 0:
  156.                 reward+=1;
  157.             else:
  158.                 reward-=5;
  159.             if score_diff > 0:
  160.                 reward +=10;
  161.         #reward = hp_diff + score_diff;
  162.         print(f"HP Diff: {hp_diff}, Score Diff: {score_diff}, Reward: {reward}")
  163.         loss = -torch.tensor([reward], dtype=torch.float,device = device) * output[0][action_index]      
  164.         #loss = -reward * output[0][action_index]
  165.         loss.backward()
  166.         optimizer.step()
  167.         previous_hp = new_hp
  168.         previous_score = new_score
  169.         print(f"Loss: {loss.item()}")
  170.  
  171.  
  172. def main():
  173.     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
  174.     model = Net().to(device)
  175.     optimizer = optim.Adam(model.parameters(), lr=0.001)
  176.     game_window = gw.getWindowsWithTitle("Yet Another Zombie Defense 2")[0]
  177.     game_window.activate()
  178.     process_id = game_window._hWnd
  179.     initial_image = get_game_image(game_window)
  180.     #initial_score = get_score(initial_image)
  181.     train(model, device, game_window, optimizer, 0, 150)
  182.  
  183.  
  184. if __name__ == "__main__":
  185.     main()
  186.