Add: Minimap tracking

LmeSzinc 2023-07-30 23:30:17 +08:00
parent 9f0528299f
commit 7e4c9b0258
7 changed files with 1005 additions and 0 deletions


@@ -32,5 +32,8 @@ starlette==0.14.2
uvicorn[standard]==0.17.6
aiofiles
# Game resources
srcmap==1.1.0
# For dev
# pip-tools


@@ -63,6 +63,7 @@ scipy==1.10.1 # via -r requirements-in.txt
shapely==2.0.1 # via ppocr-onnx
six==1.16.0 # via uiautomator2
sniffio==1.3.0 # via anyio
srcmap==1.1.0 # via -r requirements-in.txt
starlette==0.14.2 # via -r requirements-in.txt
sympy==1.12 # via onnxruntime
tornado==6.3.1 # via pywebio


@@ -0,0 +1,397 @@
from dataclasses import dataclass
from typing import Any
import cv2
import numpy as np
from scipy import signal
from module.base.utils import (
area_offset,
area_pad,
color_similarity_2d,
crop,
get_bbox,
image_size,
rgb2yuv
)
from module.logger import logger
from tasks.map.minimap.utils import (
convolve,
cubic_find_maximum,
image_center_crop,
map_image_preprocess,
peak_confidence
)
from tasks.map.resource.resource import MapResource
@dataclass
class PositionPredictState:
size: Any = None
scale: Any = None
search_area: Any = None
search_image: Any = None
result_mask: Any = None
result: Any = None
sim: Any = None
loca: Any = None
local_sim: Any = None
local_loca: Any = None
precise_sim: Any = None
precise_loca: Any = None
global_loca: Any = None
class Minimap(MapResource):
def init_position(self, position: tuple[int, int]):
logger.info(f"init_position:{position}")
self.position = position
def _predict_position(self, image, scale=1.0):
"""
Args:
image:
scale:
Returns:
PositionPredictState:
"""
scale *= self.POSITION_SEARCH_SCALE
local = cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
size = np.array(image_size(image))
if sum(self.position) > 0:
search_position = np.array(self.position, dtype=np.int64)
search_position += self.POSITION_FEATURE_PAD
search_size = np.array(image_size(local)) * self.POSITION_SEARCH_RADIUS
search_half = (search_size // 2 * 2).astype(np.int64)
search_area = area_offset((0, 0, *(search_half * 2)), offset=-search_half)
search_area = area_offset(search_area, offset=np.multiply(search_position, self.POSITION_SEARCH_SCALE))
search_area = np.array(search_area).astype(np.int64)
search_image = crop(self.assets_floor_feat, search_area, copy=False)
result_mask = crop(self.assets_floor_outside_mask, search_area, copy=False)
else:
search_area = (0, 0, *image_size(local))
search_image = self.assets_floor_feat
result_mask = self.assets_floor_outside_mask
# if round(scale, 5) == self.POSITION_SEARCH_SCALE * 1.0:
# Image.fromarray((local).astype(np.uint8)).save('local.png')
# Image.fromarray((search_image).astype(np.uint8)).save('search_image.png')
# Using mask will take 3 times as long
# mask = self.get_circle_mask(local)
# result = cv2.matchTemplate(search_image, local, cv2.TM_CCOEFF_NORMED, mask=mask)
result = cv2.matchTemplate(search_image, local, cv2.TM_CCOEFF_NORMED)
result_mask = image_center_crop(result_mask, size=image_size(result))
result[result_mask] = 0
_, sim, _, loca = cv2.minMaxLoc(result)
# if round(scale, 3) == self.POSITION_SEARCH_SCALE * 1.0:
# result[result <= 0] = 0
# Image.fromarray((result * 255).astype(np.uint8)).save('match_result.png')
# Gaussian filter to get local maximum
local_maximum = cv2.subtract(result, cv2.GaussianBlur(result, (5, 5), 0))
_, local_sim, _, local_loca = cv2.minMaxLoc(local_maximum)
# if round(scale, 5) == self.POSITION_SEARCH_SCALE * 1.0:
# local_maximum[local_maximum < 0] = 0
# local_maximum[local_maximum > 0.1] = 0.1
# Image.fromarray((local_maximum * 255 * 10).astype(np.uint8)).save('local_maximum.png')
# Calculate the precise location using CUBIC
# precise = crop(result, area=area_offset((-4, -4, 4, 4), offset=local_loca))
# precise_sim, precise_loca = cubic_find_maximum(precise, precision=0.05)
# precise_loca -= 5
precise_loca = np.array((0, 0))
precise_sim = result[local_loca[1], local_loca[0]]
state = PositionPredictState(
size=size, scale=scale,
search_area=search_area, search_image=search_image, result_mask=result_mask, result=result,
sim=sim, loca=loca, local_sim=local_sim, local_loca=local_loca,
precise_sim=precise_sim, precise_loca=precise_loca,
)
# Location on search_image
lookup_loca = precise_loca + local_loca + size * scale / 2
# Location on GIMAP
global_loca = (lookup_loca + search_area[:2]) / self.POSITION_SEARCH_SCALE
# Can't figure out why but the result_of_0.5_lookup_scale + 0.5 ~= result_of_1.0_lookup_scale
global_loca += self.POSITION_MOVE_PATCH
# Move to the origin point of map
global_loca -= self.POSITION_FEATURE_PAD
state.global_loca = global_loca
return state
def _predict_precise_position(self, state):
"""
Args:
state (PositionPredictState):
Returns:
PositionPredictState
"""
size = state.size
scale = state.scale
search_area = state.search_area
result = state.result
loca = state.loca
local_loca = state.local_loca
precise = crop(result, area=area_offset((-4, -4, 4, 4), offset=loca))
precise_sim, precise_loca = cubic_find_maximum(precise, precision=0.05)
precise_loca -= 5
state.precise_sim = precise_sim
state.precise_loca = precise_loca
# Location on search_image
lookup_loca = precise_loca + local_loca + size * scale / 2
# Location on GIMAP
global_loca = (lookup_loca + search_area[:2]) / self.POSITION_SEARCH_SCALE
# Can't figure out why but the result_of_0.5_lookup_scale + 0.5 ~= result_of_1.0_lookup_scale
global_loca += self.POSITION_MOVE_PATCH
# Move to the origin point of map
global_loca -= self.POSITION_FEATURE_PAD
state.global_loca = global_loca
return state
def update_position(self, image):
"""
Get position on GIMAP, costs about 6.57ms.
The following attributes will be set:
- position_similarity
- position
- position_scene
"""
image = self.get_minimap(image, self.POSITION_RADIUS)
image = map_image_preprocess(image)
image &= self.get_circle_mask(image)
best_sim = -1.
best_scale = 1.0
best_state = None
# Walking is in scale 1.20
# Running is in scale 1.25
scale_list = [1.00, 1.05, 1.10, 1.15, 1.20, 1.25]
for scale in scale_list:
state = self._predict_position(image, scale)
# print([np.round(i, 3) for i in [scale, state.sim, state.local_sim, state.global_loca]])
if state.sim > best_sim:
best_sim = state.sim
best_scale = scale
best_state = state
best_state = self._predict_precise_position(best_state)
self.position_similarity = round(best_state.precise_sim, 3)
self.position_similarity_local = round(best_state.local_sim, 3)
self.position = tuple(np.round(best_state.global_loca, 1))
self.position_scale = round(best_scale, 3)
return self.position
def update_direction(self, image):
"""
Get direction of character, costs about 0.64ms.
The following attributes will be set:
- direction_similarity
- direction
"""
image = self.get_minimap(image, self.DIRECTION_RADIUS)
image = color_similarity_2d(image, color=self.DIRECTION_ARROW_COLOR)
try:
area = area_pad(get_bbox(image, threshold=128), pad=-1)
except IndexError:
# IndexError: index 0 is out of bounds for axis 0 with size 0
logger.warning('No direction arrow on minimap')
return
image = crop(image, area=area)
scale = self.DIRECTION_ROTATION_SCALE * self.DIRECTION_SEARCH_SCALE
mapping = cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
result = cv2.matchTemplate(self.ArrowRotateMap, mapping, cv2.TM_CCOEFF_NORMED)
result = cv2.subtract(result, cv2.GaussianBlur(result, (5, 5), 0))
_, sim, _, loca = cv2.minMaxLoc(result)
loca = np.array(loca) / self.DIRECTION_SEARCH_SCALE // (self.DIRECTION_RADIUS * 2)
degree = int((loca[0] + loca[1] * 8) * 5)
def to_map(x):
return int((x * self.DIRECTION_RADIUS * 2 + self.DIRECTION_RADIUS) * self.POSITION_SEARCH_SCALE)
# Row on ArrowRotateMapAll
row = int(degree // 8) + 45
# Calculate +-1 rows to get result with a precision of 1
row = (row - 2, row + 3)
# Convert to ArrowRotateMapAll and to be 5px larger
row = (to_map(row[0]) - 5, to_map(row[1]) + 5)
precise_map = self.ArrowRotateMapAll[row[0]:row[1], :]
result = cv2.matchTemplate(precise_map, mapping, cv2.TM_CCOEFF_NORMED)
result = cv2.subtract(result, cv2.GaussianBlur(result, (5, 5), 0))
def to_map(x):
return int((x * self.DIRECTION_RADIUS * 2) * self.POSITION_SEARCH_SCALE)
def get_precise_sim(d):
y, x = divmod(d, 8)
im = result[to_map(y):to_map(y + 1), to_map(x):to_map(x + 1)]
_, sim, _, _ = cv2.minMaxLoc(im)
return sim
precise = np.array([[get_precise_sim(_) for _ in range(24)]])
precise_sim, precise_loca = cubic_find_maximum(precise, precision=0.1)
precise_loca = degree // 8 * 8 - 8 + precise_loca[0]
self.direction_similarity = round(precise_sim, 3)
self.direction = precise_loca % 360
def update_rotation(self, image):
"""
Get rotation of camera, costs about 0.66ms.
The following attributes will be set:
- rotation_confidence
- rotation
"""
d = self.MINIMAP_RADIUS * 2
scale = 1
# Extract
minimap = self.get_minimap(image, radius=self.MINIMAP_RADIUS)
_, _, v = cv2.split(rgb2yuv(minimap))
image = cv2.subtract(255, v)
# image = cv2.GaussianBlur(image, (3, 3), 0)
# Expand circle into rectangle
remap = cv2.remap(image, *self.RotationRemapData, cv2.INTER_LINEAR)[d * 2 // 10:d * 6 // 10].astype(np.float32)
remap = cv2.resize(remap, None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
# Find derivative
gradx = cv2.Scharr(remap, cv2.CV_32F, 1, 0)
# import matplotlib.pyplot as plt
# plt.imshow(gradx)
# plt.show()
# Magic parameters for scipy.find_peaks
para = {
# 'height': (50, 800),
'height': 50,
# 'prominence': (0, 400),
# 'width': (0, d * scale / 20),
# 'distance': d * scale / 18,
'wlen': d * scale,
}
# plt.plot(gradx[d * 3 // 10])
# plt.show()
# `l` marks the left edge of the sight area, where the derivative is positive
# `r` marks the right edge of the sight area, where the derivative is negative
l = np.bincount(signal.find_peaks(gradx.ravel(), **para)[0] % (d * scale), minlength=d * scale)
r = np.bincount(signal.find_peaks(-gradx.ravel(), **para)[0] % (d * scale), minlength=d * scale)
l, r = np.maximum(l - r, 0), np.maximum(r - l, 0)
# plt.plot(l)
# plt.plot(np.roll(r, -d * scale // 4))
# plt.show()
conv0 = []
kernel = 2 * scale
r_expanded = np.concatenate([r, r, r])
r_length = len(r)
# Faster than repeatedly calling np.roll()
def roll_r(shift):
return r_expanded[r_length - shift:r_length * 2 - shift]
def convolve_r(ker, shift):
return sum(roll_r(shift + i) * (ker - abs(i)) // ker for i in range(-ker + 1, ker))
for offset in range(-kernel + 1, kernel):
result = l * convolve_r(ker=3 * kernel, shift=-d * scale // 4 + offset)
# result = l * convolve(np.roll(r, -d * scale // 4 + offset), kernel=3 * scale)
# minus = l * convolve(np.roll(r, offset), kernel=10 * scale) // 5
# if offset == 0:
# plt.plot(result)
# plt.plot(-minus)
# plt.show()
# result -= minus
# result = convolve(result, kernel=3 * scale)
conv0 += [result]
# plt.figure(figsize=(20, 16))
# for row in conv0:
# plt.plot(row)
# plt.show()
conv0 = np.maximum(conv0, 1)
maximum = np.max(conv0, axis=0)
rotation_confidence = round(peak_confidence(maximum), 3)
if rotation_confidence > 0.3:
# Good match
result = maximum
else:
# Convolve again to reduce noise
average = np.mean(conv0, axis=0)
minimum = np.min(conv0, axis=0)
result = convolve(maximum * average * minimum, 2 * scale)
rotation_confidence = round(peak_confidence(result), 3)
# plt.plot(maximum)
# plt.plot(result)
# plt.show()
# Convert match point to degree
degree = np.argmax(result) / (d * scale) * 360 + 135
degree = int(degree % 360)
# +3 is a value obtained from experience
# Don't know why but <predicted_rotation> + 3 = <actual_rotation>
rotation = degree + 3
self.rotation_confidence = rotation_confidence
self.rotation = rotation
def update(self, image):
"""
Update minimap, costs about 7.88ms.
"""
self.update_position(image)
self.update_direction(image)
self.update_rotation(image)
# MiniMap P:(567.5, 862.8) (1.00x|0.439|0.157), D:303.8 (0.253), R:304 (0.846)
logger.info(
f'MiniMap '
f'P:({self.position[0]:.1f}, {self.position[1]:.1f}) '
f'({self.position_scale:.2f}x|{self.position_similarity:.3f}|{self.position_similarity_local:.3f}), '
f'D:{self.direction:.1f} ({self.direction_similarity:.3f}), '
f'R:{self.rotation} ({self.rotation_confidence:.3f})'
)
if __name__ == '__main__':
"""
Run minimap tracking test.
"""
from tasks.base.ui import UI
# Uncomment this to use local srcmap instead of the pre-built one
# MapResource.SRCMAP = '../srcmap/srcmap'
self = Minimap()
# Set plane, assume starting from Jarilo_AdministrativeDistrict
self.set_plane('Jarilo_AdministrativeDistrict', floor='F1')
ui = UI('alas')
ui.device.disable_stuck_detection()
# Set the starting point. If it's missing, it will be calculated, but the result may contain errors.
# With a starting point set, the position is only searched around it, and each new position becomes the new starting point.
# self.init_position((337, 480))
while 1:
ui.device.screenshot()
self.update(ui.device.image)
self.show_minimap()
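
The core of _predict_position() above is plain OpenCV template matching followed by a Gaussian-blur subtraction that keeps only sharp, well-localized peaks. Below is a minimal standalone sketch of that step; the arrays are synthetic stand-ins for assets_floor_feat and the preprocessed minimap crop, not the real assets.

import cv2
import numpy as np

rng = np.random.default_rng(0)
# Synthetic "map" and a patch cut out of it to act as the minimap feature
gimap = rng.integers(0, 255, (400, 400), dtype=np.uint8)
local = gimap[150:225, 200:275].copy()

# Normalized cross-correlation, global best match
result = cv2.matchTemplate(gimap, local, cv2.TM_CCOEFF_NORMED)
_, sim, _, loca = cv2.minMaxLoc(result)
# Subtracting a blurred copy suppresses broad, smeared responses and keeps
# only sharp local maxima, the same trick used in _predict_position()
local_maximum = cv2.subtract(result, cv2.GaussianBlur(result, (5, 5), 0))
_, local_sim, _, local_loca = cv2.minMaxLoc(local_maximum)
print(sim, loca)              # loca should be (200, 150), i.e. (x, y)
print(local_sim, local_loca)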

tasks/map/minimap/utils.py

@@ -0,0 +1,194 @@
import cv2
import numpy as np
from scipy import signal
from module.base.utils import image_size
def map_image_preprocess(image):
"""
A shared preprocessing method used in ResourceGenerator and _predict_position()
Args:
image (np.ndarray): Screenshot in RGB
Returns:
np.ndarray:
"""
# image = rgb2luma(image)
image = cv2.GaussianBlur(image, (5, 5), 0)
image = cv2.Canny(image, 15, 50)
return image
def create_circular_mask(h, w, center=None, radius=None):
# https://stackoverflow.com/questions/44865023/how-can-i-create-a-circular-mask-for-a-numpy-array
if center is None: # use the middle of the image
center = (int(w / 2), int(h / 2))
if radius is None: # use the smallest distance between the center and image walls
radius = min(center[0], center[1], w - center[0], h - center[1])
y, x = np.ogrid[:h, :w]
dist_from_center = np.sqrt((x - center[0]) ** 2 + (y - center[1]) ** 2)
mask = dist_from_center <= radius
return mask
def rotate_bound(image, angle):
"""
Rotate an image, expanding the bounds so the rotated image is not cropped
https://blog.csdn.net/qq_37674858/article/details/80708393
Args:
image (np.ndarray):
angle (int, float):
Returns:
np.ndarray:
"""
# grab the dimensions of the image and then determine the
# center
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
# grab the rotation matrix (applying the negative of the
# angle to rotate clockwise), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
# perform the actual rotation and return the image
return cv2.warpAffine(image, M, (nW, nH))
def cubic_find_maximum(image, precision=0.05):
"""
Use cubic interpolation (cv2.INTER_CUBIC resize) to fit a curved surface and find the maximum value and its sub-pixel location.
Args:
image (np.ndarray):
precision (int, float):
Returns:
float: Maximum value on curved surface
np.ndarray[float, float]: Location of maximum value
"""
image = cv2.resize(image, None, fx=1 / precision, fy=1 / precision, interpolation=cv2.INTER_CUBIC)
_, sim, _, loca = cv2.minMaxLoc(image)
loca = np.array(loca, dtype=float) * precision
return sim, loca
def image_center_pad(image, size, value=(0, 0, 0)):
"""
Create a new image with given `size`, placing given `image` in the middle.
Args:
image (np.ndarray):
size: (width, height)
value: Color of the background.
Returns:
np.ndarray:
"""
diff = np.array(size) - image_size(image)
left, top = int(diff[0] / 2), int(diff[1] / 2)
right, bottom = diff[0] - left, diff[1] - top
image = cv2.copyMakeBorder(image, top, bottom, left, right, borderType=cv2.BORDER_CONSTANT, value=value)
return image
def image_center_crop(image, size):
"""
Center crop the given image.
Args:
image (np.ndarray):
size: Output image shape, (width, height)
Returns:
np.ndarray:
"""
diff = image_size(image) - np.array(size)
left, top = int(diff[0] / 2), int(diff[1] / 2)
right, bottom = diff[0] - left, diff[1] - top
image = image[top:-bottom, left:-right]
return image
def area2corner(area):
"""
Args:
area: (x1, y1, x2, y2)
Returns:
np.ndarray: [upper-left, upper-right, bottom-left, bottom-right]
"""
return np.array([[area[0], area[1]], [area[2], area[1]], [area[0], area[3]], [area[2], area[3]]])
def convolve(arr, kernel=3):
"""
Args:
arr (np.ndarray): Shape (N,)
kernel (int):
Returns:
np.ndarray:
"""
return sum(np.roll(arr, i) * (kernel - abs(i)) // kernel for i in range(-kernel + 1, kernel))
def convolve_plain(arr, kernel=3):
"""
Args:
arr (np.ndarray): Shape (N,)
kernel (int):
Returns:
np.ndarray:
"""
return sum(np.roll(arr, i) for i in range(-kernel + 1, kernel))
def peak_confidence(arr, **kwargs):
"""
Evaluate the prominence of the highest peak
Args:
arr (np.ndarray): Shape (N,)
**kwargs: Additional kwargs for signal.find_peaks
Returns:
float: 0-1
"""
para = {
'height': 0,
'prominence': 10,
}
para.update(kwargs)
length = len(arr)
peaks, properties = signal.find_peaks(np.concatenate((arr, arr, arr)), **para)
peaks = [h for p, h in zip(peaks, properties['peak_heights']) if length <= p < length * 2]
peaks = sorted(peaks, reverse=True)
count = len(peaks)
if count > 1:
highest, second = peaks[0], peaks[1]
elif count == 1:
highest, second = 1, 0
else:
highest, second = 1, 0
confidence = (highest - second) / highest
return confidence
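
A quick usage sketch for two of the helpers above, on synthetic data only and assuming the repository root is on sys.path so tasks.map.minimap.utils is importable: convolve() applies a triangular smoothing kernel, and peak_confidence() compares the two highest peaks of a 1D signal.

import numpy as np
from tasks.map.minimap.utils import convolve, peak_confidence

# convolve(): triangular weights (kernel - |i|) // kernel around each sample
arr = np.zeros(11, dtype=np.float32)
arr[5] = 9
print(convolve(arr, kernel=3))      # [0. 0. 0. 3. 6. 9. 6. 3. 0. 0. 0.]

# peak_confidence(): 1.0 when one peak clearly dominates, near 0 when ambiguous
sig = np.zeros(100, dtype=np.float32)
sig[20], sig[70] = 100, 60
print(peak_confidence(sig))         # (100 - 60) / 100 = 0.4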


@@ -0,0 +1,84 @@
import os
from PIL import Image
from module.base.utils import load_image
class ResourceConst:
SRCMAP = ''
# Hard-coded coordinates under 1280x720
MINIMAP_CENTER = (39 + 78, 48 + 78)
MINIMAP_RADIUS = 78
POSITION_RADIUS = 75
# Downscale GIMAP and minimap for faster run
POSITION_SEARCH_SCALE = 0.5
# Search the area that is 1.666x minimap, about 100px in wild on GIMAP
POSITION_SEARCH_RADIUS = 1.333
# Can't figure out why but the result_of_0.5_lookup_scale + 0.5 ~= result_of_1.0_lookup_scale
POSITION_MOVE_PATCH = (0.5, 0.5)
# Positions start from the upper-left corner of the template image,
# but the search area can be larger than the map, so pad by
# MINIMAP_RADIUS * POSITION_SEARCH_RADIUS * <max_scale>
POSITION_FEATURE_PAD = int(MINIMAP_RADIUS * POSITION_SEARCH_RADIUS * 1.5)
# Must be odd, equals int(9 * POSITION_SEARCH_SCALE) + 1
POSITION_AREA_DILATE = 5
# Radius to search direction arrow, about 12px
DIRECTION_RADIUS = 12
# Downscale direction arrows for faster run
DIRECTION_SEARCH_SCALE = 0.5
# Scale to 1280x720
DIRECTION_ROTATION_SCALE = 1.0
# Color of the direction arrow
DIRECTION_ARROW_COLOR = (2, 199, 255)
# Downscale GIMAP to run faster
BIGMAP_SEARCH_SCALE = 0.25
# Magic number that resizes a 1280x720 screenshot to GIMAP_luma_05x_ps
BIGMAP_POSITION_SCALE = 0.6137
BIGMAP_POSITION_SCALE_ENKANOMIYA = 0.6137 * 0.7641
# Pad 600px, because the camera sight in game is larger than GIMAP
BIGMAP_BORDER_PAD = int(600 * BIGMAP_SEARCH_SCALE)
def __init__(self):
# Usually 0.4~0.5
self.position_similarity = 0.
# Usually > 0.05
self.position_similarity_local = 0.
# Current position on GIMAP with an error of about 0.1 pixel
self.position: tuple[float, float] = (0, 0)
# Usually > 0.3
# Warnings will be logged if similarity <= 0.8
self.direction_similarity = 0.
# Current character direction with an error of about 0.1 degree
self.direction: float = 0.
# Usually > 0.9
self.rotation_confidence = 0.
# Current camera rotation with an error of about 1 degree
self.rotation: int = 0
# Usually 0.4~0.5
self.bigmap_similarity = 0.
# Usually > 0.05
self.bigmap_similarity_local = 0.
# Current position on GIMAP with an error of about 0.1 pixel
self.bigmap: tuple[float, float] = (0, 0)
def filepath(self, path: str) -> str:
return os.path.abspath(os.path.join(self.SRCMAP, path))
def load_image(self, file):
if os.path.isabs(file):
return load_image(file)
else:
return load_image(self.filepath(file))
def save_image(self, image, file):
file = self.filepath(file)
print(f'Save image: {file}')
Image.fromarray(image).save(file)
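
The derived numbers behind the constants above, worked out explicitly (illustrative arithmetic only, using the values defined in ResourceConst).

MINIMAP_RADIUS = 78
POSITION_RADIUS = 75
POSITION_SEARCH_SCALE = 0.5
POSITION_SEARCH_RADIUS = 1.333

# Padding added around every floor image, so a minimap taken near the map
# border still has enough feature image to match against
print(int(MINIMAP_RADIUS * POSITION_SEARCH_RADIUS * 1.5))    # 155 == POSITION_FEATURE_PAD

# Size of the minimap crop used for positioning, on screen and after downscale
print(POSITION_RADIUS * 2)                                   # 150 px at 1280x720
print(int(POSITION_RADIUS * 2 * POSITION_SEARCH_SCALE))      # 75 px on the downscaled map

# Dilation kernel for *.area.png masks
print(int(9 * POSITION_SEARCH_SCALE) + 1)                    # 5 == POSITION_AREA_DILATE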


@@ -0,0 +1,193 @@
import os
from functools import cached_property
import cv2
import numpy as np
from module.base.utils import (
color_similarity_2d,
crop,
get_bbox,
get_bbox_reversed,
image_paste,
image_size
)
from module.config.utils import iter_folder
from tasks.map.minimap.utils import map_image_preprocess, rotate_bound
from tasks.map.resource.const import ResourceConst
def register_output(output):
def register_wrapper(func):
def wrapper(self, *args, **kwargs):
image = func(self, *args, **kwargs)
self.DICT_GENERATE[output] = image
return image
return wrapper
return register_wrapper
class ResourceGenerator(ResourceConst):
DICT_GENERATE = {}
"""
Input images
"""
@cached_property
@register_output('./srcmap/direction/Arrow.png')
def Arrow(self):
return self.load_image('./resources/direction/Arrow.png')
"""
Output images
"""
@cached_property
def _ArrowRorateDict(self):
"""
Returns:
dict: Key: degree (int), value: rotated arrow image (np.ndarray)
"""
image = self.Arrow
arrows = {}
for degree in range(0, 360):
rotated = rotate_bound(image, degree)
rotated = crop(rotated, area=get_bbox(rotated, threshold=15))
# rotated = cv2.resize(rotated, None, fx=self.ROTATE, fy=self.ROTATE, interpolation=cv2.INTER_NEAREST)
rotated = color_similarity_2d(rotated, color=self.DIRECTION_ARROW_COLOR)
arrows[degree] = rotated
return arrows
@cached_property
@register_output('./srcmap/direction/ArrowRotateMap.png')
def ArrowRotateMap(self):
radius = self.DIRECTION_RADIUS
image = np.zeros((10 * radius * 2, 9 * radius * 2), dtype=np.uint8)
for degree in range(0, 360, 5):
y, x = divmod(degree / 5, 8)
rotated = self._ArrowRorateDict.get(degree)
point = (radius + int(x) * radius * 2, radius + int(y) * radius * 2)
# print(degree, y, x, point[0],point[0] + radius, point[1],point[1] + rotated.shape[1])
image_paste(rotated, image, origin=point)
image = cv2.resize(image, None,
fx=self.DIRECTION_SEARCH_SCALE, fy=self.DIRECTION_SEARCH_SCALE,
interpolation=cv2.INTER_NEAREST)
return image
@cached_property
@register_output('./srcmap/direction/ArrowRotateMapAll.png')
def ArrowRotateMapAll(self):
radius = self.DIRECTION_RADIUS
image = np.zeros((136 * radius * 2, 9 * radius * 2), dtype=np.uint8)
for degree in range(360 * 3):
y, x = divmod(degree, 8)
rotated = self._ArrowRorateDict.get(degree % 360)
point = (radius + int(x) * radius * 2, radius + int(y) * radius * 2)
# print(degree, y, x, point)
image_paste(rotated, image, origin=point)
image = cv2.resize(image, None,
fx=self.DIRECTION_SEARCH_SCALE, fy=self.DIRECTION_SEARCH_SCALE,
interpolation=cv2.INTER_NEAREST)
return image
@cached_property
def _map_background(self):
image = self.load_image('./resources/position/background.png')
height, width, channel = image.shape
grid = (10, 10)
background = np.zeros((height * grid[0], width * grid[1], channel), dtype=np.uint8)
for y in range(grid[0]):
for x in range(grid[1]):
image_paste(image, background, origin=(width * x, height * y))
background = background.copy()
return background
def _map_image_standardize(self, image, padding=0):
"""
Remove existing paddings
Map stroke color is about 127~134, background is 199~208
"""
image = crop(image, get_bbox_reversed(image, threshold=160))
if padding > 0:
size = np.array((padding, padding)) * 2 + image_size(image)
background = crop(self._map_background, area=(0, 0, *size))
image_paste(image, background, origin=(padding, padding))
return background
else:
return image
def _map_image_extract_feat(self, image):
"""
Extract a feature image for positioning.
"""
image = self._map_image_standardize(image, padding=ResourceConst.POSITION_FEATURE_PAD)
image = map_image_preprocess(image)
scale = self.POSITION_SEARCH_SCALE
image = cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)
return image
def _map_image_extract_area(self, image):
"""
Extract accessible area on map.
*.area.png marks the accessible area in red; extract it into a binary image.
"""
# To the same size as feature map
image = self._map_image_standardize(image, padding=ResourceConst.POSITION_FEATURE_PAD)
image = color_similarity_2d(image, color=(255, 0, 0))
scale = self.POSITION_SEARCH_SCALE
image = cv2.resize(image, None, fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
_, image = cv2.threshold(image, 180, 255, cv2.THRESH_BINARY)
# Make the area a little bit larger
kernel = self.POSITION_AREA_DILATE
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel, kernel))
image = cv2.dilate(image, kernel)
# Black area on white background
# image = cv2.subtract(255, image)
return image
@cached_property
def GernerateMapFloors(self):
for world in iter_folder(self.filepath('./resources/position'), is_dir=True):
world_name = os.path.basename(world)
for floor in iter_folder(world, ext='.png'):
print(f'Read image: {floor}')
image = self.load_image(floor)
floor_name = os.path.basename(floor)[:-4]
if floor_name.endswith('.area'):
# ./srcmap/position/{world_name}/xxx.area.png
output = f'./srcmap/position/{world_name}/{floor_name}.png'
register_output(output)(ResourceGenerator._map_image_extract_area)(self, image)
else:
output = f'./srcmap/position/{world_name}/{floor_name}.png'
register_output(output)(ResourceGenerator._map_image_standardize)(self, image)
output = f'./srcmap/position/{world_name}/{floor_name}.feat.png'
register_output(output)(ResourceGenerator._map_image_extract_feat)(self, image)
# Floor images are cached already, no need to return a real value
return True
def generate_output(self):
os.makedirs(self.filepath('./srcmap'), exist_ok=True)
# Calculate all resources
for method in self.__dir__():
if not method.startswith('__') and not method.islower():
_ = getattr(self, method)
# Create output folder
folders = set([os.path.dirname(file) for file in self.DICT_GENERATE.keys()])
for output in folders:
output = self.filepath(output)
os.makedirs(output, exist_ok=True)
# Save image
for output, image in self.DICT_GENERATE.items():
self.save_image(image, file=output)
if __name__ == '__main__':
os.chdir(os.path.join(os.path.dirname(__file__), '../../../'))
ResourceConst.SRCMAP = '../srcmap'
ResourceGenerator().generate_output()
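
ArrowRotateMap above lays the direction arrow out on an 8-column grid, one cell per 5 degrees, which is what lets update_direction() decode a matched cell back into an angle. A small check of that round trip, reusing the same constants; the chosen degrees are arbitrary examples.

DIRECTION_RADIUS = 12

for degree in (0, 35, 185, 355):
    # Grid cell the arrow is drawn into (see ArrowRotateMap above)
    y, x = divmod(degree / 5, 8)
    point = (DIRECTION_RADIUS + int(x) * DIRECTION_RADIUS * 2,
             DIRECTION_RADIUS + int(y) * DIRECTION_RADIUS * 2)
    # Decoding used by update_direction(): degree = (x + y * 8) * 5
    decoded = int((x + y * 8) * 5)
    assert decoded == degree, (degree, decoded, point)
print('round trip OK')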


@@ -0,0 +1,133 @@
import os
from functools import cached_property
import cv2
import numpy as np
from module.base.decorator import del_cached_property
from module.base.utils import area_offset, crop, image_size
from module.exception import ScriptError
from module.logger import logger
from tasks.map.minimap.utils import create_circular_mask
from tasks.map.resource.const import ResourceConst
from tasks.map.keywords import KEYWORDS_MAP_PLANE, MapPlane
class MapResource(ResourceConst):
def __init__(self):
super().__init__()
if MapResource.SRCMAP:
self.SRCMAP = os.path.abspath(MapResource.SRCMAP)
logger.warning(f'MapResource.SRCMAP is set to "{self.SRCMAP}", '
f'this should only be used in DEV environment.')
else:
try:
import srcmap
self.SRCMAP = srcmap.srcmap()
except ImportError:
logger.critical('Dependency "srcmap" is not installed')
raise ScriptError('Dependency "srcmap" is not installed')
# Jarilo_AdministrativeDistrict
self.plane: MapPlane = KEYWORDS_MAP_PLANE.Herta_ParlorCar
# Floor name in game (B1, F1, F2, ...)
self.floor: str = 'F1'
# Key: (width, height), mask shape
# Value: np.ndarray, mask image
self._dict_circle_mask = {}
@cached_property
def ArrowRotateMap(self):
return self.load_image('./direction/ArrowRotateMap.png')
@cached_property
def ArrowRotateMapAll(self):
return self.load_image('./direction/ArrowRotateMapAll.png')
def set_plane(self, plane, floor='F1'):
"""
Args:
plane (MapPlane, str): Such as Jarilo_AdministrativeDistrict
floor (str):
"""
self.plane = MapPlane.find(plane)
self.floor = self.plane.convert_to_floor_name(floor)
del_cached_property(self, 'assets_file_basename')
del_cached_property(self, 'assets_floor')
del_cached_property(self, 'assets_floor_feat')
del_cached_property(self, 'assets_floor_outside_mask')
@cached_property
def assets_file_basename(self):
if self.plane.has_multiple_floors:
return f'./position/{self.plane.world}/{self.plane.name}_{self.floor}'
else:
return f'./position/{self.plane.world}/{self.plane.name}'
@cached_property
def assets_floor(self):
return self.load_image(f'{self.assets_file_basename}.png')
@cached_property
def assets_floor_feat(self):
return self.load_image(f'{self.assets_file_basename}.feat.png')
@cached_property
def assets_floor_outside_mask(self):
image = self.load_image(f'{self.assets_file_basename}.area.png')
return image == 0
def get_minimap(self, image, radius):
"""
Crop the minimap area on image.
"""
area = area_offset((-radius, -radius, radius, radius), offset=self.MINIMAP_CENTER)
image = crop(image, area)
return image
def get_circle_mask(self, image):
"""
Create a circle mask with the shape of the given image.
Masks are cached once created.
"""
w, h = image_size(image)
try:
return self._dict_circle_mask[(w, h)]
except KeyError:
mask = create_circular_mask(w=w, h=h)
mask = (mask * 255).astype(np.uint8)
self._dict_circle_mask[(w, h)] = mask
return mask
@cached_property
def RotationRemapData(self):
d = self.MINIMAP_RADIUS * 2
mx = np.zeros((d, d), dtype=np.float32)
my = np.zeros((d, d), dtype=np.float32)
for i in range(d):
for j in range(d):
mx[i, j] = d / 2 + i / 2 * np.cos(2 * np.pi * j / d)
my[i, j] = d / 2 + i / 2 * np.sin(2 * np.pi * j / d)
return mx, my
@cached_property
def _named_window(self):
return cv2.namedWindow('MinimapTracking')
def show_minimap(self):
image = cv2.cvtColor(self.assets_floor, cv2.COLOR_RGB2BGR)
position = np.array(self.position).astype(int)
def vector(degree):
degree = np.deg2rad(degree - 90)
point = np.array(position) + np.array((np.cos(degree), np.sin(degree))) * 30
return point.astype(int)
image = cv2.circle(image, position, radius=5, color=(0, 0, 255), thickness=-1)
image = cv2.line(image, position, vector(self.direction), color=(0, 255, 0), thickness=2)
image = cv2.line(image, position, vector(self.rotation), color=(255, 0, 0), thickness=2)
cv2.imshow('MinimapTracking', image)
cv2.waitKey(1)
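
RotationRemapData above is a polar unwrap: cv2.remap turns the circular minimap into a rectangle where the angle runs along x and the radius along y, so update_rotation() can find the two edges of the sight area as vertical gradients. A small synthetic sketch of that remap; only the map construction mirrors the code above, the input image is made up.

import cv2
import numpy as np

d = 32
img = np.zeros((d, d), dtype=np.uint8)
# A bright ray at angle 0, pointing right from the center
cv2.line(img, (d // 2, d // 2), (d - 1, d // 2), 255, 1)

mx = np.zeros((d, d), dtype=np.float32)
my = np.zeros((d, d), dtype=np.float32)
for i in range(d):
    for j in range(d):
        mx[i, j] = d / 2 + i / 2 * np.cos(2 * np.pi * j / d)
        my[i, j] = d / 2 + i / 2 * np.sin(2 * np.pi * j / d)
remap = cv2.remap(img, mx, my, cv2.INTER_LINEAR)

# The ray becomes a bright vertical stripe at column 0 (angle 0),
# while other columns stay mostly dark
print(remap[:, 0].mean(), remap[:, d // 4].mean())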