Opt: Remove redundant conversions from Pillow to numpy

This commit is contained in:
LmeSzinc 2022-01-24 23:43:53 +08:00
parent c5ed59ab26
commit e92579b5f9
28 changed files with 62 additions and 64 deletions

View File

@ -193,7 +193,7 @@ class Button(Resource):
Args:
area (tuple):
image: Pillow image. If provided, load color and image from it.
image (np.ndarray): Screenshot. If provided, load color and image from it.
name (str):
Returns:
@ -214,7 +214,7 @@ class Button(Resource):
Args:
vector (tuple):
image: Pillow image. If provided, load color and image from it.
image (np.ndarray): Screenshot. If provided, load color and image from it.
name (str):
Returns:

View File

@ -52,6 +52,5 @@ class Mask(Template):
Returns:
np.ndarray:
"""
image = np.array(image)
self.set_channel(image_channel(image))
return cv2.bitwise_and(image, self.image)

View File

@ -72,7 +72,6 @@ class Template(Resource):
bool: If matches.
"""
if self.is_gif:
image = np.array(image)
for template in self.image:
res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
_, sim, _, _ = cv2.minMaxLoc(res)
@ -83,7 +82,7 @@ class Template(Resource):
return False
else:
res = cv2.matchTemplate(np.array(image), self.image, cv2.TM_CCOEFF_NORMED)
res = cv2.matchTemplate(image, self.image, cv2.TM_CCOEFF_NORMED)
_, sim, _, _ = cv2.minMaxLoc(res)
# print(self.file, sim)
return sim > similarity
@ -92,7 +91,7 @@ class Template(Resource):
"""
Args:
point:
image: Pillow image. If provided, load color and image from it.
image (np.ndarray): Screenshot. If provided, load color and image from it.
name (str):
Returns:
@ -116,7 +115,7 @@ class Template(Resource):
float: Similarity
Button:
"""
res = cv2.matchTemplate(np.array(image), self.image, cv2.TM_CCOEFF_NORMED)
res = cv2.matchTemplate(image, self.image, cv2.TM_CCOEFF_NORMED)
_, sim, _, point = cv2.minMaxLoc(res)
# print(self.file, sim)
@ -137,13 +136,12 @@ class Template(Resource):
raw = image
if self.is_gif:
result = []
image = np.array(image)
for template in self.image:
res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
res = np.array(np.where(res > similarity)).T[:, ::-1].tolist()
result += res
else:
result = cv2.matchTemplate(np.array(image), self.image, cv2.TM_CCOEFF_NORMED)
result = cv2.matchTemplate(image, self.image, cv2.TM_CCOEFF_NORMED)
result = np.array(np.where(result > similarity)).T[:, ::-1]
# result: np.array([[x0, y0], [x1, y1], ...)

View File

@ -528,14 +528,14 @@ def color_similar(color1, color2, threshold=10):
def color_similar_1d(image, color, threshold=10):
"""
Args:
image: 1D array.
image (np.ndarray): 1D array.
color: (r, g, b)
threshold(int): Default to 10.
Returns:
np.ndarray: bool
"""
diff = np.array(image).astype(int) - color
diff = image.astype(int) - color
diff = np.max(np.maximum(diff, 0), axis=1) - np.min(np.minimum(diff, 0), axis=1)
return diff <= threshold
@ -549,7 +549,6 @@ def color_similarity_2d(image, color):
Returns:
np.ndarray: uint8
"""
image = np.array(image)
r, g, b = cv2.split(cv2.subtract(image, (*color, 0)))
positive = cv2.max(cv2.max(r, g), b)
r, g, b = cv2.split(cv2.subtract((*color, 0), image))
@ -568,7 +567,6 @@ def extract_letters(image, letter=(255, 255, 255), threshold=128):
Returns:
np.ndarray: Shape (height, width)
"""
image = np.array(image)
r, g, b = cv2.split(cv2.subtract(image, (*letter, 0)))
positive = cv2.max(cv2.max(r, g), b)
r, g, b = cv2.split(cv2.subtract((*letter, 0), image))
@ -587,7 +585,6 @@ def extract_white_letters(image, threshold=128):
Returns:
np.ndarray: Shape (height, width)
"""
image = np.array(image)
r, g, b = cv2.split(cv2.subtract((255, 255, 255, 0), image))
minimum = cv2.min(cv2.min(r, g), b)
maximum = cv2.max(cv2.max(r, g), b)

View File

@ -35,7 +35,7 @@ class RewardCommission(UI, InfoHandler):
Get all commissions from an image.
Args:
image: Pillow image
image (np.ndarray):
Returns:
SelectedGrids:

View File

@ -49,7 +49,7 @@ class RewardDorm(UI):
in: page_dorm
out: page_dorm, with info_bar
"""
image = MASK_DORM.apply(np.array(self.device.image))
image = MASK_DORM.apply(self.device.image)
loves = TEMPLATE_DORM_LOVE.match_multi(image, name='DORM_LOVE')
coins = TEMPLATE_DORM_COIN.match_multi(image, name='DORM_COIN')
logger.info(f'Dorm loves: {len(loves)}, Dorm coins: {len(coins)}')
@ -178,7 +178,7 @@ class RewardDorm(UI):
return Digit(grids.buttons, letter=(255, 255, 255), threshold=128, name='OCR_DORM_FOOD')
def _dorm_has_food(self, button):
return np.min(rgb2gray(np.array(self.image_crop(button)))) < 127
return np.min(rgb2gray(self.image_crop(button))) < 127
def _dorm_feed_click(self, button, count):
"""

View File

@ -31,7 +31,7 @@ class EquipmentChange(Equipment):
index = 0
self.equipping_list = []
for button in EQUIPMENT_GRID.buttons:
crop_image = np.array(self.image_crop(button))
crop_image = self.image_crop(button)
edge_value = abs(np.mean(cv2.Sobel(crop_image, 3, 1, 1)))
if edge_value > 0.1:
self.equipping_list.append(index)
@ -126,7 +126,7 @@ class EquipmentChange(Equipment):
self.equipping_set(False)
res = cv2.matchTemplate(np.array(self.device.screenshot()), np.array(
res = cv2.matchTemplate(self.device.screenshot(), np.array(
self.equip_list[index]), cv2.TM_CCOEFF_NORMED)
_, sim, _, point = cv2.minMaxLoc(res)
@ -137,7 +137,7 @@ class EquipmentChange(Equipment):
for _ in range(0, 15):
self._equipment_swipe()
res = cv2.matchTemplate(np.array(self.device.screenshot()), np.array(
res = cv2.matchTemplate(self.device.screenshot(), np.array(
self.equip_list[index]), cv2.TM_CCOEFF_NORMED)
_, sim, _, point = cv2.minMaxLoc(res)

View File

@ -47,7 +47,7 @@ class AmbushHandler(Combat):
self.wait_until_appear_then_click(MAP_AMBUSH_EVADE)
self.wait_until_appear(INFO_BAR_1)
image = info_letter_preprocess(np.array(self.image_crop(INFO_BAR_DETECT)))
image = info_letter_preprocess(self.image_crop(INFO_BAR_DETECT))
if TEMPLATE_AMBUSH_EVADE_SUCCESS.match(image):
logger.attr('Ambush_evade', 'success')
@ -110,7 +110,7 @@ class AmbushHandler(Combat):
if not self.appear(INFO_BAR_1):
return False
image = info_letter_preprocess(np.array(self.image_crop(INFO_BAR_DETECT)))
image = info_letter_preprocess(self.image_crop(INFO_BAR_DETECT))
if TEMPLATE_MAP_WALK_OUT_OF_STEP.match(image):
logger.warning('Map walk out of step.')
self.handle_info_bar()

View File

@ -179,7 +179,7 @@ class LoginHandler(Combat):
XPS('//*[@content-desc="请滑动阅读协议内容"]', xp, hierarchy)])
test_image_original = self.device.image
image_handle_crop = crop(np.array(test_image_original), (start_padding_results[2], 0,
image_handle_crop = crop(test_image_original, (start_padding_results[2], 0,
start_margin_results[2], self.device.image.height))
# Image.fromarray(image_handle_crop).show()
sims = color_similarity_2d(image_handle_crop, color=(182, 189, 202))

View File

@ -105,7 +105,7 @@ class StrategyHandler(InfoHandler):
Returns:
int: Formation index.
"""
image = np.array(self.image_crop(MAP_BUFF))
image = self.image_crop(MAP_BUFF)
if TEMPLATE_FORMATION_2.match(image):
buff = 'double_line'
elif TEMPLATE_FORMATION_1.match(image):

View File

@ -187,7 +187,7 @@ class FleetOperator:
# Cropping FLEET_*_IN_USE to avoid detecting info_bar, also do the trick.
# It also avoids wasting time on handling the info_bar.
image = rgb2gray(np.array(self.main.image_crop(self._in_use)))
image = rgb2gray(self.main.image_crop(self._in_use))
return np.std(image.flatten(), ddof=1) > self.FLEET_IN_USE_STD
def bar_opened(self):
@ -196,7 +196,7 @@ class FleetOperator:
bool: If dropdown menu appears.
"""
# Check the brightness of the rightest column of the bar area.
luma = rgb2gray(np.array(self.main.image_crop(self._bar)))[:, -1]
luma = rgb2gray(self.main.image_crop(self._bar))[:, -1]
return np.sum(luma > 127) / luma.size > 0.5
def ensure_to_be(self, index):

View File

@ -223,7 +223,7 @@ class SelectedGrids:
if not self:
return self
location = np.array(self.location)
diff = np.sum(np.abs(np.array(location) - camera), axis=1)
diff = np.sum(np.abs(location - camera), axis=1)
# grids = [x for _, x in sorted(zip(diff, self.grids))]
grids = tuple(np.array(self.grids)[np.argsort(diff)])
return SelectedGrids(grids)

View File

@ -1,7 +1,7 @@
import time
import numpy as np
from PIL import Image, ImageDraw, ImageOps
from PIL import ImageDraw, ImageOps
from module.base.utils import *
from module.config.config import AzurLaneConfig
@ -152,13 +152,12 @@ class Homography:
def detect(self, image):
"""
Args:
image: Screenshot.
image (np.ndarray): Screenshot.
Returns:
bool: If success.
"""
start_time = time.time()
image = np.array(image)
self.image = image
# Image initialization

View File

@ -108,7 +108,7 @@ class View(MapDetector):
Update image to all grids.
If camera position didn't change, no need to calculate again, updating image is enough.
"""
image = self._image_clear_ui(np.array(image))
image = self._image_clear_ui(image)
self.image = image
for grid in self:
grid.reset()

View File

@ -175,7 +175,7 @@ class GlobeCamera(GlobeOperation, ZoneManager):
screen = self.globe2screen(location).flatten().round()
screen = np.round(screen).astype(int).tolist()
# Average color of whirlpool center
center = np.array(self.image_crop(screen))
center = self.image_crop(screen)
center = np.array([[cv2.mean(center), ], ]).astype(np.uint8)
h, s, v = rgb2hsv(center)[0][0]
# hsv usually to be (338, 74.9, 100)

View File

@ -107,12 +107,11 @@ class GlobeDetection:
def load(self, image):
"""
Args:
image: Pillow image.
image (np.ndarray):
"""
self.load_globe_map()
start_time = time.time()
image = np.array(image)
local = self.find_peaks(self.perspective_transform(image), para=self.config.OS_LOCAL_FIND_PEAKS_PARAMETERS)
local = local.astype(np.uint8)
local = cv2.resize(local, None, fx=self.config.OS_GLOBE_IMAGE_RESIZE, fy=self.config.OS_GLOBE_IMAGE_RESIZE)

View File

@ -95,7 +95,7 @@ class StorageHandler(GlobeOperation, ZoneManager):
if SCROLL_STORAGE.appear(main=self):
SCROLL_STORAGE.set_bottom(main=self, skip_first_screenshot=True)
image = rgb2gray(np.array(self.device.image))
image = rgb2gray(self.device.image)
items = TEMPLATE_STORAGE_LOGGER.match_multi(image, similarity=0.5)
logger.attr('Storage_logger', len(items))
@ -115,7 +115,10 @@ class StorageHandler(GlobeOperation, ZoneManager):
in: STORAGE_CHECK
out: STORAGE_CHECK, scroll to bottom
"""
sample_types = [TEMPLATE_STORAGE_OFFENSE, TEMPLATE_STORAGE_SURVIVAL, TEMPLATE_STORAGE_COMBAT, TEMPLATE_STORAGE_QUALITY_OFFENSE, TEMPLATE_STORAGE_QUALITY_SURVIVAL, TEMPLATE_STORAGE_QUALITY_COMBAT]
sample_types = [
TEMPLATE_STORAGE_OFFENSE, TEMPLATE_STORAGE_SURVIVAL, TEMPLATE_STORAGE_COMBAT,
TEMPLATE_STORAGE_QUALITY_OFFENSE, TEMPLATE_STORAGE_QUALITY_SURVIVAL, TEMPLATE_STORAGE_QUALITY_COMBAT
]
for sample_type in sample_types:
while 1:
if skip_first_screenshot:
@ -123,7 +126,7 @@ class StorageHandler(GlobeOperation, ZoneManager):
else:
self.device.screenshot()
image = rgb2gray(np.array(self.device.image))
image = rgb2gray(self.device.image)
items = sample_type.match_multi(image, similarity=0.75)
logger.attr('Storage_sample', len(items))
@ -208,7 +211,7 @@ class StorageHandler(GlobeOperation, ZoneManager):
if SCROLL_STORAGE.appear(main=self):
SCROLL_STORAGE.set_top(main=self, skip_first_screenshot=skip_first_screenshot)
image = rgb2gray(np.array(self.device.image))
image = rgb2gray(self.device.image)
items = self._storage_item_to_template(item).match_multi(image, similarity=0.75)
logger.attr(f'Storage_{item}', len(items))

View File

@ -95,7 +95,7 @@ def get_research_name(image):
def get_research_finished(image):
"""
Args:
image: Pillow image
image (np.ndarray):
Returns:
int: Index of the finished project, 0 to 4. Return None if no project finished.
@ -150,7 +150,6 @@ def match_template(image, template, area, offset=30, threshold=0.85):
else:
offset = np.array((0, -offset, 0, offset))
image = crop(image, offset + area)
template = np.array(template)
res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
_, sim, _, point = cv2.minMaxLoc(res)
similarity = sim if sim >= threshold else 0.0

View File

@ -154,7 +154,7 @@ class RewardResearch(ResearchSelector):
else:
self.device.screenshot()
max_rgb = np.max(rgb2gray(np.array(self.image_crop(RESEARCH_UNAVAILABLE))))
max_rgb = np.max(rgb2gray(self.image_crop(RESEARCH_UNAVAILABLE)))
# Don't use interval here, RESEARCH_CHECK already appeared 5 seconds ago
if click_timer.reached() and self.appear(RESEARCH_CHECK, offset=(20, 20)):

View File

@ -147,7 +147,7 @@ class Enhancement(Dock):
# Respond accordingly based on info_bar information
if self.info_bar_count():
image = info_letter_preprocess(np.array(self.image_crop(INFO_BAR_DETECT)))
image = info_letter_preprocess(self.image_crop(INFO_BAR_DETECT))
if TEMPLATE_ENHANCE_SUCCESS.match(image):
enhanced = True
elif TEMPLATE_ENHANCE_FAILED.match(image):

View File

@ -32,7 +32,7 @@ class DropImage:
def add(self, image):
"""
Args:
image: Pillow image.
image (np.ndarray):
"""
if self:
self.images.append(image)
@ -54,6 +54,10 @@ class DropImage:
main.device.screenshot()
self.add(main.device.image)
@property
def count(self):
return len(self.images)
def __bool__(self):
return self.save or self.upload
@ -76,6 +80,7 @@ class AzurStats:
"""
self.config = config
@property
def _user_agent(self):
return f'Alas ({str(self.config.DropRecord_AzurStatsID)})'
@ -94,7 +99,7 @@ class AzurStats:
output.seek(0)
data = {'file': (filename, output, 'image/png')}
headers = {'user-agent': self._user_agent()}
headers = {'user-agent': self._user_agent}
session = requests.Session()
session.mount('http://', HTTPAdapter(max_retries=5))
session.mount('https://', HTTPAdapter(max_retries=5))
@ -144,7 +149,7 @@ class AzurStats:
def commit(self, images, genre, save=False, upload=False, info=''):
"""
Args:
images (list): List of pillow images
images (list): List of images in numpy array.
genre (str):
save (bool): If save image to local file system.
upload (bool): If upload image to Azur Stats.

View File

@ -15,7 +15,7 @@ class BattleStatusStatistics:
def stats_battle_status(self, image):
"""
Args:
image: Pillow image.
image (np.ndarray):
Returns:
str: Enemy name, such as '中型主力舰队'.

View File

@ -34,7 +34,7 @@ class CampaignBonusStatistics(GetItemsStatistics):
def stats_get_items(self, image, **kwargs):
"""
Args:
image: Pillow image.
image (np.ndarray):
Returns:
list[Item]:

View File

@ -4,6 +4,7 @@ import shutil
from tqdm import tqdm
from module.base.decorator import cached_property
from module.base.utils import load_image
from module.logger import logger
from module.ocr.al_ocr import AlOcr
from module.ocr.ocr import Ocr

View File

@ -31,7 +31,7 @@ class GetItemsStatistics:
def _stats_get_items_is_odd(image):
"""
Args:
image: Pillow image
image (np.ndarray):
Returns:
bool: If the number of items in row is odd.
@ -42,7 +42,7 @@ class GetItemsStatistics:
def _stats_get_items_load(self, image):
"""
Args:
image: Pillow image, 1280x720.
image (np.ndarray):
"""
ITEM_GROUP.item_class = Item
ITEM_GROUP.similarity = 0.92
@ -62,7 +62,7 @@ class GetItemsStatistics:
def stats_get_items(self, image, **kwargs):
"""
Args:
image: Pillow image.
image (np.ndarray):
Returns:
list[Item]:

View File

@ -148,7 +148,7 @@ class ItemGrid:
def _load_image(self, image):
"""
Args:
image: Pillow image
image (np.ndarray):
"""
self.items = []
for button in self.grids.buttons:
@ -166,7 +166,7 @@ class ItemGrid:
if name in self.templates:
continue
image = load_image(image)
image = crop(np.array(image), area=self.template_area)
image = crop(image, area=self.template_area)
self.colors[name] = cv2.mean(image)[:3]
self.templates[name] = image
self.templates_hit[name] = 0
@ -182,7 +182,7 @@ class ItemGrid:
if name in self.cost_templates:
continue
image = load_image(image)
self.cost_templates[name] = np.array(image)
self.cost_templates[name] = image
self.cost_templates_hit[name] = 0
self.next_cost_template_index += 1
@ -191,12 +191,11 @@ class ItemGrid:
Match templates, try most frequent hit templates first.
Args:
image:
image (np.ndarray):
Returns:
str: Template name.
"""
image = np.array(image)
color = cv2.mean(crop(image, self.template_area))[:3]
names = np.array(list(self.templates.keys()))[np.argsort(list(self.templates_hit.values()))][::-1]
for name in names:
@ -219,10 +218,10 @@ class ItemGrid:
def extract_template(self, image):
"""
Args:
image: Pillow image
image (np.ndarray):
Returns:
dict: Newly found templates. Key: str, template name. Value: pillow image
dict: Newly found templates. Key: str, template name. Value: np.ndarray
"""
self._load_image(image)
prev = set(self.templates.keys())
@ -268,7 +267,7 @@ class ItemGrid:
def predict_tag(image):
"""
Args:
image: Pillow image. tag_area of the item.
image (np.ndarray): The tag_area of the item.
Replace this method to predict tags.
Returns:
@ -279,7 +278,7 @@ class ItemGrid:
def predict(self, image, name=True, amount=True, cost=False, price=False, tag=False):
"""
Args:
image: Pillow image
image (np.ndarray):
name (bool): If predict item name.
amount (bool): If predict item amount.
cost (bool): If predict the cost to buy item.

View File

@ -37,12 +37,11 @@ def pack(img_list):
Stack images vertically.
Args:
img_list (list): List of pillow image
img_list (list): List of image
Returns:
Pillow image
np.ndarray:
"""
img_list = [np.array(i) for i in img_list]
image = cv2.vconcat(img_list)
return image
@ -55,7 +54,7 @@ def unpack(image):
image:
Returns:
list: List of pillow image.
list: List of np.ndarray.
"""
size = image_size(image)
if size == (1280, 720):

View File

@ -44,7 +44,7 @@ class Scroll:
Returns:
np.ndarray: Shape (n,), dtype bool.
"""
image = np.array(main.image_crop(self.area))
image = main.image_crop(self.area)
image = color_similarity_2d(image, color=self.color)
mask = np.max(image, axis=1 if self.is_vertical else 0) > self.color_threshold
self.length = np.sum(mask)