From dba44aba40e1f9e67130a8eac9e0e123d29d4c70 Mon Sep 17 00:00:00 2001
From: ninemine <1371605831@qq.com>
Date: Mon, 21 Jul 2025 14:47:31 +0800
Subject: [PATCH] BS 0.2.0 Visual / 0.1.1 Web

---
 Convention/Runtime/Visual/Core.py      | 1305 +++++++++++++++++++++
 Convention/Runtime/Visual/OpenCV.py    | 1433 ++++++++++++++++++++++
 Convention/Runtime/Visual/README.md    |  264 +++++
 Convention/Runtime/Visual/WordCloud.py |   66 ++
 Convention/Runtime/Visual/__init__.py  |    0
 5 files changed, 3068 insertions(+)
 create mode 100644 Convention/Runtime/Visual/Core.py
 create mode 100644 Convention/Runtime/Visual/OpenCV.py
 create mode 100644 Convention/Runtime/Visual/README.md
 create mode 100644 Convention/Runtime/Visual/WordCloud.py
 create mode 100644 Convention/Runtime/Visual/__init__.py

diff --git a/Convention/Runtime/Visual/Core.py b/Convention/Runtime/Visual/Core.py
new file mode 100644
index 0000000..277346a
--- /dev/null
+++ b/Convention/Runtime/Visual/Core.py
@@ -0,0 +1,1305 @@
+from typing import *
+from pydantic import BaseModel
+from abc import *
+import random
+import numpy as np
+import matplotlib.pyplot as plt
+import seaborn as sns
+from ..Internal import *
+from ..MathEx.Core import *
+from ..Str.Core import UnWrapper as Unwrapper2Str
+from ..File.Core import tool_file, Wrapper as Wrapper2File, tool_file_or_str, is_image_file, loss_file, static_loss_file_dir
+from ..Visual.OpenCV import ImageObject, tool_file_cvex, WrapperFile2CVEX, Wrapper as Wrapper2Image, get_new_noise
+from PIL.Image import (
+    Image as PILImage,
+    fromarray as PILFromArray,
+    open as PILOpen
+)
+from PIL.ImageFile import ImageFile as PILImageFile
+import cv2
+from io import BytesIO
+
+class data_visual_generator:
+    def __init__(self, file:tool_file_or_str):
+        self._file:tool_file = Wrapper2File(file)
+        self._file.load()
+
+    def open(self, mode='r', is_refresh=False, encoding:str='utf-8', *args, **kwargs):
+        self._file.open(mode, is_refresh, encoding, *args, **kwargs)
+
+    def reload(self, file:Optional[tool_file_or_str]):
+        if file is not None:
+            self._file = Wrapper2File(file)
+            self._file.load()
+
+    def plot_line(self, x, y, df=None, title="Line Chart", x_label=None, y_label=None):
+        plt.figure(figsize=(10, 6))
+        sns.lineplot(data=df if df is not None else self._file.data, x=x, y=y)
+        plt.title(title)
+        plt.xlabel(x_label if x_label is not None else str(x))
+        plt.ylabel(y_label if y_label is not None else str(y))
+        plt.grid(True)
+        plt.show()
+
+    def plot_bar(self, x, y, df=None, figsize=(10,6), title="Bar Chart", x_label=None, y_label=None):
+        plt.figure(figsize=figsize)
+        sns.barplot(data=df if df is not None else self._file.data, x=x, y=y)
+        plt.title(title)
+        plt.xlabel(x_label if x_label is not None else str(x))
+        plt.ylabel(y_label if y_label is not None else str(y))
+        plt.grid(True)
+        plt.show()
+
+    def plot_scatter(self, x, y, df=None, title="Scatter Plot", x_label=None, y_label=None):
+        plt.figure(figsize=(10, 6))
+        sns.scatterplot(data=df if df is not None else self._file.data, x=x, y=y)
+        plt.title(title)
+        plt.xlabel(x_label if x_label is not None else str(x))
+        plt.ylabel(y_label if y_label is not None else str(y))
+        plt.grid(True)
+        plt.show()
+
+    def plot_histogram(self, column, df=None, title="Histogram", x_label=None, y_label=None):
+        plt.figure(figsize=(10, 6))
+        sns.histplot(data=df if df is not None else self._file.data, x=column)
+        plt.title(title)
+        plt.xlabel(x_label if x_label is not None else str(column))
+        plt.ylabel(y_label if y_label is not None else "value")
+        plt.grid(True)
+        plt.show()
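+
+    # Usage sketch (editor's illustration, not part of the original patch);
+    # assumes the wrapped file loads into a pandas DataFrame, e.g. a CSV:
+    #
+    #   gen = data_visual_generator("sales.csv")   # hypothetical file
+    #   gen.plot_line("month", "revenue")
+    #   gen.plot_histogram("revenue")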
+
+    def plot_pairplot(self, df=None, title="Pairplot"):
+        sns.pairplot(df if df is not None else self._file.data)
+        plt.suptitle(title, y=1.02)
+        plt.show()
+
+    def plot_pie(self, column, figsize=(10,6), df=None, title="Pie Chart"):
+        plt.figure(figsize=figsize)
+        if df is not None:
+            df[column].value_counts().plot.pie(autopct='%1.1f%%')
+        else:
+            self._file.data[column].value_counts().plot.pie(autopct='%1.1f%%')
+        plt.title(title)
+        plt.ylabel('')  # remove the y-axis label
+        plt.show()
+
+    def plot_box(self, x, y, df=None, figsize=(10,6), title="Box Plot", x_label=None, y_label=None):
+        plt.figure(figsize=figsize)
+        sns.boxplot(data=df if df is not None else self._file.data, x=x, y=y)
+        plt.title(title)
+        plt.xlabel(x_label if x_label is not None else str(x))
+        plt.ylabel(y_label if y_label is not None else str(y))
+        plt.grid(True)
+        plt.show()
+
+    def plot_heatmap(self, df=None, figsize=(10,6), title="Heatmap", cmap='coolwarm'):
+        plt.figure(figsize=figsize)
+        sns.heatmap(df.corr() if df is not None else self._file.data.corr(), annot=True, cmap=cmap)
+        plt.title(title)
+        plt.show()
+
+    def plot_catplot(self, x, y, hue=None, df=None, kind='bar', figsize=(10,6), title="Categorical Plot", x_label=None, y_label=None):
+        plt.figure(figsize=figsize)
+        sns.catplot(data=df if df is not None else self._file.data, x=x, y=y, hue=hue, kind=kind)
+        plt.title(title)
+        plt.xlabel(x_label if x_label is not None else str(x))
+        plt.ylabel(y_label if y_label is not None else str(y))
+        plt.grid(True)
+        plt.show()
+    def plot_catplot_strip(self, x, y, hue=None, df=None, figsize=(10,6), title="Categorical Plot", x_label=None, y_label=None):
+        self.plot_catplot(x, y, hue=hue, df=df, kind='strip', figsize=figsize, title=title, x_label=x_label, y_label=y_label)
+    def plot_catplot_swarm(self, x, y, hue=None, df=None, figsize=(10,6), title="Categorical Plot", x_label=None, y_label=None):
+        self.plot_catplot(x, y, hue=hue, df=df, kind='swarm', figsize=figsize, title=title, x_label=x_label, y_label=y_label)
+    def plot_catplot_box(self, x, y, hue=None, df=None, figsize=(10,6), title="Categorical Plot", x_label=None, y_label=None):
+        self.plot_catplot(x, y, hue=hue, df=df, kind='box', figsize=figsize, title=title, x_label=x_label, y_label=y_label)
+    def plot_catplot_violin(self, x, y, hue=None, df=None, figsize=(10,6), title="Categorical Plot", x_label=None, y_label=None):
+        self.plot_catplot(x, y, hue=hue, df=df, kind='violin', figsize=figsize, title=title, x_label=x_label, y_label=y_label)
+
+    def plot_jointplot(self, x, y, kind="scatter", df=None, title="Joint Plot", x_label=None, y_label=None):
+        sns.jointplot(data=df if df is not None else self._file.data, x=x, y=y, kind=kind)
+        plt.suptitle(title, y=1.02)
+        plt.xlabel(x_label if x_label is not None else str(x))
+        plt.ylabel(y_label if y_label is not None else str(y))
+        plt.show()
+    def plot_jointplot_scatter(self, x, y, df=None, title="Joint Plot", x_label=None, y_label=None):
+        self.plot_jointplot(x, y, kind="scatter", df=df, title=title, x_label=x_label, y_label=y_label)
+    def plot_jointplot_kde(self, x, y, df=None, title="Joint Plot", x_label=None, y_label=None):
+        self.plot_jointplot(x, y, kind="kde", df=df, title=title, x_label=x_label, y_label=y_label)
+    def plot_jointplot_hex(self, x, y, df=None, title="Joint Plot", x_label=None, y_label=None):
+        self.plot_jointplot(x, y, kind="hex", df=df, title=title, x_label=x_label, y_label=y_label)
+
+class data_math_visual_generator(data_visual_generator):
+    def drop_missing_values(self, axis):
+        """Drop missing values along the given axis."""
+        self._file.data = self._file.data.dropna(axis=axis)
+
+    def fill_missing_values(self, value):
+        """Fill missing values with the given value."""
+        self._file.data = self._file.data.fillna(value)
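+
+    # Usage sketch (editor's illustration, not part of the original patch):
+    #
+    #   gen = data_math_visual_generator("data.csv")   # hypothetical file
+    #   gen.fill_missing_values(0)
+    #   gen.normalize_data()   # min-max scaling, defined below
+    #   gen.plot_heatmap()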
+
+    def remove_duplicates(self):
+        """Remove duplicated rows."""
+        self._file.data = self._file.data.drop_duplicates()
+
+    def standardize_data(self):
+        """Standardize the data (zero mean, unit variance)."""
+        self._file.data = (self._file.data - self._file.data.mean()) / self._file.data.std()
+
+    def normalize_data(self):
+        """Normalize the data to the [0, 1] range (min-max scaling)."""
+        self._file.data = (self._file.data - self._file.data.min()) / (self._file.data.max() - self._file.data.min())
+
+# region image augmentation
+
+NDARRAY_ANY = TypeVar("NDARRAY_ANY")
+class BasicAugmentConfig(BaseModel, ABC):
+    name: str = "unknown"
+    @abstractmethod
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        '''
+        result:
+            (change config, image)
+        '''
+        raise NotImplementedError()
+class ResizeAugmentConfig(BasicAugmentConfig):
+    width: Optional[int] = None
+    height: Optional[int] = None
+    name: str = "resize"
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        width = self.width
+        height = self.height
+        if width is None and height is None:
+            rangewidth = origin.width
+            rangeheight = origin.height
+            width = rangewidth + random.randint(
+                (-rangewidth*random.random()).__floor__(),
+                (rangewidth*random.random()).__floor__()
+            )
+            height = rangeheight + random.randint(
+                (-rangeheight*random.random()).__floor__(),
+                (rangeheight*random.random()).__floor__()
+            )
+        elif width is None:
+            width = origin.width
+        elif height is None:
+            height = origin.height
+        change_config = {
+            "width":width,
+            "height":height
+        }
+        return (change_config, ImageObject(origin.get_resize_image(abs(width), abs(height))))
+class ClipAugmentConfig(BasicAugmentConfig):
+    mini: Union[float, NDARRAY_ANY] = 0
+    maxi: Union[float, NDARRAY_ANY] = 255
+    name: str = "clip"
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        mini = self.mini
+        maxi = self.maxi
+        if isinstance(mini, ImageObject):
+            mini = mini.get_array()
+        if isinstance(maxi, ImageObject):
+            maxi = maxi.get_array()
+        change_config = {
+            "mini":mini,
+            "maxi":maxi
+        }
+        return (change_config, ImageObject(origin.clip(mini, maxi)))
+class NormalizeAugmentConfig(BasicAugmentConfig):
+    name: str = "normalize"
+    mini: Optional[Union[NDARRAY_ANY, float]] = 0
+    maxi: Optional[Union[NDARRAY_ANY, float]] = 255
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        change_config = {
+            "mini":self.mini,
+            "maxi":self.maxi
+        }
+        return (change_config, ImageObject(origin.normalize(self.mini, self.maxi)))
+class StandardizeAugmentConfig(BasicAugmentConfig):
+    name: str = "standardize"
+    mean: Optional[Union[NDARRAY_ANY, float]] = 0
+    std: Optional[Union[NDARRAY_ANY, float]] = 1
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        change_config = {
+            "mean":origin.get_array().mean(),
+            "std":origin.get_array().std()
+        }
+        return (change_config, ImageObject(origin.standardize(self.mean, self.std)))
+class FlipAugmentConfig(BasicAugmentConfig):
+    name: str = "flip"
+    axis: Literal[-1, 1, 0] = 1
+    '''
+    1:
+        vertical
+    0:
+        horizontal
+    -1:
+        both
+    '''
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        change_config = {
+            "axis":self.axis
+        }
+        return (change_config, ImageObject(origin.flip(self.axis)))
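+
+# Usage sketch (editor's illustration): every config implements the same
+# augment(ImageObject) -> (change_config, ImageObject) contract, so a single
+# step can also be applied on its own:
+#
+#   changes, flipped = FlipAugmentConfig(axis=1).augment(ImageObject("in.png"))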
+class CropAugmentConfig(BasicAugmentConfig):
+    name: str = "crop"
+    lbx: Optional[int] = None
+    lby: Optional[int] = None
+    width: Optional[int] = None
+    height: Optional[int] = None
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        lbx = self.lbx if self.lbx is not None else random.randint(0, origin.width)
+        lby = self.lby if self.lby is not None else random.randint(0, origin.height)
+        width = self.width if self.width is not None else random.randint(1, origin.width - lbx)
+        height = self.height if self.height is not None else random.randint(1, origin.height - lby)
+        change_config = {
+            "lbx":lbx,
+            "lby":lby,
+            "width":width,
+            "height":height
+        }
+        return (change_config, ImageObject(origin.sub_image_with_rect((lbx, lby, width, height))))
+class FilterAugmentConfig(BasicAugmentConfig):
+    name: str = "filter"
+    ddepth: int = -1
+    kernal: NDARRAY_ANY = cv2.getGaussianKernel(3, 1)
+    def get_gaussian_kernal(self, kernal_size: int, sigma: float):
+        return cv2.getGaussianKernel(kernal_size, sigma)
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        change_config = {
+            "ddepth":self.ddepth,
+            "kernal":self.kernal
+        }
+        return (change_config, ImageObject(origin.filter(self.ddepth, self.kernal)))
+class ColorSpaceAugmentConfig(BasicAugmentConfig):
+    name: str = "color_space"
+    space: int = cv2.COLOR_BGR2GRAY
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        change_config = {
+            "color_space":self.space
+        }
+        return (change_config, ImageObject(origin.convert_to(self.space)))
+class LightingAugmentConfig(BasicAugmentConfig):
+    name: str = "lighting"
+    lighting: Optional[int] = None
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        lighting = self.lighting if self.lighting is not None else random.randint(0, 50)
+        change_config = {
+            "lighting":lighting
+        }
+        return (change_config, ImageObject(cv2.add(origin.image, lighting)))
+class DarkingAugmentConfig(BasicAugmentConfig):
+    name: str = "darking"
+    darking: Optional[FloatBetween01] = None
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        darking = self.darking if self.darking is not None else random.uniform(0.1, 1.0)
+        change_config = {
+            "darking":darking
+        }
+        return (change_config, ImageObject(origin*darking))
+class ContrastAugmentConfig(BasicAugmentConfig):
+    name: str = "contrast"
+    contrast: Optional[FloatBetween01] = None
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        contrast = self.contrast if self.contrast is not None else random.uniform(0.1, 1.0)
+        change_config = {
+            "contrast":contrast
+        }
+        contrast = int(contrast*255)
+        result = origin.image*(contrast / 127 + 1) - contrast
+        return (change_config, ImageObject(result))
+class SeparateSceneAugmentConfig(BasicAugmentConfig):
+    name: str = "separate_scene"
+    is_front: bool = True
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        change_config = {
+            "is_front":self.is_front
+        }
+        front, back = origin.SeparateFrontBackScenes()
+        target_0 = back if self.is_front else front
+        image = origin.image.copy()
+        image[target_0] = 0
+        return (change_config, ImageObject(image))
+class NoiseAugmentConfig(BasicAugmentConfig):
+    name: str = "noise"
+    mean: float = 0
+    sigma: float = 25
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        change_config = {
+            "mean":self.mean,
+            "sigma":self.sigma
+        }
+        return (change_config, ImageObject(
+            origin + get_new_noise(
+                None,
+                origin.height,
+                origin.width,
+                mean=self.mean,
+                sigma=self.sigma
+            )
+        ))
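+
+# Note (editor's comment): configs whose optional fields are left as None draw
+# a random value per call (crop rectangle, lighting offset, darking/contrast
+# factor, ...) and echo the drawn value back through the returned
+# change_config, so a randomized augmentation stays reproducible.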
+class VignettingAugmentConfig(BasicAugmentConfig):
+    name: str = "vignetting"
+    ratio_min_dist: float = 0.2
+    range_vignette: Tuple[float, float] = (0.2, 0.8)
+    random_sign: bool = False
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        change_config = {
+            "ratio_min_dist":self.ratio_min_dist,
+            "range_vignette":self.range_vignette,
+            "random_sign":self.random_sign
+        }
+        h, w = origin.shape[:2]
+        min_dist = np.array([h, w]) / 2 * np.random.random() * self.ratio_min_dist
+
+        # Distance-to-center matrices along both axes
+        x, y = np.meshgrid(np.linspace(-w/2, w/2, w), np.linspace(-h/2, h/2, h))
+        x, y = np.abs(x), np.abs(y)
+
+        # Build the vignette mask along both axes
+        x = (x - min_dist[0]) / (np.max(x) - min_dist[0])
+        x = np.clip(x, 0, 1)
+        y = (y - min_dist[1]) / (np.max(y) - min_dist[1])
+        y = np.clip(y, 0, 1)
+
+        # Draw a random vignette strength
+        vignette = (x + y) / 2 * np.random.uniform(self.range_vignette[0], self.range_vignette[1])
+        vignette = np.tile(vignette[..., None], [1, 1, 3])
+
+        sign = 2 * (np.random.random() < 0.5) * (self.random_sign) - 1
+        return (change_config, ImageObject(origin * (1 + sign * vignette)))
+class LensDistortionAugmentConfig(BasicAugmentConfig):
+    name: str = "lens_distortion"
+    d_coef: Tuple[
+        float, float, float, float, float
+    ] = (0.15, 0.15, 0.1, 0.1, 0.05)
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        change_config = {
+            "d_coef":self.d_coef
+        }
+        # Image height and width
+        h, w = origin.shape[:2]
+
+        # Diagonal length, used as the focal length
+        f = (h ** 2 + w ** 2) ** 0.5
+
+        # Camera matrix projecting the image into Cartesian coordinates
+        K = np.array([[f, 0, w / 2],
+                      [0, f, h / 2],
+                      [0, 0, 1]])
+
+        d_coef = self.d_coef * np.random.random(5)  # magnitudes
+        d_coef = d_coef * (2 * (np.random.random(5) < 0.5) - 1)  # signs
+        # Derive a new camera matrix from the distortion parameters
+        M, _ = cv2.getOptimalNewCameraMatrix(K, d_coef, (w, h), 0)
+
+        # Lookup tables for remapping the camera image
+        remap = cv2.initUndistortRectifyMap(K, d_coef, None, M, (w, h), 5)
+
+        # Remap the original image onto the new one
+        return (change_config, ImageObject(cv2.remap(origin.image, *remap, cv2.INTER_LINEAR)))
+class RotationAugmentConfig(BasicAugmentConfig):
+    name: str = "rotation"
+    angle: Optional[float] = None
+    scale: float = 1.0
+    center: Optional[Tuple[int, int]] = None
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        angle = self.angle if self.angle is not None else random.uniform(-30, 30)
+        center = self.center if self.center is not None else (origin.width // 2, origin.height // 2)
+        change_config = {
+            "angle": angle,
+            "scale": self.scale,
+            "center": center
+        }
+        # Rotation matrix
+        rotation_matrix = cv2.getRotationMatrix2D(center, angle, self.scale)
+        # Apply the affine rotation
+        rotated_image = cv2.warpAffine(origin.image, rotation_matrix, (origin.width, origin.height))
+        return (change_config, ImageObject(rotated_image))
+class BlurAugmentConfig(BasicAugmentConfig):
+    name: str = "blur"
+    kernel_size: Tuple[int, int] = (5, 5)
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        change_config = {
+            "kernel_size": self.kernel_size
+        }
+        # Mean blur
+        blurred_image = cv2.blur(origin.image, self.kernel_size)
+        return (change_config, ImageObject(blurred_image))
+class MedianBlurAugmentConfig(BasicAugmentConfig):
+    name: str = "median_blur"
+    ksize: int = 5
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        change_config = {
+            "ksize": self.ksize
+        }
+        # Median blur
+        blurred_image = cv2.medianBlur(origin.image, self.ksize)
+        return (change_config, ImageObject(blurred_image))
+class SaturationAugmentConfig(BasicAugmentConfig):
+    name: str = "saturation"
+    factor: Optional[float] = None
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        factor = self.factor if self.factor is not None else random.uniform(0.5, 1.5)
+        change_config = {
+            "factor": factor
+        }
+        # Convert to the HSV color space
+        hsv = cv2.cvtColor(origin.image, cv2.COLOR_BGR2HSV).astype(np.float32)
+        # Scale the saturation channel
+        hsv[:, :, 1] = hsv[:, :, 1] * factor
+        hsv[:, :, 1] = np.clip(hsv[:, :, 1], 0, 255)
+        # Convert back to BGR
+        result = cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2BGR)
+        return (change_config, ImageObject(result))
+class HueAugmentConfig(BasicAugmentConfig):
+    name: str = "hue"
+    shift: Optional[int] = None
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        shift = self.shift if self.shift is not None else random.randint(-20, 20)
+        change_config = {
+            "shift": shift
+        }
+        # Convert to the HSV color space
+        hsv = cv2.cvtColor(origin.image, cv2.COLOR_BGR2HSV).astype(np.float32)
+        # Shift the hue channel (OpenCV hue range is 0-179)
+        hsv[:, :, 0] = (hsv[:, :, 0] + shift) % 180
+        # Convert back to BGR
+        result = cv2.cvtColor(hsv.astype(np.uint8), cv2.COLOR_HSV2BGR)
+        return (change_config, ImageObject(result))
+class GammaAugmentConfig(BasicAugmentConfig):
+    name: str = "gamma"
+    gamma: Optional[float] = None
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        gamma = self.gamma if self.gamma is not None else random.uniform(0.5, 2.0)
+        change_config = {
+            "gamma": gamma
+        }
+        # Apply gamma correction via a lookup table
+        inv_gamma = 1.0 / gamma
+        table = np.array([((i / 255.0) ** inv_gamma) * 255 for i in range(256)]).astype(np.uint8)
+        result = cv2.LUT(origin.image, table)
+        return (change_config, ImageObject(result))
+class PerspectiveTransformAugmentConfig(BasicAugmentConfig):
+    name: str = "perspective"
+    intensity: Optional[float] = None
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        intensity = self.intensity if self.intensity is not None else random.uniform(0.05, 0.1)
+        change_config = {
+            "intensity": intensity
+        }
+        h, w = origin.shape[:2]
+
+        # Source corners
+        src_points = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
+
+        # Randomly perturbed destination corners
+        dst_points = np.float32([
+            [0 + random.uniform(-intensity * w, intensity * w), 0 + random.uniform(-intensity * h, intensity * h)],
+            [w + random.uniform(-intensity * w, intensity * w), 0 + random.uniform(-intensity * h, intensity * h)],
+            [0 + random.uniform(-intensity * w, intensity * w), h + random.uniform(-intensity * h, intensity * h)],
+            [w + random.uniform(-intensity * w, intensity * w), h + random.uniform(-intensity * h, intensity * h)]
+        ])
+
+        # Perspective transform matrix
+        M = cv2.getPerspectiveTransform(src_points, dst_points)
+
+        # Apply the perspective warp
+        result = cv2.warpPerspective(origin.image, M, (w, h))
+        return (change_config, ImageObject(result))
+class ElasticTransformAugmentConfig(BasicAugmentConfig):
+    name: str = "elastic"
+    alpha: float = 50
+    sigma: float = 5
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        change_config = {
+            "alpha": self.alpha,
+            "sigma": self.sigma
+        }
+        h, w = origin.shape[:2]
+
+        # Random displacement fields
+        dx = np.random.rand(h, w) * 2 - 1
+        dy = np.random.rand(h, w) * 2 - 1
+
+        # Smooth the displacement fields with a Gaussian blur
+        dx = cv2.GaussianBlur(dx, (0, 0), self.sigma)
+        dy = cv2.GaussianBlur(dy, (0, 0), self.sigma)
+
+        # Scale the displacements
+        dx = dx * self.alpha
+        dy = dy * self.alpha
+
+        # Build the coordinate grid
+        x, y = np.meshgrid(np.arange(w), np.arange(h))
+
+        # Apply the displacements
+        indices_x = np.clip(x + dx, 0, w - 1).astype(np.float32)
+        indices_y = np.clip(y + dy, 0, h - 1).astype(np.float32)
+
+        # Remap
+        result = cv2.remap(origin.image, indices_x, indices_y, interpolation=cv2.INTER_LINEAR)
+        return (change_config, ImageObject(result))
+class ChannelShuffleAugmentConfig(BasicAugmentConfig):
+    name: str = "channel_shuffle"
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        # Draw a random channel order
+        channels = list(range(origin.image.shape[2]))
+        random.shuffle(channels)
+        change_config = {
+            "channels": channels
+        }
+        # Reorder the channels
+        result = origin.image[:, :, channels]
+        return (change_config, ImageObject(result))
+class MotionBlurAugmentConfig(BasicAugmentConfig):
+    name: str = "motion_blur"
+    kernel_size: int = 15
+    angle: Optional[float] = None
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        angle = self.angle if self.angle is not None else random.uniform(0, 360)
+        change_config = {
+            "kernel_size": self.kernel_size,
+            "angle": angle
+        }
+        # Build the motion-blur kernel
+        kernel = np.zeros((self.kernel_size, self.kernel_size))
+        center = self.kernel_size // 2
+
+        # Angle in radians
+        rad = np.deg2rad(angle)
+
+        # Endpoint offsets of the line through the kernel center
+        x = np.cos(rad) * center
+        y = np.sin(rad) * center
+
+        # Draw the line across the kernel
+        cv2.line(kernel,
+                 (center - int(np.round(x)), center - int(np.round(y))),
+                 (center + int(np.round(x)), center + int(np.round(y))),
+                 1, thickness=1)
+
+        # Normalize the kernel
+        kernel = kernel / np.sum(kernel)
+
+        # Convolve
+        result = cv2.filter2D(origin.image, -1, kernel)
+        return (change_config, ImageObject(result))
+class SolarizeAugmentConfig(BasicAugmentConfig):
+    name: str = "solarize"
+    threshold: Optional[int] = None
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        threshold = self.threshold if self.threshold is not None else random.randint(100, 200)
+        change_config = {
+            "threshold": threshold
+        }
+        # Invert pixels above the threshold
+        result = origin.image.copy()
+        mask = origin.image > threshold
+        result[mask] = 255 - result[mask]
+        return (change_config, ImageObject(result))
+class PosterizeAugmentConfig(BasicAugmentConfig):
+    name: str = "posterize"
+    bits: Optional[int] = None
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        bits = self.bits if self.bits is not None else random.randint(3, 7)
+        change_config = {
+            "bits": bits
+        }
+        # Posterize: keep the top `bits` bits of each channel
+        mask = 256 - (1 << (8 - bits))
+        result = origin.image & mask
+        return (change_config, ImageObject(result))
+class InvertAugmentConfig(BasicAugmentConfig):
+    name: str = "invert"
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        change_config = {}
+        # Invert the colors
+        result = 255 - origin.image
+        return (change_config, ImageObject(result))
+class EqualizationAugmentConfig(BasicAugmentConfig):
+    name: str = "equalize"
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        change_config = {}
+        # Histogram-equalize each channel
+        result = origin.image.copy()
+        if len(origin.shape) > 2 and origin.shape[2] > 1:
+            for i in range(origin.shape[2]):
+                result[:, :, i] = cv2.equalizeHist(origin.image[:, :, i])
+        else:
+            result = cv2.equalizeHist(origin.image)
+        return (change_config, ImageObject(result))
+class CutoutAugmentConfig(BasicAugmentConfig):
+    name: str = "cutout"
+    n_holes: int = 1
+    length: Optional[int] = None
+    @override
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        length = self.length if self.length is not None else min(origin.width, origin.height) // 4
+        change_config = {
+            "n_holes": self.n_holes,
+            "length": length
+        }
+
+        result = origin.image.copy()
+        h, w = origin.shape[:2]
+
+        for _ in range(self.n_holes):
+            # Pick a random center
+            y = np.random.randint(h)
+            x = np.random.randint(w)
+
+            # Clamp the rectangle to the image bounds
+            y1 = np.clip(y - length // 2, 0, h)
+            y2 = np.clip(y + length // 2, 0, h)
+            x1 = np.clip(x - length // 2, 0, w)
+            x2 = np.clip(x + length // 2, 0, w)
+
+            # Zero out the rectangle
+            result[y1:y2, x1:x2] = 0
+
+        return (change_config, ImageObject(result))
+# Config.name -> (field, value)
+type ChangeConfig = Dict[str, Dict[str, Any]]
+# Config.name -> image
+type ResultImageObjects = Dict[str, ImageObject]
+class ImageAugmentConfig(BaseModel):
+    resize:     Optional[ResizeAugmentConfig] = None
+    clip:       Optional[ClipAugmentConfig] = None
+    normalize:  Optional[NormalizeAugmentConfig] = None
+    standardize:Optional[StandardizeAugmentConfig] = None
+    flip:       Optional[FlipAugmentConfig] = None
+    crop:       Optional[CropAugmentConfig] = None
+    filters:    Sequence[FilterAugmentConfig] = []
+    colorspace: Optional[ColorSpaceAugmentConfig] = None
+    lighting:   Optional[LightingAugmentConfig] = None
+    darking:    Optional[DarkingAugmentConfig] = None
+    contrast:   Optional[ContrastAugmentConfig] = None
+    separate_scene: Literal[0, 1, 2, 3] = 0
+    '''
+    None:
+        0
+    front:
+        1
+    back:
+        2
+    both:
+        3
+    '''
+    noise:      Optional[NoiseAugmentConfig] = None
+    vignette:   Optional[VignettingAugmentConfig] = None
+    lens_distortion: Optional[LensDistortionAugmentConfig] = None
+    rotation:   Optional[RotationAugmentConfig] = None
+    blur:       Optional[BlurAugmentConfig] = None
+    median_blur:Optional[MedianBlurAugmentConfig] = None
+    saturation: Optional[SaturationAugmentConfig] = None
+    hue:        Optional[HueAugmentConfig] = None
+    gamma:      Optional[GammaAugmentConfig] = None
+    perspective:Optional[PerspectiveTransformAugmentConfig] = None
+    elastic:    Optional[ElasticTransformAugmentConfig] = None
+    channel_shuffle: Optional[ChannelShuffleAugmentConfig] = None
+    motion_blur:Optional[MotionBlurAugmentConfig] = None
+    solarize:   Optional[SolarizeAugmentConfig] = None
+    posterize:  Optional[PosterizeAugmentConfig] = None
+    invert:     Optional[InvertAugmentConfig] = None
+    log_call:   Optional[Callable[[Union[str, Dict[str, Any]]], None]] = None
+
+    def get_all_configs(self) -> List[BasicAugmentConfig]:
+        result = [
+            self.resize,
+            self.clip,
+            self.normalize,
+            self.standardize,
+            self.flip,
+            self.crop,
+            self.colorspace,
+            self.lighting,
+            self.darking,
+            self.contrast,
+            self.noise,
+            self.vignette,
+            self.lens_distortion,
+            self.rotation,
+            self.blur,
+            self.median_blur,
+            self.saturation,
+            self.hue,
+            self.gamma,
+            self.perspective,
+            self.elastic,
+            self.channel_shuffle,
+            self.motion_blur,
+            self.solarize,
+            self.posterize,
+            self.invert
+        ]
+        result.extend(self.filters)
+        if self.separate_scene&1:
+            result.append(SeparateSceneAugmentConfig(is_front=True, name="front_scene"))
+        if self.separate_scene&2:
+            result.append(SeparateSceneAugmentConfig(is_front=False, name="back_scene"))
+        return result
+
+    def _inject_log(self, *args, **kwargs):
+        if self.log_call is not None:
+            self.log_call(*args, **kwargs)
+
+    def augment(
+        self,
+        origin: ImageObject
+        ) -> Tuple[ChangeConfig, ResultImageObjects]:
+        result: Dict[str, ImageObject] = {}
+        result_change_config: Dict[str, Dict[str, Any]] = {}
+        augment_configs: List[BasicAugmentConfig] = self.get_all_configs()
+        for item in augment_configs:
+            if item is not None:
+                result_change_config[item.name], result[item.name] = item.augment(ImageObject(origin.image))
+                self._inject_log(f"augmentation<{item.name}> change config: {result_change_config[item.name]}")
+        return (result_change_config, result)
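+
+    # Usage sketch (editor's illustration, not part of the original patch):
+    #
+    #   config = ImageAugmentConfig(flip=FlipAugmentConfig(),
+    #                               noise=NoiseAugmentConfig(sigma=15))
+    #   changes, results = config.augment(ImageObject("in.png"))   # hypothetical path
+    #   results["flip"].save_image("out/flip.png", True)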
+    def augment_to(
+        self,
+        input: Union[tool_file, str, ImageObject, np.ndarray, PILImage, PILImageFile],
+        output_dir: tool_file_or_str,
+        *,
+        # If the output directory does not exist, must_exists_path is invoked;
+        # if it exists but is not a directory, its parent directory is used
+        must_output_dir_exist: bool = False,
+        output_file_name: str = "output.png",
+        callback: Optional[Action[ChangeConfig]] = None,
+        ) -> ResultImageObjects:
+        # Set up environment and variables
+        origin_image: ImageObject = self.__init_origin_image(input)
+        result_dir: tool_file = self.__init_result_dir(output_dir, must_output_dir_exist)
+        # Augment
+        self._inject_log(f"output<{output_file_name}> augmentation started")
+        change_config, result = self._inject_augment(
+            origin_image=origin_image,
+            result_dir=result_dir,
+            output_file_name=output_file_name,
+        )
+        # Result
+        if callback is not None:
+            callback(change_config)
+        return result
+    def augment_from_dir_to(
+        self,
+        input_dir: Union[tool_file, str],
+        output_dir: tool_file_or_str,
+        *,
+        # If the output directory does not exist, must_exists_path is invoked;
+        # if it exists but is not a directory, its parent directory is used
+        must_output_dir_exist: bool = False,
+        callback: Optional[Action2[tool_file, ChangeConfig]] = None,
+        ) -> Dict[str, List[ImageObject]]:
+        # Set up environment and variables
+        origin_images: tool_file = Wrapper2File(input_dir)
+        result_dir: tool_file = self.__init_result_dir(output_dir, must_output_dir_exist)
+        if origin_images.exists() is False or origin_images.is_dir() is False:
+            raise FileExistsError(f"input_dir<{origin_images}> does not exist or is not a directory")
+        # augment
+        result: Dict[str, List[ImageObject]] = {}
+        for image_file in origin_images.dir_tool_file_iter():
+            if is_image_file(Unwrapper2Str(image_file)) is False:
+                continue
+            change_config, curResult = self._inject_augment(
+                origin_image=WrapperFile2CVEX(image_file).load(),
+                result_dir=result_dir,
+                output_file_name=image_file.get_filename(),
+            )
+            # Append each single result
+            for key in curResult:
+                if key in result:
+                    result[key].append(curResult[key])
+                else:
+                    result[key] = [curResult[key]]
+            # Invoke the callback
+            if callback is not None:
+                callback(image_file, change_config)
+        # Result
+        return result
+    def augment_from_images_to(
+        self,
+        inputs: Sequence[ImageObject],
+        output_dir: tool_file_or_str,
+        *,
+        # If the output directory does not exist, must_exists_path is invoked;
+        # if it exists but is not a directory, its parent directory is used
+        must_output_dir_exist: bool = False,
+        callback: Optional[Action2[ImageObject, ChangeConfig]] = None,
+        fileformat: str = "{}.jpg",
+        indexbuilder: type = int
+        ) -> Dict[str, List[ImageObject]]:
+        # Init env and vars
+        result_dir: tool_file = self.__init_result_dir(output_dir, must_output_dir_exist)
+        index: Any = indexbuilder()
+        # augment
+        result: Dict[str, List[ImageObject]] = {}
+        for image in inputs:
+            current_output_name = fileformat.format(index)
+            change_config, curResult = self._inject_augment(
+                origin_image=image,
+                result_dir=result_dir,
+                output_file_name=current_output_name,
+            )
+            # append single result
+            for key in curResult:
+                if key in result:
+                    result[key].append(curResult[key])
+                else:
+                    result[key] = [curResult[key]]
+            index += 1
+            # call feedback
+            if callback is not None:
+                callback(image, change_config)
+        # result
+        return result
+    def __init_origin_image(self, input:Union[tool_file, str, ImageObject, np.ndarray, PILImage, PILImageFile]) -> ImageObject:
+        origin_image: ImageObject = None
+        # check
+        if isinstance(input, (tool_file, str)):
+            inputfile = WrapperFile2CVEX(input)
+            if inputfile.data is not None:
+                origin_image = inputfile.data
+            else:
+                origin_image = inputfile.load()
+        elif isinstance(input, (ImageObject, np.ndarray, PILImage, PILImageFile)):
+            origin_image = Wrapper2Image(input)
+        else:
+            raise TypeError(f"input<{input}> is not a supported type")
+        return origin_image
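+
+    # Usage sketch (editor's illustration): batch-augment a directory; each
+    # augmentation name becomes a subdirectory of the output directory:
+    #
+    #   config.augment_from_dir_to("raw_images", "augmented",   # hypothetical paths
+    #                              must_output_dir_exist=True)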
TypeError(f"input<{input}> is not support type") + return origin_image + def __init_result_dir(self, output_dir:tool_file_or_str, must_output_dir_exist:bool) -> tool_file: + if output_dir is None or isinstance(output_dir, loss_file): + return static_loss_file_dir + result_dir: tool_file = Wrapper2File(output_dir) + # check exist + stats: bool = True + if result_dir.exists() is False: + if must_output_dir_exist: + result_dir.must_exists_path() + else: + stats = False + if stats is False: + raise FileExistsError(f"output_dir<{result_dir}> is not exist") + # check dir stats + if result_dir.is_dir() is False: + if must_output_dir_exist: + result_dir.back_to_parent_dir() + else: + raise FileExistsError(f"output_dir<{result_dir}> is not a dir") + # result + return result_dir + + def _inject_augment( + self, + origin_image: ImageObject, + result_dir: tool_file, + output_file_name: str + ) -> Tuple[ChangeConfig, ResultImageObjects]: + self._inject_log(f"output<{output_file_name}> is start augment") + result_dict, result_images = self.augment(origin_image) + if not (result_dir is None or isinstance(result_dir, loss_file)): + for key, value in result_images.items(): + current_dir = result_dir|key + current_result_file = current_dir|output_file_name + value.save_image(current_result_file, True) + return result_dict, result_images + +def image_augent( + config:ImageAugmentConfig, + source,*args, **kwargs + ): + if isinstance(source, ImageObject): + return config.augment(source, *args, **kwargs) + +def get_config_of_gaussian_blur( + intensity: float, + blur_level: int = 3 + ) -> FilterAugmentConfig: + return FilterAugmentConfig( + name="gaussian_blur", + kernal=cv2.getGaussianKernel(blur_level, intensity) + ) +def get_config_of_smooth_blur(blur_level:int = 3): + result = get_config_of_gaussian_blur(0, blur_level) + result.name = "smooth_blur" + return result +def get_config_of_sharpen( + intensity: float, + sharpen_level: float = 1 + ) -> FilterAugmentConfig: + return FilterAugmentConfig( + name="sharpen", + kernal=np.array([ + [0, -sharpen_level, 0], + [-sharpen_level, intensity-sharpen_level, -sharpen_level], + [0, -sharpen_level, 0] + ]) + ) +def get_config_of_edge_enhance( + intensity: float, + sharpen_level: float = 8 + ) -> FilterAugmentConfig: + return FilterAugmentConfig( + name="edge_enhance", + kernal=np.array([ + [-intensity, -intensity, -intensity], + [-intensity, sharpen_level+intensity, -intensity], + [-intensity, -intensity, -intensity] + ]) + ) +def get_config_of_convert_to_gray() -> ColorSpaceAugmentConfig: + return ColorSpaceAugmentConfig( + name="convert_to_gray", + color_space=cv2.COLOR_BGR2GRAY + ) + +# region end + +# region image convert + +class BasicConvertConfig(BaseModel, ABC): + name: str = "unknown" + + @abstractmethod + def convert( + self, + origin: ImageObject + ) -> Tuple[Dict[str, Any], ImageObject]: + ''' + result: + (change config, image) + ''' + raise NotImplementedError() +class PNGConvertConfig(BasicConvertConfig): + name: str = "png" + compression_level: int = 6 # 0-9, 9为最高压缩率 + + @override + def convert( + self, + origin: ImageObject + ) -> Tuple[Dict[str, Any], ImageObject]: + change_config = { + "compression_level": self.compression_level + } + # 转换为PIL Image以使用其PNG保存功能 + pil_image = PILFromArray(cv2.cvtColor(origin.image, cv2.COLOR_BGR2RGB)) + # 创建内存文件对象 + buffer = BytesIO() + # 保存为PNG + pil_image.save(buffer, format='PNG', optimize=True, compress_level=self.compression_level) + # 从内存读取图像数据 + buffer.seek(0) + result = PILOpen(buffer) + # 转换回OpenCV格式 + result 
+
+# region image convert
+
+class BasicConvertConfig(BaseModel, ABC):
+    name: str = "unknown"
+
+    @abstractmethod
+    def convert(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        '''
+        result:
+            (change config, image)
+        '''
+        raise NotImplementedError()
+class PNGConvertConfig(BasicConvertConfig):
+    name: str = "png"
+    compression_level: int = 6  # 0-9; 9 is the highest compression
+
+    @override
+    def convert(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        change_config = {
+            "compression_level": self.compression_level
+        }
+        # Convert to a PIL Image to use its PNG encoder
+        pil_image = PILFromArray(cv2.cvtColor(origin.image, cv2.COLOR_BGR2RGB))
+        # In-memory buffer
+        buffer = BytesIO()
+        # Encode as PNG
+        pil_image.save(buffer, format='PNG', optimize=True, compress_level=self.compression_level)
+        # Read the encoded image back
+        buffer.seek(0)
+        result = PILOpen(buffer)
+        # Convert back to the OpenCV format
+        result = cv2.cvtColor(np.array(result), cv2.COLOR_RGB2BGR)
+        return (change_config, ImageObject(result))
+class JPGConvertConfig(BasicConvertConfig):
+    name: str = "jpg"
+    quality: int = 95  # 0-100; 100 is the highest quality
+
+    @override
+    def convert(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        change_config = {
+            "quality": self.quality
+        }
+        # Convert to a PIL Image
+        pil_image = PILFromArray(cv2.cvtColor(origin.image, cv2.COLOR_BGR2RGB))
+        # In-memory buffer
+        buffer = BytesIO()
+        # Encode as JPEG
+        pil_image.save(buffer, format='JPEG', quality=self.quality)
+        # Read the encoded image back
+        buffer.seek(0)
+        result = PILOpen(buffer)
+        # Convert back to the OpenCV format
+        result = cv2.cvtColor(np.array(result), cv2.COLOR_RGB2BGR)
+        return (change_config, ImageObject(result))
+class ICOConvertConfig(BasicConvertConfig):
+    name: str = "ico"
+    size: Tuple[int, int] = (16, 16)
+
+    @override
+    def convert(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        change_config = {
+            "size": self.size
+        }
+        return (change_config, ImageObject(origin.get_resize_image(*self.size)))
+class BMPConvertConfig(BasicConvertConfig):
+    name: str = "bmp"
+
+    @override
+    def convert(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        change_config = {}
+        # Encode to BMP directly with OpenCV
+        _, buffer = cv2.imencode('.bmp', origin.image)
+        # Decode back to an image
+        result = cv2.imdecode(buffer, cv2.IMREAD_COLOR)
+        return (change_config, ImageObject(result))
+class WebPConvertConfig(BasicConvertConfig):
+    name: str = "webp"
+    quality: int = 80  # 0-100; 100 is the highest quality
+
+    @override
+    def convert(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Any], ImageObject]:
+        change_config = {
+            "quality": self.quality
+        }
+        # Convert to a PIL Image
+        pil_image = PILFromArray(cv2.cvtColor(origin.image, cv2.COLOR_BGR2RGB))
+        # In-memory buffer
+        buffer = BytesIO()
+        # Encode as WebP
+        pil_image.save(buffer, format='WEBP', quality=self.quality)
+        # Read the encoded image back
+        buffer.seek(0)
+        result = PILOpen(buffer)
+        # Convert back to the OpenCV format
+        result = cv2.cvtColor(np.array(result), cv2.COLOR_RGB2BGR)
+        return (change_config, ImageObject(result))
+class ImageConvertConfig(BaseModel):
+    png:  Optional[PNGConvertConfig] = None
+    jpg:  Optional[JPGConvertConfig] = None
+    ico:  Optional[ICOConvertConfig] = None
+    bmp:  Optional[BMPConvertConfig] = None
+    webp: Optional[WebPConvertConfig] = None
+    log_call: Optional[Callable[[Union[str, Dict[str, Any]]], None]] = None
+
+    def get_all_configs(self) -> List[BasicConvertConfig]:
+        return [
+            self.png,
+            self.jpg,
+            self.ico,
+            self.bmp,
+            self.webp
+        ]
+
+    def _inject_log(self, *args, **kwargs):
+        if self.log_call is not None:
+            self.log_call(*args, **kwargs)
+
+    def convert(
+        self,
+        origin: ImageObject
+        ) -> Tuple[Dict[str, Dict[str, Any]], Dict[str, ImageObject]]:
+        result: Dict[str, ImageObject] = {}
+        result_change_config: Dict[str, Dict[str, Any]] = {}
+        convert_configs: List[BasicConvertConfig] = self.get_all_configs()
+
+        for item in convert_configs:
+            if item is not None:
+                result_change_config[item.name], result[item.name] = item.convert(ImageObject(origin.image))
+                self._inject_log(f"conversion<{item.name}> change config: {result_change_config[item.name]}")
+
+        return (result_change_config, result)
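+
+    # Usage sketch (editor's illustration, not part of the original patch):
+    #
+    #   conv = ImageConvertConfig(png=PNGConvertConfig(),
+    #                             webp=WebPConvertConfig(quality=70))
+    #   changes, images = conv.convert(ImageObject("in.bmp"))   # hypothetical path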
+
+    def convert_to(
+        self,
+        input: Union[tool_file, str, ImageObject, np.ndarray, PILImage, PILImageFile],
+        output_dir: tool_file_or_str,
+        *,
+        must_output_dir_exist: bool = False,
+        output_file_name: str = "output.png",
+        callback: Optional[Action[Dict[str, Dict[str, Any]]]] = None,
+        ) -> Dict[str, ImageObject]:
+        # Set up environment and variables
+        origin_image: ImageObject = self.__init_origin_image(input)
+        result_dir: tool_file = self.__init_result_dir(output_dir, must_output_dir_exist)
+
+        # Convert
+        self._inject_log(f"output<{output_file_name}> conversion started")
+        change_config, result = self._inject_convert(
+            origin_image=origin_image,
+            result_dir=result_dir,
+            output_file_name=output_file_name,
+        )
+
+        # Result
+        if callback is not None:
+            callback(change_config)
+        return result
+
+    def __init_origin_image(self, input: Union[tool_file, str, ImageObject, np.ndarray, PILImage, PILImageFile]) -> ImageObject:
+        origin_image: ImageObject = None
+        # check
+        if isinstance(input, (tool_file, str)):
+            inputfile = WrapperFile2CVEX(input)
+            if inputfile.data is not None:
+                origin_image = inputfile.data
+            else:
+                origin_image = inputfile.load()
+        elif isinstance(input, (ImageObject, np.ndarray, PILImage, PILImageFile)):
+            origin_image = Wrapper2Image(input)
+        else:
+            raise TypeError(f"input<{input}> is not a supported type")
+        return origin_image
+
+    def __init_result_dir(self, output_dir: tool_file_or_str, must_output_dir_exist: bool) -> tool_file:
+        if output_dir is None or isinstance(output_dir, loss_file):
+            return static_loss_file_dir
+        result_dir: tool_file = Wrapper2File(output_dir)
+        # check exist
+        stats: bool = True
+        if result_dir.exists() is False:
+            if must_output_dir_exist:
+                result_dir.must_exists_path()
+            else:
+                stats = False
+        if stats is False:
+            raise FileExistsError(f"output_dir<{result_dir}> does not exist")
+        # check dir stats
+        if result_dir.is_dir() is False:
+            if must_output_dir_exist:
+                result_dir.back_to_parent_dir()
+            else:
+                raise FileExistsError(f"output_dir<{result_dir}> is not a directory")
+        # result
+        return result_dir
+
+    def _inject_convert(
+        self,
+        origin_image: ImageObject,
+        result_dir: tool_file,
+        output_file_name: str
+        ) -> Tuple[Dict[str, Dict[str, Any]], Dict[str, ImageObject]]:
+        self._inject_log(f"output<{output_file_name}> conversion started")
+        result_dict, result_images = self.convert(origin_image)
+        if not (result_dir is None or isinstance(result_dir, loss_file)):
+            for key, value in result_images.items():
+                current_dir = result_dir|key
+                current_result_file = current_dir|output_file_name
+                value.save_image(current_result_file, True)
+        return result_dict, result_images
+
+def image_convert(
+    config: ImageConvertConfig,
+    source,
+    *args,
+    **kwargs
+):
+    if isinstance(source, ImageObject):
+        return config.convert(source, *args, **kwargs)
+
+# region end
+
diff --git a/Convention/Runtime/Visual/OpenCV.py b/Convention/Runtime/Visual/OpenCV.py
new file mode 100644
index 0000000..0c9aac9
--- /dev/null
+++ b/Convention/Runtime/Visual/OpenCV.py
@@ -0,0 +1,1433 @@
+from ..Config import *
+
+try:
+    import cv2 as base
+    import cv2.data as BaseData
+except ImportError:
+    InternalImportingThrow("OpenCV", ["opencv-python"])
+    raise
+try:
+    from PIL import ImageFile, Image
+except ImportError:
+    InternalImportingThrow("OpenCV", ["Pillow"])
+    raise
+
+from ..File import tool_file
+
+# OpenCV image format is BGR
+# PIL image format is RGB
+
+VideoWriter = base.VideoWriter
+def mp4_with_MPEG4_fourcc() -> int:
+    return VideoWriter.fourcc(*"mp4v")
+def avi_with_Xvid_fourcc() -> int:
+    return VideoWriter.fourcc(*"XVID")
+def avi_with_DivX_fourcc() -> int:
+    return VideoWriter.fourcc(*"DIVX")
+def avi_with_MJPG_fourcc() -> int:
+    return VideoWriter.fourcc(*"MJPG")
+def mp4_or_avi_with_H264_fourcc() -> int:
+    return VideoWriter.fourcc(*"X264")
+def avi_with_H265_fourcc() -> int:
+    return VideoWriter.fourcc(*"H264")
+def wmv_with_WMV1_fourcc() -> int:
+    return VideoWriter.fourcc(*"WMV1")
+def wmv_with_WMV2_fourcc() -> int:
+    return VideoWriter.fourcc(*"WMV2")
+def oggTheora_with_THEO_fourcc() -> int:
+    return VideoWriter.fourcc(*"THEO")
+def flv_with_FLV1_fourcc() -> int:
+    return VideoWriter.fourcc(*"FLV1")
+class VideoWriterInstance(VideoWriter):
+    def __init__(
+        self,
+        file_name: Union[tool_file, str],
+        fourcc: int,
+        fps: float,
+        frame_size: tuple[int, int],
+        is_color: bool = True
+        ):
+        super().__init__(str(file_name), fourcc, fps, frame_size, is_color)
+    def __del__(self):
+        self.release()
+
+AffineFeature_feature2D = base.AffineFeature.create
+SIFT_Feature2D = base.SIFT.create
+ORB_Feature2D = base.ORB.create
+BRISK_Feature2D = base.BRISK.create
+AKAZE_Feature2D = base.AKAZE.create
+KAZE_Feature2D = base.KAZE.create
+MSER_Feature2D = base.MSER.create
+FastFeatureDetector_Feature2D = base.FastFeatureDetector.create
+AgastFeatureDetector_Feature2D = base.AgastFeatureDetector.create
+GFTTDetector_Feature2D = base.GFTTDetector.create
+SimpleBlobDetector_Feature2D = base.SimpleBlobDetector.create
+class Feature2DInstance[feature:base.Feature2D]:
+    def __init__(
+        self,
+        feature2D:Union[feature, Callable[[],feature]]
+        ):
+        if isinstance(feature2D, base.Feature2D):
+            self.mainFeature2D = feature2D
+        else:
+            self.mainFeature2D = feature2D()
+    def detect[Mat_or_Mats:Union[
+        MatLike,
+        base.UMat,
+        Sequence[MatLike],
+        Sequence[base.UMat]
+        ]](
+        self,
+        image: Mat_or_Mats,
+        mask: Optional[Mat_or_Mats] = None
+        ) -> Sequence[base.KeyPoint]:
+        return self.mainFeature2D.detect(image, mask)
+    def compute[Mat_or_Mats:Union[
+        MatLike,
+        base.UMat,
+        Sequence[MatLike],
+        Sequence[base.UMat]
+        ]](
+        self,
+        image: Mat_or_Mats,
+        keypoints: Optional[Sequence[base.KeyPoint]] = None,
+        descriptors: Optional[Mat_or_Mats] = None
+        ) -> Tuple[Sequence[base.KeyPoint], MatLike]:
+        return self.mainFeature2D.compute(image, keypoints, descriptors)
+    def detectAndCompute[_Mat:Union[
+        MatLike,
+        base.UMat,
+        ]](
+        self,
+        image: _Mat,
+        mask: Optional[_Mat] = None,
+        descriptors: Optional[_Mat] = None,
+        useProvidedKeypoints:bool = False
+        ) -> Tuple[Sequence[base.KeyPoint], MatLike]:
+        return self.mainFeature2D.detectAndCompute(image, mask, descriptors, useProvidedKeypoints)
+
+def wait_key(delay:int):
+    return base.waitKey(delay)
+def until_esc():
+    return wait_key(0)
+
+def is_current_key(key:str, *, wait_delay:int = 1):
+    return wait_key(wait_delay) & 0xFF == ord(key[0])
+
+class light_cv_view:
+    def __init__(self, filename_or_index:Union[str, tool_file, int]):
+        self.__capture: base.VideoCapture = None
+        self.stats: bool = True
+        self.retarget(filename_or_index)
+    def __del__(self):
+        self.release()
+
+    @override
+    def ToString(self):
+        return f"View<{self.width}x{self.height}>"
+
+    def __bool__(self):
+        return self.stats
+
+    def is_open(self):
+        return self.__capture.isOpened()
+
+    def release(self):
+        if self.__capture is not None:
+            self.__capture.release()
+    def retarget(self, filename_or_index:Union[str, tool_file, int]):
+        self.release()
+        if isinstance(filename_or_index, int):
+            self.__capture = base.VideoCapture(filename_or_index)
+        else:
+            self.__capture = base.VideoCapture(str(filename_or_index))
+        return self
+
+    def next_frame(self) -> MatLike:
+        self.stats, frame = self.__capture.read()
+        if self.stats:
+            return frame
+        else:
+            return None
+
+    def get_captrue_info(self, id:int):
+        return self.__capture.get(id)
+    def get_prop_pos_msec(self):
+        return self.get_captrue_info(0)
+    def get_prop_pos_frames(self):
+        return self.get_captrue_info(1)
+    def get_prop_avi_ratio(self):
+        return self.get_captrue_info(2)
+    def get_prop_frame_width(self):
+        return self.get_captrue_info(3)
+    def get_prop_frame_height(self):
+        return self.get_captrue_info(4)
+    def get_prop_fps(self):
+        return self.get_captrue_info(5)
+    def get_prop_fourcc(self):
+        return self.get_captrue_info(6)
+    def get_prop_frame_count(self):
+        return self.get_captrue_info(7)
+    def get_prop_format(self):
+        return self.get_captrue_info(8)
+    def get_prop_mode(self):
+        return self.get_captrue_info(9)
+    def get_prop_brightness(self):
+        return self.get_captrue_info(10)
+    def get_prop_contrast(self):
+        return self.get_captrue_info(11)
+    def get_prop_saturation(self):
+        return self.get_captrue_info(12)
+    def get_prop_hue(self):
+        return self.get_captrue_info(13)
+    def get_prop_gain(self):
+        return self.get_captrue_info(14)
+    def get_prop_exposure(self):
+        return self.get_captrue_info(15)
+    def get_prop_convert_rgb(self):
+        return self.get_captrue_info(16)
+
+    def setup_capture(self, id:int, value):
+        self.__capture.set(id, value)
+        return self
+    def set_prop_pos_msec(self, value:int):
+        return self.setup_capture(0, value)
+    def set_prop_pos_frames(self, value:int):
+        return self.setup_capture(1, value)
+    def set_prop_avi_ratio(self, value:float):
+        return self.setup_capture(2, value)
+    def set_prop_frame_width(self, value:int):
+        return self.setup_capture(3, value)
+    def set_prop_frame_height(self, value:int):
+        return self.setup_capture(4, value)
+    def set_prop_fps(self, value:int):
+        return self.setup_capture(5, value)
+    def set_prop_fourcc(self, value):
+        return self.setup_capture(6, value)
+    def set_prop_frame_count(self, value):
+        return self.setup_capture(7, value)
+    def set_prop_format(self, value):
+        return self.setup_capture(8, value)
+    def set_prop_mode(self, value):
+        return self.setup_capture(9, value)
+    def set_prop_brightness(self, value):
+        return self.setup_capture(10, value)
+    def set_prop_contrast(self, value):
+        return self.setup_capture(11, value)
+    def set_prop_saturation(self, value):
+        return self.setup_capture(12, value)
+    def set_prop_hue(self, value):
+        return self.setup_capture(13, value)
+    def set_prop_gain(self, value):
+        return self.setup_capture(14, value)
+    def set_prop_exposure(self, value):
+        return self.setup_capture(15, value)
+    def set_prop_convert_rgb(self, value:int):
+        return self.setup_capture(16, value)
+    def set_prop_rectification(self, value:int):
+        return self.setup_capture(17, value)
+
+    @property
+    def width(self) -> float:
+        return self.get_prop_frame_width()
+    @width.setter
+    def width(self, value:float) -> float:
+        self.set_prop_frame_width(value)
+        return value
+    @property
+    def height(self):
+        return self.get_prop_frame_height()
+    @height.setter
+    def height(self, value:float) -> float:
+        self.set_prop_frame_height(value)
+        return value
+
+    @property
+    def frame_size(self) -> Tuple[float, float]:
+        return self.get_prop_frame_width(), self.get_prop_frame_height()
+    @property
+    def shape(self):
+        return self.frame_size
+    @frame_size.setter
+    def frame_size(self, value:Tuple[float, float]) -> Tuple[float, float]:
+        self.set_prop_frame_width(value[0])
+        self.set_prop_frame_height(value[1])
+        return value
+
+class light_cv_camera(light_cv_view, any_class):
+    def __init__(self, index:int = 0):
+        self.writer: VideoWriter = None
+        super().__init__(int(index))
+
+    @override
+    def release(self):
+        super().release()
+        if self.writer is not None:
+            self.writer.release()
+
+    def current_frame(self):
+        return self.next_frame()
+
+    def recording(
+        self,
+        stop_pr: Callable[[], bool],
+        writer: Union[VideoWriter, Callable[[MatLike], Any]],
+        ):
+        writer_stats = False
+        if isinstance(writer, VideoWriter):
+            self.writer = writer
+            writer_stats = True
+        while self.is_open():
+            if stop_pr():
+                break
+            frame = self.current_frame()
+            base.imshow("__recording__", frame)
+            if writer_stats:
+                writer.write(frame)
+            else:
+                writer(frame)
+        base.destroyWindow("__recording__")
+        return self
+
+    @override
+    def ToString(self):
+        return f"Camera<{self.width}x{self.height}>"
+
+def get_zero_mask(shape, *args, **kwargs) -> MatLike:
+    return np.zeros(shape, *args, **kwargs)
+def get_one_mask(shape, value, *args, **kwargs) -> MatLike:
+    return np.full(shape, value, *args, **kwargs)
+
+class ImageObject(left_np_ndarray_reference):
+    @property
+    def __image(self) -> MatLike:
+        return self.ref_value
+    @__image.setter
+    def __image(self, value:MatLike) -> MatLike:
+        self.ref_value = value
+        return value
+
+    @overload
+    def __init__(self, imagePath:str, flags:Optional[int] = None):...
+    @overload
+    def __init__(self, image:tool_file, flags:Optional[int] = None):...
+    @overload
+    def __init__(self, camera:light_cv_camera):...
+    @overload
+    def __init__(self, image:MatLike, flags:Optional[int] = None):...
+    @overload
+    def __init__(self, image:Self):...
+    @overload
+    def __init__(self, image:Image.Image):...
+    @overload
+    def __init__(self, image:ImageFile.ImageFile):...
+    @overload
+    def __init__(self, image:np.ndarray):...
+    def __init__(
+        self,
+        image: Optional[Union[
+            str,
+            Self,
+            light_cv_camera,
+            tool_file,
+            MatLike,
+            np.ndarray,
+            ImageFile.ImageFile,
+            Image.Image
+        ]],
+        flags: Optional[Any] = None
+        ) -> None:
+        super().__init__()
+        self.__camera: light_cv_camera = None
+        self.current: MatLike = None
+        self.__gray: MatLike = None
+        if isinstance(image, light_cv_camera):
+            self.lock_from_camera(image)
+        else:
+            self.load_image(image, flags)
+
+    def internal_check_when_image_is_none_throw_error(self):
+        if self.image is None:
+            raise ValueError("Image is None")
+        return self
+
+    @override
+    def SymbolName(self):
+        return "Image"
+    @override
+    def ToString(self):
+        current = self.image
+        if current is None:
+            return "null"
+        return f"Image<{current.shape[1]}x{current.shape[0]}:\n"+str(
+            self.image)+"\n>"
+
+    @property
+    def camera(self) -> light_cv_camera:
+        if self.__camera is None or self.__camera.is_open() is False:
+            return None
+        else:
+            return self.__camera
+    @property
+    def image(self) -> MatLike:
+        if self.current is not None:
+            return self.current
+        elif self.camera is None:
+            return self.__image
+        else:
+            return self.__camera.current_frame()
+
+    @image.setter
+    def image(self, image: Optional[Union[
+        str,
+        Self,
+        tool_file,
+        MatLike,
+        np.ndarray,
+        ImageFile.ImageFile,
+        Image.Image
+    ]]):
+        self.load_image(image)
+
+    def load_from_nparray(
+        self,
+        array_: np.ndarray,
+        code: Optional[int] = None,
+        *args, **kwargs
+        ):
+        self.__gray = None
+        if code is None:
+            self.__image = array_
+        else:
+            self.__image = base.cvtColor(array_, code, *args, **kwargs).astype(np.uint8)
+        return self
+    def load_from_PIL_image(
+        self,
+        image: Image.Image,
+        code: Optional[int] = None,
+        *args, **kwargs
+        ):
+        if code is None:
+            self.load_from_nparray(np.array(image), *args, **kwargs)
+        else:
+            self.load_from_nparray(base.cvtColor(np.array(image), code, *args, **kwargs).astype(np.uint8))
+        return self
+    def load_from_PIL_ImageFile(
+        self,
+        image: ImageFile.ImageFile,
+        rect: Optional[Tuple[float, float, float, float]] = None
+        ):
+        return self.load_from_PIL_image(image.crop(rect))
+    def load_from_cv2_image(self, image: MatLike):
+        self.__gray = None
+        self.__image = image
+        return self
+    def lock_from_camera(self, camera: light_cv_camera):
+        self.__camera = camera
+        return self
+
+    @property
+    def height(self) -> int:
+        return self.shape[0]
+    @property
+    def width(self) -> int:
+        return self.shape[1]
+    @property
+    def channel_depth(self) -> int:
+        return self.shape[2]
+
+    @property
+    def pixel_count(self) -> int:
+        return self.image.size
+    @property
+    def dtype(self):
+        return self.image.dtype
+
+    def is_enable(self):
+        return self.image is not None
+    def is_invalid(self):
+        return self.is_enable() is False
+    def __bool__(self):
+        return self.is_enable()
+    def __MatLike__(self):
+        return self.image
+
+    @overload
+    def load_image(self, image:str, flags:int = -1):...
+    @overload
+    def load_image(self, image:tool_file, flags:int = -1):...
+    @overload
+    def load_image(self, image:light_cv_camera):...
+    @overload
+    def load_image(self, image:MatLike):...
+    @overload
+    def load_image(self, image:Self):...
+    @overload
+    def load_image(self, image:Image.Image):...
+    @overload
+    def load_image(self, image:ImageFile.ImageFile):...
+    def load_image(
+        self,
+        image: Optional[Union[
+            str,
+            tool_file,
+            Self,
+            MatLike,
+            np.ndarray,
+            ImageFile.ImageFile,
+            Image.Image
+        ]],
+        flags: int = -1
+        ):
+        """Load an image from the given source."""
+        if image is None:
+            self.__image = None
+            return self
+        elif isinstance(image, ImageObject):
+            self.__image = image.image
+        elif isinstance(image, MatLike):
+            self.__image = image
+        elif isinstance(image, np.ndarray):
+            self.load_from_nparray(image, flags)
+        elif isinstance(image, ImageFile.ImageFile):
+            self.load_from_PIL_ImageFile(image)
+        elif isinstance(image, Image.Image):
+            self.load_from_PIL_image(image, flags)
+        elif isinstance(image, loss_file):
+            self.__image = None
+        else:
+            self.__image = base.imread(str(image), flags)
+        return self
+    def save_image(
+        self,
+        save_path: Union[str, tool_file],
+        is_path_must_exist: bool = False
+        ) -> Self:
+        """Save the image to save_path."""
+        if isinstance(save_path, loss_file):
+            return self
+        if is_path_must_exist:
+            Wrapper2File(save_path).try_create_parent_path()
+        if self.is_enable():
+            base.imwrite(str(save_path), self.image)
+        return self
+
+    def show_image(
+        self,
+        window_name: str = "Image",
+        delay: Union[int,str] = 0,
+        image_show_func: Callable[[Self], None] = None,
+        *args, **kwargs
+        ):
+        """Display the image (or, for a camera-locked object, a live preview)."""
+        if self.is_invalid():
+            return self
+        if self.camera is not None:
+            while (wait_key(1) & 0xFF != ord(str(delay)[0])) and self.camera is not None:
+                # don't delete this line: self.image is the camera frame here, see the image property
+                self.current = self.image
+                if image_show_func is not None:
+                    image_show_func(self)
+                if self.current is not None:
+                    base.imshow(window_name, self.current)
+            # don't delete this line, see the image property
+            self.current = None
+        else:
+            base.imshow(window_name, self.image)
+            base.waitKey(int(delay), *args, **kwargs)
+        if base.getWindowProperty(window_name, base.WND_PROP_VISIBLE) > 0:
+            base.destroyWindow(window_name)
+        return self
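+
+    # Usage sketch (editor's illustration, not part of the original patch):
+    #
+    #   img = ImageObject("photo.jpg")              # hypothetical path
+    #   img.resize_image(640, 480)                  # see the Transform section below
+    #   img.save_image("out/photo_small.jpg", True)
+    #   ImageObject(light_cv_camera(0)).show_image("preview", delay='q')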
kwargs["delta"] = delta + if borderType is not None: + kwargs["borderType"] = borderType + gray = self.get_grayscale() + dx = base.Sobel(gray, base.CV_16S, 1, 0, **kwargs) + dy = base.Sobel(gray, base.CV_16S, 0, 1, **kwargs) + return ImageObject(dx).convert_scale_abs().merge_with_blending( + ImageObject(dy).convert_scale_abs(), (0.5, 0.5)) + def edge_detect_with_roberts(self): + kernelx = np.array([[-1,0],[0,1]], dtype=int) + kernely = np.array([[0,-1],[1,0]], dtype=int) + gray = self.get_grayscale() + dx = base.filter2D(gray, base.CV_16S, kernelx) + dy = base.filter2D(gray, base.CV_16S, kernely) + return ImageObject(dx).convert_scale_abs().merge_with_blending( + ImageObject(dy).convert_scale_abs(), (0.5, 0.5)) + def edge_detect_with_laplacian( + self, + kernalSize: int = 3 + ): + gray = self.get_grayscale() + return ImageObject(base.convertScaleAbs( + base.Laplacian(gray, base.CV_16S, ksize=kernalSize) + )) + def edge_detect_with_canny( + self, + threshold1: float, + threshold2: float, + **kwargs + ): + return ImageObject(base.Canny( + self.get_grayscale(), threshold1, threshold2, **kwargs + )) + + # 分离通道 + def split(self): + """分离通道""" + return base.split(self.image) + def split_to_image_object(self): + """分离通道""" + return [ImageObject(channel) for channel in self.split()] + @property + def channels(self): + return self.split() + @property + def blue_channel(self): + return self.channels[0] + @property + def green_channel(self): + return self.channels[1] + @property + def red_channel(self): + return self.channels[2] + @property + def alpha_channel(self): + return self.channels[3] + def get_blue_image(self): + return ImageObject(self.blue_channel) + def get_green_image(self): + return ImageObject(self.green_channel) + def get_red_image(self): + return ImageObject(self.red_channel) + def get_alpha_image(self): + return ImageObject(self.alpha_channel) + + # 混合通道 + def merge_channels_from_list(self, channels:List[MatLike]): + """合并通道""" + self.image = base.merge(channels) + return self + def merge_channels(self, blue:MatLike, green:MatLike, red:MatLike): + """合并通道""" + return self.merge_channels_from_list([blue, green, red]) + def merge_channel_list(self, bgr:List[MatLike]): + """合并通道""" + return self.merge_channels_from_list(bgr) + + # Transform + def get_resize_image(self, width:int, height:int): + if self.is_enable(): + return ImageObject(base.resize(self.image, (width, height))) + return None + def get_rotate_image(self, angle:float): + if self.is_invalid(): + return None + (h, w) = self.image.shape[:2] + center = (w // 2, h // 2) + M = base.getRotationMatrix2D(center, angle, 1.0) + return ImageObject(base.warpAffine(self.image, M, (w, h))) + def resize_image(self, width:int, height:int): + """调整图片大小""" + new_image = self.get_resize_image(width, height) + if new_image is not None: + self.image = new_image.image + return self + def rotate_image(self, angle:float): + """旋转图片""" + new_image = self.get_rotate_image(angle) + if new_image is not None: + self.image = new_image.image + return self + + # 图片翻折 + def flip(self, flip_code:int): + """翻转图片""" + if self.is_enable(): + self.image = base.flip(self.image, flip_code) + return self + def horizon_flip(self): + """水平翻转图片""" + return self.flip(1) + def vertical_flip(self): + """垂直翻转图片""" + return self.flip(0) + def both_flip(self): + """双向翻转图片""" + return self.flip(-1) + + # 合并序列 + def horizontal_stack(self, *images:Self): + """水平合并图片""" + return ImageObject(base.hconcat(self.image, *[image.image for image in images])) + def 
vertical_stack(self, *images:Self):
+        """Vertically stack this image with the given images"""
+        return ImageObject(base.vconcat([self.image, *[image.image for image in images]]))
+
+    # Color-space guessing
+    def guess_color_space(self) -> Optional[str]:
+        """Guess the color space from per-channel pixel-value sums (a crude heuristic)"""
+        if self.is_invalid():
+            return None
+        image = self.image
+        # Sum the pixel values of each channel (histogram bin counts all sum
+        # to the same pixel total, so the raw channel values are compared)
+        sum_b = float(np.sum(image[:, :, 0]))
+        sum_g = float(np.sum(image[:, :, 1]))
+        sum_r = float(np.sum(image[:, :, 2]))
+
+        # Judge the color space from the channel sums
+        if sum_b > sum_g and sum_b > sum_r:
+            return "BGR"
+        elif sum_g > sum_b and sum_g > sum_r:
+            return "GRAY"
+        else:
+            return "RGB"
+
+    # Color conversion
+    def get_convert(self, color_convert:int):
+        """Return a color-converted copy"""
+        if self.is_invalid():
+            return None
+        return ImageObject(base.cvtColor(self.image, color_convert))
+    def convert_to(self, color_convert:int):
+        """Convert this image's color space in place"""
+        if self.is_invalid():
+            return None
+        self.image = self.get_convert(color_convert).image
+        return self
+
+    def is_grayscale(self):
+        return self.dimension == 2
+    def get_grayscale(self, curColor=base.COLOR_BGR2GRAY) -> MatLike:
+        if self.is_invalid():
+            return None
+        if self.__gray is None and self.camera is None:
+            self.__gray = base.cvtColor(self.image, curColor)
+        return self.__gray
+    def convert_to_grayscale(self):
+        """Convert the image to grayscale"""
+        self.__image = self.get_grayscale()
+        return self
+
+    def get_convert_flag(
+        self,
+        targetColorTypeName:Literal[
+            "BGR", "RGB", "GRAY", "YCrCb"
+        ]
+        ) -> Optional[int]:
+        """Get the cv2 conversion flag from the guessed source color space"""
+        flag = self.guess_color_space()
+        if flag is None:
+            return None
+
+        if targetColorTypeName == "BGR":
+            if flag == "RGB":
+                return base.COLOR_RGB2BGR
+            elif flag == "GRAY":
+                return base.COLOR_GRAY2BGR
+            elif flag == "YCrCb":
+                return base.COLOR_YCrCb2BGR
+        elif targetColorTypeName == "RGB":
+            if flag == "BGR":
+                return base.COLOR_BGR2RGB
+            elif flag == "GRAY":
+                return base.COLOR_GRAY2RGB
+            elif flag == "YCrCb":
+                return base.COLOR_YCrCb2RGB
+        elif targetColorTypeName == "GRAY":
+            if flag == "RGB":
+                return base.COLOR_RGB2GRAY
+            elif flag == "BGR":
+                return base.COLOR_BGR2GRAY
+        elif targetColorTypeName == "YCrCb":
+            if flag == "BGR":
+                return base.COLOR_BGR2YCrCb
+            elif flag == "RGB":
+                return base.COLOR_RGB2YCrCb
+        return None
+
+    # In-place cropping
+    def sub_image(self, x:int, y:int, width:int, height:int):
+        """Crop the image in place"""
+        if self.is_invalid():
+            return self
+        self.image = self.image[y:y+height, x:x+width]
+        return self
+
+    # Histograms
+    def equalizeHist(self, is_cover = False) -> MatLike:
+        """Histogram equalization (cv2.equalizeHist needs a single-channel 8-bit image)"""
+        if self.is_invalid():
+            return self
+        result:MatLike = base.equalizeHist(self.get_grayscale())
+        if is_cover:
+            self.image = result
+        return result
+    def calcHist(
+        self,
+        channel: Union[List[int], int],
+        mask: Optional[MatLike] = None,
+        hist_size: Sequence[int] = [256],
+        ranges: Sequence[float] = [0, 256]
+        ) -> MatLike:
+        """Compute the histogram of the given channel(s)"""
+        if self.is_invalid():
+            return None
+        return base.calcHist(
+            [self.image],
+            channel if isinstance(channel, list) else [channel],
+            mask,
+            hist_size,
+            ranges)
+
+    # Sub-image operations
+    def sub_image_with_rect(self, rect:Tuple[float, float, float, float]):
+        """Crop to an (x, y, w, h) rect"""
+        if self.is_invalid():
+            return self
+        x, y, w, h = (int(v) for v in rect)
+        self.image = self.image[y:y+h, x:x+w]
+        return self
+    def sub_image_with_box(self, box:Tuple[float, float, float, float]):
+        """Crop to an (x1, y1, x2, y2) box"""
+        if self.is_invalid():
+            return self
+        x1, y1, x2, y2 = (int(v) for v in box)
+        self.image = self.image[y1:y2, x1:x2]
+        return self
+    def sub_cover_with_rect(self, image:Union[Self, 
MatLike], rect:Tuple[float, float, float, float]):
+        """Cover a region of this image with another image, using an (x, y, w, h) rect"""
+        if self.is_invalid():
+            raise ValueError("Real Image is none")
+        if not isinstance(image, ImageObject):
+            image = ImageObject(image)
+        x, y, w, h = (int(v) for v in rect)
+        self.image[y:y+h, x:x+w] = image.image
+        return self
+    def sub_cover_with_box(self, image:Union[Self, MatLike], box:Tuple[float, float, float, float]):
+        """Cover a region of this image with another image, using an (x1, y1, x2, y2) box"""
+        if self.is_invalid():
+            raise ValueError("Real Image is none")
+        if not isinstance(image, ImageObject):
+            image = ImageObject(image)
+        x1, y1, x2, y2 = (int(v) for v in box)
+        self.image[y1:y2, x1:x2] = image.image
+        return self
+
+    def operator_cv(self, func:Callable[[MatLike], Any], *args, **kwargs):
+        func(self.image, *args, **kwargs)
+        return self
+
+    # Stack-merge support
+    @override
+    def _inject_stack_uniform_item(self):
+        return np.uint8(self.image)
+
+    # Blend with another image
+    def merge_with_blending(self, other:Self, weights:Tuple[float, float]):
+        return ImageObject(base.addWeighted(self.image, weights[0], other.image, weights[1], 0))
+    # Merge with another image through a mask
+    def merge_with_mask(self, other:Self, mask:Self):
+        return ImageObject(base.bitwise_and(self.image, other.image, mask=mask.image))
+
+    # Filtering
+    def filter(self, ddepth:int, kernel:MatLike, *args, **kwargs):
+        return base.filter2D(self.image, ddepth, kernel, *args, **kwargs)
+    def filter_blur(self, kernalSize:Tuple[float, float]):
+        return base.blur(self.image, kernalSize)
+    def filter_gaussian(self, kernalSize:Tuple[float, float], sigmaX:float, sigmaY:float):
+        return base.GaussianBlur(self.image, kernalSize, sigmaX, sigmaY=sigmaY)
+    def filter_median(self, kernalSize:int):
+        return base.medianBlur(self.image, kernalSize)
+    def filter_bilateral(self, d:float, sigmaColor:float, sigmaSpace:float):
+        return base.bilateralFilter(self.image, d, sigmaColor, sigmaSpace)
+    def filter_sobel(self, dx:int, dy:int, kernalSize:int):
+        return base.Sobel(self.image, -1, dx, dy, ksize=kernalSize)
+    def filter_canny(self, threshold1:float, threshold2:float):
+        return base.Canny(self.image, threshold1, threshold2)
+    def filter_laplacian(self, kernalSize:int):
+        return base.Laplacian(self.image, -1, ksize=kernalSize)
+    def filter_scharr(self, dx:int, dy:int):
+        return base.Scharr(self.image, -1, dx, dy)
+    def filter_box_blur(self, kernalSize:Tuple[float, float]):
+        return base.boxFilter(self.image, -1, ksize=kernalSize, normalize=False)
+
+    # Thresholding
+    def threshold(
+        self,
+        threshold:float,
+        type:int
+        ):
+        return base.threshold(self.image, threshold, 255, type)
+    def adaptiveThreshold(
+        self,
+        adaptiveMethod: int = base.ADAPTIVE_THRESH_MEAN_C,
+        thresholdType: int = base.THRESH_BINARY,
+        blockSize: int = 11,
+        C: float = 2,
+        ):
+        return base.adaptiveThreshold(self.image, 255, adaptiveMethod, thresholdType, blockSize, C)
+    # Binary foreground/background
+    def Separate2EnableScene(self,*, is_front=True, is_back=False):
+        '''
+        return (retval, mask) for the selected scene (front or back)
+        '''
+        if is_back == is_front:
+            is_back = not is_front
+        gray = self.get_grayscale()
+        if is_front:
+            return base.threshold(gray, 255.0/2.0, 255, base.THRESH_BINARY)
+        else:
+            return base.threshold(gray, 255.0/2.0, 255, base.THRESH_BINARY_INV)
+    def Separate2EnableScenes_with_Otsu(self,*, is_front=True, is_back=False):
+        '''
+        return (retval, mask) for the selected scene, using Otsu's threshold
+        '''
+        if is_back == is_front:
+            is_back = not is_front
+        gray = self.get_grayscale()
+        if is_front:
+            return base.threshold(gray, 0, 255, base.THRESH_BINARY | base.THRESH_OTSU)
+        else:
+            return base.threshold(gray, 0, 255, base.THRESH_BINARY_INV | base.THRESH_OTSU)
+    # Foreground/background index masks
+    def SeparateFrontBackScenes(self):
+        '''
+        return mask -> front, back
+        '''
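+        # cv2.threshold returns a (retval, mask) pair; THRESH_BINARY and
+        # THRESH_BINARY_INV around the 127.5 midpoint produce complementary
+        # 0/255 masks, and np.where below extracts coordinate indices from
+        # the comparison against the grayscale image.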
gray = self.get_grayscale() + _, front = base.threshold(gray, 255.0/2.0, 255, base.THRESH_BINARY) + _, back = base.threshold(gray, 255.0/2.0, 255, base.THRESH_BINARY_INV) + return np.where(gray>=front), np.where(gray>=back) + def SeparateFrontBackScenes_with_Otsu(self): + ''' + return mask -> front, back + ''' + gray = self.get_grayscale() + _, front = base.threshold(gray, 0, 255, base.THRESH_BINARY | base.THRESH_OTSU) + _, back = base.threshold(gray, 0, 255, base.THRESH_BINARY_INV | base.THRESH_OTSU) + return np.where(gray>=front), np.where(gray>=back) + # 获取核 + def get_kernel(self, shape:int, kernalSize:Tuple[float, float]): + return base.getStructuringElement(shape, kernalSize) + def get_rect_kernal(self, kernalSize:Tuple[float, float]): + return self.get_kernel(base.MORPH_RECT, kernalSize) + def get_cross_kernal(self, kernalSize:Tuple[float, float]): + return self.get_kernel(base.MORPH_CROSS, kernalSize) + def get_ellipse_kernal(self, kernalSize:Tuple[float, float]): + return self.get_kernel(base.MORPH_ELLIPSE, kernalSize) + # 膨胀 + def dilate(self, kernel:Optional[MatLike]=None, *args, **kwargs): + if kernel is None: + kernel = self.get_rect_kernal((3, 3)) + return base.dilate(self.image, kernel, *args, **kwargs) + # 腐蚀 + def erode(self, kernel:Optional[MatLike]=None, *args, **kwargs): + if kernel is None: + kernel = self.get_rect_kernal((3, 3)) + return base.erode(self.image, kernel, *args, **kwargs) + # 开运算 + def open_operator(self, kernel:Optional[MatLike]=None, *args, **kwargs): + if kernel is None: + kernel = self.get_rect_kernal((3, 3)) + return base.morphologyEx(self.image, base.MORPH_OPEN, kernel, *args, **kwargs) + # 闭运算 + def close_operator(self, kernel:Optional[MatLike]=None, *args, **kwargs): + if kernel is None: + kernel = self.get_rect_kernal((3, 3)) + return base.morphologyEx(self.image, base.MORPH_CLOSE, kernel, *args, **kwargs) + # 梯度运算 + def gradient_operator(self, kernel:Optional[MatLike]=None, *args, **kwargs): + if kernel is None: + kernel = self.get_rect_kernal((3, 3)) + return base.morphologyEx(self.image, base.MORPH_GRADIENT, kernel, *args, **kwargs) + # 顶帽运算 + def tophat_operator(self, kernel:Optional[MatLike]=None, *args, **kwargs): + if kernel is None: + kernel = self.get_rect_kernal((3, 3)) + return base.morphologyEx(self.image, base.MORPH_TOPHAT, kernel, *args, **kwargs) + # 黑帽运算 + def blackhat_operator(self, kernel:Optional[MatLike]=None, *args, **kwargs): + if kernel is None: + kernel = self.get_rect_kernal((3, 3)) + return base.morphologyEx(self.image, base.MORPH_BLACKHAT, kernel, *args, **kwargs) + + # 绘制轮廓 + def drawContours( + self, + contours: Sequence[MatLike], + contourIdx: int = -1, + color: Union[MatLike, Tuple[int]] = (0, 0, 0), + thickness: int = 1, + lineType: int = base.LINE_8, + hierarchy: Optional[MatLike] = None, + maxLevel: int = base.FILLED, + offset: Optional[Point] = None, + is_draw_on_self:bool = False + ) -> MatLike: + image = self.image if is_draw_on_self else self.image.copy() + return base.drawContours(image, contours, contourIdx, color, thickness, lineType, hierarchy, maxLevel, offset) + # 修改自身的绘制 + def draw_rect( + self, + rect: Rect, + color: Union[MatLike, Tuple[int]] = (0, 0, 0), + thickness: int = 1, + lineType: int = base.LINE_8, + ) -> MatLike: + base.rectangle(self.image, rect, color, thickness, lineType) + return self + # 获取轮廓 + def get_contours( + self, + *, + mode: int = base.RETR_LIST, + method: int = base.CHAIN_APPROX_SIMPLE, + is_front: bool = True, + contours: Optional[Sequence[MatLike]] = None, + hierarchy: 
Optional[MatLike] = None, + offset: Optional[Point] = None + ) -> Tuple[Sequence[MatLike], MatLike]: + _, bin = self.Separate2EnableScene(is_front=is_front) + if offset is not None: + return base.findContours(bin, mode, method, contours, hierarchy, offset) + else: + return base.findContours(bin, mode, method, contours, hierarchy) + def get_contours_mask( + self, + width: int, + *, + mode: int = base.RETR_LIST, + method: int = base.CHAIN_APPROX_SIMPLE, + is_front: bool = True, + contours: Optional[Sequence[MatLike]] = None, + hierarchy: Optional[MatLike] = None, + offset: Optional[Point] = None, + ) -> Tuple[Sequence[MatLike], MatLike]: + find_contours, _ = self.get_contours( + mode=mode, + method=method, + is_front=is_front, + contours=contours, + hierarchy=hierarchy, + offset=offset + ) + return base.drawContours(get_zero_mask(self.shape, dtype=np.uint8), find_contours, -1, (255, 255, 255), width) + def get_contours_fill_inside_mask( + self, + *, + mode: int = base.RETR_LIST, + method: int = base.CHAIN_APPROX_SIMPLE, + is_front: bool = True, + contours: Optional[Sequence[MatLike]] = None, + hierarchy: Optional[MatLike] = None, + offset: Optional[Point] = None + ) -> Tuple[Sequence[MatLike], MatLike]: + return self.get_contours_mask( + mode=mode, + method=method, + is_front=is_front, + contours=contours, + hierarchy=hierarchy, + offset=offset, + width=-1 + ) + # 获取轮廓方框 + def get_xy_rect_from_contours( + self, + *, + mode: int = base.RETR_LIST, + method: int = base.CHAIN_APPROX_SIMPLE, + is_front: bool = True, + contours: Optional[Sequence[MatLike]] = None, + hierarchy: Optional[MatLike] = None, + offset: Optional[Point] = None + ) -> Sequence[Rect]: + return [base.boundingRect(contour) for contour in self.get_contours( + mode=mode, + method=method, + is_front=is_front, + contours=contours, + hierarchy=hierarchy, + offset=offset + )] + def get_minarea_rect_from_contours( + self, + *, + mode: int = base.RETR_LIST, + method: int = base.CHAIN_APPROX_SIMPLE, + is_front: bool = True, + contours: Optional[Sequence[MatLike]] = None, + hierarchy: Optional[MatLike] = None, + offset: Optional[Point] = None + ) -> Sequence[RotatedRect]: + return [base.minAreaRect(contour) for contour in self.get_contours( + mode=mode, + method=method, + is_front=is_front, + contours=contours, + hierarchy=hierarchy, + offset=offset)] + + # 图像匹配 + def match_on_scene( + self, + scene_image: Self, + # Feature2D config + featrue_type: Optional[Union[ + base.Feature2D, + ClosuresCallable[base.Feature2D], + Feature2DInstance + ]] = SIFT_Feature2D, + optout_feature_kp_and_des_ref: + Optional[left_value_reference[ + Tuple[Sequence[base.KeyPoint], MatLike] + ]] = None, + # Match Config + match_min_points: int = 4, + # Draw rect Config + rect_color: Tuple[int, int, int] = (0, 255, 0), + rect_thickness: int = 2, + # Draw match Config + out_drawMatches_ref:Optional[left_value_reference[MatLike] + ] = None, + drawMatches_range: Optional[Tuple[int, int]] = None + ) -> MatLike: + ''' + 本图像作为目标特征 + + Args + --- + Target Image + scene_image: + 识别的场景, 此图像将作为目标匹配的场景 + + Feature2D Config + type: + 特征检测器类型/生成器/实例, 默认为SIFT + ref: + 左值引用容器, ref_value为空时将用于存储特征点与描述符, + 不为空则提取ref_value作为本次的特征点与描述符 + + Match Config + min_points: + 匹配的最小点数, 小于此值则无法找到目标物体 + + Draw rect Config + color: + 矩形框颜色, 默认为绿色 + thickness: + 矩形框厚度, 默认为2 + + Draw match Config + ref: + 左值引用容器, 将用于存储合成的对比图像 + range: + 匹配点的绘制范围(靠前的匹配度高), 默认为全部绘制 + + Return + --- + MatLike: 绘制了方框的矩形 + ''' + # 读取目标图和场景图 + target_img = self.get_grayscale() + scene_img = scene_image.get_grayscale() 
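+        # Pipeline below: detect keypoints/descriptors on both grayscale
+        # images, brute-force match them (NORM_L2, cross-checked), estimate a
+        # homography with RANSAC, then project this image's corners into the
+        # scene to draw the bounding polygon.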
+ + # 初始化SIFT检测器 + feature2D:Feature2DInstance = featrue_type if ( + isinstance(featrue_type, Feature2DInstance) + ) else Feature2DInstance(featrue_type) + + # 检测关键点和描述符 + kp1:Sequence[base.KeyPoint] = None + des1:MatLike = None + if optout_feature_kp_and_des_ref is None: + kp1, des1 = feature2D.detectAndCompute(target_img, None) + else: + if optout_feature_kp_and_des_ref.ref_value is None: + kp1, des1 = feature2D.detectAndCompute(target_img, None) + optout_feature_kp_and_des_ref.ref_value = (kp1, des1) + else: + kp1, des1 = optout_feature_kp_and_des_ref.ref_value + kp2, des2 = feature2D.detectAndCompute(scene_img, None) + + # 初始化BFMatcher + bf = base.BFMatcher(base.NORM_L2, crossCheck=True) + + # 匹配描述符 + matches = bf.match(des1, des2) + matches = sorted(matches, key=lambda x: x.distance) + + # 如果匹配点数少于min_match_points个,无法找到目标物体 + if len(matches) < match_min_points: + return None + + # 提取匹配点的位置 + src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2) + dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2) + + # 使用RANSAC算法找到单应性矩阵 + M, _ = base.findHomography(src_pts, dst_pts, base.RANSAC, 5.0) + + # 获取目标物体的边界框 + h, w = target_img.shape + pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2) + dst = base.perspectiveTransform(pts, M) + + # 在scene中绘制边界框 + result = scene_image.image.copy() + base.polylines( + result, + [np.int32(dst)], + True, + rect_color, + rect_thickness, + base.LINE_AA + ) + + # 合成的绘制结果 + if out_drawMatches_ref is not None: + if drawMatches_range is None: + out_drawMatches_ref.ref_value = base.drawMatches( + self.image, + kp1, + scene_image.image, + kp2, + matches, + None, flags=base.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS + ) + else: + out_drawMatches_ref.ref_value = base.drawMatches( + self.image, + kp1, + scene_image.image, + kp2, + matches[drawMatches_range[0]:drawMatches_range[1]], + None, flags=base.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS + ) + + # 返回绘制了方框的结果 + return result + + # 图像卷积 + def conv(self, kernel:MatLike, *args, **kwargs): + return base.filter2D(self.image, -1, kernel, *args, **kwargs) + def conv_with_shape(self, shape:Tuple[int, int], *args, **kwargs): + return base.filter2D(self.image, -1, shape, *args, **kwargs) + + # np加速 + +def get_new_noise( + raw_image: Optional[MatLike], + height: int, + weight: int, + *, + mean: float = 0, + sigma: float = 25, + dtype = np.uint8 + ) -> MatLike: + noise = raw_image + if noise is None: + noise = np.zeros((height, weight), dtype=dtype) + base.randn(noise, mean, sigma) + return base.cvtColor(noise, base.COLOR_GRAY2BGR) +class NoiseImageObject(ImageObject): + def __init__( + self, + height: int, + weight: int, + *, + mean: float = 0, + sigma: float = 25, + dtype = np.uint8 + ): + super().__init__(get_new_noise( + None, height, weight, mean=mean, sigma=sigma, dtype=dtype + )) + + @override + def SymbolName(self): + return "Noise" + +def Unwrapper(image:Optional[Union[ + str, + ImageObject, + tool_file, + MatLike, + np.ndarray, + ImageFile.ImageFile, + Image.Image + ]]) -> MatLike: + return image.image if isinstance(image, ImageObject) else ImageObject(image).image + +def Wrapper(image:Optional[Union[ + str, + ImageObject, + tool_file, + MatLike, + np.ndarray, + ImageFile.ImageFile, + Image.Image + ]]) -> ImageObject: + return ImageObject(image) + +class light_cv_window: + def __init__(self, name:str): + self.__my_window_name = name + base.namedWindow(self.__my_window_name) + def __del__(self): + self.destroy() + + def show_image(self, 
image:Union[ImageObject, MatLike]):
+        if self.__my_window_name is None:
+            self.__my_window_name = "window"
+        if isinstance(image, ImageObject):
+            image = image.image
+        base.imshow(self.__my_window_name, image)
+        return self
+    def destroy(self):
+        if self.__my_window_name is not None and base.getWindowProperty(self.__my_window_name, base.WND_PROP_VISIBLE) > 0:
+            base.destroyWindow(self.__my_window_name)
+        return self
+
+    @property
+    def window_rect(self):
+        return base.getWindowImageRect(self.__my_window_name)
+    @window_rect.setter
+    def window_rect(self, rect:Tuple[float, float, float, float]):
+        self.set_window_rect(rect[0], rect[1], rect[2], rect[3])
+
+    def set_window_size(self, width:int, height:int):
+        base.resizeWindow(self.__my_window_name, width, height)
+        return self
+    def get_window_size(self) -> Tuple[float, float]:
+        rect = self.window_rect
+        return rect[2], rect[3]
+
+    def get_window_property(self, prop_id:int):
+        return base.getWindowProperty(self.__my_window_name, prop_id)
+    def set_window_property(self, prop_id:int, prop_value:int):
+        base.setWindowProperty(self.__my_window_name, prop_id, prop_value)
+        return self
+    def get_prop_frame_width(self):
+        return self.window_rect[2]
+    def get_prop_frame_height(self):
+        return self.window_rect[3]
+    def is_full_window(self):
+        return base.getWindowProperty(self.__my_window_name, base.WND_PROP_FULLSCREEN) == base.WINDOW_FULLSCREEN
+    def set_full_window(self):
+        base.setWindowProperty(self.__my_window_name, base.WND_PROP_FULLSCREEN, base.WINDOW_FULLSCREEN)
+        return self
+    def set_normal_window(self):
+        base.setWindowProperty(self.__my_window_name, base.WND_PROP_FULLSCREEN, base.WINDOW_NORMAL)
+        return self
+    def is_using_openGL(self):
+        return base.getWindowProperty(self.__my_window_name, base.WND_PROP_OPENGL) > 0
+    def set_using_openGL(self):
+        base.setWindowProperty(self.__my_window_name, base.WND_PROP_OPENGL, 1)
+        return self
+    def set_not_using_openGL(self):
+        base.setWindowProperty(self.__my_window_name, base.WND_PROP_OPENGL, 0)
+        return self
+    def is_autosize(self):
+        return base.getWindowProperty(self.__my_window_name, base.WND_PROP_AUTOSIZE) > 0
+    def set_autosize(self):
+        base.setWindowProperty(self.__my_window_name, base.WND_PROP_AUTOSIZE, 1)
+        return self
+    def set_not_autosize(self):
+        base.setWindowProperty(self.__my_window_name, base.WND_PROP_AUTOSIZE, 0)
+        return self
+
+    def set_window_rect(self, x:int, y:int, width:int, height:int):
+        base.moveWindow(self.__my_window_name, x, y)
+        return self.set_window_size(width, height)
+
+    def set_window_pos(self, x:int, y:int):
+        base.moveWindow(self.__my_window_name, x, y)
+        return self
+
+    def wait_key(self, wait_time:int=0):
+        return base.waitKey(wait_time)
+
+def get_haarcascade_frontalface(name_or_default:Optional[str]=None):
+    if name_or_default is None:
+        name_or_default = "haarcascade_frontalface_default"
+    return base.CascadeClassifier(BaseData.haarcascades + name_or_default + '.xml')
+
+def detect_human_face(
+    image: ImageObject,
+    detecter: base.CascadeClassifier,
+    scaleFactor: float = 1.1,
+    minNeighbors: int = 4,
+    *args, **kwargs):
+    '''Returns a sequence of (x, y, w, h) rects, one per detected face'''
+    return detecter.detectMultiScale(image.image, scaleFactor, minNeighbors, *args, **kwargs)
+
+class internal_detect_faces_oop(Callable[[ImageObject], None]):
+    def __init__(self):
+        self.face_cascade = get_haarcascade_frontalface()
+    def __call__(self, image:ImageObject):
+        # detectMultiScale needs a Mat, not an ImageObject, so use the cached
+        # grayscale view rather than converting the image in place
+        gray = image.get_grayscale()
+        faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)
+        for (x,y,w,h) in faces:
+            image.operator_cv(base.rectangle,(x,y),(x+w,y+h),(255,0,0),2)
+
+def 
easy_detect_faces(camera:light_cv_camera): + ImageObject(camera).show_image("window", 'q', internal_detect_faces_oop()) + +# 示例使用 +if __name__ == "__main__": + img_obj = ImageObject("path/to/your/image.jpg") + img_obj.show_image() + img_obj.resize_image(800, 600) + img_obj.rotate_image(45) + img_obj.convert_to_grayscale() + img_obj.save_image("path/to/save/image.jpg") + +# Override tool_file to tool_file_ex + +class tool_file_cvex(tool_file): + def __init__(self, file_path:str, *args, **kwargs): + super().__init__(file_path, *args, **kwargs) + + @override + def load_as_image(self) -> ImageObject: + self.data = ImageObject(self) + return self.data + @override + def save_as_image(self, path = None): + image:ImageObject = self.data + image.save_image(path if path is not None else self.get_path()) + return self + +def WrapperFile2CVEX(file:Union[tool_file_or_str, tool_file_cvex]): + if isinstance(file, tool_file_cvex): + return file + elif isinstance(file, str): + return tool_file_cvex(file) + elif isinstance(file, tool_file): + result = tool_file_cvex(str(file)) + result.data = file.data + return result + else: + raise TypeError("file must be tool_file or str") \ No newline at end of file diff --git a/Convention/Runtime/Visual/README.md b/Convention/Runtime/Visual/README.md new file mode 100644 index 0000000..a4990cd --- /dev/null +++ b/Convention/Runtime/Visual/README.md @@ -0,0 +1,264 @@ +# Visual 模块 + +Visual模块提供了数据可视化和图像处理相关的功能,包括数据图表、图像处理、词云等。 + +## 目录结构 + +- `Core.py`: 核心数据可视化功能 +- `OpenCV.py`: OpenCV图像处理功能 +- `WordCloud.py`: 词云生成功能 +- `Manim.py`: 数学动画功能 + +## 功能特性 + +### 1. 数据可视化 (Core.py) + +#### 1.1 基础图表 + +- 折线图 +- 柱状图 +- 散点图 +- 直方图 +- 饼图 +- 箱线图 +- 热力图 +- 分类数据图 +- 联合图 + +#### 1.2 数据处理 + +- 缺失值处理 +- 重复值处理 +- 数据标准化 +- 数据归一化 + +### 2. 图像处理 (OpenCV.py) + +#### 2.1 图像操作 + +- 图像加载 + - 支持多种格式(jpg, png, bmp等) + - 支持从文件路径或URL加载 + - 支持从内存缓冲区加载 +- 图像保存 + - 支持多种格式输出 + - 支持质量参数设置 + - 支持压缩选项 +- 图像显示 + - 支持窗口标题设置 + - 支持窗口大小调整 + - 支持键盘事件处理 +- 图像转换 + - RGB转灰度 + - RGB转HSV + - RGB转LAB + - 支持自定义转换矩阵 +- 图像缩放 + - 支持多种插值方法 + - 支持保持宽高比 + - 支持指定目标尺寸 +- 图像旋转 + - 支持任意角度旋转 + - 支持旋转中心点设置 + - 支持旋转后尺寸调整 +- 图像翻转 + - 水平翻转 + - 垂直翻转 + - 对角线翻转 +- 图像合并 + - 支持多图像拼接 + - 支持透明度混合 + - 支持蒙版处理 + +#### 2.2 ImageObject类详解 + +ImageObject类提供了完整的图像处理功能: + +```python +from Convention.Visual import OpenCV + +# 创建图像对象 +image = OpenCV.ImageObject("input.jpg") + +# 基本属性 +width = image.width # 图像宽度 +height = image.height # 图像高度 +channels = image.channels # 通道数 +dtype = image.dtype # 数据类型 + +# 图像处理 +image.resize_image(800, 600) # 调整大小 +image.convert_to_grayscale() # 转换为灰度图 +image.filter_gaussian((5, 5), 1.5, 1.5) # 高斯滤波 +image.rotate_image(45) # 旋转45度 +image.flip_image(horizontal=True) # 水平翻转 + +# 图像增强 +image.adjust_brightness(1.2) # 调整亮度 +image.adjust_contrast(1.5) # 调整对比度 +image.adjust_saturation(0.8) # 调整饱和度 +image.equalize_histogram() # 直方图均衡化 + +# 边缘检测 +image.detect_edges(threshold1=100, threshold2=200) # Canny边缘检测 +image.detect_contours() # 轮廓检测 + +# 特征提取 +keypoints = image.detect_keypoints() # 关键点检测 +descriptors = image.compute_descriptors() # 描述子计算 + +# 图像保存 +image.save_image("output.jpg", quality=95) # 保存图像 +image.save_image("output.png", compression=9) # 保存PNG + +# 图像显示 +image.show_image("预览") # 显示图像 +image.wait_key(0) # 等待按键 + +# 图像信息 +print(image.get_info()) # 获取图像信息 +print(image.get_histogram()) # 获取直方图 +``` + +#### 2.3 图像增强 + +- 边缘检测 +- 滤波处理 +- 阈值处理 +- 形态学操作 +- 轮廓检测 +- 特征匹配 + +#### 2.4 视频处理 + +- 视频读取 +- 视频写入 +- 摄像头控制 +- 帧处理 + +### 3. 词云生成 (WordCloud.py) + +#### 3.1 词云功能 + +- 词云创建 +- 标题设置 +- 渲染输出 +- 样式定制 + +### 4. 
数学动画 (Manim.py) + +#### 4.1 动画功能 + +- 数学公式动画 +- 几何图形动画 +- 图表动画 +- 场景管理 + +## 使用示例 + +### 1. 数据可视化示例 + +```python +from Convention.Visual import Core + +# 创建数据可视化生成器 +generator = Core.data_visual_generator("data.csv") + +# 绘制折线图 +generator.plot_line("x", "y", title="折线图示例") + +# 绘制柱状图 +generator.plot_bar("category", "value", title="柱状图示例") + +# 绘制散点图 +generator.plot_scatter("x", "y", title="散点图示例") + +# 绘制饼图 +generator.plot_pie("category", title="饼图示例") +``` + +### 2. 图像处理示例 + +```python +from Convention.Visual import OpenCV + +# 创建图像对象 +image = OpenCV.ImageObject("input.jpg") + +# 图像处理 +image.resize_image(800, 600) +image.convert_to_grayscale() +image.filter_gaussian((5, 5), 1.5, 1.5) + +# 保存图像 +image.save_image("output.jpg") +``` + +### 3. 词云生成示例 + +```python +from Convention.Visual import WordCloud + +# 创建词云 +wordcloud = WordCloud.make_word_cloud("词云", [ + ("Python", 100), + ("Java", 80), + ("C++", 70), + ("JavaScript", 90), +]) + +# 设置标题 +WordCloud.set_title(wordcloud, "编程语言词云") + +# 渲染输出 +WordCloud.render_to(wordcloud, "wordcloud.html") +``` + +### 4. 视频处理示例 + +```python +from Convention.Visual import OpenCV + +# 创建视频捕获对象 +camera = OpenCV.light_cv_camera(0) + +# 创建视频写入对象 +writer = OpenCV.VideoWriterInstance( + "output.avi", + OpenCV.avi_with_Xvid_fourcc(), + 30.0, + (640, 480) +) + +# 录制视频 +def stop_condition(): + return OpenCV.is_current_key('q') + +camera.recording(stop_condition, writer) +``` + +## 依赖项 + +- matplotlib: 数据可视化 +- seaborn: 高级数据可视化 +- opencv-python: 图像处理 +- pyecharts: 词云生成 +- manim: 数学动画 + +## 注意事项 + +1. 使用图像处理时注意内存占用 +2. 视频处理时注意帧率设置 +3. 词云生成时注意数据量 +4. 动画制作时注意性能优化 + +## 性能优化 + +1. 使用图像处理时注意批量处理 +2. 视频处理时使用合适的编码格式 +3. 词云生成时控制词数 +4. 动画制作时优化渲染设置 + +## 贡献指南 + +欢迎提交Issue和Pull Request来改进功能或添加新特性。 diff --git a/Convention/Runtime/Visual/WordCloud.py b/Convention/Runtime/Visual/WordCloud.py new file mode 100644 index 0000000..4edb1a2 --- /dev/null +++ b/Convention/Runtime/Visual/WordCloud.py @@ -0,0 +1,66 @@ +from ..Internal import * +from pyecharts.charts import WordCloud +from pyecharts import options as opts +from pyecharts import types +#from ..File.Core import tool_file, UnWrapper as UnWrapper2Str + +def make_word_cloud( + series_name: str, + data_pair: Sequence[Tuple[str, int]], + **kwargs, + ): + wordcloud = WordCloud() + wordcloud.add(series_name, data_pair, **kwargs) + return wordcloud + +def set_title( + wordcloud: WordCloud, + title: str +): + wordcloud.set_global_opts( + title_opts=opts.TitleOpts(title=title) + ) + +def render_to( + wordcloud: WordCloud, + file_name: Union[tool_file, str] +): + wordcloud.render(UnWrapper2Str(file_name)) + +class light_word_cloud(left_value_reference[WordCloud]): + def __init__( + self, + series_name: str, + data_pair: types.Sequence, + **kwargs, + ): + super().__init__(make_word_cloud(series_name, data_pair, **kwargs)) + + def set_title( + self, + title: str + ): + set_title(self.ref_value, title) + + def render_to( + self, + file_name: Union[tool_file, str] + ): + render_to(self.ref_value, file_name) + +if __name__ == "__main__": + # 准备数据 + wordcloud = make_word_cloud("", [ + ("Python", 100), + ("Java", 80), + ("C++", 70), + ("JavaScript", 90), + ("Go", 60), + ("Rust", 50), + ("C#", 40), + ("PHP", 30), + ("Swift", 20), + ("Kotlin", 10), + ], word_size_range=[20, 100]) + set_title(wordcloud, "cloud") + render_to(wordcloud, "wordcloud.html") diff --git a/Convention/Runtime/Visual/__init__.py b/Convention/Runtime/Visual/__init__.py new file mode 100644 index 0000000..e69de29
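A minimal usage sketch for the feature-matching API (`ImageObject.match_on_scene`) added in `OpenCV.py`; the import path follows the README's examples, and the file names are placeholders:

```python
from Convention.Visual import OpenCV

# Target object to look for, and the scene to search in (placeholder paths).
target = OpenCV.ImageObject("target.jpg")
scene = OpenCV.ImageObject("scene.jpg")

# Returns the scene with the located object outlined, or None when fewer
# than match_min_points descriptor matches are found.
result = target.match_on_scene(scene, match_min_points=4)
if result is not None:
    OpenCV.ImageObject(result).save_image("matched.jpg")
```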