Today, I worked with artificial intelligence to create a tool for my game development.
I used Python with PyQt6; the tool helps me remove borders, resize, split, rename, and save images as PNG files for the Godot game engine.
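The post doesn't include the tool's source, but the core image pipeline is simple; here is a minimal sketch of those steps with PyQt6's QImage (the function name and the border/tile parameters are my own illustration, not the actual tool):

from PyQt6.QtCore import QRect
from PyQt6.QtGui import QImage

def process_sheet(path, border, tile, out_prefix):
    img = QImage(path)
    # Remove a uniform border of `border` pixels on each side
    img = img.copy(QRect(border, border,
                         img.width() - 2 * border, img.height() - 2 * border))
    # Optional resize step, e.g. img = img.scaled(new_width, new_height)
    # Split into tile x tile cells; rename and save each cell as PNG for Godot
    for row in range(img.height() // tile):
        for col in range(img.width() // tile):
            cell = img.copy(QRect(col * tile, row * tile, tile, tile))
            cell.save(f"{out_prefix}_{row}_{col}.png", "PNG")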



from krita import *
from PyQt5.QtWidgets import QAction, QMessageBox
import os

class ExportGodotPNG(Extension):
    def __init__(self, parent):
        super().__init__(parent)

    def setup(self):
        pass

    def export_png(self):
        # Get the active document
        doc = Krita.instance().activeDocument()
        if not doc:
            QMessageBox.warning(None, "Error", "No document open! Please open a document and try again.")
            return
        # Create an InfoObject for PNG export
        info = InfoObject()
        info.setProperty("alpha", True)        # Keep alpha channel for transparency
        info.setProperty("compression", 0)     # No compression for maximum quality
        info.setProperty("interlaced", False)  # Disable interlacing
        info.setProperty("forceSRGB", True)    # Force sRGB for Godot compatibility
        # Build the output file path
        if doc.fileName():
            base_path = os.path.splitext(doc.fileName())[0]
        else:
            base_path = os.path.join(os.path.expanduser("~"), "export_godot")
        output_file = base_path + "_godot.png"
        # Export the document as PNG
        try:
            doc.exportImage(output_file, info)
            # Show success message with brief usage info
            QMessageBox.information(None, "Success",
                f"Successfully exported as PNG for Godot: {output_file}\n\n"
                "This PNG has no compression, alpha channel support, and sRGB for Godot compatibility. "
                "To use in Godot, import the PNG and adjust texture settings as needed."
            )
        except Exception as e:
            QMessageBox.critical(None, "Error", f"Export failed: {str(e)}")

    def createActions(self, window):
        # Create only the export action in Tools > Scripts
        action_export = window.createAction("export_godot_png", "Export Godot PNG", "tools/scripts")
        action_export.triggered.connect(self.export_png)

# Register the plugin
Krita.instance().addExtension(ExportGodotPNG(Krita.instance()))
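For Krita to load this extension, the script needs a matching .desktop file next to it in the pykrita resources folder; a sketch, assuming the module is saved as export_godot_png.py:

[Desktop Entry]
Type=Service
ServiceTypes=Krita/PyPlugin
X-KDE-Library=export_godot_png
X-Python-2-Compatible=false
Name=Export Godot PNG
Comment=Export the active document as a Godot-ready PNG

The X-KDE-Library value must match the Python module (or package folder) name, and the plugin is then enabled under Settings > Configure Krita > Python Plugin Manager.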
bl_info = {
    "name": "Append Materials from Folder",
    "author": "Grok",
    "version": (1, 2),
    "blender": (3, 0, 0),
    "location": "View3D > Sidebar > Append Materials",
    "description": "Select a folder and append all materials from .blend files recursively",
    "category": "Import-Export",
}

import bpy
import os
from bpy.types import Operator, Panel, PropertyGroup
from bpy.props import StringProperty, PointerProperty

class AppendMaterialsProperties(PropertyGroup):
    folder_path: StringProperty(
        name="Folder Path",
        description="Path to the folder containing .blend files",
        default="",
        maxlen=1024,
        subtype='DIR_PATH'
    )

class APPEND_OT_materials_from_folder(Operator):
    bl_idname = "append.materials_from_folder"
    bl_label = "Append Materials from Folder"
    bl_options = {'REGISTER', 'UNDO'}
    bl_description = "Append all materials from .blend files in the selected folder and subfolders"

    def execute(self, context):
        props = context.scene.append_materials_props
        folder_path = props.folder_path
        # Normalize path to avoid issues with slashes
        folder_path = os.path.normpath(bpy.path.abspath(folder_path))
        if not folder_path or not os.path.isdir(folder_path):
            self.report({'ERROR'}, f"Invalid or no folder selected: {folder_path}")
            return {'CANCELLED'}
        self.report({'INFO'}, f"Scanning folder: {folder_path}")
        blend_files_found = 0
        materials_appended = 0
        errors = []
        # Walk recursively through the folder
        for root, dirs, files in os.walk(folder_path):
            self.report({'INFO'}, f"Checking folder: {root}")
            for file in files:
                if file.lower().endswith('.blend'):
                    blend_files_found += 1
                    blend_path = os.path.join(root, file)
                    self.report({'INFO'}, f"Found .blend file: {blend_path}")
                    try:
                        # Open the .blend file to inspect materials
                        with bpy.data.libraries.load(blend_path, link=False) as (data_from, data_to):
                            if data_from.materials:
                                data_to.materials = data_from.materials
                                materials_appended += len(data_from.materials)
                                self.report({'INFO'}, f"Appended {len(data_from.materials)} materials from: {blend_path}")
                            else:
                                self.report({'WARNING'}, f"No materials found in: {blend_path}")
                    except Exception as e:
                        errors.append(f"Failed to process {blend_path}: {str(e)}")
                        self.report({'WARNING'}, f"Error in {blend_path}: {str(e)}")
        # Final report
        if blend_files_found == 0:
            self.report({'WARNING'}, f"No .blend files found in {folder_path} or its subfolders!")
        else:
            self.report({'INFO'}, f"Found {blend_files_found} .blend files, appended {materials_appended} materials.")
        if errors:
            self.report({'WARNING'}, f"Encountered {len(errors)} errors: {'; '.join(errors)}")
        return {'FINISHED'}

class VIEW3D_PT_append_materials(Panel):
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Append Materials"
    bl_label = "Append Materials from Folder"

    def draw(self, context):
        layout = self.layout
        props = context.scene.append_materials_props
        layout.prop(props, "folder_path")
        layout.operator("append.materials_from_folder", text="Append Materials")

def register():
    bpy.utils.register_class(AppendMaterialsProperties)
    bpy.utils.register_class(APPEND_OT_materials_from_folder)
    bpy.utils.register_class(VIEW3D_PT_append_materials)
    bpy.types.Scene.append_materials_props = PointerProperty(type=AppendMaterialsProperties)

def unregister():
    bpy.utils.unregister_class(VIEW3D_PT_append_materials)
    bpy.utils.unregister_class(APPEND_OT_materials_from_folder)
    bpy.utils.unregister_class(AppendMaterialsProperties)
    del bpy.types.Scene.append_materials_props

if __name__ == "__main__":
    register()
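One caveat: data_to.materials = data_from.materials appends every material even when a same-named one already exists in the current file, which produces .001 duplicates on repeated runs. A hedged variant of that single line (my addition, not part of the addon) skips names that are already present:

# Inside the bpy.data.libraries.load block: data_from.materials is a list of
# name strings, so we can filter against the materials already in the file
data_to.materials = [m for m in data_from.materials if m not in bpy.data.materials]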
import subprocess
import sys
import os
import shutil
import importlib.util
import re
import concurrent.futures
from typing import List, Tuple, Set, Optional

class ModuleManager:
    def __init__(self):
        self.modules: Set[str] = set()
        self.pip_path = self._get_pip_path()

    def _get_pip_path(self) -> Optional[str]:
        possible_path = os.path.join(sys.exec_prefix, "Scripts", "pip.exe")
        return shutil.which("pip") or (possible_path if os.path.exists(possible_path) else None)

    def extract_imports_from_file(self, file_path: str) -> List[Tuple[str, str]]:
        imports = []
        try:
            with open(file_path, 'r', encoding='utf-8') as file:
                for line in file:
                    # Detect 'import module'
                    import_match = re.match(r'^\s*import\s+([a-zA-Z0-9_]+)(\s+as\s+.*)?$', line)
                    if import_match:
                        module = import_match.group(1)
                        imports.append((module, line.strip()))
                        continue
                    # Detect 'from module import ...'
                    from_match = re.match(r'^\s*from\s+([a-zA-Z0-9_]+)\s+import\s+.*$', line)
                    if from_match:
                        module = from_match.group(1)
                        imports.append((module, line.strip()))
        except FileNotFoundError:
            print(f"❌ File {file_path} was not found.")
        except Exception as e:
            print(f"❌ Error reading file {file_path}: {e}")
        return imports

    def scan_directory_for_py_files(self, directory: str = '.') -> List[str]:
        py_files = []
        for root, _, files in os.walk(directory):
            for file in files:
                if file.endswith('.py'):
                    py_files.append(os.path.join(root, file))
        return py_files

    def collect_unique_modules(self, directory: str = '.') -> None:
        py_files = self.scan_directory_for_py_files(directory)
        all_imports = []
        with concurrent.futures.ThreadPoolExecutor() as executor:
            future_to_file = {executor.submit(self.extract_imports_from_file, file_path): file_path for file_path in py_files}
            for future in concurrent.futures.as_completed(future_to_file):
                imports = future.result()
                all_imports.extend(imports)
        for module, _ in all_imports:
            self.modules.add(module)

    def is_module_installed(self, module: str) -> bool:
        return importlib.util.find_spec(module) is not None

    def run_pip_install(self, module: str) -> bool:
        if not self.pip_path:
            print(f"❌ Could not find pip for {module}.")
            return False
        try:
            subprocess.check_call([self.pip_path, "install", module])
            print(f"✅ Package {module} was installed successfully.")
            return True
        except subprocess.CalledProcessError as e:
            print(f"❌ Error installing package {module}: {e}")
            return False

    def check_and_install_modules(self) -> None:
        def process_module(module):
            print(f"\n🔎 Checking whether {module} is installed...")
            if self.is_module_installed(module):
                print(f"✅ {module} is already installed.")
            else:
                print(f"📦 Installing {module}...")
                self.run_pip_install(module)
                # Re-check after installation
                if self.is_module_installed(module):
                    print(f"✅ {module} works now.")
                else:
                    print(f"❌ {module} does not work after installation.")
        with concurrent.futures.ThreadPoolExecutor() as executor:
            executor.map(process_module, self.modules)

def main():
    print("🔍 Checking pip...")
    manager = ModuleManager()
    if manager.pip_path:
        print(f"✅ Pip is available at: {manager.pip_path}")
    else:
        print("⚠️ Pip is not available.")
        return
    directory = sys.argv[1] if len(sys.argv) > 1 else '.'
    print(f"\n📜 Scanning directory {directory} for .py files...")
    manager.collect_unique_modules(directory)
    if not manager.modules:
        print("⚠️ No modules found in imports.")
        return
    print(f"\nUnique modules detected: {', '.join(manager.modules)}")
    manager.check_and_install_modules()

if __name__ == "__main__":
    main()
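One limitation worth noting: the script installs packages under their import names, but some PyPI names differ (cv2 is published as opencv-python, for instance). A small hedged extension (the mapping table is my own example, not part of the script):

# Map import names to PyPI package names before calling pip
PIP_NAME = {"cv2": "opencv-python", "PIL": "Pillow", "yaml": "PyYAML", "sklearn": "scikit-learn"}

def pip_name(module: str) -> str:
    return PIP_NAME.get(module, module)

# e.g. manager.run_pip_install(pip_name("cv2")) installs opencv-python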
python catafest_build_package_001.py
🔍 Checking standard modules...
[✓] Standard module 'json' is available.
[✓] Standard module 'subprocess' is available.
[✓] Standard module 'platform' is available.
[✓] Standard module 'datetime' is available.
[✓] Standard module 'os' is available.
[✓] Standard module 'sys' is available.
📦 Checking and installing pip modules...
[✓] Module 'PyQt6' is already installed.
[✓] Module 'build' is already installed.
* Creating isolated environment: venv+pip...
* Installing packages in isolated environment:
- setuptools
- wheel
...
import torch
import torch.nn as nn
import numpy as np

# Columns: open, high, low, volume -> close (next row's open equals this row's close)
data = np.array([
    [1800.5, 1810.0, 1795.0, 1000, 1805.2],
    [1805.2, 1815.0, 1800.0, 1200, 1812.8],
    [1812.8, 1820.0, 1808.0, 1100, 1810.5],
    [1810.5, 1818.0, 1805.0, 1300, 1825.0],
    [1825.0, 1830.0, 1815.0, 1400, 1820.3],
    [1820.3, 1828.0, 1810.0, 1250, 1835.7]
])
X, y = torch.tensor(data[:, :4], dtype=torch.float32), torch.tensor(data[:, 4], dtype=torch.float32)
model = nn.Sequential(nn.Linear(4, 6), nn.ReLU(), nn.Linear(6, 4), nn.ReLU(), nn.Linear(4, 1))
optimizer = torch.optim.Adam(model.parameters())
loss_fn = nn.MSELoss()
for _ in range(3000):
    optimizer.zero_grad()
    y_pred = model(X).squeeze()
    loss = loss_fn(y_pred, y)
    loss.backward()
    optimizer.step()
prediction = model(torch.tensor([[1830.0, 1840.0, 1825.0, 1150]], dtype=torch.float32))
print("Predicted XAU/USD closing price:", round(prediction.item(), 2))
python torch_001.py
Predicted XAU/USD closing price: 1819.57
import tensorflow as tf
import numpy as np

# Same OHLCV data as in the PyTorch example
data = np.array([
    [1800.5, 1810.0, 1795.0, 1000, 1805.2],
    [1805.2, 1815.0, 1800.0, 1200, 1812.8],
    [1812.8, 1820.0, 1808.0, 1100, 1810.5],
    [1810.5, 1818.0, 1805.0, 1300, 1825.0],
    [1825.0, 1830.0, 1815.0, 1400, 1820.3],
    [1820.3, 1828.0, 1810.0, 1250, 1835.7]
])
X, y = data[:, :4], data[:, 4]
model = tf.keras.Sequential([
    tf.keras.layers.Dense(6, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(4, activation='relu'),
    tf.keras.layers.Dense(1)
])
model.compile(optimizer='adam', loss='mse')
model.fit(X, y, epochs=3000, verbose=0)
prediction = model.predict(np.array([[1830.0, 1840.0, 1825.0, 1150]]))
print("Predicted XAU/USD closing price:", round(prediction[0][0], 2))
python tf_001.py
2025-08-30 21:11:13.966066: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
C:\Python313\Lib\site-packages\google\protobuf\runtime_version.py:98: UserWarning: Protobuf gencode version 5.28.3 is exactly one major version older than the runtime version 6.31.1 at tensorflow/core/framework/attr_value.proto. Please update the gencode to avoid compatibility violations in the next runtime release.
...
Predicted XAU/USD closing price: 2.9
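The TensorFlow run predicts 2.9, far from the PyTorch result of 1819.57; the most likely cause is the unscaled inputs (volumes near 1000 dominate the gradients of the raw MSE loss). A minimal sketch of a fix, replacing the fit/predict lines of tf_001.py, standardizing features and target (the scaling is my addition, not in the original script):

# Standardize inputs and target, train on the scaled data, then invert the scaling
X_mean, X_std = X.mean(axis=0), X.std(axis=0)
y_mean, y_std = y.mean(), y.std()
model.fit((X - X_mean) / X_std, (y - y_mean) / y_std, epochs=3000, verbose=0)
pred = model.predict((np.array([[1830.0, 1840.0, 1825.0, 1150]]) - X_mean) / X_std)
print("Predicted XAU/USD closing price:", round(float(pred[0][0] * y_std + y_mean), 2))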
from sklearn.neural_network import MLPRegressor
import numpy as np

data = np.array([
    [1800.5, 1810.0, 1795.0, 1000, 1805.2],
    [1805.2, 1815.0, 1800.0, 1200, 1812.8],
    [1812.8, 1820.0, 1808.0, 1100, 1810.5],
    [1810.5, 1818.0, 1805.0, 1300, 1825.0],
    [1825.0, 1830.0, 1815.0, 1400, 1820.3],
    [1820.3, 1828.0, 1810.0, 1250, 1835.7]
])
X, y = data[:, :4], data[:, 4]
model = MLPRegressor(hidden_layer_sizes=(6, 4), max_iter=3000)
model.fit(X, y)
prediction = model.predict([[1830.0, 1840.0, 1825.0, 1150]])
print("Predicted XAU/USD closing price:", round(prediction[0], 2))
import os
import subprocess
from PyQt6.QtWidgets import (
    QApplication, QWidget, QVBoxLayout, QPushButton,
    QListWidget, QMessageBox
)

# files downloaded with: yt-dlp.exe -vU https://www.youtube.com/watch?v=xxxxxx -f bestvideo*+bestaudio/best
FOLDER_PATH = r"D:\Software"

class FFmpegMerger(QWidget):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("Merge Video + Audio with FFmpeg")
        self.resize(600, 400)
        self.layout = QVBoxLayout()
        self.file_list = QListWidget()
        self.process_button = QPushButton("Process to MP4")
        self.layout.addWidget(self.file_list)
        self.layout.addWidget(self.process_button)
        self.setLayout(self.layout)
        self.process_button.clicked.connect(self.process_files)
        self.populate_file_list()

    def populate_file_list(self):
        files = os.listdir(FOLDER_PATH)
        video_files = [f for f in files if f.endswith(".f401.mp4")]
        audio_files = [f for f in files if f.endswith(".f251-9.webm")]
        base_names = set(f.split(".f401.mp4")[0] for f in video_files)
        candidates = []
        for base in base_names:
            audio_name = f"{base}.f251-9.webm"
            output_name = f"{base}.mp4"
            if audio_name in audio_files and output_name not in files:
                candidates.append(base)
        for name in candidates:
            self.file_list.addItem(name)

    def process_files(self):
        for i in range(self.file_list.count()):
            base = self.file_list.item(i).text()
            video_path = os.path.join(FOLDER_PATH, f"{base}.f401.mp4")
            audio_path = os.path.join(FOLDER_PATH, f"{base}.f251-9.webm")
            output_path = os.path.join(FOLDER_PATH, f"{base}.mp4")
            cmd = [
                "ffmpeg",
                "-i", video_path,
                "-i", audio_path,
                "-c:v", "copy",
                "-c:a", "aac",
                "-strict", "experimental",
                output_path
            ]
            try:
                subprocess.run(cmd, check=True)
            except subprocess.CalledProcessError as e:
                QMessageBox.critical(self, "Error", f"Error processing {base}: {e}")
                return
        QMessageBox.information(self, "Success", "All files were processed successfully!")

if __name__ == "__main__":
    app = QApplication([])
    window = FFmpegMerger()
    window.show()
    app.exec()
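For each listed base name, the tool runs the equivalent of this command (shown with a hypothetical base name, clip), copying the video stream and re-encoding the audio to AAC:

ffmpeg -i clip.f401.mp4 -i clip.f251-9.webm -c:v copy -c:a aac -strict experimental clip.mp4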
pip install geoai-py
Successfully installed Flask-Caching-2.3.1 MarkupSafe-3.0.2 PySocks-1.7.1 PyYAML-6.0.2 absl-py-2.3.1 aenum-3.1.16 affine-2.4.0 aiohappyeyeballs-2.6.1 aiohttp-3.12.14 aiosignal-1.4.0 albucore-0.0.24 albumentations-2.0.8 aniso8601-10.0.1 annotated-types-0.7.0 antlr4-python3-runtime-4.9.3 anyio-4.9.0 anywidget-0.9.18 argon2-cffi-25.1.0 argon2-cffi-bindings-21.2.0 arrow-1.3.0 asttokens-3.0.0 beautifulsoup4-4.13.4 bitsandbytes-0.46.1 bleach-6.2.0 blinker-1.9.0 bqplot-0.12.45 branca-0.8.1 buildingregulariser-0.2.2 cachelib-0.13.0 cachetools-6.1.0 cffi-1.17.1 click-8.2.1 click-plugins-1.1.1.2 cligj-0.7.2 color-operations-0.2.0 comm-0.2.2 contextily-1.6.2 contourpy-1.3.2 cycler-0.12.1 datasets-4.0.0 decorator-5.2.1 defusedxml-0.7.1 dill-0.3.8 docstring-parser-0.17.0 duckdb-1.3.2 einops-0.8.1 eval-type-backport-0.2.2 ever-beta-0.5.1 executing-2.2.0 fastjsonschema-2.21.1 filelock-3.18.0 fiona-1.10.1 flask-3.1.1 flask-cors-6.0.1 flask-restx-1.3.0 folium-0.20.0 fonttools-4.59.0 fqdn-1.5.1 frozenlist-1.7.0 fsspec-2025.3.0 gdown-5.2.0 geoai-py-0.9.0 geographiclib-2.0 geojson-3.2.0 geopandas-1.1.1 geopy-2.4.1 gitdb-4.0.12 gitpython-3.1.44 grpcio-1.73.1 h11-0.16.0 httpcore-1.0.9 httpx-0.28.1 huggingface_hub-0.33.4 hydra-core-1.3.2 importlib-resources-6.5.2 ipyevents-2.0.2 ipyfilechooser-0.6.0 ipyleaflet-0.20.0 ipython-9.4.0 ipython-pygments-lexers-1.1.1 ipytree-0.2.2 ipyvue-1.11.2 ipyvuetify-1.11.3 ipywidgets-8.1.7 isoduration-20.11.0 itsdangerous-2.2.0 jedi-0.19.2 jinja2-3.1.6 joblib-1.5.1 jsonargparse-4.40.0 jsonnet-0.21.0 jsonpointer-3.0.0 jupyter-client-8.6.3 jupyter-core-5.8.1 jupyter-events-0.12.0 jupyter-leaflet-0.20.0 jupyter-server-2.16.0 jupyter-server-proxy-4.4.0 jupyter-server-terminals-0.5.3 jupyterlab-pygments-0.3.0 jupyterlab_widgets-3.0.15 kiwisolver-1.4.8 kornia-0.8.1 kornia_rs-0.1.9 leafmap-0.48.6 lightly-1.5.21 lightly_utils-0.0.2 lightning-2.5.2 lightning-utilities-0.14.3 localtileserver-0.10.6 mapclassify-2.10.0 maplibre-0.3.4 markdown-3.8.2 markdown-it-py-3.0.0 matplotlib-3.10.3 matplotlib-inline-0.1.7 mdurl-0.1.2 mercantile-1.2.1 mistune-3.1.3 morecantile-6.2.0 multidict-6.6.3 multiprocess-0.70.16 narwhals-1.48.0 nbclient-0.10.2 nbconvert-7.16.6 nbformat-5.10.4 numexpr-2.11.0 omegaconf-2.3.0 opencv-python-headless-4.12.0.88 overrides-7.7.0 overturemaps-0.15.0 pandas-2.3.1 pandocfilters-1.5.1 parso-0.8.4 planetary-computer-1.0.0 plotly-6.2.0 prettytable-3.16.0 prometheus-client-0.22.1 prompt_toolkit-3.0.51 propcache-0.3.2 psygnal-0.14.0 pure-eval-0.2.3 pyarrow-21.0.0 pycparser-2.22 pydantic-2.11.7 pydantic-core-2.33.2 pygments-2.19.2 pyogrio-0.11.0 pyparsing-3.2.3 pyproj-3.7.1 pystac-1.13.0 pystac-client-0.9.0 python-box-7.3.2 python-dateutil-2.9.0.post0 python-dotenv-1.1.1 python-json-logger-3.3.0 pytorch_lightning-2.5.2 pytz-2025.2 pywin32-311 pywinpty-2.0.15 pyzmq-27.0.0 rasterio-1.4.3 regex-2024.11.6 rfc3339-validator-0.1.4 rfc3986-validator-0.1.1 rich-14.0.0 rio-cogeo-5.4.2 rio-tiler-7.8.1 rioxarray-0.19.0 rtree-1.4.0 safetensors-0.5.3 scikit-learn-1.7.1 scooby-0.10.1 segmentation-models-pytorch-0.5.0 send2trash-1.8.3 sentry-sdk-2.33.0 server-thread-0.3.0 shapely-2.1.1 simpervisor-1.0.0 simsimd-6.5.0 six-1.17.0 smmap-5.0.2 sniffio-1.3.1 soupsieve-2.7 stack_data-0.6.3 stringzilla-3.12.5 tensorboard-2.20.0 tensorboard-data-server-0.7.2 tensorboardX-2.6.4 terminado-0.18.1 threadpoolctl-3.6.0 timm-1.0.17 tinycss2-1.4.0 tokenizers-0.21.2 torch-2.7.1 torchange-0.0.1 torchgeo-0.7.1 torchinfo-1.8.0 torchmetrics-1.7.4 torchvision-0.22.1 tornado-6.5.1 traitlets-5.14.3 traittypes-0.2.1 
transformers-4.53.2 types-python-dateutil-2.9.0.20250708 typeshed-client-2.8.2 typing-inspection-0.4.1 tzdata-2025.2 uri-template-1.3.0 uvicorn-0.35.0 wandb-0.21.0 wcwidth-0.2.13 webcolors-24.11.1 webencodings-0.5.1 websocket-client-1.8.0 werkzeug-3.1.3 whitebox-2.3.6 whiteboxgui-2.3.0 widgetsnbextension-4.0.14 xarray-2025.7.1 xxhash-3.5.0 xyzservices-2025.4.0 yarl-1.20.1
>>> import geoai
>>> dir(geoai)
['AgricultureFieldDelineator', 'Any', 'AutoConfig', 'AutoModelForMaskGeneration',
'AutoModelForMaskedImageModeling', 'AutoProcessor', 'BoundingBox', 'BuildingFootprintExtractor',
'CLIPSegForImageSegmentation', 'CLIPSegProcessor', 'CLIPSegmentation', 'CarDetector',
'ChangeDetection', 'CustomDataset', 'DetectionResult', 'Dict', 'ET', 'GroundedSAM', 'Image',
'Iterable', 'List', 'Map', 'MapLibre', 'MultiPolygon', 'NonGeoDataset', 'ObjectDetector',
'Optional', 'OrderedDict', 'ParkingSplotDetector', 'Path', 'Polygon', 'RandomRotation',
'ShipDetector', 'SolarPanelDetector', 'Tuple', 'Union', 'Window', '__author__',
'__builtins__', '__cached__', '__doc__', '__email__', '__file__', '__loader__', '__name__',
'__package__', '__path__', '__spec__', '__version__', 'adaptive_regularization',
'add_geometric_properties', 'analyze_vector_attributes', 'batch_vector_to_raster', 'bbox_to_xy',
'box', 'boxes_to_vector', 'calc_stats', 'change_detection', 'classify', 'classify_image',
'classify_images', 'clip_raster_by_bbox', 'coords_to_xy', 'create_overview_image',
'create_split_map', 'create_vector_data', 'csv', 'cv2', 'dataclass', 'deeplabv3_resnet50',
'dict_to_image', 'dict_to_rioxarray', 'download', 'download_file', 'download_model_from_hf',
'download_naip', 'download_overture_buildings', 'download_pc_stac_item', 'edit_vector_data',
'export_geotiff_tiles', 'export_geotiff_tiles_batch', 'export_tiles_to_geojson',
'export_training_data', 'extract', 'extract_building_stats', 'fasterrcnn_resnet50_fpn_v2',
'fcn_resnet50', 'features', 'geoai', 'geojson_to_coords', 'geojson_to_xy', 'get_device',
'get_instance_segmentation_model', 'get_model_config', 'get_model_input_channels',
'get_overture_data', 'get_raster_info', 'get_raster_info_gdal', 'get_raster_resolution',
'get_raster_stats', 'get_vector_info', 'get_vector_info_ogr', 'glob', 'gpd', 'hf',
'hf_hub_download', 'hybrid_regularization', 'image_segmentation', 'inspect_pth_file',
'install_package', 'instance_segmentation', 'instance_segmentation_batch',
'instance_segmentation_inference_on_geotiff', 'json', 'leafmap', 'logging', 'maplibregl',
'mapping', 'mask_generation', 'maskrcnn_resnet50_fpn', 'masks_to_vector', 'math',
'mosaic_geotiffs', 'ndimage', 'np', 'object_detection', 'object_detection_batch', 'orthogonalize',
'os', 'pc_collection_list', 'pc_item_asset_list', 'pc_stac_download', 'pc_stac_search', 'pd',
'pipeline', 'plot_batch', 'plot_images', 'plot_masks', 'plot_performance_metrics',
'plot_prediction_comparison', 'plt', 'print_raster_info', 'print_vector_info', 'raster_to_vector',
'raster_to_vector_batch', 'rasterio', 'read_pc_item_asset', 'read_raster', 'read_vector',
'region_groups', 'regularization', 'regularize', 'requests', 'rotate', 'rowcol_to_xy', 'rxr',
'segment', 'semantic_segmentation', 'semantic_segmentation_batch', 'set_proj_lib_path', 'shape',
'show', 'stack_bands', 'subprocess', 'sys', 'temp_file_path', 'time', 'torch', 'torchgeo', 'tqdm',
'train', 'train_MaskRCNN_model', 'train_classifier', 'train_instance_segmentation_model',
'train_segmentation_model', 'transform_bounds', 'try_common_architectures', 'utils',
'vector_to_geojson', 'vector_to_raster', 'view_image', 'view_pc_item', 'view_pc_items',
'view_raster', 'view_vector', 'view_vector_interactive', 'visualize_vector_by_attribute',
'warnings', 'write_colormap', 'xr']
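The listing shows the package's public surface; a quick sanity check using only names from the dir() output above (the no-argument call to get_device is an assumption on my part):

import geoai
print(geoai.__version__)      # version string exposed by the package
print(geoai.get_device())     # torch device helper listed above; no-arg call assumed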
import math

# Example parameters (assumed; the original snippet does not define them)
lobes = 5                               # number of star points
steps = 2 * lobes                       # alternate outer/inner vertices
outer_radius, inner_radius = 1.0, 0.5
center = (0.0, 0.0)

# Generate star polygon vertices
points_cw = []   # clockwise winding
points_ccw = []  # counter-clockwise winding, phase-shifted by pi/lobes
for i in range(steps):
    t = 2 * math.pi * i / steps
    r = outer_radius if i % 2 == 0 else inner_radius
    x_cw = center[0] + r * math.cos(t)
    y_cw = center[1] + r * math.sin(t)
    x_ccw = center[0] + r * math.cos(-t + math.pi / max(lobes, 1))
    y_ccw = center[1] + r * math.sin(-t + math.pi / max(lobes, 1))
    points_cw.append((x_cw, y_cw))
    points_ccw.append((x_ccw, y_ccw))
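A quick way to check both windings is to plot them (matplotlib assumed installed; this check is not part of the original snippet):

import matplotlib.pyplot as plt

for pts in (points_cw, points_ccw):
    xs, ys = zip(*(pts + pts[:1]))  # repeat the first vertex to close the loop
    plt.plot(xs, ys)
plt.gca().set_aspect("equal")
plt.show()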