
Monday, September 8, 2025

Python 3.13.0 : Script that detects Python modules and installs them - updated with fix.

This script scans a folder of .py Python scripts, identifies the external modules they import, skips the ones that are already available, and installs the missing ones with pip, using parallel threads for speed.
I used Copilot to help write it, and I tested it and it works well:
NOTE: I updated the script to also detect modules imported with "from ... import ..." and fixed another issue: it now checks whether a python module is already installed and skips it.
This script will try to install many python modules; I can improve it to handle these issues:
...some modules are part of the standard library, some scripts belong to another environment (see Blender 3D with the bpy module), and some packages ship modules with the same names. This can be solved with predefined lists of unique items.
import subprocess
import sys
import os
import shutil
import importlib.util
import re
import concurrent.futures
from typing import List, Tuple, Set

class ModuleManager:
    def __init__(self):
        self.modules: Set[str] = set()
        self.pip_path = self._get_pip_path()

    def _get_pip_path(self) -> str:
        possible_path = os.path.join(sys.exec_prefix, "Scripts", "pip.exe")
        return shutil.which("pip") or (possible_path if os.path.exists(possible_path) else None)

    def extract_imports_from_file(self, file_path: str) -> List[Tuple[str, str]]:
        imports = []
        try:
            with open(file_path, 'r', encoding='utf-8') as file:
                for line in file:
                    # Detect 'import module'
                    import_match = re.match(r'^\s*import\s+([a-zA-Z0-9_]+)(\s+as\s+.*)?$', line)
                    if import_match:
                        module = import_match.group(1)
                        imports.append((module, line.strip()))
                        continue
                    
                    # Detect 'from module import ...'
                    from_match = re.match(r'^\s*from\s+([a-zA-Z0-9_]+)\s+import\s+.*$', line)
                    if from_match:
                        module = from_match.group(1)
                        imports.append((module, line.strip()))
        except FileNotFoundError:
            print(f"❌ Fișierul {file_path} nu a fost găsit.")
        except Exception as e:
            print(f"❌ Eroare la citirea fișierului {file_path}: {e}")
        return imports

    def scan_directory_for_py_files(self, directory: str = '.') -> List[str]:
        py_files = []
        for root, _, files in os.walk(directory):
            for file in files:
                if file.endswith('.py'):
                    py_files.append(os.path.join(root, file))
        return py_files

    def collect_unique_modules(self, directory: str = '.') -> None:
        py_files = self.scan_directory_for_py_files(directory)
        all_imports = []
        with concurrent.futures.ThreadPoolExecutor() as executor:
            future_to_file = {executor.submit(self.extract_imports_from_file, file_path): file_path for file_path in py_files}
            for future in concurrent.futures.as_completed(future_to_file):
                imports = future.result()
                all_imports.extend(imports)
        
        for module, _ in all_imports:
            self.modules.add(module)

    def is_module_installed(self, module: str) -> bool:
        return importlib.util.find_spec(module) is not None

    def run_pip_install(self, module: str) -> bool:
        if not self.pip_path:
            print(f"❌ Nu am găsit pip pentru {module}.")
            return False
        try:
            subprocess.check_call([self.pip_path, "install", module])
            print(f"✅ Pachetul {module} a fost instalat cu succes.")
            return True
        except subprocess.CalledProcessError as e:
            print(f"❌ Eroare la instalarea pachetului {module}: {e}")
            return False

    def check_and_install_modules(self) -> None:
        def process_module(module):
            print(f"\n🔎 Verific dacă {module} este instalat...")
            if self.is_module_installed(module):
                print(f"✅ {module} este deja instalat.")
            else:
                print(f"📦 Instalez {module}...")
                self.run_pip_install(module)
                # Re-verifică după instalare
                if self.is_module_installed(module):
                    print(f"✅ {module} funcționează acum.")
                else:
                    print(f"❌ {module} nu funcționează după instalare.")

        with concurrent.futures.ThreadPoolExecutor() as executor:
            executor.map(process_module, self.modules)

def main():
    print("🔍 Verific pip...")
    manager = ModuleManager()
    if manager.pip_path:
        print(f"✅ Pip este disponibil la: {manager.pip_path}")
    else:
        print("⚠️ Pip nu este disponibil.")
        return

    directory = sys.argv[1] if len(sys.argv) > 1 else '.'
    print(f"\n📜 Scanez directorul {directory} pentru fișiere .py...")
    manager.collect_unique_modules(directory)
    
    if not manager.modules:
        print("⚠️ Nu s-au găsit module în importuri.")
        return
    
    print(f"\nModule unice detectate: {', '.join(manager.modules)}")
    manager.check_and_install_modules()

if __name__ == "__main__":
    main()
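To run it over a project folder, pass the folder path as the first argument; with no argument it scans the current directory. The script name below is just an assumption, use whatever name you saved it with:
python scan_and_install.py D:\PythonProjects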

Saturday, August 30, 2025

Python 3.13.0 : Predicted XAU/USD with torch.

Testing the torch python package
import torch
import torch.nn as nn
import numpy as np

data = np.array([
    [1800.5, 1810.0, 1795.0, 1000, 1805.2],
    [1805.2, 1815.0, 1800.0, 1200, 1812.8],
    [1812.8, 1820.0, 1808.0, 1100, 1810.5],
    [1810.5, 1818.0, 1805.0, 1300, 1825.0],
    [1825.0, 1830.0, 1815.0, 1400, 1820.3],
    [1820.3, 1828.0, 1810.0, 1250, 1835.7]
])

X, y = torch.tensor(data[:, :4], dtype=torch.float32), torch.tensor(data[:, 4], dtype=torch.float32)
model = nn.Sequential(nn.Linear(4, 6), nn.ReLU(), nn.Linear(6, 4), nn.ReLU(), nn.Linear(4, 1))
optimizer = torch.optim.Adam(model.parameters())
loss_fn = nn.MSELoss()
for _ in range(3000):
    optimizer.zero_grad()
    y_pred = model(X).squeeze()
    loss = loss_fn(y_pred, y)
    loss.backward()
    optimizer.step()
prediction = model(torch.tensor([[1830.0, 1840.0, 1825.0, 1150]], dtype=torch.float32))
print("Predicted XAU/USD closing price:", round(prediction.item(), 2))
The result is :
python torch_001.py
Predicted XAU/USD closing price: 1819.57

Python 3.13.0 : Predicted XAU/USD with tensorflow.

This is the source code :
import tensorflow as tf
import numpy as np

data = np.array([
    [1800.5, 1810.0, 1795.0, 1000, 1805.2],
    [1805.2, 1815.0, 1800.0, 1200, 1812.8],
    [1812.8, 1820.0, 1808.0, 1100, 1810.5],
    [1810.5, 1818.0, 1805.0, 1300, 1825.0],
    [1825.0, 1830.0, 1815.0, 1400, 1820.3],
    [1820.3, 1828.0, 1810.0, 1250, 1835.7]
])

X, y = data[:, :4], data[:, 4]
model = tf.keras.Sequential([
    tf.keras.layers.Dense(6, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(4, activation='relu'),
    tf.keras.layers.Dense(1)
])
model.compile(optimizer='adam', loss='mse')
model.fit(X, y, epochs=3000, verbose=0)
prediction = model.predict(np.array([[1830.0, 1840.0, 1825.0, 1150]]))
print("Predicted XAU/USD closing price:", round(prediction[0][0], 2))
The result is :
python tf_001.py
2025-08-30 21:11:13.966066: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
C:\Python313\Lib\site-packages\google\protobuf\runtime_version.py:98: UserWarning: Protobuf gencode version 5.28.3 is exactly one major version older than the runtime version 6.31.1 at tensorflow/core/framework/attr_value.proto. Please update the gencode to avoid compatibility violations in the next runtime release.
...
Predicted XAU/USD closing price: 2.9
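The predicted value of 2.9 is far below the ~1800 range of the training targets; with raw inputs and targets this large, the ReLU layers most likely stop learning and only the output bias moves. A minimal sketch of one common fix, standardizing both the features and the target and inverting the scaling at the end (my own variant, not from the original test):
import tensorflow as tf
import numpy as np

data = np.array([
    [1800.5, 1810.0, 1795.0, 1000, 1805.2],
    [1805.2, 1815.0, 1800.0, 1200, 1812.8],
    [1812.8, 1820.0, 1808.0, 1100, 1810.5],
    [1810.5, 1818.0, 1805.0, 1300, 1825.0],
    [1825.0, 1830.0, 1815.0, 1400, 1820.3],
    [1820.3, 1828.0, 1810.0, 1250, 1835.7]
])

X, y = data[:, :4], data[:, 4]

# standardize features and target so the network trains on well-scaled values
X_mean, X_std = X.mean(axis=0), X.std(axis=0)
y_mean, y_std = y.mean(), y.std()
Xs, ys = (X - X_mean) / X_std, (y - y_mean) / y_std

model = tf.keras.Sequential([
    tf.keras.layers.Dense(6, activation='relu', input_shape=(4,)),
    tf.keras.layers.Dense(4, activation='relu'),
    tf.keras.layers.Dense(1)
])
model.compile(optimizer='adam', loss='mse')
model.fit(Xs, ys, epochs=3000, verbose=0)

# scale the new sample the same way, then undo the target scaling
new_X = (np.array([[1830.0, 1840.0, 1825.0, 1150]]) - X_mean) / X_std
prediction = model.predict(new_X)[0][0] * y_std + y_mean
print("Predicted XAU/USD closing price:", round(float(prediction), 2))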

Wednesday, August 27, 2025

Python 3.13.0 : Predicted XAU/USD with MLPRegressor.

Testing the MLPRegressor from sklearn python package:
from sklearn.neural_network import MLPRegressor
import numpy as np

data = np.array([
    [1800.5, 1810.0, 1795.0, 1000, 1805.2],
    [1805.2, 1815.0, 1800.0, 1200, 1812.8],
    [1812.8, 1820.0, 1808.0, 1100, 1810.5],
    [1810.5, 1818.0, 1805.0, 1300, 1825.0],
    [1825.0, 1830.0, 1815.0, 1400, 1820.3],
    [1820.3, 1828.0, 1810.0, 1250, 1835.7]
])

X, y = data[:, :4], data[:, 4]
model = MLPRegressor(hidden_layer_sizes=(6, 4), max_iter=3000)
model.fit(X, y)

prediction = model.predict([[1830.0, 1840.0, 1825.0, 1150]])
print("Predicted XAU/USD closing price:", round(prediction[0], 2))
The answer is: Predicted XAU/USD closing price: 1836.68

Sunday, August 17, 2025

News : Django Hijack

With Django Hijack, admins can log in and work on behalf of other users without having to know their credentials.
It comes in two versions: v2.x and v3.x. The latest v3.x provides a security-first design, easy integration, customisation, out-of-the-box Django admin support and dark mode. It is a complete rewrite and all former APIs are broken.
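A minimal v3.x integration sketch, with the app, middleware and URL names taken from the quick-start in the official documentation (double check them against the version you install):
# settings.py
INSTALLED_APPS = [
    # ... your apps ...
    "hijack",
    "hijack.contrib.admin",  # optional: adds hijack buttons to the Django admin
]

MIDDLEWARE = [
    # ... the default middleware ...
    "hijack.middleware.HijackUserMiddleware",
]

# urls.py
from django.urls import include, path

urlpatterns = [
    # ... your other routes ...
    path("hijack/", include("hijack.urls")),
]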

Saturday, July 5, 2025

Python 3.13.5 : the groq python package tested with a basic example!

You can find more about Groq on the official Groq website.
Today I tested the Groq API with the basic example:
import os

from groq import Groq

client = Groq(
    api_key="gsk_..."
)

chat_completion = client.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "explain what groq is",
        }
    ],
    model="llama-3.3-70b-versatile",
)

print(chat_completion.choices[0].message.content)
The result of : "explain what groq is" is that :
Groq is a relatively new and innovative technology company that focuses on developing high-performance, artificial intelligence (AI) computing hardware and software solutions. Founded in 2016 by Jonathan Ross, a former Google engineer, Groq aims to provide faster, more efficient, and scalable AI computing capabilities to various industries. Groq's primary product is a tensor processing unit (TPU) chip designed specifically for machine learning (ML) and deep learning (DL) workloads. This TPU is optimized for high-performance, low-latency, and low-power consumption, making it suitable for applications that require rapid processing of vast amounts of data, such as: 1. **Computer Vision**: Image and video recognition, object detection, and tracking. 2. **Natural Language Processing (NLP)**: Text analysis, sentiment analysis, and language translation. 3. **Speech Recognition**: Voice command processing and speech-to-text applications. Groq's innovative architecture and design philosophy are centered around the following key aspects: 1. **High-Bandwidth Memory**: Groq's TPU features high-bandwidth memory, which enables fast data transfer and processing. 2. **Scalable Architecture**: The TPU is designed to scale horizontally, allowing for easy expansion to meet growing computational demands. 3. **Low-Latency**: Groq's architecture minimizes latency, ensuring fast response times and real-time processing capabilities. 4. **Software-Defined**: The TPU is software-defined, allowing for flexibility and customization to support a wide range of AI applications and frameworks. Groq's technology has the potential to accelerate AI adoption in various industries, including: 1. **Autonomous Vehicles**: Enhanced computer vision and sensor processing for safer and more efficient autonomous driving. 2. **Healthcare**: Faster medical image analysis and diagnosis, as well as improved personalized medicine and treatment planning. 3. **Financial Services**: Enhanced risk analysis, portfolio optimization, and fraud detection using AI-powered systems. While Groq is still a relatively new company, its innovative approach to AI computing has garnered significant attention and interest from the tech industry, investors, and potential customers. As AI continues to transform various aspects of our lives, Groq's technology is poised to play a significant role in shaping the future of artificial intelligence and machine learning.

Tuesday, July 1, 2025

Python 3.13.5 : use the jupyterlab, notebook and voila - part 001.

JupyterLab is the latest web-based interactive development environment for notebooks, code, and data. Its flexible interface allows users to configure and arrange workflows in data science, scientific computing, computational journalism, and machine learning. A modular design invites extensions to expand and enrich functionality.
Let's test with these python modules: jupyterlab, notebook and voila.
First, I install the jupyterlab python module with the pip tool:
pip install jupyterlab
Collecting jupyterlab
...
Successfully installed anyio-4.9.0 argon2-cffi-25.1.0 argon2-cffi-bindings-21.2.0 arrow-1.3.0 asttokens-3.0.0 async-lru-2.0.5
attrs-25.3.0 babel-2.17.0 bleach-6.2.0 cffi-1.17.1 comm-0.2.2 debugpy-1.8.14 defusedxml-0.7.1 executing-2.2.0 
fastjsonschema-2.21.1 fqdn-1.5.1 h11-0.16.0 httpcore-1.0.9 httpx-0.28.1 ipykernel-6.29.5 ipython-9.4.0 
ipython-pygments-lexers-1.1.1 isoduration-20.11.0 jedi-0.19.2 json5-0.12.0 jsonpointer-3.0.0 jsonschema-4.24.0 
jsonschema-specifications-2025.4.1 jupyter-client-8.6.3 jupyter-core-5.8.1 jupyter-events-0.12.0 jupyter-lsp-2.2.5 
jupyter-server-2.16.0 jupyter-server-terminals-0.5.3 jupyterlab-4.4.4 jupyterlab-pygments-0.3.0 jupyterlab-server-2.27.3 
matplotlib-inline-0.1.7 mistune-3.1.3 nbclient-0.10.2 nbconvert-7.16.6 nbformat-5.10.4 nest-asyncio-1.6.0 notebook-shim-0.2.4
overrides-7.7.0 pandocfilters-1.5.1 parso-0.8.4 platformdirs-4.3.8 prometheus-client-0.22.1 prompt_toolkit-3.0.51 
psutil-7.0.0 pure-eval-0.2.3 pycparser-2.22 python-json-logger-3.3.0 pywin32-310 pywinpty-2.0.15 pyyaml-6.0.2 pyzmq-27.0.0
referencing-0.36.2 rfc3339-validator-0.1.4 rfc3986-validator-0.1.1 rpds-py-0.26.0 send2trash-1.8.3 sniffio-1.3.1 
stack_data-0.6.3 terminado-0.18.1 tinycss2-1.4.0 tornado-6.5.1 traitlets-5.14.3 types-python-dateutil-2.9.0.20250516 
uri-template-1.3.0 wcwidth-0.2.13 webcolors-24.11.1 webencodings-0.5.1 websocket-client-1.8.0
Next, install the notebook python module:
pip install notebook
Collecting notebook
...
Installing collected packages: notebook
Successfully installed notebook-7.4.4
The voila python package will help us to turn notebooks into web applications with graphical user interfaces:
pip install voila
Collecting voila
...
Successfully installed voila-0.5.8 websockets-15.0.1
The voila package needs the ipywidgets python package to work:
pip install ipywidgets
Collecting ipywidgets
...
Successfully installed ipywidgets-8.1.7 jupyterlab_widgets-3.0.15 widgetsnbextension-4.0.14
Let's start the Jupyter Notebook tool with this command:
jupyter notebook
I used a default python example with a slider:
import ipywidgets as widgets
from IPython.display import display

slider = widgets.IntSlider(value=5, min=0, max=10)
display(slider)
This command will serve the notebook with the slider on the web using the voila tool:
voila test.ipynb
The result is this:
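A slightly richer notebook cell that voila can serve in the same way binds a label to the slider value; this is just a quick sketch using the same ipywidgets API as above:
import ipywidgets as widgets
from IPython.display import display

slider = widgets.IntSlider(value=5, min=0, max=10, description="Value")
label = widgets.Label()

def on_change(change):
    # update the label every time the slider value changes
    label.value = f"Slider is at {change['new']}"

slider.observe(on_change, names="value")
on_change({"new": slider.value})  # set the initial text
display(slider, label)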

Friday, June 27, 2025

Python 3.13.5 : the manim python module - part 001.

I used this package a few days ago and now I will write about how it can be used.
Update: a new release of pip is available (25.0.1 -> 25.1.1), run this command:
python.exe -m pip install --upgrade pip
The documentation can be found on the official website.
pip install manim
Collecting manim
  Downloading manim-0.19.0-py3-none-any.whl.metadata (11 kB)
...
Successfully installed Pillow-11.2.1 Pygments-2.19.1 audioop-lts-0.2.1 av-13.1.0 beautifulsoup4-4.13.4 click-8.2.1 cloup-3.0.7 colorama-0.4.6 decorator-5.2.1 glcontext-3.0.0 isosurfaces-0.1.2 manim-0.19.0 manimpango-0.6.0 mapbox-earcut-1.0.3 markdown-it-py-3.0.0 mdurl-0.1.2 moderngl-5.12.0 moderngl-window-3.1.1 networkx-3.5 numpy-2.3.0 pycairo-1.28.0 pydub-0.25.1 pyglet-2.1.6 pyglm-2.8.2 rich-14.0.0 scipy-1.15.3 screeninfo-0.8.1 skia-pathops-0.8.0.post2 soupsieve-2.7 srt-3.5.3 svgelements-1.9.6 tqdm-4.67.1 typing-extensions-4.14.0 watchdog-6.0.0
Let's see how it can be used, starting with the help for the render command:
python.exe -m manim render --help
Manim Community v0.19.0

Usage: python -m manim render ...
Let's use this source code:
from manim import *

class AdvancedAnimation(Scene):
    def construct(self):
        # Scene 1: Introduction
        title = Text("Advanced Animation with Manim").scale(0.76)
        self.play(FadeIn(title))
        self.wait(2)

        # Scene 2: Custom Animation
        circle = Circle().set_fill(color=BLUE, opacity=0.5)
        square = Square().set_fill(color=RED, opacity=0.5)
        self.add(circle, square)
        self.play(
            Rotate(circle, angle=TAU),
            Rotate(square, angle=-TAU),
            run_time=2,
            rate_func=linear
        )
        self.wait()

        # Scene 3: Text Animation
        text = Text("This is a custom text animation", font_size=40).to_edge(UP)
        self.play(Write(text), run_time=2)
        self.wait()

        # Scene 4: Shapes Manipulation
        triangle = Triangle().shift(RIGHT * 2)
        self.play(GrowFromCenter(triangle), run_time=1.5)
        self.wait()

        # Scene 5: Transition to next scene
        self.play(Uncreate(triangle), FadeOut(text))

        # Scene 6: Final Animation
        final_text = Text("This is the end of our animation", font_size=50).to_edge(DOWN)
        self.play(FadeIn(final_text), run_time=1.5)
        self.wait(2)

# Run the animation
AdvancedAnimation()
Use this command to render:
python.exe -m manim render manim_test_001.py AdvancedAnimation -p
Manim Community v0.19.0

[06/27/25 19:52:43] INFO     Animation 0 : Partial movie file written in 'D:\PythonProjects\manim_projects\media\videos\manim_test_001\1080p60\partial_movie_files\AdvancedAnimation\3977891868_355746014_223132457.mp4'    scene_file_writer.py:588
...
[06/27/25 19:53:56] INFO     Previewed File at: 'D:\PythonProjects\manim_projects\media\videos\manim_test_001\1080p60\AdvancedAnimation.mp4'    file_ops.py:237
The result comes with many files, see this 1080p60 video result:
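To iterate faster during development, the render command also accepts quality flags (they are listed by manim render --help); for example, -pql should preview a low-quality render instead of the full 1080p60 one:
python.exe -m manim render manim_test_001.py AdvancedAnimation -pql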

Tuesday, June 24, 2025

Python 3.13.5 : Get bookmarks from Edge browser with python.

Today I tested with these python modules: json and pathlib.
This python script will get all bookmarks from Edge browser:
import json
from pathlib import Path

bookmark_path = Path.home() / "AppData/Local/Microsoft/Edge/User Data/Default/Bookmarks"

with open(bookmark_path, "r", encoding="utf-8") as f:
    data = json.load(f)

# Example: list all bookmark titles
def extract_bookmarks(bookmark_node):
    bookmarks = []
    if "children" in bookmark_node:
        for child in bookmark_node["children"]:
            bookmarks.extend(extract_bookmarks(child))
    elif bookmark_node.get("type") == "url":
        bookmarks.append((bookmark_node["name"], bookmark_node["url"]))
    return bookmarks

all_bookmarks = extract_bookmarks(data["roots"]["bookmark_bar"])
for name, url in all_bookmarks:
    print(f"{name}: {url}")

Friday, June 20, 2025

Python 3.13.5 : testing with the flask, requests and playwright python modules.

Today, some testing with these python modules: flask, requests and playwright.
I used pip to install flask python package:
pip install flask
Collecting flask
  Downloading flask-3.1.1-py3-none-any.whl.metadata (3.0 kB)
...
Installing collected packages: markupsafe, itsdangerous, blinker, werkzeug, jinja2, flask
Successfully installed blinker-1.9.0 flask-3.1.1 itsdangerous-2.2.0 jinja2-3.1.6 markupsafe-3.0.2 werkzeug-3.1.3
pip install requests
...
Installing collected packages: urllib3, idna, charset_normalizer, certifi, requests
Successfully installed certifi-2025.6.15 charset_normalizer-3.4.2 idna-3.10 requests-2.32.4 urllib3-2.5.0
pip install playwright
Collecting playwright
...
Installing collected packages: pyee, greenlet, playwright
Successfully installed greenlet-3.2.3 playwright-1.52.0 pyee-13.0.0
...
playwright install
Downloading Chromium 136.0.7103.25 ...
This will download a lot of files and then it will install the Playwright browsers.
The first script is a simple one that will try to read the Cloudflare header for the default IP:
from flask import Flask, request

app = Flask(__name__)

@app.route("/")
def detecteaza_ipuri():
    ip_client = request.headers.get('CF-Connecting-IP', 'Unknown')
    ip_cloudflare = request.remote_addr
    return (
        f"Real visitor IP: {ip_client}\n"
        f"Cloudflare IP (as seen by the server): {ip_cloudflare}"
    )

if __name__ == "__main__":
    app.run(debug=True)
The next one will check more: whether the forwarding IP is part of Cloudflare's published IPv4 ranges.
from flask import Flask, request
import requests
import ipaddress

app = Flask(__name__)

def este_ip_cloudflare(ip):
    try:
        raspuns = requests.get("https://www.cloudflare.com/ips-v4")
        raspuns.raise_for_status()
        subneturi = raspuns.text.splitlines()

        for subnet in subneturi:
            if ipaddress.ip_address(ip) in ipaddress.ip_network(subnet):
                return True
        return False
    except Exception as e:
        return f"Eroare la verificarea IP-ului Cloudflare: {e}"

@app.route("/")
def detecteaza_ipuri():
    ip_client = request.headers.get('CF-Connecting-IP', 'Unknown')
    ip_cloudflare = request.remote_addr
    rezultat = este_ip_cloudflare(ip_cloudflare)

    return (
        f"Real visitor IP: {ip_client}\n"
        f"Cloudflare IP (forwarder): {ip_cloudflare}\n"
        f"Is the IP from the Cloudflare network? {'YES' if rezultat == True else 'NO' if rezultat == False else rezultat}"
    )

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)
Now, the script with the requests python module:
import requests

url = "https://cobalt.tools"
headers = {
    "User-Agent": "Mozilla/5.0",  # Simulează un browser real
}

try:
    r = requests.get(url, headers=headers, timeout=10)
    content = r.text.lower()

    print(f"Cod răspuns HTTP: {r.status_code}")

    if "cloudflare" in content or "cf-ray" in content or "attention required" in content:
        print("Cloudflare a intermediat cererea sau a blocat-o cu o pagină specială.")
    else:
        print("Cererea a fost servită normal.")
except Exception as e:
    print(f"Eroare la conexiune: {e}")
The last one will use the playwright python module:
import sys
import re
from urllib.parse import urlparse
from pathlib import Path
from playwright.sync_api import sync_playwright

def converteste_url_in_nume_fisier(url):
    parsed = urlparse(url)
    host = parsed.netloc.replace('.', '_')
    path = parsed.path.strip('/').replace('/', '_')
    if not path:
        path = 'index'
    return f"{host}_{path}.txt"

if len(sys.argv) != 2:
    print("Utilizare: python script.py https://exemplu.com")
    sys.exit(1)

url = sys.argv[1]
fisier_output = converteste_url_in_nume_fisier(url)

with sync_playwright() as p:
    browser = p.chromium.launch(headless=True)
    pagina = browser.new_page()
    pagina.goto(url, wait_until='networkidle')
    continut = pagina.content()
    Path(fisier_output).write_text(continut, encoding='utf-8')
    browser.close()

print(f"Conținutul a fost salvat în: {fisier_output}")
This will create a file with the source code of web page.

Thursday, June 19, 2025

News : UV - fast Python package and project manager.

An extremely fast Python package and project manager, written in Rust.
cd uv_projects

uv_projects>uv init hello-world
Initialized project `hello-world` at `D:\PythonProjects\uv_projects\hello-world`

uv_projects>cd hello-world

uv_projects\hello-world>uv run main.py
Using CPython 3.13.5 interpreter at: C:\Python3135\python.exe
Creating virtual environment at: .venv
Hello from hello-world!
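To add a dependency to the project, uv has an add command that updates pyproject.toml and the lock file (based on the uv documentation, see uv add --help):
uv_projects\hello-world>uv add requests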

Saturday, February 8, 2025

Python 3.13.0rc1 : Testing python with Ollama local install.

I was very busy with development and testing for about two weeks, my laptop was overloaded and I was working hard... Today I managed to test local background clipping (cutting out the background of images) on my laptop, with a local Ollama installation called from a Python script through a Python module. I also used Microsoft's Copilot artificial intelligence for the Python code and it works well even though it is not theoretically specialized in development. The source code is quite large, but the result is very good and fast:
import subprocess
import os
import json
from PIL import Image, ImageOps

class OllamaProcessor:
    def __init__(self, config_file):
        self.config_file = config_file
        self.model_methods = self.load_config()

    def load_config(self):
        try:
            with open(self.config_file, 'r') as file:
                config = json.load(file)
            print("Configuration loaded successfully.")
            return config
        except FileNotFoundError:
            print(f"Configuration file {self.config_file} not found.")
            raise
        except json.JSONDecodeError:
            print(f"Error decoding JSON from the configuration file {self.config_file}.")
            raise

    def check_ollama(self):
        try:
            result = subprocess.run(["ollama", "--version"], capture_output=True, text=True, check=True)
            print("Ollama is installed. Version:", result.stdout)
        except subprocess.CalledProcessError as e:
            print("Ollama is not installed or not found in PATH. Ensure it's installed and accessible.")
            raise
... 
Here is the result obtained after the run finished in the command line:
python ollama_test_001.py
Configuration file ollama_config.json created successfully.
Configuration loaded successfully.
Ollama is installed. Version: ollama version is 0.5.7

Available models: ['NAME']
pulling manifest
pulling 170370233dd5... 100% ▕██████████████▏ 4.1 GB
pulling 72d6f08a42f6... 100% ▕██████████████▏ 624 MB
pulling 43070e2d4e53... 100% ▕██████████████▏  11 KB
pulling c43332387573... 100% ▕██████████████▏   67 B
pulling ed11eda7790d... 100% ▕██████████████▏   30 B
pulling 7c658f9561e5... 100% ▕██████████████▏  564 B
verifying sha256 digest
writing manifest
success
Model llava pulled successfully for method process_images_in_folder.
Some "Command failed ..." but the result is cutting well and it has transparency !

Thursday, January 30, 2025

Blender 3D and python scripting - part 032.

Today I created an addon for Blender version 4.3.2 that lets me select two folders: it renders the 3D objects from the first folder and saves 512px sample renderings of them to the second folder.
This is what the addon installed in Blender 3D looks like:
Here's what the source code of this addon looks like:
bl_info = {
    "name": "3D File Renderer by catafest",
    "blender": (4, 3, 2),
    "category": "Object",
    "author": "Catalin George Festila\n"
              "nicknames: catafest and mythcat\n"
              "country: Romania\n"
              "mail: catafest [at] yahoo.com",
    "version": (1, 0),
    "blender": (2, 80, 0),
    "location": "View3D > UI > 3D File Renderer",
    "description": "Addon for rendering 3D files",
    "warning": "",
    "doc_url": "https://github.com/catafest",
    "tracker_url": "https://github.com/catafest/issues",
    "support": "COMMUNITY",
}

import bpy
import os

class FileRendererProperties(bpy.types.PropertyGroup):
    input_directory: bpy.props.StringProperty(
        name="Input Directory",
        description="Directory containing 3D files",
        default="",
        maxlen=1024,
        subtype='DIR_PATH'
    )
    output_directory: bpy.props.StringProperty(
        name="Output Directory",
        description="Directory to save rendered images",
        default="",
        maxlen=1024,
        subtype='DIR_PATH'
    )

class RENDER_OT_files(bpy.types.Operator):
    bl_idname = "render.files"
    bl_label = "Start render 3D files for all files"
    
    def execute(self, context):
        input_directory = context.scene.file_renderer_props.input_directory
        output_directory = context.scene.file_renderer_props.output_directory
        
        if not input_directory or not output_directory:
            self.report({'ERROR'}, "Input and Output directories must be set.")
            return {'CANCELLED'}
        
        if not os.path.exists(output_directory):
            os.makedirs(output_directory)
        
        def render_file(file_path, output_path):
            try:
                bpy.ops.wm.read_factory_settings(use_empty=True)
                ext = os.path.splitext(file_path)[1].lower()
                if ext == ".glb":
                    bpy.ops.import_scene.gltf(filepath=file_path)
                elif ext == ".obj":
                    bpy.ops.import_scene.obj(filepath=file_path)
                elif ext == ".fbx":
                    bpy.ops.import_scene.fbx(filepath=file_path)
                else:
                    raise ValueError("Unsupported file format")
                
                bpy.ops.object.camera_add(location=(0, -3, 1.5), rotation=(1.1, 0, 0))
                camera = bpy.context.scene.objects['Camera']
                bpy.context.scene.camera = camera
                bpy.ops.object.light_add(type='POINT', location=(0, -3, 3))
                light = bpy.context.view_layer.objects.active
                light.data.energy = 1000
                
                bpy.context.scene.render.resolution_x = 512
                bpy.context.scene.render.resolution_y = 512
                bpy.context.scene.render.filepath = output_path
                bpy.ops.render.render(write_still=True)
            except Exception as e:
                # Generate a red image with "BAD FILE" text using Blender
                bpy.ops.wm.read_factory_settings(use_empty=True)
                bpy.ops.mesh.primitive_plane_add(size=2)
                plane = bpy.context.active_object
                mat = bpy.data.materials.new(name="BadFileMaterial")
                mat.diffuse_color = (1, 0, 0, 1)  # Red
                plane.data.materials.append(mat)
                
                # Add "BAD FILE" text
                bpy.ops.object.text_add(location=(0, 0, 0.1))
                text_obj = bpy.context.active_object
                text_obj.data.body = "BAD FILE"
                text_obj.data.size = 0.5
                text_obj.data.align_x = 'CENTER'
                text_obj.data.align_y = 'CENTER'
                text_obj.rotation_euler = (1.5708, 0, 0)
                
                # Set camera and light
                bpy.ops.object.camera_add(location=(0, -3, 1.5), rotation=(1.1, 0, 0))
                camera = bpy.context.scene.objects['Camera']
                bpy.context.scene.camera = camera
                bpy.ops.object.light_add(type='POINT', location=(0, -3, 3))
                light = bpy.context.view_layer.objects.active
                light.data.energy = 1000
                
                bpy.context.scene.render.resolution_x = 512
                bpy.context.scene.render.resolution_y = 512
                bpy.context.scene.render.filepath = output_path
                bpy.ops.render.render(write_still=True)
        
        for filename in os.listdir(input_directory):
            if filename.lower().endswith((".glb", ".obj", ".fbx")):
                file_path = os.path.join(input_directory, filename)
                output_path = os.path.join(output_directory, os.path.splitext(filename)[0] + ".png")
                render_file(file_path, output_path)
        
        self.report({'INFO'}, "Rendering of files is complete.")
        return {'FINISHED'}

class ABOUT_OT_dialog(bpy.types.Operator):
    bl_idname = "wm.about_dialog"
    bl_label = "About this addon"
    
    def execute(self, context):
        return context.window_manager.invoke_props_dialog(self)
    
    def draw(self, context):
        layout = self.layout
        layout.label(text="3D File Renderer by catafest")
        layout.label(text="Author: Catalin George Festila")
        layout.label(text="Nicknames: catafest and mythcat")
        layout.label(text="Country: Romania")
        layout.label(text="Email: catafest [at] yahoo.com")
        layout.operator("wm.url_open", text="LinkedIn").url = "https://www.linkedin.com/in/c%C4%83t%C4%83lin-george-fe%C8%99til%C4%83-05780a67"
        layout.operator("wm.url_open", text="Author Site").url = "https://sites.google.com/view/festila-george-catalin"
        layout.operator("wm.url_open", text="catafest GitHub").url = "https://github.com/catafest"
        layout.operator("wm.url_open", text="catafest-work GitHub").url = "https://github.com/catafest-work"

class FileRendererPanel(bpy.types.Panel):
    bl_label = "3D File Renderer by catafest"
    bl_idname = "OBJECT_PT_file_renderer"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = 'File Renderer'
    
    def draw(self, context):
        layout = self.layout
        scene = context.scene
        file_renderer_props = scene.file_renderer_props
        
        layout.prop(file_renderer_props, "input_directory")
        layout.prop(file_renderer_props, "output_directory")
        
        # Styling the render button
        render_button = layout.operator("render.files", text="Start render 3D files for all files")
        
        layout.separator()
        
        layout.operator("wm.about_dialog", text="About this addon")

def register():
    bpy.utils.register_class(FileRendererProperties)
    bpy.utils.register_class(RENDER_OT_files)
    bpy.utils.register_class(ABOUT_OT_dialog)
    bpy.utils.register_class(FileRendererPanel)
    bpy.types.Scene.file_renderer_props = bpy.props.PointerProperty(type=FileRendererProperties)

def unregister():
    bpy.utils.unregister_class(FileRendererProperties)
    bpy.utils.unregister_class(RENDER_OT_files)
    bpy.utils.unregister_class(ABOUT_OT_dialog)
    bpy.utils.unregister_class(FileRendererPanel)
    del bpy.types.Scene.file_renderer_props

if __name__ == "__main__":
    register()

Tuesday, January 28, 2025

Blender 3D and python scripting - part 031.

This python script for Blender will select vertices based on a boolean list ...
This is the result with Blender version 4.3 :
This is the python script:
import bpy, bmesh

obj = bpy.context.active_object
me = obj.data

bpy.ops.object.mode_set(mode = 'EDIT') 
bpy.ops.mesh.select_mode(type="VERT")


# access the edit-mesh data (not used further in this example)
bm = bmesh.from_edit_mesh(obj.data)
# one boolean per vertex of the default cube (8 vertices): True selects, False deselects
selected = [False, False, True, True, True, True, True, True]
verts = [vert for vert in bpy.context.active_object.data.vertices if vert.select]
all = [vert for vert in bpy.context.active_object.data.vertices]
print("selected:", len(verts))
print("all:", len(all))
# the select flags can only be written while in object mode
bpy.ops.object.mode_set(mode='OBJECT')
me.vertices.foreach_set("select", selected)
bpy.ops.object.mode_set(mode='EDIT')
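The boolean list above is hard-coded for the default cube with its 8 vertices; a variant of the same idea (just a sketch) builds the list from a condition, so it works on any mesh, here selecting the vertices with a positive local Z coordinate:
import bpy

obj = bpy.context.active_object
me = obj.data

# build one boolean per vertex: True selects it, False deselects it
bpy.ops.object.mode_set(mode='OBJECT')
selected = [vert.co.z > 0.0 for vert in me.vertices]
me.vertices.foreach_set("select", selected)
bpy.ops.object.mode_set(mode='EDIT')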

Thursday, January 23, 2025

Blender 3D and python scripting - part 030.

Today I will show a python script for Blender 3D that creates a 3D object and one material.
This is the final result of this python script:
This is the python script:
# give Python access to Blender's functionality
import bpy

# extend Python's math functionality
import math


# extend Python functionality to generate random numbers
import random


def partially_clean_the_scene():
    # select all object in the scene
    bpy.ops.object.select_all(action="SELECT")

    # delete all selected objects in the scene
    bpy.ops.object.delete()

    # make sure we remove data that was connected to the objects we just deleted
    bpy.ops.outliner.orphans_purge(do_local_ids=True, do_linked_ids=True, do_recursive=True)


def create_noise_mask(material):
    """Add a set of nodes to create a noise mask using:
    * Texture Coordinate node
    * Mapping node
    * Noise Texture node
    * Color Ramp node
    """
    node_location_x_step = 300
    node_location_x = -node_location_x_step

    # create a Color Ramp node
    # https://docs.blender.org/api/current/bpy.types.ShaderNodeValToRGB.html
    color_ramp_node = material.node_tree.nodes.new(type="ShaderNodeValToRGB")
    color_ramp_node.color_ramp.elements[0].position = 0.45
    color_ramp_node.color_ramp.elements[1].position = 0.5
    color_ramp_node.location.x = node_location_x
    node_location_x -= node_location_x_step

    # create a Noise Texture node
    # https://docs.blender.org/api/current/bpy.types.ShaderNodeTexNoise.html#bpy.types.ShaderNodeTexNoise
    noise_texture_node = material.node_tree.nodes.new(type="ShaderNodeTexNoise")
    noise_texture_node.inputs["Scale"].default_value = random.uniform(1.0, 20.0)
    noise_texture_node.location.x = node_location_x
    node_location_x -= node_location_x_step

    # create a Mapping node
    # https://docs.blender.org/api/current/bpy.types.ShaderNodeMapping.html#bpy.types.ShaderNodeMapping
    mapping_node = material.node_tree.nodes.new(type="ShaderNodeMapping")
    mapping_node.inputs["Rotation"].default_value.x = math.radians(random.uniform(0.0, 360.0))
    mapping_node.inputs["Rotation"].default_value.y = math.radians(random.uniform(0.0, 360.0))
    mapping_node.inputs["Rotation"].default_value.z = math.radians(random.uniform(0.0, 360.0))
    mapping_node.location.x = node_location_x
    node_location_x -= node_location_x_step

    # create a Texture Coordinate node
    texture_coordinate_node = material.node_tree.nodes.new(type="ShaderNodeTexCoord")
    texture_coordinate_node.location.x = node_location_x

    # connect the nodes
    # https://docs.blender.org/api/current/bpy.types.NodeTree.html#bpy.types.NodeTree
    # https://docs.blender.org/api/current/bpy.types.NodeLinks.html#bpy.types.NodeLinks
    material.node_tree.links.new(noise_texture_node.outputs["Color"], color_ramp_node.inputs["Fac"])
    material.node_tree.links.new(mapping_node.outputs["Vector"], noise_texture_node.inputs["Vector"])
    material.node_tree.links.new(texture_coordinate_node.outputs["Generated"], mapping_node.inputs["Vector"])

    return color_ramp_node


def create_material(name):
    # create new material
    material = bpy.data.materials.new(name=name)
    # enable creating a material via nodes
    material.use_nodes = True

    # get a reference to the Principled BSDF shader node
    principled_bsdf_node = material.node_tree.nodes["Principled BSDF"]

    # set the base color of the material
    principled_bsdf_node.inputs["Base Color"].default_value = (0.8, 0.120827, 0.0074976, 1)

    # set the metallic value of the material
    principled_bsdf_node.inputs["Metallic"].default_value = 1.0

    color_ramp_node = create_noise_mask(material)

    material.node_tree.links.new(color_ramp_node.outputs["Color"], principled_bsdf_node.inputs["Roughness"])

    return material

def add_mesh():
    # create an ico sphere
    bpy.ops.mesh.primitive_ico_sphere_add(subdivisions=5)
    # shade smooth
    bpy.ops.object.shade_smooth()
    # get reference to mesh object
    mesh_obj = bpy.context.active_object

    return mesh_obj

def main():

    partially_clean_the_scene()

    name = "my_generated_material"
    material = create_material(name)

    mesh_obj = add_mesh()

    # apply the material to the mesh object
    mesh_obj.data.materials.append(material)

main()

Tuesday, January 7, 2025

Python 3.13.0rc1 : Simple convert all webp files from folder.

Today, a simple script is used to convert WEBP to PNG files from a defined folder.
The script walks the given folder path; each WEBP file is opened and saved as a PNG file.
import os
import sys
from PIL import Image

def convert_webp_to_png(directory):
    for root, dirs, files in os.walk(directory):
        for file in files:
            if file.endswith(".webp"):
                webp_path = os.path.join(root, file)
                png_path = os.path.splitext(webp_path)[0] + ".png"
                with Image.open(webp_path) as img:
                    img.save(png_path, "PNG")
                print(f"webp to png file: {webp_path} -> {png_path}")

if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("How to use: python convert.py path_to_folder_with_webp_files")
        sys.exit(1)

    directory = sys.argv[1]
    if not os.path.isdir(directory):
        print(f"{directory} folder is not valid.")
        sys.exit(1)

    convert_webp_to_png(directory)
    print("Finished !")

Sunday, November 24, 2024

Python 3.13.0 : emoji symbols with PIL.

Today I want to use emoji symbols and I wrote this python script:
from PIL import Image, ImageDraw, ImageFont
import os

# Font size and image dimensions
font_size = 88
width = 640
height = 480

# Use Symbola.ttf from current directory
font_path = "Symbola.ttf"

# Create image
img = Image.new('RGB', (width, height), color='white')
draw = ImageDraw.Draw(img)

# Get font
font = ImageFont.truetype(font_path, font_size)

# Emoji matrix
emoji_matrix = [
    ['😀', '😁', '😂', '🤣', '😃'],
    ['😄', '😅', '😆', '😇', '😈'],
    ['😉', '😊', '😋', '😌', '😍'],
    ['😎', '😏', '😐', '😑', '😒']
]

# Calculate spacing
x_spacing = font_size + 10
y_spacing = font_size + 10

# Calculate starting position to center the grid
start_x = (width - (len(emoji_matrix[0]) * x_spacing)) // 2
start_y = (height - (len(emoji_matrix) * y_spacing)) // 2

# Draw emojis
for i, row in enumerate(emoji_matrix):
    for j, emoji in enumerate(row):
        x = start_x + (j * x_spacing)
        y = start_y + (i * y_spacing)
        draw.text((x, y), emoji, font=font, fill='black')

# Save the image
img.save('emoji_art.png')
print("Emoji art has been created successfully! Check emoji_art.png")
The result image named emoji_art.png is this:

Wednesday, November 20, 2024

Python 3.13.0 : generates multiple deformed polygonal shapes.

Today I created this source code in python that generates eight random convex polygons. The idea was to create sprites for a 2D game: snowballs, boulders, or similar objects... Obviously, I also used the Sonnet 3.5 artificial intelligence. You can find the source code on my pagure account on Fedora.
#!/usr/bin/env python3
"""
SVG Polygon Generator

This script generates multiple deformed polygonal shapes and saves them as separate SVG files.
Each polygon maintains convex properties while having controlled random deformations.

Features:
    - Generates 8 unique polygonal shapes
    - Controls deformation through radial and angular factors
    - Maintains convex properties
    - Exports each shape to a separate SVG file
    - Uses random colors for visual distinction

Usage:
    python generate_svgs.py

Output:
    Creates 8 SVG files named 'polygon_1.svg' through 'polygon_8.svg'
"""

from lxml import etree
import random
import math
from pathlib import Path


def create_svg_root():
    """Create and return a base SVG root element with standard attributes."""
    root = etree.Element("svg")
    root.set("width", "500")
    root.set("height", "500")
    root.set("xmlns", "http://www.w3.org/2000/svg")
    return root


def calculate_points(center_x: float, center_y: float, radius: float, 
                    num_sides: int, deform_factor: float) -> list:
    """
    Calculate polygon points with controlled deformation.

    Args:
        center_x: X coordinate of polygon center
        center_y: Y coordinate of polygon center
        radius: Base radius of the polygon
        num_sides: Number of polygon sides
        deform_factor: Maximum allowed deformation factor

    Returns:
        List of tuples containing (x, y) coordinates
    """
    points = []
    angle_step = 2 * math.pi / num_sides
    
    for i in range(num_sides):
        angle = i * angle_step
        radial_deform = random.uniform(-deform_factor, deform_factor)
        angular_deform = random.uniform(-deform_factor/2, deform_factor/2)
        
        modified_angle = angle + angular_deform
        modified_radius = radius * (1 + radial_deform)
        
        x = center_x + modified_radius * math.cos(modified_angle)
        y = center_y + modified_radius * math.sin(modified_angle)
        points.append((x, y))
    
    return points


def generate_deformed_shapes():
    """Generate multiple deformed polygons and save them to separate SVG files."""
    # Base parameters
    num_sides = 8
    center_x = 250
    center_y = 250
    base_radius = 150
    max_deformation = 0.15
    output_dir = Path("generated_polygons")
    
    # Create output directory if it doesn't exist
    output_dir.mkdir(exist_ok=True)

    for i in range(8):
        root = create_svg_root()
        points = calculate_points(center_x, center_y, base_radius, 
                                num_sides, max_deformation)
        
        path = etree.SubElement(root, "path")
        path_data = f"M {points[0][0]} {points[0][1]}"
        path_data += "".join(f" L {p[0]} {p[1]}" for p in points[1:])
        path_data += " Z"
        
        path.set("d", path_data)
        path.set("fill", "none")
        path.set("stroke", f"#{random.randint(0, 16777215):06X}")
        path.set("stroke-width", "2")
        path.set("opacity", "0.7")

        # Save individual SVG file
        output_file = output_dir / f"polygon_{i+1}.svg"
        tree = etree.ElementTree(root)
        tree.write(str(output_file), pretty_print=True, 
                  xml_declaration=True, encoding='utf-8')
    
    print(f"Generated {num_sides} polygons in {output_dir}")

if __name__ == "__main__":
    generate_deformed_shapes()

Monday, November 18, 2024

Python 3.13.0 : Tested TinyDB on Fedora 41.

Today I tested the TinyDB python package on Fedora 41 Linux distro:
TinyDB is a lightweight document oriented database optimized for your happiness :) It’s written in pure Python and has no external dependencies. The target are small apps that would be blown away by a SQL-DB or an external database server.
The documentation for this python package can be found on the official website.
The install on the Fedora 41 distro can be done with the pip tool:
pip install tinydb
This is the source code I tested:
from tinydb import TinyDB, Query
import datetime

# Create a TinyDB instance
db = TinyDB('my_database.json')

# Define a schema for our documents
UserSchema = {
    'name': str,
    'email': str,
    'age': int,
    'created_at': datetime.datetime
}

# Insert some sample data
users = [
    {'name': 'John Doe', 'email': 'john@example.com', 'age': 30},
    {'name': 'Jane Smith', 'email': 'jane@example.com', 'age': 25},
    {'name': 'Bob Johnson', 'email': 'bob@example.com', 'age': 35}
]

for user in users:
    db.insert(user)

# Querying all users
print("\nQuerying all users:")
all_users = db.all()
for user in all_users:
    print(f"Name: {user['name']}, Email: {user['email']}, Age: {user['age']}")

# Filtering data
print("\nFidig users older than 28:")
older_than_28 = db.search(Query().age > 28)
for user in older_than_28:
    print(f"Name: {user['name']}, Email: {user['email']}, Age: {user['age']}")

# Updating data
print("\nUpdatig John Doe's age:")
db.update({'age': 31}, Query().name == 'John Doe')

# Deleting data
print("\nDeletig Jane Smith:")
doc_ids = [doc.doc_id for doc in db.search(Query().email == 'jane@example.com')]
if doc_ids:
    db.remove(doc_ids=doc_ids)
else:
    print("No document found with email 'jane@example.com'")

# Adding a new field
print("\nAddig a 'city' field to all users:")
for user in db.all():
    user['city'] = 'New York'
    db.update(user, doc_ids=[doc.doc_id for doc in db.search(Query().name == user['name'])])

# Sorting data
print("\nSorting users by age:")
sorted_users = sorted(db.all(), key=lambda x: x['age'])
for user in sorted_users:
    print(f"Name: {user['name']}, Email: {user['email']}, Age: {user['age']}")

# Getting document count
print("\nTotal number of users:", len(db.all()))

# Closing the database connection
db.close()
This is the result (the my_database.json file persists between runs, so repeated runs of the script accumulate duplicate records):
$ python test_001.py 

Querying all users:
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: John Doe, Email: john@example.com, Age: 30
Name: Jane Smith, Email: jane@example.com, Age: 25
Name: Bob Johnson, Email: bob@example.com, Age: 35

Finding users older than 28:
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: John Doe, Email: john@example.com, Age: 30
Name: Bob Johnson, Email: bob@example.com, Age: 35

Updating John Doe's age:

Deleting Jane Smith:

Adding a 'city' field to all users:

Sorting users by age:
Name: John Doe, Email: john@example.com, Age: 31
Name: John Doe, Email: john@example.com, Age: 31
Name: John Doe, Email: john@example.com, Age: 31
Name: John Doe, Email: john@example.com, Age: 31
Name: John Doe, Email: john@example.com, Age: 31
Name: John Doe, Email: john@example.com, Age: 31
Name: John Doe, Email: john@example.com, Age: 31
Name: John Doe, Email: john@example.com, Age: 31
Name: John Doe, Email: john@example.com, Age: 31
Name: John Doe, Email: john@example.com, Age: 31
Name: John Doe, Email: john@example.com, Age: 31
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: Bob Johnson, Email: bob@example.com, Age: 35
Name: Bob Johnson, Email: bob@example.com, Age: 35

Total number of users: 22
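The duplicated rows above come from running the script several times against the same my_database.json file, because TinyDB keeps everything between runs. A small sketch (assuming the TinyDB 4.x API) that clears the default table before inserting again:
from tinydb import TinyDB

db = TinyDB('my_database.json')
db.truncate()  # remove all documents from the default table before inserting again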

Saturday, November 16, 2024

Python 3.13.0 : Test the gi python package on Fedora distro linux.

The gi (GObject Introspection) Python package is excellent! It provides Python bindings for GObject-based libraries like GTK, GLib, and Secret Service. It enables you to write native GNOME applications in Python and access system services seamlessly.
First, install these Fedora packages:
[mythcat@fedora ~]# dnf5 install python3-gobject libsecret-devel
...
Package "python3-gobject-3.48.2-3.fc41.x86_64" is already installed.
Package "libsecret-devel-0.21.4-3.fc41.x86_64" is already installed.
...
[mythcat@fedora ~]# dnf5 install gnome-shell gnome-keyring libsecret
...
I used this simple python source code to test the gi python package:
import gi

# Specify the version of Gio and Secret we want to use
gi.require_version('Gio', '2.0')
gi.require_version('Secret', '1')

from gi.repository import Gio, Secret, GLib

def check_schema(schema_name):
    try:
        Gio.Settings.new(schema_name)
        print(f"Schema '{schema_name}' is available")
        return True
    except GLib.GError as e:
        print(f"Schema '{schema_name}' is not installed: {str(e)}")
        return False

def store_secret():
    schema = Secret.Schema.new("org.example.Password",
        Secret.SchemaFlags.NONE,
        {
            "username": Secret.SchemaAttributeType.STRING,
        }
    )
    
    Secret.password_store_sync(schema, 
        {"username": "myuser"},
        Secret.COLLECTION_DEFAULT,
        "My secret item",
        "Hello, World!",
        None)
    
    print("Secret stored successfully")

def get_secret():
    schema = Secret.Schema.new("org.example.Password",
        Secret.SchemaFlags.NONE,
        {
            "username": Secret.SchemaAttributeType.STRING,
        }
    )
    
    password = Secret.password_lookup_sync(schema,
        {"username": "myuser"},
        None)
    
    if password is not None:
        print(f"Retrieved secret: {password}")
    else:
        print("No secret found")

if __name__ == "__main__":
    print("Starting secret operations...")
    store_secret()
    get_secret()
    print("Finished secret operations.")
The result is this:
$ python test_001.py 
Starting secret operations...
Secret stored successfully
Retrieved secret: Hello, World!
Finished secret operations.
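To remove the stored secret again, libsecret also exposes a synchronous clear call through the same gi bindings; this is a sketch that reuses the schema from the script above:
def clear_secret():
    # assumes the gi imports and Secret schema setup from the script above
    schema = Secret.Schema.new("org.example.Password",
        Secret.SchemaFlags.NONE,
        {
            "username": Secret.SchemaAttributeType.STRING,
        }
    )

    removed = Secret.password_clear_sync(schema, {"username": "myuser"}, None)
    print("Secret removed" if removed else "No secret found to remove")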