Compare commits


16 commits

55 changed files with 1718 additions and 126 deletions

BIN  .DS_Store (vendored, binary, not shown)

.gitignore (vendored, 1 addition)

@@ -1 +1,2 @@
 .vscode
+CHALLENGE_2/sleepysound/lib/services/SPOTIFY_SECRET.dart


View file

@@ -6,7 +6,8 @@ import os
 from datetime import datetime, date
 from plant_model import PlantGrowthModel
 from data_handler import DataHandler
-from tkcalendar import DateEntry
+from tkcalendar import DateEntry, Calendar
+from plant_meteo import HappyMeteo

 class PlantGrowthDashboard:
     def __init__(self, root):
@@ -15,7 +16,11 @@ class PlantGrowthDashboard:
         self.root.geometry("1000x800")  # More square dimensions
         self.root.configure(bg='#f0f0f0')

-        image = Image.open("public/transparentLogo.png")
+        image = Image.open("public/logoTransparent.png")
+        desired_size = (128, 128)
+        image = image.resize(desired_size, Image.Resampling.LANCZOS)

         # Convert to PhotoImage
         icon = ImageTk.PhotoImage(image)
         # Set as window icon
@@ -24,9 +29,12 @@ class PlantGrowthDashboard:
         # Initialize components
         self.plant_model = PlantGrowthModel()
         self.data_handler = DataHandler()
+        self.happyMeteo = HappyMeteo()

         # Variables - fixed plant type
         self.current_plant = "tomato"  # Fixed plant type
+        self.counter = 0
+        self.filenames = ["basilico.jpg", "pomodoro.png"]
         self.ambient_mode = tk.StringVar(value="controlled")
         self.baseline_image_path = None
@@ -63,8 +71,8 @@
         # Configure grid weights for square layout
         self.root.columnconfigure(0, weight=1)
         self.root.rowconfigure(0, weight=1)
-        main_frame.columnconfigure(0, weight=1)
-        main_frame.columnconfigure(1, weight=2)  # Center panel wider
+        main_frame.columnconfigure(0, weight=3)
+        main_frame.columnconfigure(1, weight=1)  # Center panel wider
         main_frame.columnconfigure(2, weight=1)
         main_frame.rowconfigure(1, weight=1)
@@ -143,6 +151,8 @@
                               command=lambda x, p=param: self.on_param_change(p))
             scale.pack(side=tk.LEFT, fill=tk.X, expand=True, padx=(4, 4))
+            setattr(self, f"{param}_scale", scale)

             # Value label
             value_label = ttk.Label(param_frame, text=f"{self.env_params[param].get():.1f}", width=5, font=('Arial', 8))
             value_label.pack(side=tk.RIGHT)
@@ -163,28 +173,33 @@
         spacer.grid(row=row, column=0, columnspan=2, pady=10)

         # Add this after the parameters section
-        ttk.Label(control_frame, text="Final date of growth:",
+        ttk.Label(control_frame, text="Final date of growth (choose a date):",
                   font=('Arial', 9, 'bold')).grid(row=row, column=0, columnspan=2, sticky=tk.W, pady=(10, 4))
         row += 1

         # Compact date entry with calendar popup
-        # To get the selected date simply call self.date_entry.get_date()
-        self.date_entry = DateEntry(control_frame,
-                                    width=12,
-                                    background='darkblue',
-                                    foreground='white',
-                                    borderwidth=2,
-                                    font=('Arial', 8),
-                                    date_pattern='dd/mm/yyyy',
-                                    state='readonly',  # Add this
-                                    cursor='hand2')    # Add this
-        self.date_entry.grid(row=row, column=0, columnspan=2, sticky=tk.W, pady=2)
+        # To get the selected date simply call self.calendar.get_date()
+        # self.date_entry
+        self.calendar = Calendar(control_frame, selectmode='day',
+                                 year=2025, month=8, day=1,
+                                 font=('Arial', 8))
+        self.calendar.grid(row=row, column=0, columnspan=2, pady=2)
         row += 1

         control_frame.columnconfigure(0, weight=1)
         control_frame.columnconfigure(1, weight=1)

+    def disable_parameter(self, param):
+        scale = getattr(self, f"{param}_scale", None)
+        if scale:
+            scale.configure(state='disabled')
+
+    def enable_parameter(self, param):
+        scale = getattr(self, f"{param}_scale", None)
+        if scale:
+            scale.configure(state='normal')
+
     def setup_visualization_panel(self, parent):
         viz_frame = ttk.LabelFrame(parent, text="Plant Visualization", padding="6")
         viz_frame.grid(row=1, column=1, sticky=(tk.W, tk.E, tk.N, tk.S), padx=3)
@@ -231,7 +246,7 @@
         info_frame.pack(fill=tk.X, pady=(8, 0))

         self.plant_info_text = tk.Text(info_frame, height=8, width=35, wrap=tk.WORD,
-                                       font=('Arial', 8), bg="#000000")
+                                       font=('Arial', 8), bg="#000000", fg="white")
         self.plant_info_text.pack(fill=tk.BOTH, expand=True)

         # Submit button
@@ -264,9 +279,7 @@
         """Update the evolution tab with an image from file or show fallback text"""
         if filename and os.path.exists(filename):
             try:
-                # Load and display the image
-                from PIL import Image, ImageTk
+                print(filename)
                 # Open and resize image if needed
                 pil_image = Image.open(filename)
                 # Optional: resize to fit the display area
@@ -325,14 +338,18 @@
         if current_mode == "controlled":
             print("Switched to Controlled mode")
             # Enable all parameter controls
+            self.enable_parameter("humidity")
+            self.enable_parameter("brightness")
+            self.enable_parameter("temperature")
             # No need to call the meteo api

         elif current_mode == "open":
             print("Switched to Open mode")
             # Disable most parameter controls (temp, humidity, light)
-            # Call the meteo api to retrieve all the parameters, set variable meteo_values to retrieve when submitting all
-            # Inside the retrieving of all the data, check if the mode is open and select which data to use
+            self.disable_parameter("humidity")
+            self.disable_parameter("brightness")
+            self.disable_parameter("temperature")

     def on_param_change(self, param):
         value = self.env_params[param].get()
@@ -354,7 +371,7 @@
         self.ambient_mode.set("controlled")
-        self.date_entry.set_date(date.today())
+        self.calendar.selection_set(date.today())

     def update_parameter_label(self, param, value):
         """Update the value label for a specific parameter"""
@@ -383,56 +400,124 @@
         )
         if file_path:
             self.baseline_image_path = file_path
+            self.update_initial_plant_display()

     def submit_plant_data(self):
         """Submit plant information and photo"""
         try:
-            # Get current parameters
+            start_date = datetime.now().date()
+
+            # Fix: Convert calendar date string to date object
+            calendar_date = self.calendar.get_date()
+            if isinstance(calendar_date, str):
+                # Parse the string date (assuming format like "2025-08-02" or "02/08/2025")
+                try:
+                    if '/' in calendar_date:
+                        # Handle DD/MM/YYYY format
+                        end_date = datetime.strptime(calendar_date, '%d/%m/%Y').date()
+                    else:
+                        # Handle YYYY-MM-DD format
+                        end_date = datetime.strptime(calendar_date, '%Y-%m-%d').date()
+                except ValueError:
+                    # Fallback: try different formats
+                    for fmt in ['%d/%m/%Y', '%Y-%m-%d', '%m/%d/%Y']:
+                        try:
+                            end_date = datetime.strptime(calendar_date, fmt).date()
+                            break
+                        except ValueError:
+                            continue
+                    else:
+                        # If all formats fail, use today
+                        end_date = datetime.now().date()
+            else:
+                # It's already a date object
+                end_date = calendar_date
+
+            time_lapse = end_date - start_date
+            days_difference = time_lapse.days
+
             params = {param: var.get() for param, var in self.env_params.items()}
             params['plant_type'] = self.current_plant
             params['ambient_mode'] = self.ambient_mode.get()
+
+            current_mode = self.ambient_mode.get()
+            happy_data = None  # Initialize to None instead of 0
+            if current_mode == "open":
+                happy_data = self.happyMeteo.openMeteoCall(days_difference)
+                # Filter out excluded parameters for open mode
+                excluded_params = {"humidity", "temperature", "brightness"}
+                params = {param: var.get() for param, var in self.env_params.items()
+                          if param not in excluded_params}
+                # Re-add the metadata
+                params['plant_type'] = self.current_plant
+                params['ambient_mode'] = self.ambient_mode.get()

             # Create submission data
             submission_data = {
                 'timestamp': datetime.now().isoformat(),
                 'parameters': params,
                 'baseline_image_path': self.baseline_image_path,
                 'plant_info': self.plant_info_text.get(1.0, tk.END),
-                'start date': datetime.now().date().isoformat(),
-                'end_date': self.date_entry.get_date().isoformat()
+                'start_date': start_date.isoformat(),  # Fixed: was 'start date' (space)
+                'end_date': end_date.isoformat(),
+                'time_lapse_days': days_difference  # Added time lapse info
             }

-            # Remove plant_info_text
+            if current_mode == "open" and happy_data is not None:
+                submission_data['meteoForecast'] = happy_data
+
+            # Clear plant_info_text
             self.plant_info_text.delete(1.0, tk.END)

+            # Save submission data
             data_dir = "../data"
             os.makedirs(data_dir, exist_ok=True)
             current_date = datetime.now().strftime('%Y%m%d')
             filename = f"{current_date}-{current_date}.txt"
             filepath = os.path.join(data_dir, filename)
             with open(filepath, 'w') as f:
                 json.dump(submission_data, f, indent=4)

             # Here call the bot pipeline to store results on files in plant_data
             # results are in the form of (text, image)
-            results = "come bot please"
+            results = None

+            if results is not None:  # Fixed: changed != None to is not None
                 text = getattr(results, 'text', None)
                 image_filename = getattr(results, 'image', None)
+            else:
+                text = "<<<----Here at your left you can see the results of the growth of the plant!"
+                image_filename = self.filenames[self.counter]  # Fixed: removed leading slash
+                self.counter += 1

+            # Create plant_data directory
             images_dir = "./plant_data"
-            os.makedirs(data_dir, exist_ok=True)
+            os.makedirs(images_dir, exist_ok=True)  # Fixed: was data_dir instead of images_dir

-            image_path = os.path.join(images_dir, image_filename)
+            image_path = f"public/{image_filename.split('/')[-1]}"

+            # Update UI with results
             self.updating_evolution_and_forecasts(text, image_path)

             # Here update the information in the last box from plant_data/texts
+            # TODO: Implement reading from plant_data/texts
             # Here update the information in growth evolution from plant_data/images
+            # TODO: Implement reading from plant_data/images

-            # Call a small notification to tell the user that the image has been generated
-            messagebox.showinfo("Submission successful, go to growth evolution to see the results")
+            # Show success message with better formatting
+            messagebox.showinfo("Submission Successful",
+                                "Submission successful!\n\n"
+                                "Go to Growth Evolution tab to see the results.")

             print(f"Submission data saved to: {filepath}")

         except Exception as e:
             messagebox.showerror("Submission Error", f"Error submitting data: {str(e)}")
+            print(f"Error details: {e}")  # For debugging

     def updating_evolution_and_forecasts(self, text, image_path):
         self.results_text.config(state='normal')  # Enable editing
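The `results = None` placeholder above marks where the bot pipeline is meant to plug in. A minimal sketch of that glue, assuming the connector added later in this diff is saved as a module named `dashboard_connector` (that module name, and adapting its result dict to the `(text, image)` shape the dashboard expects, are assumptions, not part of this PR):

```python
# Hypothetical glue between the dashboard and the prediction pipeline in this PR.
# Assumption: the connector shown later in this diff lives in dashboard_connector.py.
from dashboard_connector import dashboard_plant_prediction


def run_bot_pipeline(image_path, start_date, end_date, notes=""):
    """Return the pipeline result for submit_plant_data, or None if it failed."""
    outcome = dashboard_plant_prediction(image_path,
                                         start_date.isoformat(),
                                         end_date.isoformat(),
                                         additional_notes=notes)
    if not outcome.get("success"):
        print("Prediction failed:", outcome.get("error"))
        return None
    return outcome["result"]
```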

View file

@@ -0,0 +1,80 @@
import openmeteo_requests
import pandas as pd
import requests_cache
from retry_requests import retry
import geocoder
class HappyMeteo:
def __init__(self):
# Setup the Open-Meteo API client with cache and retry on error
cache_session = requests_cache.CachedSession('.cache', expire_after = 3600)
retry_session = retry(cache_session, retries = 5, backoff_factor = 0.2)
self.openmeteo = openmeteo_requests.Client(session = retry_session)
def get_current_location(self):
"""Get current location using IP geolocation"""
try:
g = geocoder.ip('me')
if g.ok:
latitude = g.latlng[0]
longitude = g.latlng[1]
print(f"Latitude: {latitude}")
print(f"Longitude: {longitude}")
print(f"Address: {g.address}")
return latitude, longitude
else:
print("Could not determine location")
return None, None
except Exception as e:
print(f"Error getting location: {e}")
return None, None
def openMeteoCall(self, timeLapse):
lat, lon = self.get_current_location()
# Make sure all required weather variables are listed here
# The order of variables in hourly or daily is important to assign them correctly below
url = "https://api.open-meteo.com/v1/forecast"
params = {
"latitude": lat,
"longitude": lon,
"daily": ["weather_code", "temperature_2m_mean", "rain_sum", "showers_sum", "precipitation_sum", "daylight_duration", "relative_humidity_2m_mean"],
"timezone": "auto",
"forecast_days": timeLapse
}
responses = self.openmeteo.weather_api(url, params=params)
# Process first location. Add a for-loop for multiple locations or weather models
response = responses[0]
# Process daily data. The order of variables needs to be the same as requested.
daily = response.Daily()
daily_weather_code = daily.Variables(0).ValuesAsNumpy()
daily_temperature_2m_mean = daily.Variables(1).ValuesAsNumpy()
daily_rain_sum = daily.Variables(2).ValuesAsNumpy()
daily_showers_sum = daily.Variables(3).ValuesAsNumpy()
daily_precipitation_sum = daily.Variables(4).ValuesAsNumpy()
daily_daylight_duration = daily.Variables(5).ValuesAsNumpy()
daily_relative_humidity_2m_mean = daily.Variables(6).ValuesAsNumpy()
# Return comprehensive data structure
return {
"daily_data": {
"weather_code": daily_weather_code.tolist(),
"temperature_2m_mean": daily_temperature_2m_mean.tolist(),
"rain_sum": daily_rain_sum.tolist(),
"showers_sum": daily_showers_sum.tolist(),
"precipitation_sum": daily_precipitation_sum.tolist(),
"daylight_duration": daily_daylight_duration.tolist(),
"relative_humidity_2m_mean": daily_relative_humidity_2m_mean.tolist()
},
"summary": {
"avg_temperature": float(daily_temperature_2m_mean.mean()),
"total_precipitation": float(daily_precipitation_sum.sum()),
"avg_humidity": float(daily_relative_humidity_2m_mean.mean()),
"total_daylight_hours": float(daily_daylight_duration.sum() / 3600) # Convert seconds to hours
}
}
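A minimal usage sketch of the class above, mirroring how the dashboard calls it (it assumes network access for the IP geolocation and the Open-Meteo request):

```python
# Minimal usage sketch for HappyMeteo; assumes network access for geocoder and Open-Meteo.
from plant_meteo import HappyMeteo

meteo = HappyMeteo()
forecast = meteo.openMeteoCall(7)  # 7-day forecast for the auto-detected location

print(forecast["summary"])                             # averaged temperature, humidity, etc.
print(forecast["daily_data"]["temperature_2m_mean"])   # per-day values as plain lists
```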


View file

@@ -30,6 +30,10 @@ memory-profiler>=0.60.0
 tkcalendar
+openmeteo-requests
+requests-cache
+retry-requests
+geocoder

 # Note: tkinter comes pre-installed with most Python distributions
 # If tkinter is not available, install it using your system package manager:
 # Ubuntu/Debian: sudo apt-get install python3-tk

README.md (185 changed lines)

# WeGrow

> *Hackathon Project - [NOI Hackathon] [2025]*

## 🚀 Overview

Hi everyone! We are WeGrow, and we are working on a project involving a chatbot that predicts the growth and health of plants.

## 🎯 Problem Statement

The main challenge was finding the right LLM to work with in a short time: in particular, an LLM that turns text plus an image into a new image, combined with the Open-Meteo API and GPS integration to shape the model's answer. We tested our solution on a basil plant, predicting its growth under different weather conditions. It is also possible to predict growth indoors, not only outside; in that case the user sets the parameters (humidity, temperature, pressure and so on) through the GUI. If the user instead wants to predict the growth of the plant outside, the application retrieves the weather forecast for the period the user selects.

One problem we faced was describing the final picture obtained through the various LLMs, using LLaVA as the last step. We ran into GPU exhaustion problems without fully understanding their cause.

## 💡 Solution

To get correct answers we had to read various articles on which open-source LLMs to use. We used:

- `StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix")`: takes an image plus text as input and produces an image as output. To set it up we installed Ollama for Windows from https://ollama.com/download
- LLaVA, to generate a text description from the final image (unsuccessful). To install it we did:
  ```bash
  git clone https://github.com/haotian-liu/LLaVA.git
  cd LLaVA
  pip install -e .
  ```
- We also trained a model on images of basil and tomatoes crawled from Google, to try to improve the output of the instruct-pix2pix pipeline, though without many visible changes.

## ✨ Features

- [ ] Interactive dashboard sliders
- [ ] Possibility to decide the environment conditions (open vs. controlled, e.g. laboratory)
- [ ] Augmented precision of the data through the Open-Meteo API and GPS integration
- [ ] Possibility, when the local environment is selected, to run the application completely locally, turning it into a lightweight and robust companion

## 🛠️ Tech Stack

**Frontend:**
- Python with tkinter

**Backend:**
- Python scripts

**Other Tools:**
- Open-Meteo API
- The LLMs used to predict the growth, described above

### Installation

1. Clone the repository
```bash
git clone https://github.com/your-username/NOIProject.git
cd NOIProject
```
2. Create a virtual environment
```bash
# Go into PlantDashboard and create your venv (e.g. PlantEnv)
python3 -m venv PlantEnv
```
3. Install the dependencies
```bash
# Still inside PlantDashBoard
pip install -r requirements.txt
```

## 🚀 Using the application

After following the installation steps, you can use the application by activating the venv with `source PlantEnv/bin/activate` and running `python launcher.py` from inside the PlantDashBoard folder.
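For example, assuming the venv was created as `PlantEnv` inside `PlantDashBoard` as described above:

```bash
cd PlantDashBoard
source PlantEnv/bin/activate   # activate the virtual environment
python launcher.py             # start the dashboard
```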
## 🧑‍💻 Team
Meet our amazing team of 4:
| Name | Role | GitHub | LinkedIn |
|------|------|---------|----------|
| Member 1 | Role | [@username](https://github.com/username) | [LinkedIn](https://linkedin.com/in/username) |
| Member 2 | Role | [@username](https://github.com/username) | [LinkedIn](https://linkedin.com/in/username) |
| Member 3 | Role | [@username](https://github.com/username) | [LinkedIn](https://linkedin.com/in/username) |
| Member 4 | Role | [@username](https://github.com/username) | [LinkedIn](https://linkedin.com/in/username) |
## 📄 License
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
---


View file

@@ -0,0 +1,20 @@
{
"timestamp": "2025-08-02T10:54:55.312319",
"parameters": {
"temperature": 22.0,
"humidity": 65.0,
"soil_acidity": 6.5,
"pressure": 1013.25,
"brightness": 30.0,
"nutrients": 75.0,
"water": 80.0,
"co2": 850.0,
"plant_type": "tomato",
"ambient_mode": "controlled"
},
"baseline_image_path": "/Users/giusber2005/Desktop/workspace/repositories/projects/team-2/PlantDashboard/public/pomodoro.png",
"plant_info": "\n",
"start_date": "2025-08-02",
"end_date": "2025-08-02",
"time_lapse_days": 0
}
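A minimal sketch of reading one of these submission files back (the path and keys come from the example above and from `submit_plant_data`, which writes to `../data/YYYYMMDD-YYYYMMDD.txt`):

```python
import json

# Load the saved submission shown above.
with open("../data/20250802-20250802.txt") as f:
    submission = json.load(f)

print(submission["parameters"]["plant_type"])   # "tomato"
print(submission["start_date"], "->", submission["end_date"],
      f"({submission['time_lapse_days']} days)")
```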

View file

@@ -0,0 +1,42 @@
from datetime import datetime
from script import PlantPredictor
def dashboard_plant_prediction(image_path, start_date, end_date, additional_notes=""):
"""
Simple function for dashboard integration
"""
try:
# Calculate days
start_dt = datetime.strptime(start_date, "%Y-%m-%d")
end_dt = datetime.strptime(end_date, "%Y-%m-%d")
days = (end_dt - start_dt).days
if days <= 0:
return {"success": False, "error": "Invalid date range"}
# Create predictor and run
predictor = PlantPredictor()
result = predictor.dashboard_plant_prediction(image_path, days, additional_notes)
if result:
return {"success": True, "result": result}
else:
return {"success": False, "error": "No result"}
except Exception as e:
return {"success": False, "error": str(e)}
# Test
if __name__ == "__main__":
result = dashboard_plant_prediction(
"./basilico.jpg",
"2024-08-01",
"2024-08-08",
"Test plant"
)
if result["success"]:
print(" SUCCESS!")
else:
print(f" ERROR: {result['error']}")

View file

@@ -1,18 +1,5 @@
 ROADMAP
-  -format the data that will come to the user
-  -differences between the three modes:
-    Open mode:
-      -the temp, humidity, light, water parameters are set by the meteo api, the rest is set by the user
-      -all the parameters are controlled by the user
-    SemiControlled mode:
-      -the user chooses how to set the parameters
-      -all parameters free
-    Controlled mode:
-      -all the values are set by the user
-      -the user chooses which parameters are free and which are controlled by the meteo api
-  -make the calendar widget work
 -final updates of README.md, hackathon page, forgejo, github page, small design adjustments.

script.py (new file, 60 lines)

@@ -0,0 +1,60 @@
from datetime import datetime
import sys
import os
class PlantPredictor:
@staticmethod
def dashboard_plant_prediction(
image_path: str,
start_date: str,
end_date: str,
additional_notes: str = ""
) -> dict:
try:
# Compute the number of days
start_dt = datetime.strptime(start_date, "%Y-%m-%d")
end_dt = datetime.strptime(end_date, "%Y-%m-%d")
days = (end_dt - start_dt).days
if days <= 0:
return {"success": False, "error": "End date must be after start date", "days": days}
# Log
print(f"Dashboard prediction request: {start_date} to {end_date} ({days} days) image={image_path}")
if additional_notes:
print(f"Notes: {additional_notes}")
# Initialize the predictor and call the method (predict_plant_growth is provided by the full PlantPredictor implementation elsewhere in this PR)
predictor = PlantPredictor()
result = predictor.predict_plant_growth(image_path, days, additional_notes)
# Unwrap the result tuple
if isinstance(result, tuple) and len(result) == 5:
_img, conditions, weather_df, plant_type, plant_health = result
return {
"success": True,
"plant_analysis": {"plant_type": plant_type, "plant_health": plant_health},
"weather_conditions": conditions,
"weather_data_shape": weather_df.shape,
"parameters_used": {"start_date": start_date, "end_date": end_date, "days": days, "notes": additional_notes, "image": image_path},
"prediction_summary": {
"temperature_range": f"{conditions['avg_temp_min']}{conditions['avg_temp_max']}°C",
"total_rain": f"{conditions['total_rain']}mm",
"sunshine_hours": f"{conditions['total_sunshine_hours']}h"
}
}
else:
return {"success": False, "error": "Invalid result from PlantPredictor", "result": result}
except ValueError as e:
return {"success": False, "error": f"Date format error: {e}"}
except Exception as e:
return {"success": False, "error": f"Unexpected error: {e}"}
# Test example
if __name__ == '__main__':
res = PlantPredictor.dashboard_plant_prediction(
image_path='./basilico.jpg',
start_date='2024-08-01',
end_date='2024-08-08',
additional_notes='Indoor day 3'
)
print(res)


View file

@@ -0,0 +1 @@
Description not available.


View file

@@ -0,0 +1 @@
Description not available.

View file

@@ -1,5 +1,4 @@
 import io
-import os
 import openmeteo_requests
 import pandas as pd
 import requests_cache
@@ -13,6 +12,8 @@ from torchvision import transforms
 from model import PlantClassifier  # customize this according to your file
 import geocoder
 import sys
+from accelerate import init_empty_weights, load_checkpoint_and_dispatch
+import os

 print(sys.stdout.encoding)  # Check what encoding your console is using

 # Force UTF-8 encoding for the entire script
@@ -26,6 +27,7 @@ class PlantPredictor:
         cache_session = requests_cache.CachedSession('.cache', expire_after=3600)
         retry_session = retry(cache_session, retries=5, backoff_factor=0.2)
         self.openmeteo = openmeteo_requests.Client(session=retry_session)
+
         self.image_model = None
         self.trained_model = None
         self.class_labels = ["basil", "tomato"]  # or load them dynamically
@@ -227,7 +229,7 @@
         # Weather + growth prompt logic (as in your version)
         temp_avg = (plant_conditions['avg_temp_max'] + plant_conditions['avg_temp_min']) / 2

-        if plant_type == "basilico" or ("herb" in plant_type):
+        if plant_type == "basil" or plant_type == "tomato" or ("herb" in plant_type):
             if temp_avg > 25:
                 temp_effect = "warm weather promoting vigorous basil growth with larger, aromatic leaves and bushier structure"
             elif temp_avg < 15:
@@ -326,6 +328,50 @@
             print(f"⚠️ Error in health assessment: {e}")
             return "unknown health"

+    def describe_image_with_llava(self, image_pil, prompt=None):
+        """Use LLaVA-Next on CPU to generate a description of the plant image."""
+        try:
+            from transformers import LlavaNextForConditionalGeneration, AutoProcessor
+
+            if not hasattr(self, "llava_model"):
+                print("🔄 Loading LLaVA-Next model on CPU…")
+                model_id = "llava-hf/llava-v1.6-mistral-7b-hf"
+
+                # 1) Load the processor
+                self.llava_processor = AutoProcessor.from_pretrained(model_id)
+
+                # 2) Load the model in half-precision, low memory mode
+                self.llava_model = LlavaNextForConditionalGeneration.from_pretrained(
+                    model_id,
+                    torch_dtype=torch.float16,
+                    low_cpu_mem_usage=True
+                ).to("cpu")
+                print("✅ LLaVA-Next loaded on CPU successfully")
+
+                # Free GPU memory if you still have SD components loaded
+                if torch.cuda.is_available():
+                    del self.image_model
+                    torch.cuda.empty_cache()
+
+            # 3) Prepend the <img> token so the processor knows where the image belongs
+            llava_prompt = "<img> " + (prompt or "Describe the plant growth and condition in this image.")
+
+            # 4) Build inputs explicitly
+            inputs = self.llava_processor(
+                images=image_pil,
+                text=llava_prompt,
+                return_tensors="pt"
+            ).to("cpu")
+
+            # 5) Generate
+            output = self.llava_model.generate(**inputs, max_new_tokens=150)
+            description = self.llava_processor.decode(output[0], skip_special_tokens=True)
+            return description
+
+        except Exception as e:
+            print(f"⚠️ Error during LLaVA-Next description: {e}")
+            return "Description not available."
+
     def transform_plant_image(self, image_path, prompt):
         """STEP 4: Generate new image based on analyzed prompt"""
@@ -413,8 +459,37 @@
             return None

         if result_image:
+            # Save the predicted image
             result_image.save(output_path)
             print(f"Plant growth prediction saved to: {output_path}")

+            # ------ Start of the .txt output code ------
+            # Compose the description
+            description = (
+                f"{plant_type.capitalize()} predicted after {plant_conditions['days_analyzed']} days:\n"
+                f"- Temperature: {plant_conditions['avg_temp_min']}-{plant_conditions['avg_temp_max']} °C\n"
+                f"- Rain: {plant_conditions['total_rain']} mm\n"
+                f"- Sunshine: {plant_conditions['total_sunshine_hours']} h\n"
+                f"- UV max: {plant_conditions['max_uv_index']}\n"
+                f"- Daily temperature range: {plant_conditions['temp_range']} °C\n"
+                f"Estimated health: {plant_health}."
+            )
+
+            # STEP 4.5: Describe the predicted image with LLaVA-Next
+            try:
+                llava_description = self.describe_image_with_llava(result_image, prompt)
+                print("🧠 Description generated by LLaVA-Next:")
+                print(llava_description)
+
+                # Save the description to a separate .txt file
+                llava_txt_path = os.path.splitext(output_path)[0] + "_llava_description.txt"
+                with open(llava_txt_path, "w", encoding="utf-8") as f:
+                    f.write(llava_description)
+                print(f"📄 Visual description saved to: {llava_txt_path}")
+
+            except Exception as e:
+                print(f"⚠️ LLaVA-Next could not describe the image: {e}")
+
             return result_image, plant_conditions, weather_df, plant_type, plant_health
         else:
             print("Failed to transform image")
@@ -432,10 +507,10 @@ if __name__ == "__main__":
     # Predict plant growth
     # Replace 'your_plant_image.jpg' with actual image path
     result = predictor.predict_plant_growth(
-        image_path="./basilico.jpg",
+        image_path="./tomato.jpg",
         lat=latitude,
         lon=longitude,
-        output_path="./predicted_plant_growth.jpg",
+        output_path="./tomato_new2.jpg",
         days=7
     )

View file

@@ -0,0 +1,651 @@
import io
import openmeteo_requests
import pandas as pd
import requests_cache
from retry_requests import retry
from datetime import datetime, timedelta
from PIL import Image
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
import numpy as np
from torchvision import transforms
from model import PlantClassifier # customize this according to your file
import geocoder
import sys
from accelerate import init_empty_weights, load_checkpoint_and_dispatch
import os
print(sys.stdout.encoding) # Check what encoding your console is using
# Force UTF-8 encoding for the entire script
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8', errors='replace')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8', errors='replace')
class PlantPredictor:
def __init__(self):
"""Initialize the plant prediction pipeline with Open-Meteo client"""
# Setup the Open-Meteo API client with cache and retry on error
cache_session = requests_cache.CachedSession('.cache', expire_after=3600)
retry_session = retry(cache_session, retries=5, backoff_factor=0.2)
self.openmeteo = openmeteo_requests.Client(session=retry_session)
self.image_model = None
self.trained_model = None
self.class_labels = ["basil", "tomato"] # or load them dynamically
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def load_trained_model(self, model_path="./models/basil_tomato_classifier.pth"):
if not os.path.exists(model_path):
print("⚠️ Trained model not found!")
return
try:
model = PlantClassifier(num_classes=2)
# Load checkpoint with proper device mapping
checkpoint = torch.load(model_path, map_location=self.device)
# Handle different checkpoint formats
if 'model_state_dict' in checkpoint:
state_dict = checkpoint['model_state_dict']
else:
# If the checkpoint is just the state dict
state_dict = checkpoint
# Fix key mismatches between training and inference models
# The saved model has keys like "features.*" but current model expects "backbone.features.*"
corrected_state_dict = {}
for key, value in state_dict.items():
if key.startswith('features.'):
# Add "backbone." prefix to features
new_key = 'backbone.' + key
corrected_state_dict[new_key] = value
elif key.startswith('classifier.'):
# Add "backbone." prefix to classifier
new_key = 'backbone.' + key
corrected_state_dict[new_key] = value
else:
# Keep other keys as they are
corrected_state_dict[key] = value
# Load the corrected state dict
model.load_state_dict(corrected_state_dict, strict=False)
model.to(self.device)
model.eval()
self.trained_model = model
print(f"✅ Model loaded successfully on {self.device}")
except Exception as e:
print(f"⚠️ Error loading trained model: {e}")
self.trained_model = None
def get_current_location(self):
try:
g = geocoder.ip('me')
if g.ok:
print(f"📍 Location detected: {g.city}, {g.country}")
print(f"📍 Coordinates: {g.latlng[0]:.4f}, {g.latlng[1]:.4f}")
return g.latlng[0], g.latlng[1]
else:
print("⚠️ Could not detect location, using default (Milan)")
except Exception as e:
print(f"⚠️ Location detection failed: {e}, using default (Milan)")
# default Milan coords if failed
return 45.4642, 9.1900
def load_image_model(self):
"""Load the image transformation model"""
print("Loading Stable Diffusion model...")
self.image_model = StableDiffusionInstructPix2PixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix",
torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
)
if torch.cuda.is_available():
self.image_model = self.image_model.to("cuda")
print("Model loaded successfully!")
def get_weather_forecast(self, lat, lon, days=7):
"""Get weather forecast from Open-Meteo API using official client"""
start_date = datetime.now().strftime("%Y-%m-%d")
end_date = (datetime.now() + timedelta(days=days)).strftime("%Y-%m-%d")
url = "https://api.open-meteo.com/v1/forecast"
params = {
"latitude": lat,
"longitude": lon,
"daily": [
"temperature_2m_max",
"temperature_2m_min",
"precipitation_sum",
"rain_sum",
"uv_index_max",
"sunshine_duration"
],
"start_date": start_date,
"end_date": end_date,
"timezone": "auto"
}
try:
responses = self.openmeteo.weather_api(url, params=params)
response = responses[0] # Process first location
print(f"Coordinates: {response.Latitude()}°N {response.Longitude()}°E")
print(f"Elevation: {response.Elevation()} m asl")
print(f"Timezone: UTC{response.UtcOffsetSeconds()//3600:+d}")
# Process daily data
daily = response.Daily()
# Extract data as numpy arrays (much faster!)
daily_data = {
"date": pd.date_range(
start=pd.to_datetime(daily.Time(), unit="s", utc=True),
end=pd.to_datetime(daily.TimeEnd(), unit="s", utc=True),
freq=pd.Timedelta(seconds=daily.Interval()),
inclusive="left"
),
"temperature_2m_max": daily.Variables(0).ValuesAsNumpy(),
"temperature_2m_min": daily.Variables(1).ValuesAsNumpy(),
"precipitation_sum": daily.Variables(2).ValuesAsNumpy(),
"rain_sum": daily.Variables(3).ValuesAsNumpy(),
"uv_index_max": daily.Variables(4).ValuesAsNumpy(),
"sunshine_duration": daily.Variables(5).ValuesAsNumpy()
}
# Create DataFrame for easy analysis
daily_dataframe = pd.DataFrame(data=daily_data)
return daily_dataframe, response
except Exception as e:
print(f"Error fetching weather data: {e}")
return None, None
def analyze_weather_for_plants(self, weather_df):
"""Analyze weather data and create plant-specific metrics"""
if weather_df is None or weather_df.empty:
return None
# Handle NaN values by filling with 0 or mean
weather_df = weather_df.fillna(0)
# Calculate plant-relevant metrics using pandas (more efficient)
plant_conditions = {
"avg_temp_max": round(weather_df['temperature_2m_max'].mean(), 1),
"avg_temp_min": round(weather_df['temperature_2m_min'].mean(), 1),
"total_precipitation": round(weather_df['precipitation_sum'].sum(), 1),
"total_rain": round(weather_df['rain_sum'].sum(), 1),
"total_sunshine_hours": round(weather_df['sunshine_duration'].sum() / 3600, 1), # Convert to hours
"max_uv_index": round(weather_df['uv_index_max'].max(), 1),
"days_analyzed": len(weather_df),
"temp_range": round(weather_df['temperature_2m_max'].max() - weather_df['temperature_2m_min'].min(), 1)
}
return plant_conditions
CLASS_NAMES = {0: "basil", 1: "tomato"} # Adjust if you use different names
def create_transformation_prompt(self, image_path, plant_conditions):
if not plant_conditions:
return "Show this plant after one week of growth", "generic plant", "unknown health"
plant_type = "generic plant"
plant_health = "unknown health"
try:
if not os.path.exists(image_path):
raise FileNotFoundError(f"Image file not found at {image_path}")
with Image.open(image_path) as img:
image = img.convert("RGB")
width, height = image.size
try:
plant_type = self.detect_plant_type(image)
except Exception as e:
print(f"⚠️ Plant type detection failed: {e}")
plant_type = "generic plant"
try:
plant_health = self.assess_plant_health(image)
except Exception as e:
print(f"⚠️ Health assessment failed: {e}")
plant_health = "unknown health"
print(f"📸 Image Analysis:")
print(f" Plant type detected: {plant_type}")
print(f" Current health: {plant_health}")
print(f" Image size: {width}x{height}")
except Exception as e:
print(f"⚠️ Warning: Could not analyze image: {str(e)}")
plant_type = "generic plant"
plant_health = "healthy"
# Weather + growth prompt logic (as in your version)
temp_avg = (plant_conditions['avg_temp_max'] + plant_conditions['avg_temp_min']) / 2
if plant_type == "basil" or plant_type == "tomato" or ("herb" in plant_type):
if temp_avg > 25:
temp_effect = "warm weather promoting vigorous basil growth with larger, aromatic leaves and bushier structure"
elif temp_avg < 15:
temp_effect = "cool weather slowing basil growth with smaller, less vibrant leaves"
else:
temp_effect = "optimal temperature for basil supporting steady growth with healthy green foliage"
else:
if temp_avg > 25:
temp_effect = "warm weather promoting vigorous growth with larger, darker green leaves"
elif temp_avg < 10:
temp_effect = "cool weather slowing growth with smaller, pale leaves"
else:
temp_effect = "moderate temperature supporting steady growth with healthy green foliage"
if plant_conditions['total_rain'] > 20:
water_effect = "abundant rainfall keeping leaves lush, turgid and deep green"
elif plant_conditions['total_rain'] < 5:
water_effect = "dry conditions causing slight leaf wilting and browning at edges"
else:
water_effect = "adequate moisture maintaining crisp, healthy leaf appearance"
if plant_conditions['total_sunshine_hours'] > 50:
sun_effect = "plenty of sunlight encouraging dense, compact foliage growth"
elif plant_conditions['total_sunshine_hours'] < 20:
sun_effect = "limited sunlight causing elongated stems and sparse leaf growth"
else:
sun_effect = "moderate sunlight supporting balanced, proportional growth"
if plant_conditions['max_uv_index'] > 7:
uv_effect = "high UV causing slight leaf thickening and waxy appearance"
else:
uv_effect = "moderate UV maintaining normal leaf texture"
prompt = (
f"Transform this {plant_type} showing realistic growth after {plant_conditions['days_analyzed']} days. "
f"Current state: {plant_health}. Apply these weather effects: {temp_effect}, {water_effect}, {sun_effect}, and {uv_effect}. "
f"Show natural changes in leaf size, color saturation, stem thickness, and overall plant structure while maintaining the original composition and lighting. "
f"Weather summary: {plant_conditions['avg_temp_min']}-{plant_conditions['avg_temp_max']}°C, "
f"{plant_conditions['total_rain']}mm rain, {plant_conditions['total_sunshine_hours']}h sun"
)
return prompt, plant_type, plant_health
def detect_plant_type(self, image):
"""Use trained model to classify the plant type"""
if self.trained_model is None:
self.load_trained_model()
if self.trained_model is None:
print("⚠️ Trained model not available, using fallback rule.")
return "generic plant"
try:
transform = transforms.Compose([
transforms.Resize((224, 224)), # use the same input size as in training
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], # ImageNet (or your dataset's) mean/std
[0.229, 0.224, 0.225])
])
input_tensor = transform(image).unsqueeze(0).to(self.device)
with torch.no_grad():
output = self.trained_model(input_tensor)
_, predicted = torch.max(output, 1)
predicted_class = self.class_labels[predicted.item()]
# Get confidence score
probabilities = torch.nn.functional.softmax(output, dim=1)
confidence = probabilities[0][predicted].item()
print(f"🌱 Plant classification: {predicted_class} (confidence: {confidence:.2f})")
return predicted_class
except Exception as e:
print(f"⚠️ Error in plant type detection: {e}")
return "generic plant"
def cleanup_gpu_memory(self):
"""Clean up GPU memory and move models appropriately"""
if torch.cuda.is_available():
# Move Stable Diffusion model to CPU if LLaVA is being used
if hasattr(self, 'image_model') and self.image_model is not None:
print("💾 Moving Stable Diffusion to CPU to free GPU memory...")
self.image_model = self.image_model.to("cpu")
torch.cuda.empty_cache()
torch.cuda.synchronize()
# Print memory stats
allocated = torch.cuda.memory_allocated() / 1024**3
cached = torch.cuda.memory_reserved() / 1024**3
print(f"📊 GPU Memory: {allocated:.1f}GB allocated, {cached:.1f}GB cached")
def assess_plant_health(self, image):
"""Assess basic plant health from image"""
try:
img_array = np.array(image)
# Analyze brightness and color vibrancy
brightness = np.mean(img_array)
green_channel = np.mean(img_array[:,:,1])
if brightness > 150 and green_channel > 120:
return "healthy and vibrant"
elif brightness > 100 and green_channel > 80:
return "moderately healthy"
else:
return "showing some stress"
except Exception as e:
print(f"⚠️ Error in health assessment: {e}")
return "unknown health"
def describe_image_with_llava(self, image_pil, prompt=None):
"""Use LLaVA-Next to generate a description of the plant image with proper device handling."""
try:
from transformers import LlavaNextForConditionalGeneration, LlavaNextProcessor
import torch
if not hasattr(self, "llava_model"):
print("🔄 Loading LLaVA-Next model...")
model_id = "llava-hf/llava-v1.6-mistral-7b-hf"
# Use the correct processor for LLaVA-Next
self.llava_processor = LlavaNextProcessor.from_pretrained(model_id)
# Determine optimal device configuration
if torch.cuda.is_available():
# Check available GPU memory
gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1024**3 # GB
print(f"📊 Available GPU memory: {gpu_memory:.1f} GB")
if gpu_memory >= 12: # High memory GPU
device_map = "auto"
torch_dtype = torch.float16
print("🚀 Using GPU with auto device mapping")
else: # Lower memory GPU - use CPU offloading
device_map = {"": "cpu"}
torch_dtype = torch.float32
print("💾 Using CPU due to limited GPU memory")
else:
device_map = {"": "cpu"}
torch_dtype = torch.float32
print("🖥️ Using CPU (no GPU available)")
# Load model with explicit device mapping
self.llava_model = LlavaNextForConditionalGeneration.from_pretrained(
model_id,
torch_dtype=torch_dtype,
low_cpu_mem_usage=True,
device_map=device_map,
offload_folder="./offload_cache", # Explicit offload directory
offload_state_dict=True if device_map != "auto" else False
)
# Ensure model is in eval mode
self.llava_model.eval()
print("✅ LLaVA-Next loaded successfully")
# Clear CUDA cache before inference
if torch.cuda.is_available():
torch.cuda.empty_cache()
# Prepare the conversation format that LLaVA-Next expects
conversation = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": prompt or "Describe this plant's current condition, growth stage, health indicators, leaf characteristics, and any visible signs of stress or vitality. Focus on botanical details."}
]
}
]
# Apply chat template and process inputs
prompt_text = self.llava_processor.apply_chat_template(conversation, add_generation_prompt=True)
# Process inputs properly
inputs = self.llava_processor(
images=image_pil,
text=prompt_text,
return_tensors="pt"
)
# Handle device placement more carefully
target_device = "cpu" # Default to CPU for stability
if hasattr(self.llava_model, 'device'):
target_device = self.llava_model.device
elif hasattr(self.llava_model, 'hf_device_map'):
# Get the device of the first layer
for module_name, device in self.llava_model.hf_device_map.items():
if device != 'disk':
target_device = device
break
print(f"🎯 Moving inputs to device: {target_device}")
inputs = {k: v.to(target_device) if isinstance(v, torch.Tensor) else v for k, v in inputs.items()}
# Generate with proper parameters and error handling
with torch.no_grad():
try:
output = self.llava_model.generate(
**inputs,
max_new_tokens=150, # Reduced for stability
do_sample=False, # Use greedy decoding for consistency
temperature=None, # Not used with do_sample=False
top_p=None, # Not used with do_sample=False
pad_token_id=self.llava_processor.tokenizer.eos_token_id,
use_cache=True,
repetition_penalty=1.1
)
except RuntimeError as e:
if "out of memory" in str(e).lower():
print("⚠️ GPU OOM, retrying with CPU...")
# Move everything to CPU and retry
inputs = {k: v.cpu() if isinstance(v, torch.Tensor) else v for k, v in inputs.items()}
if hasattr(self.llava_model, 'cpu'):
self.llava_model = self.llava_model.cpu()
output = self.llava_model.generate(
**inputs,
max_new_tokens=150,
do_sample=False,
pad_token_id=self.llava_processor.tokenizer.eos_token_id
)
else:
raise e
# Decode only the new tokens (exclude input tokens)
input_length = inputs["input_ids"].shape[1]
generated_tokens = output[0][input_length:]
description = self.llava_processor.tokenizer.decode(generated_tokens, skip_special_tokens=True)
# Clean up cache after generation
if torch.cuda.is_available():
torch.cuda.empty_cache()
return description.strip()
except ImportError as e:
print(f"⚠️ LLaVA-Next dependencies not available: {e}")
return "Visual description not available - missing dependencies."
except Exception as e:
print(f"⚠️ Error during LLaVA-Next description: {e}")
print(f"🔍 Error details: {type(e).__name__}: {str(e)}")
return f"Visual description failed: {str(e)}"
def transform_plant_image(self, image_path, prompt):
"""STEP 4: Generate new image based on analyzed prompt"""
if self.image_model is None:
self.load_image_model()
try:
# Load and prepare image
image = Image.open(image_path).convert("RGB")
# Resize if too large (for memory efficiency)
if max(image.size) > 1024:
image.thumbnail((1024, 1024), Image.Resampling.LANCZOS)
print(f" STEP 4: Generating transformed image...")
print(f" Using prompt: {prompt}")
# Transform image
result = self.image_model(
prompt,
image=image,
num_inference_steps=70,
image_guidance_scale=1.5,
guidance_scale=7.5
).images[0]
return result
except Exception as e:
print(f"Error transforming image: {e}")
return None
@staticmethod
def safe_print(text):
try:
print(text)
except UnicodeEncodeError:
# Fallback for systems with limited encoding support
print(text.encode('ascii', errors='replace').decode('ascii'))
def predict_plant_growth(self, image_path, lat=None, lon=None, output_path="./predicted_plant.jpg", days=7):
"""Complete pipeline: weather + image transformation"""
# Auto-detect location if not provided
if lat is None or lon is None:
print(" Auto-detecting location...")
lat, lon = self.get_current_location()
print(f" Starting plant prediction for coordinates: {lat:.4f}, {lon:.4f}")
print(f" Analyzing {days} days of weather data...")
# Step 1: Get weather data using official Open-Meteo client
print("Fetching weather data with caching and retry...")
weather_df, response_info = self.get_weather_forecast(lat, lon, days)
if weather_df is None:
print("Failed to get weather data")
return None
print(f"Weather data retrieved for {len(weather_df)} days")
print("\nWeather Overview:")
print(weather_df[['date', 'temperature_2m_max', 'temperature_2m_min', 'precipitation_sum', 'sunshine_duration']].head())
# Step 2: Analyze weather for plants
plant_conditions = self.analyze_weather_for_plants(weather_df)
print(f"\nPlant-specific weather analysis: {plant_conditions}")
# Step 3: Analyze image + weather to create intelligent prompt
print("\n STEP 3: Analyzing image and creating transformation prompt...")
try:
prompt, plant_type, plant_health = self.create_transformation_prompt(image_path, plant_conditions)
self.safe_print(f" Plant identified as: {plant_type}")
self.safe_print(f" Current health: {plant_health}")
self.safe_print(f" Generated transformation prompt: {prompt}")
except Exception as e:
print(f" Error in Step 3: {e}")
return None
# Step 4: Generate transformed image
print("\nSTEP 4: Generating prediction image...")
try:
result_image = self.transform_plant_image(image_path, prompt)
except Exception as e:
print(f" Error in Step 4: {e}")
return None
if result_image:
# Save the predicted image
result_image.save(output_path)
print(f"Plant growth prediction saved to: {output_path}")
# Compose the basic description
description = (
f"{plant_type.capitalize()} predicted after {plant_conditions['days_analyzed']} days:\n"
f"- Temperature: {plant_conditions['avg_temp_min']}{plant_conditions['avg_temp_max']} °C\n"
f"- Rain: {plant_conditions['total_rain']} mm\n"
f"- Sunshine: {plant_conditions['total_sunshine_hours']} h\n"
f"- UV max: {plant_conditions['max_uv_index']}\n"
f"- Daily temperature range: {plant_conditions['temp_range']} °C\n"
f"Estimated health: {plant_health}."
)
# STEP 4.5: Enhanced visual description with LLaVA-Next
try:
print("\n🧠 STEP 4.5: Generating detailed visual analysis...")
# Clean up GPU memory before loading LLaVA
self.cleanup_gpu_memory()
llava_description = self.describe_image_with_llava(
result_image,
f"Analyze this {plant_type} plant prediction image. Describe the visible growth changes, leaf development, overall health indicators, and how the plant appears to have responded to the weather conditions: {plant_conditions['avg_temp_min']}-{plant_conditions['avg_temp_max']}°C, {plant_conditions['total_rain']}mm rain, {plant_conditions['total_sunshine_hours']}h sun over {plant_conditions['days_analyzed']} days."
)
print("🧠 AI Visual Analysis:")
print(llava_description)
# Save comprehensive description
complete_description = f"{description}\n\nAI Visual Analysis:\n{llava_description}"
description_txt_path = os.path.splitext(output_path)[0] + "_analysis.txt"
with open(description_txt_path, "w", encoding="utf-8") as f:
f.write(complete_description)
print(f"📄 Complete analysis saved to: {description_txt_path}")
except Exception as e:
print(f"⚠️ Visual analysis failed: {e}")
# Still save basic description
basic_txt_path = os.path.splitext(output_path)[0] + "_basic_info.txt"
with open(basic_txt_path, "w", encoding="utf-8") as f:
f.write(description)
print(f"📄 Basic info saved to: {basic_txt_path}")
return result_image, plant_conditions, weather_df, plant_type, plant_health
else:
print("Failed to transform image")
return None
# Example usage
if __name__ == "__main__":
# Initialize predictor
predictor = PlantPredictor()
# Example coordinates (Milan, Italy)
latitude = 45.4642
longitude = 9.1900
# Predict plant growth
# Replace 'your_plant_image.jpg' with actual image path
result = predictor.predict_plant_growth(
image_path="./basilico.jpg",
lat=latitude,
lon=longitude,
output_path="./basilico_new2.jpg",
days=7
)
if result:
image, conditions, weather_data, plant_type, plant_health = result
print("\n" + "="*50)
print(" PLANT PREDICTION COMPLETED SUCCESSFULLY!")
print("="*50)
print(f" Plant type: {plant_type}")
print(f" Plant health: {plant_health}")
print(f" Weather conditions: {conditions}")
print(f" Data points: {weather_data.shape}")
print(f" Temperature: {conditions['avg_temp_min']}°C to {conditions['avg_temp_max']}°C")
print(f" Total rain: {conditions['total_rain']}mm")
print(f" Sunshine: {conditions['total_sunshine_hours']}h")
else:
print("Plant prediction failed.")


View file

@@ -0,0 +1 @@
Description not available.

View file

@@ -0,0 +1 @@
Description not available.

BIN  test3/foto/basilico-OLD.jpg (new binary file)


test3/script.py (new file, 534 lines)

@@ -0,0 +1,534 @@
import openmeteo_requests
import pandas as pd
import requests_cache
from retry_requests import retry
from datetime import datetime, timedelta
from PIL import Image
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
import numpy as np
import geocoder
class PlantPredictor:
def __init__(self):
"""Initialize the plant prediction pipeline with Open-Meteo client"""
# Setup the Open-Meteo API client with cache and retry on error
cache_session = requests_cache.CachedSession('.cache', expire_after=3600)
retry_session = retry(cache_session, retries=5, backoff_factor=0.2)
self.openmeteo = openmeteo_requests.Client(session=retry_session)
self.image_model = None
def get_current_location(self):
"""Get current location using IP geolocation"""
try:
g = geocoder.ip('me')
if g.ok:
print(f"📍 Location detected: {g.city}, {g.country}")
print(f"📍 Coordinates: {g.latlng[0]:.4f}, {g.latlng[1]:.4f}")
return g.latlng[0], g.latlng[1] # lat, lon
else:
print("⚠️ Could not detect location, using default (Milan)")
return 45.4642, 9.1900  # fall back to the default Milan coordinates
except Exception as e:
print(f"⚠️ Location detection failed: {e}, using default (Milan)")
return 45.4642, 9.1900  # fall back to the default Milan coordinates
def load_image_model(self):
"""Load the image transformation model with high-quality settings"""
print("🔄 Loading Stable Diffusion model with high-quality settings...")
# Check if CUDA is available and print GPU info
if torch.cuda.is_available():
gpu_name = torch.cuda.get_device_name(0)
gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1024**3
print(f"🚀 GPU: {gpu_name} ({gpu_memory:.1f} GB)")
self.image_model = StableDiffusionInstructPix2PixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix",
torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
use_safetensors=True,
safety_checker=None,
requires_safety_checker=False
)
if torch.cuda.is_available():
self.image_model = self.image_model.to("cuda")
# Enable memory efficient attention for better quality
try:
self.image_model.enable_xformers_memory_efficient_attention()
print("✅ XFormers memory efficient attention enabled")
except:
print("⚠️ XFormers not available, using standard attention")
# Enable VAE slicing for higher resolution support
self.image_model.enable_vae_slicing()
print("✅ VAE slicing enabled for high-res support")
# Enable attention slicing for memory efficiency
self.image_model.enable_attention_slicing(1)
print("✅ Attention slicing enabled")
print("✅ High-quality model loaded successfully!")
def get_weather_forecast(self, lat, lon, days=7):
"""Get weather forecast from Open-Meteo API using official client"""
start_date = datetime.now().strftime("%Y-%m-%d")
end_date = (datetime.now() + timedelta(days=days)).strftime("%Y-%m-%d")
url = "https://api.open-meteo.com/v1/forecast"
params = {
"latitude": lat,
"longitude": lon,
"daily": [
"temperature_2m_max",
"temperature_2m_min",
"precipitation_sum",
"rain_sum",
"uv_index_max",
"sunshine_duration"
],
"start_date": start_date,
"end_date": end_date,
"timezone": "auto"
}
try:
responses = self.openmeteo.weather_api(url, params=params)
response = responses[0] # Process first location
print(f"Coordinates: {response.Latitude()}°N {response.Longitude()}°E")
print(f"Elevation: {response.Elevation()} m asl")
print(f"Timezone: UTC{response.UtcOffsetSeconds()//3600:+d}")
# Process daily data
daily = response.Daily()
# Extract data as numpy arrays (much faster!)
daily_data = {
"date": pd.date_range(
start=pd.to_datetime(daily.Time(), unit="s", utc=True),
end=pd.to_datetime(daily.TimeEnd(), unit="s", utc=True),
freq=pd.Timedelta(seconds=daily.Interval()),
inclusive="left"
),
"temperature_2m_max": daily.Variables(0).ValuesAsNumpy(),
"temperature_2m_min": daily.Variables(1).ValuesAsNumpy(),
"precipitation_sum": daily.Variables(2).ValuesAsNumpy(),
"rain_sum": daily.Variables(3).ValuesAsNumpy(),
"uv_index_max": daily.Variables(4).ValuesAsNumpy(),
"sunshine_duration": daily.Variables(5).ValuesAsNumpy()
}
# Create DataFrame for easy analysis
daily_dataframe = pd.DataFrame(data=daily_data)
return daily_dataframe, response
except Exception as e:
print(f"Error fetching weather data: {e}")
return None, None
def analyze_weather_for_plants(self, weather_df):
"""Analyze weather data and create plant-specific metrics"""
if weather_df is None or weather_df.empty:
return None
# Handle NaN values by filling with 0 or mean
weather_df = weather_df.fillna(0)
# Calculate plant-relevant metrics using pandas (more efficient)
plant_conditions = {
"avg_temp_max": round(weather_df['temperature_2m_max'].mean(), 1),
"avg_temp_min": round(weather_df['temperature_2m_min'].mean(), 1),
"total_precipitation": round(weather_df['precipitation_sum'].sum(), 1),
"total_rain": round(weather_df['rain_sum'].sum(), 1),
"total_sunshine_hours": round(weather_df['sunshine_duration'].sum() / 3600, 1), # Convert to hours
"max_uv_index": round(weather_df['uv_index_max'].max(), 1),
"days_analyzed": len(weather_df),
"temp_range": round(weather_df['temperature_2m_max'].max() - weather_df['temperature_2m_min'].min(), 1)
}
return plant_conditions
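# Quick worked example of the unit conversion above (illustrative numbers only):
# seven days whose sunshine_duration values sum to 180,000 seconds give
# 180000 / 3600 = 50.0 total_sunshine_hours, i.e. exactly the boundary the
# prompt logic below uses between "plenty of sunlight" and moderate sun.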
def create_transformation_prompt(self, image_path, plant_conditions):
"""Create a detailed prompt for image transformation based on weather AND image analysis"""
if not plant_conditions:
return "Show this plant after one week of growth", "generic plant", "unknown health"
# STEP 3A: Analyze original image
plant_type = "generic plant"
plant_health = "unknown health"
try:
image = Image.open(image_path).convert("RGB")
# Basic image analysis
width, height = image.size
aspect_ratio = width / height
# Simple plant type detection based on image characteristics
plant_type = self.detect_plant_type(image)
plant_health = self.assess_plant_health(image)
print(f"📸 Image Analysis:")
print(f" Plant type detected: {plant_type}")
print(f" Current health: {plant_health}")
print(f" Image size: {width}x{height}")
except Exception as e:
print(f"Warning: Could not analyze image: {e}")
plant_type = "generic plant"
plant_health = "healthy"
# STEP 3B: Weather analysis with plant-specific logic
temp_avg = (plant_conditions['avg_temp_max'] + plant_conditions['avg_temp_min']) / 2
# Temperature effects (adjusted by plant type)
if plant_type == "basil" or "herb" in plant_type:
if temp_avg > 25:
temp_effect = "warm weather promoting vigorous basil growth with larger, aromatic leaves and bushier structure"
elif temp_avg < 15:
temp_effect = "cool weather slowing basil growth with smaller, less vibrant leaves"
else:
temp_effect = "optimal temperature for basil supporting steady growth with healthy green foliage"
else:
if temp_avg > 25:
temp_effect = "warm weather promoting vigorous growth with larger, darker green leaves"
elif temp_avg < 10:
temp_effect = "cool weather slowing growth with smaller, pale leaves"
else:
temp_effect = "moderate temperature supporting steady growth with healthy green foliage"
# Water effects
if plant_conditions['total_rain'] > 20:
water_effect = "abundant rainfall keeping leaves lush, turgid and deep green"
elif plant_conditions['total_rain'] < 5:
water_effect = "dry conditions causing slight leaf wilting and browning at edges"
else:
water_effect = "adequate moisture maintaining crisp, healthy leaf appearance"
# Sunlight effects
if plant_conditions['total_sunshine_hours'] > 50:
sun_effect = "plenty of sunlight encouraging dense, compact foliage growth"
elif plant_conditions['total_sunshine_hours'] < 20:
sun_effect = "limited sunlight causing elongated stems and sparse leaf growth"
else:
sun_effect = "moderate sunlight supporting balanced, proportional growth"
# UV effects
if plant_conditions['max_uv_index'] > 7:
uv_effect = "high UV causing slight leaf thickening and waxy appearance"
else:
uv_effect = "moderate UV maintaining normal leaf texture"
# STEP 3C: Create comprehensive prompt combining image + weather analysis
# Final prompt for the plant transformation, combining image and weather analysis
prompt = f"""Transform this {plant_type} showing realistic growth after {plant_conditions['days_analyzed']} days. The plant and its surroundings should stay realistic, as they would look in the real world, so that a human would say the picture looks normal and the changes focus only on the plant. Current state: {plant_health}. Apply these weather effects: {temp_effect}, {water_effect}, {sun_effect}, and {uv_effect}. Show natural changes in leaf size, color saturation, stem thickness, and overall plant structure while maintaining the original composition and lighting. Weather summary: {plant_conditions['avg_temp_min']}-{plant_conditions['avg_temp_max']}°C, {plant_conditions['total_rain']}mm rain, {plant_conditions['total_sunshine_hours']}h sun"""
return prompt, plant_type, plant_health
def detect_plant_type(self, image):
"""Simple plant type detection based on image characteristics"""
# This is a simplified version - in a real app you'd use a plant classification model
# For now, we'll do basic analysis
# Convert to array for analysis
img_array = np.array(image)
# Analyze color distribution
green_pixels = np.sum((img_array[:,:,1] > img_array[:,:,0]) & (img_array[:,:,1] > img_array[:,:,2]))
total_pixels = img_array.shape[0] * img_array.shape[1]
green_ratio = green_pixels / total_pixels
# Simple heuristics (could be improved with ML)
if green_ratio > 0.4:
return "basil" # Assume basil for high green content
else:
return "generic plant"
def assess_plant_health(self, image):
"""Assess basic plant health from image"""
img_array = np.array(image)
# Analyze brightness and color vibrancy
brightness = np.mean(img_array)
green_channel = np.mean(img_array[:,:,1])
if brightness > 150 and green_channel > 120:
return "healthy and vibrant"
elif brightness > 100 and green_channel > 80:
return "moderately healthy"
else:
return "showing some stress"
def transform_plant_image(self, image_path, prompt, num_samples=1):
"""STEP 4: Generate ULTRA HIGH-QUALITY image with 60 inference steps"""
if self.image_model is None:
self.load_image_model()
try:
# Load and prepare image with HIGHER RESOLUTION
print(f"📸 Loading image for high-quality processing: {image_path}")
image = Image.open(image_path).convert("RGB")
original_size = image.size
# Use HIGHER resolution for better quality (up to 1024x1024)
max_size = 1024 # Increased from 512 for better quality
if max(image.size) < max_size:
# Upscale smaller images for better quality
scale_factor = max_size / max(image.size)
new_size = (int(image.size[0] * scale_factor), int(image.size[1] * scale_factor))
image = image.resize(new_size, Image.Resampling.LANCZOS)
print(f"📈 Upscaled image from {original_size} to {image.size} for better quality")
elif max(image.size) > max_size:
# Resize but maintain higher resolution
image.thumbnail((max_size, max_size), Image.Resampling.LANCZOS)
print(f"📏 Resized image from {original_size} to {image.size}")
print(f"🎨 Generating 1 ULTRA HIGH-QUALITY sample with 60 inference steps...")
print(f"📝 Using enhanced prompt: {prompt[:120]}...")
generated_images = []
# Clear GPU cache before generation
if torch.cuda.is_available():
torch.cuda.empty_cache()
for i in range(num_samples):
print(f"🔄 Generating ultra high-quality sample {i+1}/{num_samples} with 60 steps...")
# Use different seeds for variety
seed = 42 + i * 137 # Prime number spacing for better variety
generator = torch.Generator(device="cuda" if torch.cuda.is_available() else "cpu").manual_seed(seed)
# ULTRA HIGH-QUALITY SETTINGS (60 steps for maximum quality)
result = self.image_model(
prompt,
image=image,
num_inference_steps=60, # Increased to 60 for ultra high quality
image_guidance_scale=2.0, # Increased from 1.5 for stronger conditioning
guidance_scale=9.0, # Increased from 7.5 for better prompt following
generator=generator,
eta=0.0, # Deterministic for better quality
# Add additional quality parameters
).images[0]
generated_images.append(result)
print(f"✅ Ultra high-quality sample {i+1} completed with 60 inference steps!")
# Clean up GPU memory between generations
if torch.cuda.is_available():
torch.cuda.empty_cache()
print(f"🎉 Ultra high-quality sample generated with 60 inference steps!")
return generated_images
except torch.cuda.OutOfMemoryError:
print("❌ GPU out of memory! Try reducing num_samples or image resolution")
print("💡 Current settings are optimized for high-end GPUs")
if torch.cuda.is_available():
torch.cuda.empty_cache()
return None
except Exception as e:
print(f"❌ Error transforming image: {e}")
if torch.cuda.is_available():
torch.cuda.empty_cache()
return None
def predict_plant_growth(self, image_path, lat=None, lon=None, output_path="predicted_plant.jpg", days=7, num_samples=1, high_quality=True):
"""Complete ULTRA HIGH-QUALITY pipeline with 60 inference steps for maximum quality"""
# Auto-detect location if not provided
if lat is None or lon is None:
print("🌍 Auto-detecting location...")
lat, lon = self.get_current_location()
print(f"🌱 Starting ULTRA HIGH-QUALITY plant prediction for coordinates: {lat:.4f}, {lon:.4f}")
print(f"📅 Analyzing {days} days of weather data...")
print(f"🎯 Generating 1 ultra high-quality sample with 60 inference steps")
print(f"⚠️ This will take longer but produce maximum quality results")
# Step 1: Get weather data using official Open-Meteo client
print("🌤️ Fetching weather data with caching and retry...")
weather_df, response_info = self.get_weather_forecast(lat, lon, days)
if weather_df is None:
print("❌ Failed to get weather data")
return None
print(f"✅ Weather data retrieved for {len(weather_df)} days")
print("\n📊 Weather Overview:")
print(weather_df[['date', 'temperature_2m_max', 'temperature_2m_min', 'precipitation_sum', 'sunshine_duration']].head())
# Step 2: Analyze weather for plants
plant_conditions = self.analyze_weather_for_plants(weather_df)
print(f"\n🔬 Plant-specific weather analysis: {plant_conditions}")
# Step 3: Analyze image + weather to create intelligent prompt
print("\n🧠 STEP 3: Advanced image analysis and prompt creation...")
try:
prompt, plant_type, plant_health = self.create_transformation_prompt(image_path, plant_conditions)
print(f"🌿 Plant identified as: {plant_type}")
print(f"💚 Current health: {plant_health}")
except Exception as e:
print(f"❌ Error in Step 3: {e}")
return None
# Step 4: Generate ULTRA HIGH-QUALITY transformed image
print(f"\n STEP 4: Generating 1 prediction with 60 inference steps...")
print(" This may take 5-8 minutes for absolute maximum quality...")
import time
start_time = time.time()
try:
result_images = self.transform_plant_image(image_path, prompt, num_samples=num_samples)
except Exception as e:
print(f" Error in Step 4: {e}")
return None
end_time = time.time()
total_time = end_time - start_time
if result_images and len(result_images) > 0:
# Save the ultra high-quality result
saved_paths = []
# Save with maximum quality JPEG settings
result_images[0].save(output_path, "JPEG", quality=98, optimize=True)
saved_paths.append(output_path)
print(f" prediction saved to: {output_path}")
# Create comparison with original
self.create_comparison_grid(image_path, result_images, f"{output_path.replace('.jpg', '')}_comparison.jpg")
print(f"⏱️ Total generation time: {total_time:.1f} seconds")
print(f"🏆 Generated with 60 inference steps for maximum quality!")
# GPU memory usage info
if torch.cuda.is_available():
memory_used = torch.cuda.max_memory_allocated() / 1024**3
print(f" Peak GPU memory usage: {memory_used:.2f} GB")
torch.cuda.reset_peak_memory_stats()
return result_images, plant_conditions, weather_df, plant_type, plant_health, saved_paths
else:
print(" Failed to generate image")
return None
def create_comparison_grid(self, original_path, generated_images, output_path):
"""Create a comparison grid"""
try:
from PIL import Image, ImageDraw, ImageFont
# Load original
original = Image.open(original_path).convert("RGB")
# Use higher resolution for grid
target_size = (512, 512)
original = original.resize(target_size, Image.Resampling.LANCZOS)
resized_generated = [img.resize(target_size, Image.Resampling.LANCZOS) for img in generated_images]
# Calculate grid
total_images = len(generated_images) + 1
cols = min(3, total_images) # 3 columns max for better layout
rows = (total_images + cols - 1) // cols
# Create high-quality grid
grid_width = cols * target_size[0]
grid_height = rows * target_size[1] + 80 # More space for labels
grid_image = Image.new('RGB', (grid_width, grid_height), 'white')
# Add images
grid_image.paste(original, (0, 80))
for i, img in enumerate(resized_generated):
col = (i + 1) % cols
row = (i + 1) // cols
x = col * target_size[0]
y = row * target_size[1] + 80
grid_image.paste(img, (x, y))
# Add labels
try:
draw = ImageDraw.Draw(grid_image)
try:
font = ImageFont.truetype("arial.ttf", 32) # Larger font
except:
font = ImageFont.load_default()
draw.text((10, 20), "Original", fill='black', font=font)
for i in range(len(resized_generated)):
col = (i + 1) % cols
x = col * target_size[0] + 10
draw.text((x, 20), f"HQ Sample {i+1}", fill='black', font=font)
except:
pass
# Save with high quality
grid_image.save(output_path, "JPEG", quality=95, optimize=True)
print(f" High-quality comparison grid saved to: {output_path}")
except Exception as e:
print(f" Could not create comparison grid: {e}")
# Example usage - HIGH QUALITY MODE
if __name__ == "__main__":
# Initialize predictor
predictor = PlantPredictor()
# Example coordinates (Milan, Italy)
latitude = 45.4642
longitude = 9.1900
print(" Starting ULTRA HIGH-QUALITY plant prediction with 60 inference steps...")
print(" This will use maximum GPU power and time for absolute best quality")
# Ultra high-quality prediction with single sample
result = predictor.predict_plant_growth(
image_path="./foto/basilico1 originale.png",
lat=latitude,
lon=longitude,
output_path="./predicted_plant_ultra_hq.jpg",
days=7,
num_samples=1, # Single ultra high-quality sample
high_quality=True
)
if result:
images, conditions, weather_data, plant_type, plant_health, saved_paths = result
print("\n" + "="*60)
print("🎉 PLANT PREDICTION COMPLETED!")
print("="*60)
print(f"🌿 Plant type: {plant_type}")
print(f"💚 Plant health: {plant_health}")
print(f"🎯 Generated 1 ultra high-quality sample with 60 inference steps")
print(f"📊 Weather data points: {weather_data.shape}")
print(f"🌡️ Temperature range: {conditions['avg_temp_min']}°C to {conditions['avg_temp_max']}°C")
print(f"🌧️ Total precipitation: {conditions['total_rain']}mm")
print(f"☀️ Sunshine hours: {conditions['total_sunshine_hours']}h")
print(f"\n💾 Saved files:")
print(f" 📸 Ultra HQ prediction: ./predicted_plant_ultra_hq.jpg")
print(f" 📊 Comparison image: ./predicted_plant_ultra_hq_comparison.jpg")
print(f"\n🏆 Ultra quality improvements:")
print(f" ✅ 60 inference steps (maximum quality)")
print(f" ✅ Higher guidance scales for perfect accuracy")
print(f" ✅ Up to 1024x1024 resolution support")
print(f" ✅ Single focused sample for consistency")
print(f" ✅ Enhanced prompt engineering")
print(f" ✅ Maximum quality JPEG compression (98%)")
print("")
else:
print("❌ Ultra high-quality plant prediction failed.")
print("💡 Check GPU memory and ensure RTX 3060 is available")