This commit is contained in:
Leon Astner 2025-08-02 11:27:45 +02:00
parent efba4c33ba
commit 5145c42632
30 changed files with 1270 additions and 5 deletions

View file

Before

Width:  |  Height:  |  Size: 1.4 MiB

After

Width:  |  Height:  |  Size: 1.4 MiB

Before After
Before After

BIN
FINAL/foto/basilico.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.9 MiB

BIN
FINAL/foto/basilico1.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1,023 KiB

535
FINAL/script.py Normal file
View file

@ -0,0 +1,535 @@
import openmeteo_requests
import pandas as pd
import requests_cache
from retry_requests import retry
from datetime import datetime, timedelta
from PIL import Image
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
import numpy as np
import geocoder
class PlantPredictor:
def __init__(self):
"""Initialize the plant prediction pipeline with Open-Meteo client"""
# Setup the Open-Meteo API client with cache and retry on error
cache_session = requests_cache.CachedSession('.cache', expire_after=3600)
retry_session = retry(cache_session, retries=5, backoff_factor=0.2)
self.openmeteo = openmeteo_requests.Client(session=retry_session)
self.image_model = None
def get_current_location(self):
"""Get current location using IP geolocation"""
try:
g = geocoder.ip('me')
if g.ok:
print(f"📍 Location detected: {g.city}, {g.country}")
print(f"📍 Coordinates: {g.latlng[0]:.4f}, {g.latlng[1]:.4f}")
return g.latlng[0], g.latlng[1] # lat, lon
else:
print("⚠️ Could not detect location, using default (Milan)")
self.image_model = None
except Exception as e:
print(f"⚠️ Location detection failed: {e}, using default (Milan)")
self.image_model = None
def load_image_model(self):
"""Load the image transformation model with high-quality settings"""
print("🔄 Loading Stable Diffusion model with high-quality settings...")
# Check if CUDA is available and print GPU info
if torch.cuda.is_available():
gpu_name = torch.cuda.get_device_name(0)
gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1024**3
print(f"🚀 GPU: {gpu_name} ({gpu_memory:.1f} GB)")
self.image_model = StableDiffusionInstructPix2PixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix",
torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
use_safetensors=True,
safety_checker=None,
requires_safety_checker=False
)
if torch.cuda.is_available():
self.image_model = self.image_model.to("cuda")
# Enable memory efficient attention for better quality
try:
self.image_model.enable_xformers_memory_efficient_attention()
print("✅ XFormers memory efficient attention enabled")
except:
print("⚠️ XFormers not available, using standard attention")
# Enable VAE slicing for higher resolution support
self.image_model.enable_vae_slicing()
print("✅ VAE slicing enabled for high-res support")
# Enable attention slicing for memory efficiency
self.image_model.enable_attention_slicing(1)
print("✅ Attention slicing enabled")
print("✅ High-quality model loaded successfully!")
def get_weather_forecast(self, lat, lon, days=7):
"""Get weather forecast from Open-Meteo API using official client"""
start_date = datetime.now().strftime("%Y-%m-%d")
end_date = (datetime.now() + timedelta(days=days)).strftime("%Y-%m-%d")
url = "https://api.open-meteo.com/v1/forecast"
params = {
"latitude": lat,
"longitude": lon,
"daily": [
"temperature_2m_max",
"temperature_2m_min",
"precipitation_sum",
"rain_sum",
"uv_index_max",
"sunshine_duration"
],
"start_date": start_date,
"end_date": end_date,
"timezone": "auto"
}
try:
responses = self.openmeteo.weather_api(url, params=params)
response = responses[0] # Process first location
print(f"Coordinates: {response.Latitude()}°N {response.Longitude()}°E")
print(f"Elevation: {response.Elevation()} m asl")
print(f"Timezone: UTC{response.UtcOffsetSeconds()//3600:+d}")
# Process daily data
daily = response.Daily()
# Extract data as numpy arrays (much faster!)
daily_data = {
"date": pd.date_range(
start=pd.to_datetime(daily.Time(), unit="s", utc=True),
end=pd.to_datetime(daily.TimeEnd(), unit="s", utc=True),
freq=pd.Timedelta(seconds=daily.Interval()),
inclusive="left"
),
"temperature_2m_max": daily.Variables(0).ValuesAsNumpy(),
"temperature_2m_min": daily.Variables(1).ValuesAsNumpy(),
"precipitation_sum": daily.Variables(2).ValuesAsNumpy(),
"rain_sum": daily.Variables(3).ValuesAsNumpy(),
"uv_index_max": daily.Variables(4).ValuesAsNumpy(),
"sunshine_duration": daily.Variables(5).ValuesAsNumpy()
}
# Create DataFrame for easy analysis
daily_dataframe = pd.DataFrame(data=daily_data)
return daily_dataframe, response
except Exception as e:
print(f"Error fetching weather data: {e}")
return None, None
def analyze_weather_for_plants(self, weather_df):
"""Analyze weather data and create plant-specific metrics"""
if weather_df is None or weather_df.empty:
return None
# Handle NaN values by filling with 0 or mean
weather_df = weather_df.fillna(0)
# Calculate plant-relevant metrics using pandas (more efficient)
plant_conditions = {
"avg_temp_max": round(weather_df['temperature_2m_max'].mean(), 1),
"avg_temp_min": round(weather_df['temperature_2m_min'].mean(), 1),
"total_precipitation": round(weather_df['precipitation_sum'].sum(), 1),
"total_rain": round(weather_df['rain_sum'].sum(), 1),
"total_sunshine_hours": round(weather_df['sunshine_duration'].sum() / 3600, 1), # Convert to hours
"max_uv_index": round(weather_df['uv_index_max'].max(), 1),
"days_analyzed": len(weather_df),
"temp_range": round(weather_df['temperature_2m_max'].max() - weather_df['temperature_2m_min'].min(), 1)
}
return plant_conditions
def create_transformation_prompt(self, image_path, plant_conditions):
"""Create a detailed prompt for image transformation based on weather AND image analysis"""
if not plant_conditions:
return "Show this plant after one week of growth", "generic plant", "unknown health"
# STEP 3A: Analyze original image
plant_type = "generic plant"
plant_health = "unknown health"
try:
image = Image.open(image_path).convert("RGB")
# Basic image analysis
width, height = image.size
aspect_ratio = width / height
# Simple plant type detection based on image characteristics
plant_type = self.detect_plant_type(image)
plant_health = self.assess_plant_health(image)
print(f"📸 Image Analysis:")
print(f" Plant type detected: {plant_type}")
print(f" Current health: {plant_health}")
print(f" Image size: {width}x{height}")
except Exception as e:
print(f"Warning: Could not analyze image: {e}")
plant_type = "generic plant"
plant_health = "healthy"
# STEP 3B: Weather analysis with plant-specific logic
temp_avg = (plant_conditions['avg_temp_max'] + plant_conditions['avg_temp_min']) / 2
# Temperature effects (adjusted by plant type)
if plant_type == "basil" or "herb" in plant_type:
if temp_avg > 25:
temp_effect = "warm weather promoting vigorous basil growth with larger, aromatic leaves and bushier structure"
elif temp_avg < 15:
temp_effect = "cool weather slowing basil growth with smaller, less vibrant leaves"
else:
temp_effect = "optimal temperature for basil supporting steady growth with healthy green foliage"
else:
if temp_avg > 25:
temp_effect = "warm weather promoting vigorous growth with larger, darker green leaves"
elif temp_avg < 10:
temp_effect = "cool weather slowing growth with smaller, pale leaves"
else:
temp_effect = "moderate temperature supporting steady growth with healthy green foliage"
# Water effects
if plant_conditions['total_rain'] > 20:
water_effect = "abundant rainfall keeping leaves lush, turgid and deep green"
elif plant_conditions['total_rain'] < 5:
water_effect = "dry conditions causing slight leaf wilting and browning at edges"
else:
water_effect = "adequate moisture maintaining crisp, healthy leaf appearance"
# Sunlight effects
if plant_conditions['total_sunshine_hours'] > 50:
sun_effect = "plenty of sunlight encouraging dense, compact foliage growth"
elif plant_conditions['total_sunshine_hours'] < 20:
sun_effect = "limited sunlight causing elongated stems and sparse leaf growth"
else:
sun_effect = "moderate sunlight supporting balanced, proportional growth"
# UV effects
if plant_conditions['max_uv_index'] > 7:
uv_effect = "high UV causing slight leaf thickening and waxy appearance"
else:
uv_effect = "moderate UV maintaining normal leaf texture"
# STEP 3C: Create comprehensive prompt combining image + weather analysis
# // FINAL PROMT HERE FOR PLANT
prompt = f"""Transform this {plant_type} showing realistic growth after {plant_conditions['days_analyzed']} days. The plant should still be realistic and its surrounding how it would look like in the real world and a human should be able to say the picture looks normal and only focus on the plant. Current state: {plant_health}. Apply these weather effects: {temp_effect}, {water_effect}, {sun_effect}, and {uv_effect}. Show natural changes in leaf size, color saturation, stem thickness, and overall plant structure while maintaining the original composition and lighting. Weather summary: {plant_conditions['avg_temp_min']}-{plant_conditions['avg_temp_max']}°C, {plant_conditions['total_rain']}mm rain, {plant_conditions['total_sunshine_hours']}h sun"""
return prompt, plant_type, plant_health
def detect_plant_type(self, image):
"""Simple plant type detection based on image characteristics"""
# This is a simplified version - in a real app you'd use a plant classification model
# For now, we'll do basic analysis
# Convert to array for analysis
img_array = np.array(image)
# Analyze color distribution
green_pixels = np.sum((img_array[:,:,1] > img_array[:,:,0]) & (img_array[:,:,1] > img_array[:,:,2]))
total_pixels = img_array.shape[0] * img_array.shape[1]
green_ratio = green_pixels / total_pixels
# Simple heuristics (could be improved with ML)
if green_ratio > 0.4:
return "basil" # Assume basil for high green content
else:
return "generic plant"
def assess_plant_health(self, image):
"""Assess basic plant health from image"""
img_array = np.array(image)
# Analyze brightness and color vibrancy
brightness = np.mean(img_array)
green_channel = np.mean(img_array[:,:,1])
if brightness > 150 and green_channel > 120:
return "healthy and vibrant"
elif brightness > 100 and green_channel > 80:
return "moderately healthy"
else:
return "showing some stress"
def transform_plant_image(self, image_path, prompt, num_samples=1):
"""STEP 4: Generate ULTRA HIGH-QUALITY image with 60 inference steps"""
if self.image_model is None:
self.load_image_model()
try:
# Load and prepare image with HIGHER RESOLUTION
print(f"📸 Loading image for high-quality processing: {image_path}")
image = Image.open(image_path).convert("RGB")
original_size = image.size
# Use HIGHER resolution for better quality (up to 1024x1024)
max_size = 1024 # Increased from 512 for better quality
if max(image.size) < max_size:
# Upscale smaller images for better quality
scale_factor = max_size / max(image.size)
new_size = (int(image.size[0] * scale_factor), int(image.size[1] * scale_factor))
image = image.resize(new_size, Image.Resampling.LANCZOS)
print(f"📈 Upscaled image from {original_size} to {image.size} for better quality")
elif max(image.size) > max_size:
# Resize but maintain higher resolution
image.thumbnail((max_size, max_size), Image.Resampling.LANCZOS)
print(f"📏 Resized image from {original_size} to {image.size}")
print(f"🎨 Generating 1 ULTRA HIGH-QUALITY sample with 60 inference steps...")
print(f"📝 Using enhanced prompt: {prompt[:120]}...")
generated_images = []
# Clear GPU cache before generation
if torch.cuda.is_available():
torch.cuda.empty_cache()
for i in range(num_samples):
print(f"🔄 Generating ultra high-quality sample {i+1}/{num_samples} with 60 steps...")
# Use different seeds for variety
seed = 42 + i * 137 # Prime number spacing for better variety
generator = torch.Generator(device="cuda" if torch.cuda.is_available() else "cpu").manual_seed(seed)
# ULTRA HIGH-QUALITY SETTINGS (60 steps for maximum quality)
result = self.image_model(
prompt,
image=image,
num_inference_steps=60, # Increased to 60 for ultra high quality
image_guidance_scale=2.0, # Increased from 1.5 for stronger conditioning
guidance_scale=9.0, # Increased from 7.5 for better prompt following
generator=generator,
eta=0.0, # Deterministic for better quality
# Add additional quality parameters
).images[0]
generated_images.append(result)
print(f"✅ Ultra high-quality sample {i+1} completed with 60 inference steps!")
# Clean up GPU memory between generations
if torch.cuda.is_available():
torch.cuda.empty_cache()
print(f"🎉 Ultra high-quality sample generated with 60 inference steps!")
return generated_images
except torch.cuda.OutOfMemoryError:
print("❌ GPU out of memory! Try reducing num_samples or image resolution")
print("💡 Current settings are optimized for high-end GPUs")
if torch.cuda.is_available():
torch.cuda.empty_cache()
return None
except Exception as e:
print(f"❌ Error transforming image: {e}")
if torch.cuda.is_available():
torch.cuda.empty_cache()
return None
    def predict_plant_growth(self, image_path, lat=None, lon=None, output_path="predicted_plant.jpg", days=7, num_samples=1, high_quality=True):
        """Run the full pipeline: weather fetch -> analysis -> prompt -> generation.

        Parameters:
            image_path: path to the source plant photo.
            lat, lon: coordinates; auto-detected from the caller's IP when omitted.
            output_path: where the generated JPEG is written.
            days: forecast window length in days.
            num_samples: how many images to generate.
            high_quality: NOTE(review): accepted but never read in this method.

        Returns:
            (images, plant_conditions, weather_df, plant_type, plant_health,
            saved_paths) on success, or None on any failure.
        """
        # Auto-detect location if not provided.
        if lat is None or lon is None:
            print("🌍 Auto-detecting location...")
            # NOTE(review): get_current_location returns None on failure in this
            # version, which would break the unpacking and the f-string below.
            lat, lon = self.get_current_location()
        print(f"🌱 Starting ULTRA HIGH-QUALITY plant prediction for coordinates: {lat:.4f}, {lon:.4f}")
        print(f"📅 Analyzing {days} days of weather data...")
        print(f"🎯 Generating 1 ultra high-quality sample with 60 inference steps")
        print(f"⚠️ This will take longer but produce maximum quality results")
        # Step 1: fetch weather data via the cached/retrying Open-Meteo client.
        print("🌤️ Fetching weather data with caching and retry...")
        weather_df, response_info = self.get_weather_forecast(lat, lon, days)
        if weather_df is None:
            print("❌ Failed to get weather data")
            return None
        print(f"✅ Weather data retrieved for {len(weather_df)} days")
        print("\n📊 Weather Overview:")
        print(weather_df[['date', 'temperature_2m_max', 'temperature_2m_min', 'precipitation_sum', 'sunshine_duration']].head())
        # Step 2: reduce the forecast to plant-relevant summary metrics.
        plant_conditions = self.analyze_weather_for_plants(weather_df)
        print(f"\n🔬 Plant-specific weather analysis: {plant_conditions}")
        # Step 3: analyse the photo and combine with weather into a prompt.
        print("\n🧠 STEP 3: Advanced image analysis and prompt creation...")
        try:
            prompt, plant_type, plant_health = self.create_transformation_prompt(image_path, plant_conditions)
            print(f"🌿 Plant identified as: {plant_type}")
            print(f"💚 Current health: {plant_health}")
            print(f"📝 Enhanced transformation prompt: {prompt}")
        except Exception as e:
            print(f"❌ Error in Step 3: {e}")
            return None
        # Step 4: generate the transformed image (the expensive part).
        print(f"\n STEP 4: Generating 1 prediction with 60 inference steps...")
        print(" This may take 5-8 minutes for absolute maximum quality...")
        import time
        start_time = time.time()
        try:
            result_images = self.transform_plant_image(image_path, prompt, num_samples=num_samples)
        except Exception as e:
            print(f" Error in Step 4: {e}")
            return None
        end_time = time.time()
        total_time = end_time - start_time
        if result_images and len(result_images) > 0:
            # Save the first (and usually only) sample at near-lossless quality.
            saved_paths = []
            result_images[0].save(output_path, "JPEG", quality=98, optimize=True)
            saved_paths.append(output_path)
            print(f" prediction saved to: {output_path}")
            # Side-by-side grid of original vs prediction(s) for quick review.
            self.create_comparison_grid(image_path, result_images, f"{output_path.replace('.jpg', '')}_comparison.jpg")
            print(f"⏱️ Total generation time: {total_time:.1f} seconds")
            print(f"🏆 Generated with 60 inference steps for maximum quality!")
            # Report and reset peak GPU memory so repeated runs stay comparable.
            if torch.cuda.is_available():
                memory_used = torch.cuda.max_memory_allocated() / 1024**3
                print(f" Peak GPU memory usage: {memory_used:.2f} GB")
                torch.cuda.reset_peak_memory_stats()
            return result_images, plant_conditions, weather_df, plant_type, plant_health, saved_paths
        else:
            print(" Failed to generate image")
            return None
def create_comparison_grid(self, original_path, generated_images, output_path):
"""Create a comparison grid"""
try:
from PIL import Image, ImageDraw, ImageFont
# Load original
original = Image.open(original_path).convert("RGB")
# Use higher resolution for grid
target_size = (512, 512)
original = original.resize(target_size, Image.Resampling.LANCZOS)
resized_generated = [img.resize(target_size, Image.Resampling.LANCZOS) for img in generated_images]
# Calculate grid
total_images = len(generated_images) + 1
cols = min(3, total_images) # 3 columns max for better layout
rows = (total_images + cols - 1) // cols
# Create high-quality grid
grid_width = cols * target_size[0]
grid_height = rows * target_size[1] + 80 # More space for labels
grid_image = Image.new('RGB', (grid_width, grid_height), 'white')
# Add images
grid_image.paste(original, (0, 80))
for i, img in enumerate(resized_generated):
col = (i + 1) % cols
row = (i + 1) // cols
x = col * target_size[0]
y = row * target_size[1] + 80
grid_image.paste(img, (x, y))
# Add labels
try:
draw = ImageDraw.Draw(grid_image)
try:
font = ImageFont.truetype("arial.ttf", 32) # Larger font
except:
font = ImageFont.load_default()
draw.text((10, 20), "Original", fill='black', font=font)
for i in range(len(resized_generated)):
col = (i + 1) % cols
x = col * target_size[0] + 10
draw.text((x, 20), f"HQ Sample {i+1}", fill='black', font=font)
except:
pass
# Save with high quality
grid_image.save(output_path, "JPEG", quality=95, optimize=True)
print(f" High-quality comparison grid saved to: {output_path}")
except Exception as e:
print(f" Could not create comparison grid: {e}")
# Example usage - HIGH QUALITY MODE
if __name__ == "__main__":
    # Initialize predictor (builds the cached Open-Meteo client; models load lazily).
    predictor = PlantPredictor()
    # Example coordinates (Milan, Italy)
    latitude = 45.4642
    longitude = 9.1900
    print(" Starting ULTRA HIGH-QUALITY plant prediction with 60 inference steps...")
    print(" This will use maximum GPU power and time for absolute best quality")
    # Ultra high-quality prediction with a single sample.
    result = predictor.predict_plant_growth(
        image_path="./foto/basilico.png",
        lat=latitude,
        lon=longitude,
        output_path="./predicted_plant_ultra_hq.jpg",
        days=7,
        num_samples=1,  # Single ultra high-quality sample
        high_quality=True
    )
    if result:
        # predict_plant_growth returns a 6-tuple on success.
        images, conditions, weather_data, plant_type, plant_health, saved_paths = result
        print("\n" + "="*60)
        print("🎉 PLANT PREDICTION COMPLETED!")
        print("="*60)
        print(f"🌿 Plant type: {plant_type}")
        print(f"💚 Plant health: {plant_health}")
        print(f"🎯 Generated 1 ultra high-quality sample with 60 inference steps")
        print(f"📊 Weather data points: {weather_data.shape}")
        print(f"🌡️ Temperature range: {conditions['avg_temp_min']}°C to {conditions['avg_temp_max']}°C")
        print(f"🌧️ Total precipitation: {conditions['total_rain']}mm")
        print(f"☀️ Sunshine hours: {conditions['total_sunshine_hours']}h")
        print(f"\n💾 Saved files:")
        print(f" 📸 Ultra HQ prediction: ./predicted_plant_ultra_hq.jpg")
        print(f" 📊 Comparison image: ./predicted_plant_ultra_hq_comparison.jpg")
        print(f"\n🏆 Ultra quality improvements:")
        print(f" ✅ 60 inference steps (maximum quality)")
        print(f" ✅ Higher guidance scales for perfect accuracy")
        print(f" ✅ Up to 1024x1024 resolution support")
        print(f" ✅ Single focused sample for consistency")
        print(f" ✅ Enhanced prompt engineering")
        print(f" ✅ Maximum quality JPEG compression (98%)")
        print("")
    else:
        # predict_plant_growth returned None — some pipeline step failed.
        print("❌ Ultra high-quality plant prediction failed.")
        print("💡 Check GPU memory and ensure RTX 3060 is available")

Binary file not shown.

Before

Width:  |  Height:  |  Size: 436 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 256 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1,023 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.9 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1,023 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.2 MiB

View file

@ -0,0 +1 @@
Descrizione non disponibile.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 98 KiB

After

Width:  |  Height:  |  Size: 112 KiB

Before After
Before After

Binary file not shown.

After

Width:  |  Height:  |  Size: 880 KiB

View file

@ -0,0 +1 @@
Descrizione non disponibile.

View file

@ -1,5 +1,4 @@
import io
import os
import openmeteo_requests
import pandas as pd
import requests_cache
@ -13,6 +12,8 @@ from torchvision import transforms
from model import PlantClassifier # personalizzalo secondo il tuo file
import geocoder
import sys
from accelerate import init_empty_weights, load_checkpoint_and_dispatch
import os
print(sys.stdout.encoding) # Check what encoding your console is using
# Force UTF-8 encoding for the entire script
@ -26,6 +27,7 @@ class PlantPredictor:
cache_session = requests_cache.CachedSession('.cache', expire_after=3600)
retry_session = retry(cache_session, retries=5, backoff_factor=0.2)
self.openmeteo = openmeteo_requests.Client(session=retry_session)
self.image_model = None
self.trained_model = None
self.class_labels = ["basil", "tomato"] # oppure caricali dinamicamente
@ -227,7 +229,7 @@ class PlantPredictor:
# Weather + growth prompt logic (come da tua versione)
temp_avg = (plant_conditions['avg_temp_max'] + plant_conditions['avg_temp_min']) / 2
if plant_type == "basilico" or ("herb" in plant_type):
if plant_type == "basil" or plant_type == "tomato" or ("herb" in plant_type):
if temp_avg > 25:
temp_effect = "warm weather promoting vigorous basil growth with larger, aromatic leaves and bushier structure"
elif temp_avg < 15:
@ -325,7 +327,51 @@ class PlantPredictor:
except Exception as e:
print(f"⚠️ Error in health assessment: {e}")
return "unknown health"
    def describe_image_with_llava(self, image_pil, prompt=None):
        """Use LLaVA-Next on CPU to generate a description of the plant image.

        Returns the decoded description string, or the Italian fallback
        message "Descrizione non disponibile." on any failure.
        """
        try:
            from transformers import LlavaNextForConditionalGeneration, AutoProcessor
            # Lazy-load the model once and cache it on the instance.
            if not hasattr(self, "llava_model"):
                print("🔄 Caricamento modello LLaVA-Next su CPU…")
                model_id = "llava-hf/llava-v1.6-mistral-7b-hf"
                # 1) Load the processor
                self.llava_processor = AutoProcessor.from_pretrained(model_id)
                # 2) Load the model in half-precision, low memory mode.
                # NOTE(review): float16 on CPU is unusual — many CPU kernels
                # lack fp16 support; confirm generate() works, else use float32.
                self.llava_model = LlavaNextForConditionalGeneration.from_pretrained(
                    model_id,
                    torch_dtype=torch.float16,
                    low_cpu_mem_usage=True
                ).to("cpu")
                print("✅ LLaVA-Next caricato su CPU correttamente")
                # Free GPU memory if SD components are still loaded.
                # NOTE(review): this deletes the attribute entirely instead of
                # setting self.image_model = None, so a later
                # `self.image_model is None` check raises AttributeError — verify.
                if torch.cuda.is_available():
                    del self.image_model
                    torch.cuda.empty_cache()
            # 3) Prepend the <img> token so the processor knows where the image belongs.
            # NOTE(review): LLaVA-Next processors normally expect their chat
            # template / "<image>" placeholder — confirm "<img>" is handled.
            llava_prompt = "<img> " + (prompt or "Describe the plant growth and condition in this image.")
            # 4) Build inputs explicitly
            inputs = self.llava_processor(
                images=image_pil,
                text=llava_prompt,
                return_tensors="pt"
            ).to("cpu")
            # 5) Generate
            output = self.llava_model.generate(**inputs, max_new_tokens=150)
            description = self.llava_processor.decode(output[0], skip_special_tokens=True)
            return description
        except Exception as e:
            print(f"⚠️ Errore durante la descrizione con LLaVA-Next: {e}")
            return "Descrizione non disponibile."
def transform_plant_image(self, image_path, prompt):
"""STEP 4: Generate new image based on analyzed prompt"""
@ -413,8 +459,37 @@ class PlantPredictor:
return None
if result_image:
# Salva limmagine predetta
result_image.save(output_path)
print(f"Plant growth prediction saved to: {output_path}")
# —————— Qui inizia il codice per il .txt ——————
# Componi la descrizione
description = (
f"{plant_type.capitalize()} prevista dopo {plant_conditions['days_analyzed']} giorni:\n"
f"- Temperatura: {plant_conditions['avg_temp_min']}{plant_conditions['avg_temp_max']} °C\n"
f"- Pioggia: {plant_conditions['total_rain']} mm\n"
f"- Sole: {plant_conditions['total_sunshine_hours']} h\n"
f"- UV max: {plant_conditions['max_uv_index']}\n"
f"- Range termico giornaliero: {plant_conditions['temp_range']} °C\n"
f"Salute stimata: {plant_health}."
)
# STEP 4.5: Descrizione immagine predetta con LLaVA-Next
try:
llava_description = self.describe_image_with_llava(result_image, prompt)
print("🧠 Descrizione generata da LLaVA-Next:")
print(llava_description)
# Salva descrizione in file .txt separato
llava_txt_path = os.path.splitext(output_path)[0] + "_llava_description.txt"
with open(llava_txt_path, "w", encoding="utf-8") as f:
f.write(llava_description)
print(f"📄 Descrizione visiva salvata in: {llava_txt_path}")
except Exception as e:
print(f"⚠️ LLaVA-Next non ha potuto descrivere limmagine: {e}")
return result_image, plant_conditions, weather_df, plant_type, plant_health
else:
print("Failed to transform image")
@ -432,10 +507,10 @@ if __name__ == "__main__":
# Predict plant growth
# Replace 'your_plant_image.jpg' with actual image path
result = predictor.predict_plant_growth(
image_path="./basilico.jpg",
image_path="./tomato.jpg",
lat=latitude,
lon=longitude,
output_path="./predicted_plant_growth.jpg",
output_path="./tomato_new2.jpg",
days=7
)

View file

@ -0,0 +1,651 @@
import io
import openmeteo_requests
import pandas as pd
import requests_cache
from retry_requests import retry
from datetime import datetime, timedelta
from PIL import Image
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
import numpy as np
from torchvision import transforms
from model import PlantClassifier # personalizzalo secondo il tuo file
import geocoder
import sys
from accelerate import init_empty_weights, load_checkpoint_and_dispatch
import os
print(sys.stdout.encoding) # Check what encoding your console is using
# Force UTF-8 encoding for the entire script
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8', errors='replace')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8', errors='replace')
class PlantPredictor:
def __init__(self):
"""Initialize the plant prediction pipeline with Open-Meteo client"""
# Setup the Open-Meteo API client with cache and retry on error
cache_session = requests_cache.CachedSession('.cache', expire_after=3600)
retry_session = retry(cache_session, retries=5, backoff_factor=0.2)
self.openmeteo = openmeteo_requests.Client(session=retry_session)
self.image_model = None
self.trained_model = None
self.class_labels = ["basil", "tomato"] # oppure caricali dinamicamente
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    def load_trained_model(self, model_path="./models/basil_tomato_classifier.pth"):
        """Load the basil/tomato classifier checkpoint onto ``self.device``.

        Sets ``self.trained_model`` (in eval mode) on success; sets it to
        None on failure.  Handles checkpoints saved either as a raw state
        dict or wrapped under 'model_state_dict', and remaps legacy key
        names onto the current model layout.
        """
        if not os.path.exists(model_path):
            print("⚠️ Trained model not found!")
            return
        try:
            model = PlantClassifier(num_classes=2)
            # Load checkpoint with proper device mapping
            checkpoint = torch.load(model_path, map_location=self.device)
            # Handle different checkpoint formats
            if 'model_state_dict' in checkpoint:
                state_dict = checkpoint['model_state_dict']
            else:
                # If the checkpoint is just the state dict
                state_dict = checkpoint
            # Fix key mismatches between training and inference models:
            # the saved model has keys like "features.*" but the current
            # model expects "backbone.features.*".
            corrected_state_dict = {}
            for key, value in state_dict.items():
                if key.startswith('features.'):
                    # Add "backbone." prefix to features
                    new_key = 'backbone.' + key
                    corrected_state_dict[new_key] = value
                elif key.startswith('classifier.'):
                    # Add "backbone." prefix to classifier
                    new_key = 'backbone.' + key
                    corrected_state_dict[new_key] = value
                else:
                    # Keep other keys as they are
                    corrected_state_dict[key] = value
            # Load the corrected state dict.
            # NOTE(review): strict=False silently ignores any remaining
            # missing/unexpected keys — consider logging the returned result.
            model.load_state_dict(corrected_state_dict, strict=False)
            model.to(self.device)
            model.eval()
            self.trained_model = model
            print(f"✅ Model loaded successfully on {self.device}")
        except Exception as e:
            print(f"⚠️ Error loading trained model: {e}")
            self.trained_model = None
def get_current_location(self):
try:
g = geocoder.ip('me')
if g.ok:
print(f"📍 Location detected: {g.city}, {g.country}")
print(f"📍 Coordinates: {g.latlng[0]:.4f}, {g.latlng[1]:.4f}")
return g.latlng[0], g.latlng[1]
else:
print("⚠️ Could not detect location, using default (Milan)")
except Exception as e:
print(f"⚠️ Location detection failed: {e}, using default (Milan)")
# default Milan coords if failed
return 45.4642, 9.1900
def load_image_model(self):
"""Load the image transformation model"""
print("Loading Stable Diffusion model...")
self.image_model = StableDiffusionInstructPix2PixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix",
torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
)
if torch.cuda.is_available():
self.image_model = self.image_model.to("cuda")
print("Model loaded successfully!")
    def get_weather_forecast(self, lat, lon, days=7):
        """Fetch a daily forecast for (lat, lon) from Open-Meteo.

        Requests six daily variables for today through ``days`` days ahead and
        returns (DataFrame, raw_response) on success or (None, None) on failure.
        """
        start_date = datetime.now().strftime("%Y-%m-%d")
        end_date = (datetime.now() + timedelta(days=days)).strftime("%Y-%m-%d")
        url = "https://api.open-meteo.com/v1/forecast"
        params = {
            "latitude": lat,
            "longitude": lon,
            # Order matters: values are read back by index below.
            "daily": [
                "temperature_2m_max",
                "temperature_2m_min",
                "precipitation_sum",
                "rain_sum",
                "uv_index_max",
                "sunshine_duration"
            ],
            "start_date": start_date,
            "end_date": end_date,
            "timezone": "auto"
        }
        try:
            responses = self.openmeteo.weather_api(url, params=params)
            response = responses[0]  # Process first location
            print(f"Coordinates: {response.Latitude()}°N {response.Longitude()}°E")
            print(f"Elevation: {response.Elevation()} m asl")
            print(f"Timezone: UTC{response.UtcOffsetSeconds()//3600:+d}")
            # Process daily data
            daily = response.Daily()
            # Extract data as numpy arrays (much faster than per-value access).
            daily_data = {
                # One timestamp per forecast step, aligned with the arrays below.
                "date": pd.date_range(
                    start=pd.to_datetime(daily.Time(), unit="s", utc=True),
                    end=pd.to_datetime(daily.TimeEnd(), unit="s", utc=True),
                    freq=pd.Timedelta(seconds=daily.Interval()),
                    inclusive="left"
                ),
                # Indices match the request order in params["daily"].
                "temperature_2m_max": daily.Variables(0).ValuesAsNumpy(),
                "temperature_2m_min": daily.Variables(1).ValuesAsNumpy(),
                "precipitation_sum": daily.Variables(2).ValuesAsNumpy(),
                "rain_sum": daily.Variables(3).ValuesAsNumpy(),
                "uv_index_max": daily.Variables(4).ValuesAsNumpy(),
                "sunshine_duration": daily.Variables(5).ValuesAsNumpy()
            }
            # Create DataFrame for easy analysis
            daily_dataframe = pd.DataFrame(data=daily_data)
            return daily_dataframe, response
        except Exception as e:
            print(f"Error fetching weather data: {e}")
            return None, None
    def analyze_weather_for_plants(self, weather_df):
        """Reduce a daily forecast DataFrame to plant-relevant summary metrics.

        Returns a dict of aggregate metrics, or None when the forecast is
        missing or empty.  NaN readings are treated as zero.
        """
        if weather_df is None or weather_df.empty:
            return None
        # Handle NaN values by filling with 0
        weather_df = weather_df.fillna(0)
        # Calculate plant-relevant metrics using pandas (more efficient)
        plant_conditions = {
            "avg_temp_max": round(weather_df['temperature_2m_max'].mean(), 1),
            "avg_temp_min": round(weather_df['temperature_2m_min'].mean(), 1),
            "total_precipitation": round(weather_df['precipitation_sum'].sum(), 1),
            "total_rain": round(weather_df['rain_sum'].sum(), 1),
            "total_sunshine_hours": round(weather_df['sunshine_duration'].sum() / 3600, 1),  # Convert seconds to hours
            "max_uv_index": round(weather_df['uv_index_max'].max(), 1),
            "days_analyzed": len(weather_df),
            # Widest swing in the window: hottest max minus coldest min.
            "temp_range": round(weather_df['temperature_2m_max'].max() - weather_df['temperature_2m_min'].min(), 1)
        }
        return plant_conditions
CLASS_NAMES = {0: "basil", 1: "tomato"} # Adatta se usi nomi diversi
def create_transformation_prompt(self, image_path, plant_conditions):
if not plant_conditions:
return "Show this plant after one week of growth", "generic plant", "unknown health"
plant_type = "generic plant"
plant_health = "unknown health"
try:
if not os.path.exists(image_path):
raise FileNotFoundError(f"Image file not found at {image_path}")
with Image.open(image_path) as img:
image = img.convert("RGB")
width, height = image.size
try:
plant_type = self.detect_plant_type(image)
except Exception as e:
print(f"⚠️ Plant type detection failed: {e}")
plant_type = "generic plant"
try:
plant_health = self.assess_plant_health(image)
except Exception as e:
print(f"⚠️ Health assessment failed: {e}")
plant_health = "unknown health"
print(f"📸 Image Analysis:")
print(f" Plant type detected: {plant_type}")
print(f" Current health: {plant_health}")
print(f" Image size: {width}x{height}")
except Exception as e:
print(f"⚠️ Warning: Could not analyze image: {str(e)}")
plant_type = "generic plant"
plant_health = "healthy"
# Weather + growth prompt logic (come da tua versione)
temp_avg = (plant_conditions['avg_temp_max'] + plant_conditions['avg_temp_min']) / 2
if plant_type == "basil" or plant_type == "tomato" or ("herb" in plant_type):
if temp_avg > 25:
temp_effect = "warm weather promoting vigorous basil growth with larger, aromatic leaves and bushier structure"
elif temp_avg < 15:
temp_effect = "cool weather slowing basil growth with smaller, less vibrant leaves"
else:
temp_effect = "optimal temperature for basil supporting steady growth with healthy green foliage"
else:
if temp_avg > 25:
temp_effect = "warm weather promoting vigorous growth with larger, darker green leaves"
elif temp_avg < 10:
temp_effect = "cool weather slowing growth with smaller, pale leaves"
else:
temp_effect = "moderate temperature supporting steady growth with healthy green foliage"
if plant_conditions['total_rain'] > 20:
water_effect = "abundant rainfall keeping leaves lush, turgid and deep green"
elif plant_conditions['total_rain'] < 5:
water_effect = "dry conditions causing slight leaf wilting and browning at edges"
else:
water_effect = "adequate moisture maintaining crisp, healthy leaf appearance"
if plant_conditions['total_sunshine_hours'] > 50:
sun_effect = "plenty of sunlight encouraging dense, compact foliage growth"
elif plant_conditions['total_sunshine_hours'] < 20:
sun_effect = "limited sunlight causing elongated stems and sparse leaf growth"
else:
sun_effect = "moderate sunlight supporting balanced, proportional growth"
if plant_conditions['max_uv_index'] > 7:
uv_effect = "high UV causing slight leaf thickening and waxy appearance"
else:
uv_effect = "moderate UV maintaining normal leaf texture"
prompt = (
f"Transform this {plant_type} showing realistic growth after {plant_conditions['days_analyzed']} days. "
f"Current state: {plant_health}. Apply these weather effects: {temp_effect}, {water_effect}, {sun_effect}, and {uv_effect}. "
f"Show natural changes in leaf size, color saturation, stem thickness, and overall plant structure while maintaining the original composition and lighting. "
f"Weather summary: {plant_conditions['avg_temp_min']}-{plant_conditions['avg_temp_max']}°C, "
f"{plant_conditions['total_rain']}mm rain, {plant_conditions['total_sunshine_hours']}h sun"
)
return prompt, plant_type, plant_health
def detect_plant_type(self, image):
"""Use trained model to classify the plant type"""
if self.trained_model is None:
self.load_trained_model()
if self.trained_model is None:
print("⚠️ Trained model not available, using fallback rule.")
return "generic plant"
try:
transform = transforms.Compose([
transforms.Resize((224, 224)), # usa la stessa dimensione del training
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], # mean/std di ImageNet o dataset tuo
[0.229, 0.224, 0.225])
])
input_tensor = transform(image).unsqueeze(0).to(self.device)
with torch.no_grad():
output = self.trained_model(input_tensor)
_, predicted = torch.max(output, 1)
predicted_class = self.class_labels[predicted.item()]
# Get confidence score
probabilities = torch.nn.functional.softmax(output, dim=1)
confidence = probabilities[0][predicted].item()
print(f"🌱 Plant classification: {predicted_class} (confidence: {confidence:.2f})")
return predicted_class
except Exception as e:
print(f"⚠️ Error in plant type detection: {e}")
return "generic plant"
def cleanup_gpu_memory(self):
"""Clean up GPU memory and move models appropriately"""
if torch.cuda.is_available():
# Move Stable Diffusion model to CPU if LLaVA is being used
if hasattr(self, 'image_model') and self.image_model is not None:
print("💾 Moving Stable Diffusion to CPU to free GPU memory...")
self.image_model = self.image_model.to("cpu")
torch.cuda.empty_cache()
torch.cuda.synchronize()
# Print memory stats
allocated = torch.cuda.memory_allocated() / 1024**3
cached = torch.cuda.memory_reserved() / 1024**3
print(f"📊 GPU Memory: {allocated:.1f}GB allocated, {cached:.1f}GB cached")
def assess_plant_health(self, image):
"""Assess basic plant health from image"""
try:
img_array = np.array(image)
# Analyze brightness and color vibrancy
brightness = np.mean(img_array)
green_channel = np.mean(img_array[:,:,1])
if brightness > 150 and green_channel > 120:
return "healthy and vibrant"
elif brightness > 100 and green_channel > 80:
return "moderately healthy"
else:
return "showing some stress"
except Exception as e:
print(f"⚠️ Error in health assessment: {e}")
return "unknown health"
    def describe_image_with_llava(self, image_pil, prompt=None):
        """Use LLaVA-Next to generate a description of the plant image with proper device handling.

        The processor and model are loaded lazily on the first call and cached
        on the instance (``self.llava_processor`` / ``self.llava_model``).
        Device placement is chosen from available GPU memory, inputs are moved
        to the model's device, and a CUDA OOM during generation triggers a
        CPU retry.

        Args:
            image_pil: PIL image to describe.
            prompt: Optional instruction text; a botanical-analysis default is
                used when omitted.

        Returns:
            The generated description string, or a fallback message when
            dependencies are missing or inference fails.
        """
        try:
            from transformers import LlavaNextForConditionalGeneration, LlavaNextProcessor
            import torch
            if not hasattr(self, "llava_model"):
                print("🔄 Loading LLaVA-Next model...")
                model_id = "llava-hf/llava-v1.6-mistral-7b-hf"
                # Use the correct processor for LLaVA-Next
                self.llava_processor = LlavaNextProcessor.from_pretrained(model_id)
                # Determine optimal device configuration
                if torch.cuda.is_available():
                    # Check available GPU memory
                    gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1024**3 # GB
                    print(f"📊 Available GPU memory: {gpu_memory:.1f} GB")
                    # NOTE(review): the 12 GB threshold is a heuristic for a
                    # fp16 7B model — tune per deployment hardware.
                    if gpu_memory >= 12: # High memory GPU
                        device_map = "auto"
                        torch_dtype = torch.float16
                        print("🚀 Using GPU with auto device mapping")
                    else: # Lower memory GPU - use CPU offloading
                        device_map = {"": "cpu"}
                        torch_dtype = torch.float32
                        print("💾 Using CPU due to limited GPU memory")
                else:
                    device_map = {"": "cpu"}
                    torch_dtype = torch.float32
                    print("🖥️ Using CPU (no GPU available)")
                # Load model with explicit device mapping
                self.llava_model = LlavaNextForConditionalGeneration.from_pretrained(
                    model_id,
                    torch_dtype=torch_dtype,
                    low_cpu_mem_usage=True,
                    device_map=device_map,
                    offload_folder="./offload_cache", # Explicit offload directory
                    offload_state_dict=True if device_map != "auto" else False
                )
                # Ensure model is in eval mode
                self.llava_model.eval()
                print("✅ LLaVA-Next loaded successfully")
            # Clear CUDA cache before inference
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            # Prepare the conversation format that LLaVA-Next expects
            conversation = [
                {
                    "role": "user",
                    "content": [
                        {"type": "image"},
                        {"type": "text", "text": prompt or "Describe this plant's current condition, growth stage, health indicators, leaf characteristics, and any visible signs of stress or vitality. Focus on botanical details."}
                    ]
                }
            ]
            # Apply chat template and process inputs
            prompt_text = self.llava_processor.apply_chat_template(conversation, add_generation_prompt=True)
            # Process inputs properly
            inputs = self.llava_processor(
                images=image_pil,
                text=prompt_text,
                return_tensors="pt"
            )
            # Handle device placement more carefully
            target_device = "cpu" # Default to CPU for stability
            if hasattr(self.llava_model, 'device'):
                target_device = self.llava_model.device
            elif hasattr(self.llava_model, 'hf_device_map'):
                # Get the device of the first layer
                for module_name, device in self.llava_model.hf_device_map.items():
                    if device != 'disk':
                        target_device = device
                        break
            print(f"🎯 Moving inputs to device: {target_device}")
            inputs = {k: v.to(target_device) if isinstance(v, torch.Tensor) else v for k, v in inputs.items()}
            # Generate with proper parameters and error handling
            with torch.no_grad():
                try:
                    output = self.llava_model.generate(
                        **inputs,
                        max_new_tokens=150, # Reduced for stability
                        do_sample=False, # Use greedy decoding for consistency
                        temperature=None, # Not used with do_sample=False
                        top_p=None, # Not used with do_sample=False
                        pad_token_id=self.llava_processor.tokenizer.eos_token_id,
                        use_cache=True,
                        repetition_penalty=1.1
                    )
                except RuntimeError as e:
                    if "out of memory" in str(e).lower():
                        print("⚠️ GPU OOM, retrying with CPU...")
                        # Move everything to CPU and retry
                        inputs = {k: v.cpu() if isinstance(v, torch.Tensor) else v for k, v in inputs.items()}
                        if hasattr(self.llava_model, 'cpu'):
                            self.llava_model = self.llava_model.cpu()
                        output = self.llava_model.generate(
                            **inputs,
                            max_new_tokens=150,
                            do_sample=False,
                            pad_token_id=self.llava_processor.tokenizer.eos_token_id
                        )
                    else:
                        raise e
            # Decode only the new tokens (exclude input tokens)
            input_length = inputs["input_ids"].shape[1]
            generated_tokens = output[0][input_length:]
            description = self.llava_processor.tokenizer.decode(generated_tokens, skip_special_tokens=True)
            # Clean up cache after generation
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            return description.strip()
        except ImportError as e:
            print(f"⚠️ LLaVA-Next dependencies not available: {e}")
            return "Visual description not available - missing dependencies."
        except Exception as e:
            print(f"⚠️ Error during LLaVA-Next description: {e}")
            print(f"🔍 Error details: {type(e).__name__}: {str(e)}")
            return f"Visual description failed: {str(e)}"
def transform_plant_image(self, image_path, prompt):
"""STEP 4: Generate new image based on analyzed prompt"""
if self.image_model is None:
self.load_image_model()
try:
# Load and prepare image
image = Image.open(image_path).convert("RGB")
# Resize if too large (for memory efficiency)
if max(image.size) > 1024:
image.thumbnail((1024, 1024), Image.Resampling.LANCZOS)
print(f" STEP 4: Generating transformed image...")
print(f" Using prompt: {prompt}")
# Transform image
result = self.image_model(
prompt,
image=image,
num_inference_steps=70,
image_guidance_scale=1.5,
guidance_scale=7.5
).images[0]
return result
except Exception as e:
print(f"Error transforming image: {e}")
return None
@staticmethod
def safe_print(text):
try:
print(text)
except UnicodeEncodeError:
# Fallback for systems with limited encoding support
print(text.encode('ascii', errors='replace').decode('ascii'))
def predict_plant_growth(self, image_path, lat=None, lon=None, output_path="./predicted_plant.jpg", days=7):
"""Complete pipeline: weather + image transformation"""
# Auto-detect location if not provided
if lat is None or lon is None:
print(" Auto-detecting location...")
lat, lon = self.get_current_location()
print(f" Starting plant prediction for coordinates: {lat:.4f}, {lon:.4f}")
print(f" Analyzing {days} days of weather data...")
# Step 1: Get weather data using official Open-Meteo client
print("Fetching weather data with caching and retry...")
weather_df, response_info = self.get_weather_forecast(lat, lon, days)
if weather_df is None:
print("Failed to get weather data")
return None
print(f"Weather data retrieved for {len(weather_df)} days")
print("\nWeather Overview:")
print(weather_df[['date', 'temperature_2m_max', 'temperature_2m_min', 'precipitation_sum', 'sunshine_duration']].head())
# Step 2: Analyze weather for plants
plant_conditions = self.analyze_weather_for_plants(weather_df)
print(f"\nPlant-specific weather analysis: {plant_conditions}")
# Step 3: Analyze image + weather to create intelligent prompt
print("\n STEP 3: Analyzing image and creating transformation prompt...")
try:
prompt, plant_type, plant_health = self.create_transformation_prompt(image_path, plant_conditions)
self.safe_print(f" Plant identified as: {plant_type}")
self.safe_print(f" Current health: {plant_health}")
self.safe_print(f" Generated transformation prompt: {prompt}")
except Exception as e:
print(f" Error in Step 3: {e}")
return None
# Step 4: Generate transformed image
print("\nSTEP 4: Generating prediction image...")
try:
result_image = self.transform_plant_image(image_path, prompt)
except Exception as e:
print(f" Error in Step 4: {e}")
return None
if result_image:
# Save the predicted image
result_image.save(output_path)
print(f"Plant growth prediction saved to: {output_path}")
# Compose the basic description
description = (
f"{plant_type.capitalize()} predicted after {plant_conditions['days_analyzed']} days:\n"
f"- Temperature: {plant_conditions['avg_temp_min']}{plant_conditions['avg_temp_max']} °C\n"
f"- Rain: {plant_conditions['total_rain']} mm\n"
f"- Sunshine: {plant_conditions['total_sunshine_hours']} h\n"
f"- UV max: {plant_conditions['max_uv_index']}\n"
f"- Daily temperature range: {plant_conditions['temp_range']} °C\n"
f"Estimated health: {plant_health}."
)
# STEP 4.5: Enhanced visual description with LLaVA-Next
try:
print("\n🧠 STEP 4.5: Generating detailed visual analysis...")
# Clean up GPU memory before loading LLaVA
self.cleanup_gpu_memory()
llava_description = self.describe_image_with_llava(
result_image,
f"Analyze this {plant_type} plant prediction image. Describe the visible growth changes, leaf development, overall health indicators, and how the plant appears to have responded to the weather conditions: {plant_conditions['avg_temp_min']}-{plant_conditions['avg_temp_max']}°C, {plant_conditions['total_rain']}mm rain, {plant_conditions['total_sunshine_hours']}h sun over {plant_conditions['days_analyzed']} days."
)
print("🧠 AI Visual Analysis:")
print(llava_description)
# Save comprehensive description
complete_description = f"{description}\n\nAI Visual Analysis:\n{llava_description}"
description_txt_path = os.path.splitext(output_path)[0] + "_analysis.txt"
with open(description_txt_path, "w", encoding="utf-8") as f:
f.write(complete_description)
print(f"📄 Complete analysis saved to: {description_txt_path}")
except Exception as e:
print(f"⚠️ Visual analysis failed: {e}")
# Still save basic description
basic_txt_path = os.path.splitext(output_path)[0] + "_basic_info.txt"
with open(basic_txt_path, "w", encoding="utf-8") as f:
f.write(description)
print(f"📄 Basic info saved to: {basic_txt_path}")
return result_image, plant_conditions, weather_df, plant_type, plant_health
else:
print("Failed to transform image")
return None
# Example usage
if __name__ == "__main__":
    # Build the prediction pipeline.
    predictor = PlantPredictor()
    # Demo coordinates: Milan, Italy.
    milan_lat, milan_lon = 45.4642, 9.1900
    # Run the full prediction on a sample photo.
    # Replace './basilico.jpg' with an actual image path.
    outcome = predictor.predict_plant_growth(
        image_path="./basilico.jpg",
        lat=milan_lat,
        lon=milan_lon,
        output_path="./basilico_new2.jpg",
        days=7
    )
    if not outcome:
        print("Plant prediction failed.")
    else:
        image, conditions, weather_data, plant_type, plant_health = outcome
        print("\n" + "=" * 50)
        print(" PLANT PREDICTION COMPLETED SUCCESSFULLY!")
        print("=" * 50)
        print(f" Plant type: {plant_type}")
        print(f" Plant health: {plant_health}")
        print(f" Weather conditions: {conditions}")
        print(f" Data points: {weather_data.shape}")
        print(f" Temperature: {conditions['avg_temp_min']}°C to {conditions['avg_temp_max']}°C")
        print(f" Total rain: {conditions['total_rain']}mm")
        print(f" Sunshine: {conditions['total_sunshine_hours']}h")

Binary file not shown.

After

Width:  |  Height:  |  Size: 156 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.9 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.1 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 76 KiB

View file

@ -0,0 +1 @@
Descrizione non disponibile.

View file

@ -0,0 +1 @@
Descrizione non disponibile.