Upload files to "/"
parent
144baf1c85
commit
6c9e127bdc
2 changed files with 159 additions and 0 deletions
124 app.py Normal file
@@ -0,0 +1,124 @@
import streamlit as st
import pandas as pd
import joblib
from PIL import Image
import torch
from torchvision import models
from llama_cpp import Llama
from diffusers import DiffusionPipeline

st.set_page_config(page_title="Plant Growth Predictor", layout="centered")
st.title("🌱 Plant Growth Predictor")

@st.cache
def load_imagenet_labels():
    # Note: `import urllib` alone does not expose urllib.request, so import the submodule.
    import urllib.request
    url = "https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt"
    response = urllib.request.urlopen(url)
    labels = [line.strip() for line in response.read().decode("utf-8").split("\n")]
    return labels

labels = load_imagenet_labels()

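# Hypothetical sketch (an assumption, not called anywhere below): the `torchvision`
# import and the ImageNet labels loaded above are otherwise unused, and one plausible
# use is identifying the uploaded photo with a pretrained classifier. The helper name
# and the choice of ResNet-18 are illustrative only (older torchvision releases would
# use `pretrained=True` instead of the `weights=` argument).
def classify_plant_image(img: Image.Image) -> str:
    from torchvision import transforms
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    model = models.resnet18(weights=models.ResNet18_Weights.DEFAULT).eval()
    with torch.no_grad():
        logits = model(preprocess(img.convert("RGB")).unsqueeze(0))
    return labels[int(logits.argmax())]
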
# Load Mistral LLM via llama-cpp-python with custom hash to avoid Streamlit caching issues
@st.cache(hash_funcs={Llama: lambda _: None})
def load_mistral_model():
    llm = Llama(
        model_path="./models/mistral-7b-instruct-v0.1.Q5_K_S.gguf",
        n_ctx=2048,
        n_threads=4,
        n_batch=512,
        verbose=False
    )
    return llm

llm = load_mistral_model()

# Generate a description using the Mistral model
def generate_growth_description(plant_type, soil_type, sunlight_hours, water_frequency,
                                fertilizer_type, temperature, humidity, days, additional_info):
    prompt = (
        f"### Instruction:\n"
        f"You are a botanical expert. Describe how a {plant_type} plant will likely look in {days} days "
        f"based on these conditions:\n"
        f"Important additional conditions: {additional_info}\n"
        f"- Soil Type: {soil_type}\n"
        f"- Sunlight: {sunlight_hours} hours per day\n"
        f"- Water Frequency: {water_frequency} times per week\n"
        f"- Fertilizer Type: {fertilizer_type}\n"
        f"- Temperature: {temperature}°C\n"
        f"- Humidity: {humidity}%\n"
        f"### Response:\n"
    )
    output = llm(prompt, max_tokens=250, stop=["###"])
    return output["choices"][0]["text"].strip()

def generate_condition_image(description: str, input_image: Image.Image) -> Image.Image:
    input_image = input_image.convert("RGB").resize((512, 512))
    with st.spinner("Generating predicted plant condition image..."):
        # Image generation is not implemented here yet; return the resized
        # input so the st.image() call below still receives a valid image.
        return input_image

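# Hedged sketch of the actual image prediction, mirroring temp.py from this same commit:
# run Stable Diffusion img2img over the uploaded photo with the generated description
# as the prompt. The helper name is an assumption; the model id and the strength/guidance
# values are the ones temp.py uses.
def _img2img_condition_image(description: str, input_image: Image.Image) -> Image.Image:
    from diffusers import StableDiffusionImg2ImgPipeline
    device = "cuda" if torch.cuda.is_available() else "cpu"
    pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    ).to(device)
    image = input_image.convert("RGB").resize((512, 512))
    result = pipe(prompt=description, image=image, strength=0.75, guidance_scale=7.5)
    return result.images[0]
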
st.header("Plant Info")
plant_input_mode = st.radio("How would you like to provide plant info?", ("Type name", "Upload image"))
plant_type = None
uploaded_image = None

if plant_input_mode == "Type name":
    plant_type = st.selectbox("Select Plant Type", ["Basil", "Tomato", "Lettuce", "Rosemary", "Other"])

elif plant_input_mode == "Upload image":
    plant_type = st.selectbox("Select Plant Type", ["Basil", "Tomato", "Lettuce", "Rosemary", "Other"])
    image_file = st.file_uploader("Upload an image of your plant", type=["jpg", "jpeg", "png"])
    if image_file:
        uploaded_image = Image.open(image_file)
        st.image(uploaded_image, caption="Uploaded Plant Image", use_column_width=True)

col1, col2 = st.columns(2)

with col1:
    st.header("Environmental Parameters")
    soil_options = ["Sandy", "Clay", "Loamy", "Peaty", "Chalky", "Silty"]

    soil_type = st.selectbox("Soil Type", soil_options)
    sunlight_hours = st.slider("Sunlight Hours per day", 0, 24, 6)
    water_frequency = st.slider("Water Frequency (times per week)", 0, 14, 3)

# --- Column 2: Environmental Parameters
with col2:
    fertilizer_options = ["Organic", "Chemical", "None"]

    fertilizer_type = st.selectbox("Fertilizer Type", fertilizer_options)
    temperature = st.slider("Temperature (°C)", -10, 50, 22)
    humidity = st.slider("Humidity (%)", 0, 100, 60)
    days = st.slider("Prediction Interval (in days)", min_value=1, max_value=30, value=7)

additional_info = st.text_area("Feel free to include any additional detail")

# Prediction + Description + Image Generation
if st.button("Predict Growth Milestone and Generate Description & Image"):
    if plant_type and plant_type.strip() != "":
        if plant_input_mode == "Upload image" and uploaded_image is None:
            st.warning("Please upload a plant image to proceed.")
        else:
            with st.spinner("Analyzing data and generating description..."):
                description = generate_growth_description(
                    plant_type, soil_type, sunlight_hours, water_frequency,
                    fertilizer_type, temperature, humidity, days, additional_info
                )
            st.subheader(f"📝 Predicted Plant Condition in {days} Days:")
            st.write(description)

            # Use uploaded image if available, else placeholder or skip image generation
            if plant_input_mode == "Upload image" and uploaded_image:
                manipulated_img = generate_condition_image(description, uploaded_image)
                st.image(manipulated_img, caption="Predicted Plant Condition Image")
            else:
                st.info("Image prediction requires uploading a plant image.")

    else:
        st.warning("Please select or enter a plant type.")

st.markdown("---")
st.caption("Made with ❤️ by Sandwich Craftz.")
35 temp.py Normal file
@@ -0,0 +1,35 @@
import torch
from diffusers import StableDiffusionImg2ImgPipeline
from PIL import Image
import requests
from io import BytesIO

# Load the pipeline (use fp16 only on GPU; most fp16 ops are unsupported on CPU)
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
).to("cuda" if torch.cuda.is_available() else "cpu")

# Function to load image from local path or URL
def load_image(path_or_url):
    if path_or_url.startswith("http"):
        response = requests.get(path_or_url)
        return Image.open(BytesIO(response.content)).convert("RGB")
    else:
        return Image.open(path_or_url).convert("RGB")

# Load original plant image (this is a plain script rather than a Streamlit app, so use
# the helper above; the file name here is only an example placeholder)
input_image = load_image("plant.jpg")

# Resize to 512x512 for best results
input_image = input_image.resize((512, 512))

# Define environmental condition prompt
prompt = "A withered plant with brown leaves, dry soil, and signs of dehydration, after 10 days of heat and no water"

# Generate the modified image
output = pipe(prompt=prompt, image=input_image, strength=0.75, guidance_scale=7.5)
output_image = output.images[0]

# Save the result
output_image.save("plant_future_prediction.png")