import 'dotenv/config';
// Setup LLM
import {Document, Settings, storageContextFromDefaults, VectorStoreIndex} from "llamaindex";
import {OpenAI, OpenAIEmbedding} from "@llamaindex/openai";
import path from "path";
import fs from "fs";
import {QdrantVectorStore} from "@llamaindex/qdrant";
// Register the chat model LlamaIndex uses for query synthesis.
// Model name and API key are read from the environment (loaded by dotenv/config).
Settings.llm = new OpenAI({
  model: process.env.OPENAI_MODEL,
  apiKey: process.env.OPENAI_API_KEY,
});
// Embedding model for turning documents and queries into vectors;
// the key comes from the environment (loaded by dotenv/config).
Settings.embedModel = new OpenAIEmbedding({
  apiKey: process.env.OPENAI_API_KEY,
});
// Qdrant vector store backing the "plants-rag" collection.
// QDRANT_URL may override the endpoint (e.g. for a remote/containerized
// instance); it falls back to the previous hard-coded local default.
const vectorStore = new QdrantVectorStore({
  url: process.env.QDRANT_URL ?? 'http://localhost:6333',
  collectionName: 'plants-rag',
});
// Storage context wired to Qdrant so newly computed embeddings are
// persisted there rather than in the default in-process store.
const storageContext = await storageContextFromDefaults({ vectorStore });
// Ingest every markdown file under storage/plants into the Qdrant-backed
// vector index. On failure, log and mark the process as failed.
try {
  const storageDir = path.join(process.cwd(), 'storage', 'plants');

  // Only markdown files are treated as plant documents.
  const files = fs.readdirSync(storageDir).filter((file) => file.endsWith('.md'));

  if (files.length === 0) {
    // Guard: previously an empty directory handed an empty array to
    // VectorStoreIndex.fromDocuments; skip indexing and say so instead.
    console.warn(`No markdown files found in ${storageDir}; nothing to index.`);
  } else {
    const documents = [];
    for (const [index, file] of files.entries()) {
      console.log(`Processing file ${index + 1} of ${files.length}: ${file}`);
      const filePath = path.join(storageDir, file);
      const content = fs.readFileSync(filePath, 'utf-8');

      // Record the originating filename so retrieved chunks can cite their source.
      documents.push(new Document({
        text: content,
        metadata: {
          source: file
        }
      }));
    }

    // Embed the documents and persist them into the Qdrant collection.
    await VectorStoreIndex.fromDocuments(documents, {
      storageContext: storageContext
    });
    console.log(`Indexed ${documents.length} document(s) into the vector store.`);
  }
} catch (error) {
  console.error('Error initializing plants RAG:', error);
  // Previously the script logged but still exited 0; signal failure so
  // CI/orchestration can detect a broken ingestion run. exitCode (rather
  // than process.exit) lets pending I/O flush before shutdown.
  process.exitCode = 1;
}