The LlamaIndex TextNodes and how to retrieve them using LlamaIndex's chat functionality:
Code: Select all
### other dependencies ###
import qdrant_client
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.core.chat_engine import CondensePlusContextChatEngine
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.llms.llm import LLM
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.callbacks import CallbackManager

### initialize Qdrant client, etc. ###
client = qdrant_client.QdrantClient(host="localhost", port=6333)  # example connection; adjust to your setup

# vector store whose collection holds the nodes as TextNode objects (hybrid search enabled)
vector_store = QdrantVectorStore(client=client, collection_name="reports", enable_hybrid=True)
index = VectorStoreIndex.from_vector_store(vector_store=vector_store)

# LlamaIndex retriever over the index
retriever = index.as_retriever(similarity_top_k=6)
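To actually answer questions over those TextNodes with LlamaIndex's chat functionality, the imports above suggest wiring the retriever into a CondensePlusContextChatEngine together with a ChatMemoryBuffer. A minimal sketch of that wiring, assuming an OpenAI LLM, a 3000-token memory limit, and a placeholder question (all illustrative choices, not taken from the code above):

Code: Select all
from llama_index.llms.openai import OpenAI  # assumption: any LlamaIndex LLM could be used here

# LLM and conversation memory (model name and token limit are only examples)
llm = OpenAI(model="gpt-4o-mini")
memory = ChatMemoryBuffer.from_defaults(token_limit=3000)

# condense the follow-up question, retrieve TextNodes, and answer with that context
chat_engine = CondensePlusContextChatEngine.from_defaults(
    retriever=retriever,
    llm=llm,
    memory=memory,
)

response = chat_engine.chat("What do the reports say about the topic of interest?")  # placeholder question
print(response.response)

# the retrieved TextNodes come back as NodeWithScore objects
for node_with_score in response.source_nodes:
    print(node_with_score.score, node_with_score.node.get_content()[:200])

Each entry in response.source_nodes wraps one of the stored TextNode objects, so the original node text and metadata remain accessible after the chat call.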