diff --git a/env.example b/env.example new file mode 100644 index 0000000..98d3900 --- /dev/null +++ b/env.example @@ -0,0 +1,34 @@ +# Kubeflow docs-agent environment configuration +# Copy this file to .env and fill in your values +# cp env.example .env + +# LLM Serving +# KServe inference endpoint URL +KSERVE_URL=http://llama.docs-agent.svc.cluster.local/openai/v1/chat/completions + +# Model name registered in KServe +MODEL=llama3.1-8B + +# API Server +# Port for the API server (WebSocket or HTTPS) +PORT=8000 + +# Milvus Vector Database +MILVUS_HOST=my-release-milvus.docs-agent.svc.cluster.local +MILVUS_PORT=19530 +MILVUS_COLLECTION=docs_rag + +# Embedding Model +EMBEDDING_MODEL=sentence-transformers/all-mpnet-base-v2 + +# HuggingFace +# Required to download the LLM model from HuggingFace Hub +HF_TOKEN=your_huggingface_token_here + +# Pipeline Parameters +REPO_OWNER=kubeflow +REPO_NAME=website +DIRECTORY_PATH=content/en +CHUNK_SIZE=1000 +CHUNK_OVERLAP=100 +BASE_URL=https://www.kubeflow.org/docs