spring.application.name=rag-service

# --- LLM Provider: Groq (OpenAI-compatible API) ---
spring.ai.openai.base-url=${SPRING_AI_OPENAI_BASE_URL:https://api.groq.com/openai/v1}
spring.ai.openai.api-key=${SPRING_AI_OPENAI_API_KEY:}
# NOTE(review): recent Spring AI releases expect `spring.ai.openai.chat.options.model`
# rather than `spring.ai.openai.chat.model` — verify against the Spring AI version in use.
spring.ai.openai.chat.model=${SPRING_AI_OPENAI_CHAT_MODEL:llama-3.3-70b-versatile}

# --- Embedding model: use local pgvector with a lightweight model ---
# Groq does not provide an embedding endpoint, so we disable OpenAI embedding auto-config
# and rely on the pgvector store's existing embedding setup
spring.ai.openai.embedding.enabled=false

# --- JWT ---
# NOTE(review): a real-looking secret is baked in as the fallback default; prefer failing
# fast when JWT_SECRET is unset instead of shipping a hard-coded signing key.
jwt.secret=${JWT_SECRET:ywfI6dBznYmHbokihB/OBzZz6E0Fj+6PiqrM8dQ5c3t0HeYarblCbOGM8vQtOt472AtQ+MsCH7OVIKHOzjrPsQ==}
# NOTE(review): 103600000 ms is ~28.8 hours — confirm this is intentional and not a typo
# for 3600000 (1 h) or 86400000 (24 h).
jwt.expiration=103600000

# --- Datasource (PostgreSQL) ---
spring.datasource.url=${SPRING_DATASOURCE_URL:jdbc:postgresql://localhost:5432/ragdb}
spring.datasource.username=${SPRING_DATASOURCE_USERNAME:postgres}
spring.datasource.password=${SPRING_DATASOURCE_PASSWORD:postgres}

# --- Logging ---
logging.level.org.springframework.ai.chat.client.advisor=DEBUG
logging.level.org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerMapping=DEBUG
logging.level.org.springframework.web=DEBUG
logging.level.org.flywaydb=DEBUG
logging.level.com.balex.rag.controller=DEBUG

# --- Document processing ---
app.document.chunk-size=200

# --- Server / timeouts ---
server.compression.enabled=false
server.tomcat.connection-timeout=60000
spring.mvc.async.request-timeout=60000

# --- Endpoint path fragments ---
end.points.users=/users
end.points.id=/{id}
end.points.all=/all
end.points.create=/create
end.points.userinfo=/userinfo
end.points.refresh.token=/refresh/token
end.points.auth=/auth
end.points.login=/login
end.points.register=/register
end.points.chat=/chat
end.points.entry=/entry
end.points.document=/documents

# --- RAG ---
rag.rerank-fetch-multiplier=2

# --- Swagger ---
swagger.servers.first=http://localhost:8080
springdoc.swagger-ui.path=/swagger-ui.html
springdoc.api-docs.path=/v3/api-docs

# --- Kafka ---
spring.kafka.bootstrap-servers=${KAFKA_BOOTSTRAP_SERVERS:localhost:9092}
analytics.kafka.topic=user-events