-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathexample.env
More file actions
119 lines (99 loc) · 4.96 KB
/
example.env
File metadata and controls
119 lines (99 loc) · 4.96 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
# The environment setting.
# Use 'development' for local development, and 'production' in production.
ENVIRONMENT=development
# Ports that will be mapped to the host during development.
WEB_DEV_PORT=8000
POSTGRES_DEV_PORT=5432
LLM_SERVICE_DEV_PORT=8080
# Ports that will be mapped to the host during production.
WEB_HTTP_PORT=80
WEB_HTTPS_PORT=443
# Django debug settings (only used in development).
FORCE_DEBUG_TOOLBAR=true
REMOTE_DEBUGGING_ENABLED=false
REMOTE_DEBUGGING_PORT=5678
# The Django secret key used for cryptographic signing.
# IMPORTANT: Use a unique and secure key in production!
DJANGO_SECRET_KEY="your_django_secret_key_here"
# The Postgres database password (only used in production).
POSTGRES_PASSWORD="your_postgres_password_here"
# Miscellaneous Django security settings.
DJANGO_ALLOWED_HOSTS=localhost,127.0.0.1
DJANGO_CSRF_TRUSTED_ORIGINS=
DJANGO_INTERNAL_IPS=127.0.0.1
# Redirect all HTTP requests to HTTPS (only used in production).
DJANGO_SECURE_SSL_REDIRECT=true
# The salt that is used for hashing tokens in the token authentication app.
# Caution: changing the salt after tokens have already been generated invalidates all of them!
TOKEN_AUTHENTICATION_SALT="your_token_authentication_salt_here"
# Email configuration.
# The email address that is used for sending emails to the users and critical errors
# to the admins. The SMTP server is only used in production. In development the emails
# are just logged to the console.
DJANGO_SERVER_EMAIL="server@example-project.example"
DJANGO_EMAIL_URL="smtp://localhost:25"
# The Django server admins that will receive critical error notifications.
# Also used by django-registration-redux to send account approval emails to the admins.
DJANGO_ADMIN_EMAIL="admin@radis.example"
DJANGO_ADMIN_FULL_NAME="RADIS Admin"
# A support Email address that is presented to the users where they can get support.
SUPPORT_EMAIL="support@radis.example"
# A superuser that will have access to the Django admin interface.
# Optionally with a provided auth token for the API.
SUPERUSER_USERNAME="superuser"
SUPERUSER_EMAIL="superuser@radis.example"
SUPERUSER_PASSWORD="your_superuser_password_here"
SUPERUSER_AUTH_TOKEN="your_superuser_auth_token_here"
# Location of the backup folder.
BACKUP_DIR="/tmp/backups"
# Site information that is synced to the database and used by the sites framework.
SITE_NAME="RADIS"
SITE_DOMAIN=localhost
# Settings for SSL encryption (only used in production).
# SSL_HOSTNAME and SSL_IP_ADDRESSES are used to generate self-signed certificates
# with 'uv run cli generate-certificate-files', but you can also provide both files
# on your own.
SSL_HOSTNAME=localhost
SSL_IP_ADDRESSES=127.0.0.1
SSL_SERVER_CERT_FILE="./cert.pem"
SSL_SERVER_KEY_FILE="./key.pem"
SSL_SERVER_CHAIN_FILE="./chain.pem"
# The timezone used by the server.
TIME_ZONE="Europe/Berlin"
# LLM configuration.
# The name of the LLM model that should be used for inference. The model will be downloaded
# automatically (if no external LLM provider is used, see below) by LLaMA.cpp (in development)
# or SGLang (in production) from the Hugging Face model hub. If an external LLM provider is used,
# this setting must match the model name supported by that provider.
LLM_MODEL_NAME="unsloth/SmolLM2-135M-Instruct-GGUF"
# An optional Hugging Face token that is used to authenticate with the Hugging Face API when
# downloading models.
HF_TOKEN=
# An optional external LLM provider that can be used for inference. The provider must support
# the OpenAI API (beta) client and structured outputs. The LLM_MODEL_NAME must be set accordingly.
# If set, no internal SGLang service or local LLaMA.cpp container will be started.
EXTERNAL_LLM_PROVIDER_URL=
# The API key used to authenticate with the external LLM provider. Not all providers
# actually use this key, but the OpenAI API client requires it to be set.
EXTERNAL_LLM_PROVIDER_API_KEY="your_external_llm_provider_api_key_here"
# Indicates if a local LLaMA.cpp container in development should use the GPU for inference.
# During production SGLang always requires a GPU.
LLAMACPP_USE_GPU=false
# The language of the example reports that will be seeded to the development database.
# Possible values are 'en' or 'de'.
EXAMPLE_REPORTS_LANGUAGE=en
# Dedicated LLM configuration for generating sample reports via the CLI. These do not
# impact inference. The provider must support the OpenAI API client.
REPORT_LLM_MODEL_NAME="gemma3"
REPORT_LLM_PROVIDER_URL="http://host.docker.internal:11434/v1"
# API key (if required by the generation provider). Only used with
# 'cli generate-example-reports'.
REPORT_LLM_PROVIDER_API_KEY="ollama"
# Docker swarm mode does not respect the Docker Proxy client configuration
# (see https://docs.docker.com/network/proxy/#configure-the-docker-client),
# but we can set those environment variables manually.
# Make sure to use .local in NO_PROXY, as otherwise the communication with
# the other services will not work.
# HTTP_PROXY="http://user:pass@myproxy.net:8080"
# HTTPS_PROXY="http://user:pass@myproxy.net:8080"
# NO_PROXY="localhost,.local"