| # |
| # Licensed to the Apache Software Foundation (ASF) under one |
| # or more contributor license agreements. See the NOTICE file |
| # distributed with this work for additional information |
| # regarding copyright ownership. The ASF licenses this file |
| # to you under the Apache License, Version 2.0 (the |
| # "License"); you may not use this file except in compliance |
| # with the License. You may obtain a copy of the License at |
| # |
| # http://www.apache.org/licenses/LICENSE-2.0 |
| # |
| # Unless required by applicable law or agreed to in writing, |
| # software distributed under the License is distributed on an |
| # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
| # KIND, either express or implied. See the License for the |
| # specific language governing permissions and limitations |
| # under the License. |
| |
| # This is the template for Airflow's default configuration. When Airflow is |
| # imported, it looks for a configuration file at $AIRFLOW_HOME/airflow.cfg. If |
| # it doesn't exist, Airflow uses this template to generate it by replacing |
| # variables in curly braces with their global values from configuration.py. |
| |
| # Users should not modify this file; they should customize the generated |
| # airflow.cfg instead. |
| |
| |
| # ----------------------- TEMPLATE BEGINS HERE ----------------------- |
| |
| [core] |
| # The folder where your airflow pipelines live, most likely a |
| # subfolder in a code repository. This path must be absolute. |
| dags_folder = {AIRFLOW_HOME}/dags |
| |
| # Hostname by providing a path to a callable, which will resolve the hostname. |
| # The format is "package.function". |
| # |
# For example, the default value ``airflow.utils.net.getfqdn`` means that the result comes from
# a patched version of socket.getfqdn() - see https://github.com/python/cpython/issues/49254.
| # |
| # No argument should be required in the function specified. |
| # If using IP address as hostname is preferred, use value ``airflow.utils.net.get_host_ip_address`` |
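#
# As an illustration, a minimal custom callable could look like the sketch below
# (``my_company.net_utils`` is a hypothetical module, not shipped with Airflow):
#
#     import socket
#
#     def get_hostname() -> str:
#         # Return the bare hostname instead of the FQDN
#         return socket.gethostname()
#
# It would then be referenced as ``hostname_callable = my_company.net_utils.get_hostname``.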
| hostname_callable = airflow.utils.net.getfqdn |
| |
# A callable to check if a python file has airflow dags defined or not,
# with arguments: `(file_path: str, zip_file: zipfile.ZipFile | None = None)`.
# It should return True if the file has dags, otherwise False.
| # If this is not provided, Airflow uses its own heuristic rules. |
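#
# A minimal sketch of such a callable (hypothetical module; a real implementation
# should also inspect the ``zip_file`` contents):
#
#     import zipfile
#
#     def might_contain_dag(file_path: str, zip_file: zipfile.ZipFile | None = None) -> bool:
#         if zip_file is not None:
#             return True  # conservatively treat archives as DAG candidates
#         with open(file_path, "rb") as f:
#             content = f.read().lower()
#         return b"dag" in content and b"airflow" in content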
| might_contain_dag_callable = airflow.utils.file.might_contain_dag_via_default_heuristic |
| |
| # Default timezone in case supplied date times are naive |
| # can be utc (default), system, or any IANA timezone string (e.g. Europe/Amsterdam) |
| default_timezone = utc |
| |
| # The executor class that airflow should use. Choices include |
| # ``SequentialExecutor``, ``LocalExecutor``, ``CeleryExecutor``, ``DaskExecutor``, |
| # ``KubernetesExecutor``, ``CeleryKubernetesExecutor`` or the |
| # full import path to the class when using a custom executor. |
| executor = SequentialExecutor |
| |
| # This defines the maximum number of task instances that can run concurrently per scheduler in |
| # Airflow, regardless of the worker count. Generally this value, multiplied by the number of |
| # schedulers in your cluster, is the maximum number of task instances with the running |
| # state in the metadata database. |
| parallelism = 32 |
| |
| # The maximum number of task instances allowed to run concurrently in each DAG. To calculate |
# the number of tasks that are running concurrently for a DAG, add up the number of running
| # tasks for all DAG runs of the DAG. This is configurable at the DAG level with ``max_active_tasks``, |
| # which is defaulted as ``max_active_tasks_per_dag``. |
| # |
| # An example scenario when this would be useful is when you want to stop a new dag with an early |
| # start date from stealing all the executor slots in a cluster. |
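#
# For example, to lower the limit for a single DAG (a sketch using the standard
# ``DAG`` constructor; the dag id is hypothetical):
#
#     from airflow import DAG
#
#     dag = DAG("my_resource_heavy_dag", max_active_tasks=4)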
| max_active_tasks_per_dag = 16 |
| |
| # Are DAGs paused by default at creation |
| dags_are_paused_at_creation = True |
| |
| # The maximum number of active DAG runs per DAG. The scheduler will not create more DAG runs |
| # if it reaches the limit. This is configurable at the DAG level with ``max_active_runs``, |
| # which is defaulted as ``max_active_runs_per_dag``. |
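#
# The per-DAG override is analogous to the ``max_active_tasks`` sketch above
# (hypothetical dag id):
#
#     from airflow import DAG
#
#     dag = DAG("my_serial_dag", max_active_runs=1)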
| max_active_runs_per_dag = 16 |
| |
| # The name of the method used in order to start Python processes via the multiprocessing module. |
| # This corresponds directly with the options available in the Python docs: |
| # https://docs.python.org/3/library/multiprocessing.html#multiprocessing.set_start_method. |
| # Must be one of the values returned by: |
| # https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_all_start_methods. |
| # Example: mp_start_method = fork |
| # mp_start_method = |
| |
| # Whether to load the DAG examples that ship with Airflow. It's good to |
| # get started, but you probably want to set this to ``False`` in a production |
| # environment |
| load_examples = True |
| |
| # Path to the folder containing Airflow plugins |
| plugins_folder = {AIRFLOW_HOME}/plugins |
| |
# Should tasks be executed via forking of the parent process ("False",
# the speedier option) or by spawning a new python process ("True", slower,
# but it means plugin changes are picked up by tasks straight away)
| execute_tasks_new_python_interpreter = False |
| |
| # Secret key to save connection passwords in the db |
| fernet_key = {FERNET_KEY} |
| |
| # Whether to disable pickling dags |
| donot_pickle = True |
| |
| # How long before timing out a python file import |
| dagbag_import_timeout = 30.0 |
| |
| # Should a traceback be shown in the UI for dagbag import errors, |
| # instead of just the exception message |
| dagbag_import_error_tracebacks = True |
| |
| # If tracebacks are shown, how many entries from the traceback should be shown |
| dagbag_import_error_traceback_depth = 2 |
| |
| # How long before timing out a DagFileProcessor, which processes a dag file |
| dag_file_processor_timeout = 50 |
| |
| # The class to use for running task instances in a subprocess. |
| # Choices include StandardTaskRunner, CgroupTaskRunner or the full import path to the class |
| # when using a custom task runner. |
| task_runner = StandardTaskRunner |
| |
| # If set, tasks without a ``run_as_user`` argument will be run with this user |
| # Can be used to de-elevate a sudo user running Airflow when executing tasks |
| default_impersonation = |
| |
| # What security module to use (for example kerberos) |
| security = |
| |
| # Turn unit test mode on (overwrites many configuration options with test |
| # values at runtime) |
| unit_test_mode = False |
| |
| # Whether to enable pickling for xcom (note that this is insecure and allows for |
| # RCE exploits). |
| enable_xcom_pickling = False |
| |
| # What classes can be imported during deserialization. This is a multi line value. |
| # The individual items will be parsed as regexp. Python built-in classes (like dict) |
| # are always allowed. Bare "." will be replaced so you can set airflow.* . |
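#
# As a sketch, extra patterns can be added as indented continuation lines
# (``my_org`` is a hypothetical package):
#
#     allowed_deserialization_classes = airflow\..*
#         my_org\.serializable\..*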
| allowed_deserialization_classes = airflow\..* |
| |
# When a task is killed forcefully, this is the amount of time in seconds that
# it has to clean up after it is sent a SIGTERM, before it is SIGKILLED.
| killed_task_cleanup_time = 60 |
| |
| # Whether to override params with dag_run.conf. If you pass some key-value pairs |
| # through ``airflow dags backfill -c`` or |
| # ``airflow dags trigger -c``, the key-value pairs will override the existing ones in params. |
| dag_run_conf_overrides_params = True |
| |
| # If enabled, Airflow will only scan files containing both ``DAG`` and ``airflow`` (case-insensitive). |
| dag_discovery_safe_mode = True |
| |
| # The pattern syntax used in the ".airflowignore" files in the DAG directories. Valid values are |
| # ``regexp`` or ``glob``. |
| dag_ignore_file_syntax = regexp |
| |
| # The number of retries each task is going to have by default. Can be overridden at dag or task level. |
| default_task_retries = 0 |
| |
| # The number of seconds each task is going to wait by default between retries. Can be overridden at |
| # dag or task level. |
| default_task_retry_delay = 300 |
| |
| # The maximum delay (in seconds) each task is going to wait by default between retries. |
| # This is a global setting and cannot be overridden at task or DAG level. |
| max_task_retry_delay = 86400 |
| |
# The weighting method used for the effective total priority weight of the task.
# Options are ``downstream`` (default), ``upstream`` and ``absolute``.
| default_task_weight_rule = downstream |
| |
| # The default task execution_timeout value for the operators. Expected an integer value to |
| # be passed into timedelta as seconds. If not specified, then the value is considered as None, |
| # meaning that the operators are never timed out by default. |
| default_task_execution_timeout = |
| |
# Updating a serialized DAG can not happen faster than this minimum interval, to reduce
# database write rate.
| min_serialized_dag_update_interval = 30 |
| |
| # If True, serialized DAGs are compressed before writing to DB. |
| # Note: this will disable the DAG dependencies view |
| compress_serialized_dags = False |
| |
# Fetching a serialized DAG can not happen faster than this minimum interval, to reduce database
# read rate. This config controls how often your DAGs are updated in the Webserver.
| min_serialized_dag_fetch_interval = 10 |
| |
| # Maximum number of Rendered Task Instance Fields (Template Fields) per task to store |
| # in the Database. |
# All template_fields for each Task Instance are stored in the database.
| # Keeping this number small may cause an error when you try to view ``Rendered`` tab in |
| # TaskInstance view for older tasks. |
| max_num_rendered_ti_fields_per_task = 30 |
| |
| # On each dagrun check against defined SLAs |
| check_slas = True |
| |
| # Path to custom XCom class that will be used to store and resolve operators results |
| # Example: xcom_backend = path.to.CustomXCom |
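#
# A minimal sketch of a custom backend (hook signatures vary between Airflow
# versions, so treat this as illustrative only):
#
#     from airflow.models.xcom import BaseXCom
#
#     class CustomXCom(BaseXCom):
#         @staticmethod
#         def serialize_value(value, **kwargs):
#             # e.g. offload large payloads to object storage here
#             return BaseXCom.serialize_value(value)
#
#         @staticmethod
#         def deserialize_value(result):
#             return BaseXCom.deserialize_value(result)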
| xcom_backend = airflow.models.xcom.BaseXCom |
| |
# By default Airflow plugins are lazily-loaded (only loaded when required). Set it to ``False``
# if you want to load plugins whenever 'airflow' is invoked via CLI or loaded from a module.
| lazy_load_plugins = True |
| |
| # By default Airflow providers are lazily-discovered (discovery and imports happen only when required). |
# Set it to False if you want to discover providers whenever 'airflow' is invoked via CLI or
# loaded from a module.
| lazy_discover_providers = True |
| |
| # Hide sensitive Variables or Connection extra json keys from UI and task logs when set to True |
| # |
| # (Connection passwords are always hidden in logs) |
| hide_sensitive_var_conn_fields = True |
| |
| # A comma-separated list of extra sensitive keywords to look for in variables names or connection's |
| # extra JSON. |
| sensitive_var_conn_names = |
| |
| # Task Slot counts for ``default_pool``. This setting would not have any effect in an existing |
| # deployment where the ``default_pool`` is already created. For existing deployments, users can |
| # change the number of slots using Webserver, API or the CLI |
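#
# For example, resizing the pool in an existing deployment via the CLI
# (slot count and description are illustrative):
#
#     airflow pools set default_pool 256 "Default pool"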
| default_pool_task_slot_count = 128 |
| |
| # The maximum list/dict length an XCom can push to trigger task mapping. If the pushed list/dict has a |
| # length exceeding this value, the task pushing the XCom will be failed automatically to prevent the |
| # mapped tasks from clogging the scheduler. |
| max_map_length = 1024 |
| |
| # The default umask to use for process when run in daemon mode (scheduler, worker, etc.) |
| # |
| # This controls the file-creation mode mask which determines the initial value of file permission bits |
| # for newly created files. |
| # |
| # This value is treated as an octal-integer. |
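#
# For example, the default ``0o077`` clears group/other permission bits, so a file
# created with mode ``0o666`` ends up as ``0o600`` (owner read/write only):
#
#     0o666 & ~0o077 == 0o600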
| daemon_umask = 0o077 |
| |
| # Class to use as dataset manager. |
| # Example: dataset_manager_class = airflow.datasets.manager.DatasetManager |
| # dataset_manager_class = |
| |
| # Kwargs to supply to dataset manager. |
| # Example: dataset_manager_kwargs = {{"some_param": "some_value"}} |
| # dataset_manager_kwargs = |
| |
| # (experimental) Whether components should use Airflow Internal API for DB connectivity. |
| database_access_isolation = False |
| |
| # (experimental) Airflow Internal API url. Only used if [core] database_access_isolation is True. |
| # Example: internal_api_url = http://localhost:8080 |
| # internal_api_url = |
| |
| [database] |
| # Path to the ``alembic.ini`` file. You can either provide the file path relative |
| # to the Airflow home directory or the absolute path if it is located elsewhere. |
| alembic_ini_file_path = alembic.ini |
| |
| # The SqlAlchemy connection string to the metadata database. |
| # SqlAlchemy supports many different database engines. |
| # More information here: |
| # http://airflow.apache.org/docs/apache-airflow/stable/howto/set-up-database.html#database-uri |
| sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/airflow.db |
| |
| # Extra engine specific keyword args passed to SQLAlchemy's create_engine, as a JSON-encoded value |
| # Example: sql_alchemy_engine_args = {{"arg1": True}} |
| # sql_alchemy_engine_args = |
| |
| # The encoding for the databases |
| sql_engine_encoding = utf-8 |
| |
| # Collation for ``dag_id``, ``task_id``, ``key``, ``external_executor_id`` columns |
| # in case they have different encoding. |
| # By default this collation is the same as the database collation, however for ``mysql`` and ``mariadb`` |
| # the default is ``utf8mb3_bin`` so that the index sizes of our index keys will not exceed |
| # the maximum size of allowed index when collation is set to ``utf8mb4`` variant |
| # (see https://github.com/apache/airflow/pull/17603#issuecomment-901121618). |
| # sql_engine_collation_for_ids = |
| |
| # If SqlAlchemy should pool database connections. |
| sql_alchemy_pool_enabled = True |
| |
| # The SqlAlchemy pool size is the maximum number of database connections |
| # in the pool. 0 indicates no limit. |
| sql_alchemy_pool_size = 5 |
| |
| # The maximum overflow size of the pool. |
| # When the number of checked-out connections reaches the size set in pool_size, |
| # additional connections will be returned up to this limit. |
| # When those additional connections are returned to the pool, they are disconnected and discarded. |
| # It follows then that the total number of simultaneous connections the pool will allow |
| # is pool_size + max_overflow, |
| # and the total number of "sleeping" connections the pool will allow is pool_size. |
| # max_overflow can be set to ``-1`` to indicate no overflow limit; |
| # no limit will be placed on the total number of concurrent connections. Defaults to ``10``. |
| sql_alchemy_max_overflow = 10 |
| |
| # The SqlAlchemy pool recycle is the number of seconds a connection |
| # can be idle in the pool before it is invalidated. This config does |
| # not apply to sqlite. If the number of DB connections is ever exceeded, |
| # a lower config value will allow the system to recover faster. |
| sql_alchemy_pool_recycle = 1800 |
| |
| # Check connection at the start of each connection pool checkout. |
| # Typically, this is a simple statement like "SELECT 1". |
| # More information here: |
| # https://docs.sqlalchemy.org/en/14/core/pooling.html#disconnect-handling-pessimistic |
| sql_alchemy_pool_pre_ping = True |
| |
| # The schema to use for the metadata database. |
| # SqlAlchemy supports databases with the concept of multiple schemas. |
| sql_alchemy_schema = |
| |
| # Import path for connect args in SqlAlchemy. Defaults to an empty dict. |
| # This is useful when you want to configure db engine args that SqlAlchemy won't parse |
| # in connection string. |
| # See https://docs.sqlalchemy.org/en/14/core/engines.html#sqlalchemy.create_engine.params.connect_args |
| # Example: sql_alchemy_connect_args = {{"timeout": 30}} |
| # sql_alchemy_connect_args = |
| |
| # Whether to load the default connections that ship with Airflow. It's good to |
| # get started, but you probably want to set this to ``False`` in a production |
| # environment |
| load_default_connections = True |
| |
| # Number of times the code should be retried in case of DB Operational Errors. |
| # Not all transactions will be retried as it can cause undesired state. |
| # Currently it is only used in ``DagFileProcessor.process_file`` to retry ``dagbag.sync_to_db``. |
| max_db_retries = 3 |
| |
| # Whether to run alembic migrations during Airflow start up. Sometimes this operation can be expensive, |
| # and the users can assert the correct version through other means (e.g. through a Helm chart). |
| # Accepts "True" or "False". |
| check_migrations = True |
| |
| [logging] |
| # The folder where airflow should store its log files. |
| # This path must be absolute. |
| # There are a few existing configurations that assume this is set to the default. |
# If you choose to override this you may need to update the dag_processor_manager_log_location and
# child_process_log_directory settings as well.
| base_log_folder = {AIRFLOW_HOME}/logs |
| |
# Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elasticsearch.
| # Set this to True if you want to enable remote logging. |
| remote_logging = False |
| |
| # Users must supply an Airflow connection id that provides access to the storage |
| # location. Depending on your remote logging service, this may only be used for |
| # reading logs, not writing them. |
| remote_log_conn_id = |
| |
| # Whether the local log files for GCS, S3, WASB and OSS remote logging should be deleted after |
| # they are uploaded to the remote location. |
| delete_local_logs = False |
| |
| # Path to Google Credential JSON file. If omitted, authorization based on `the Application Default |
| # Credentials |
| # <https://cloud.google.com/docs/authentication/production#finding_credentials_automatically>`__ will |
| # be used. |
| google_key_path = |
| |
| # Storage bucket URL for remote logging |
| # S3 buckets should start with "s3://" |
| # Cloudwatch log groups should start with "cloudwatch://" |
| # GCS buckets should start with "gs://" |
# WASB buckets should start with "wasb" just to help Airflow select the correct handler
| # Stackdriver logs should start with "stackdriver://" |
| remote_base_log_folder = |
| |
| # The remote_task_handler_kwargs param is loaded into a dictionary and passed to __init__ of remote |
| # task handler and it overrides the values provided by Airflow config. For example if you set |
| # `delete_local_logs=False` and you provide ``{{"delete_local_copy": true}}``, then the local |
| # log files will be deleted after they are uploaded to remote location. |
| # Example: remote_task_handler_kwargs = {{"delete_local_copy": true}} |
| remote_task_handler_kwargs = |
| |
| # Use server-side encryption for logs stored in S3 |
| encrypt_s3_logs = False |
| |
| # Logging level. |
| # |
| # Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``. |
| logging_level = INFO |
| |
| # Logging level for celery. If not set, it uses the value of logging_level |
| # |
| # Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``. |
| celery_logging_level = |
| |
| # Logging level for Flask-appbuilder UI. |
| # |
| # Supported values: ``CRITICAL``, ``ERROR``, ``WARNING``, ``INFO``, ``DEBUG``. |
| fab_logging_level = WARNING |
| |
| # Logging class |
# Specify the class that defines the logging configuration
| # This class has to be on the python classpath |
| # Example: logging_config_class = my.path.default_local_settings.LOGGING_CONFIG |
| logging_config_class = |
| |
| # Flag to enable/disable Colored logs in Console |
| # Colour the logs when the controlling terminal is a TTY. |
| colored_console_log = True |
| |
| # Log format for when Colored logs is enabled |
| colored_log_format = [%%(blue)s%%(asctime)s%%(reset)s] {{%%(blue)s%%(filename)s:%%(reset)s%%(lineno)d}} %%(log_color)s%%(levelname)s%%(reset)s - %%(log_color)s%%(message)s%%(reset)s |
| colored_formatter_class = airflow.utils.log.colored_log.CustomTTYColoredFormatter |
| |
| # Format of Log line |
| log_format = [%%(asctime)s] {{%%(filename)s:%%(lineno)d}} %%(levelname)s - %%(message)s |
| simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s |
| |
| # Where to send dag parser logs. If "file", logs are sent to log files defined by child_process_log_directory. |
| dag_processor_log_target = file |
| |
| # Format of Dag Processor Log line |
| dag_processor_log_format = [%%(asctime)s] [SOURCE:DAG_PROCESSOR] {{%%(filename)s:%%(lineno)d}} %%(levelname)s - %%(message)s |
| log_formatter_class = airflow.utils.log.timezone_aware.TimezoneAware |
| |
| # An import path to a function to add adaptations of each secret added with |
| # `airflow.utils.log.secrets_masker.mask_secret` to be masked in log messages. The given function |
| # is expected to require a single parameter: the secret to be adapted. It may return a |
| # single adaptation of the secret or an iterable of adaptations to each be masked as secrets. |
| # The original secret will be masked as well as any adaptations returned. |
| # Example: secret_mask_adapter = urllib.parse.quote |
| secret_mask_adapter = |
| |
| # Specify prefix pattern like mentioned below with stream handler TaskHandlerWithCustomFormatter |
| # Example: task_log_prefix_template = {{ti.dag_id}}-{{ti.task_id}}-{{execution_date}}-{{try_number}} |
| task_log_prefix_template = |
| |
| # Formatting for how airflow generates file names/paths for each task run. |
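#
# With the default template below, logs for try 1 of a (hypothetical) unmapped task
# land at a path like:
#
#     dag_id=my_dag/run_id=scheduled__2024-01-01T00:00:00+00:00/task_id=my_task/attempt=1.log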
| log_filename_template = dag_id={{{{ ti.dag_id }}}}/run_id={{{{ ti.run_id }}}}/task_id={{{{ ti.task_id }}}}/{{%% if ti.map_index >= 0 %%}}map_index={{{{ ti.map_index }}}}/{{%% endif %%}}attempt={{{{ try_number }}}}.log |
| |
| # Formatting for how airflow generates file names for log |
| log_processor_filename_template = {{{{ filename }}}}.log |
| |
| # Full path of dag_processor_manager logfile. |
| dag_processor_manager_log_location = {AIRFLOW_HOME}/logs/dag_processor_manager/dag_processor_manager.log |
| |
| # Name of handler to read task instance logs. |
| # Defaults to use ``task`` handler. |
| task_log_reader = task |
| |
# A comma-separated list of third-party logger names that will be configured to print messages to
# consoles.
| # Example: extra_logger_names = connexion,sqlalchemy |
| extra_logger_names = |
| |
# When you start an airflow worker, airflow starts a tiny web server
# subprocess to serve the worker's local log files to the airflow main
# web server, which then builds pages and sends them to users. This defines
# the port on which the logs are served. It needs to be unused, and must be
# reachable from the main web server so it can connect to the workers.
| worker_log_server_port = 8793 |
| |
| # Port to serve logs from for triggerer. See worker_log_server_port description |
| # for more info. |
| trigger_log_server_port = 8794 |
| |
# To interleave logs between the trigger and the task, we must parse timestamps
# in the log files. In case your log format is non-standard, you may provide an
# import path to a callable which takes a string log line and returns
# the timestamp (datetime.datetime compatible).
| # Example: interleave_timestamp_parser = path.to.my_func |
| # interleave_timestamp_parser = |
| |
# Permissions in the form of an octal string, as understood by chmod. The permissions are important
| # when you use impersonation, when logs are written by a different user than airflow. The most secure |
| # way of configuring it in this case is to add both users to the same group and make it the default |
| # group of both users. Group-writeable logs are default in airflow, but you might decide that you are |
| # OK with having the logs other-writeable, in which case you should set it to `0o777`. You might |
| # decide to add more security if you do not use impersonation and change it to `0o755` to make it |
| # only owner-writeable. You can also make it just readable only for owner by changing it to `0o700` if |
| # all the access (read/write) for your logs happens from the same user. |
| # Example: file_task_handler_new_folder_permissions = 0o775 |
| file_task_handler_new_folder_permissions = 0o775 |
| |
# Permissions in the form of an octal string, as understood by chmod. The permissions are important
| # when you use impersonation, when logs are written by a different user than airflow. The most secure |
| # way of configuring it in this case is to add both users to the same group and make it the default |
| # group of both users. Group-writeable logs are default in airflow, but you might decide that you are |
| # OK with having the logs other-writeable, in which case you should set it to `0o666`. You might |
| # decide to add more security if you do not use impersonation and change it to `0o644` to make it |
| # only owner-writeable. You can also make it just readable only for owner by changing it to `0o600` if |
| # all the access (read/write) for your logs happens from the same user. |
| # Example: file_task_handler_new_file_permissions = 0o664 |
| file_task_handler_new_file_permissions = 0o664 |
| |
| # By default Celery sends all logs into stderr. |
| # If enabled any previous logging handlers will get *removed*. |
# With this option Airflow will create new handlers
| # and send low level logs like INFO and WARNING to stdout, |
| # while sending higher severity logs to stderr. |
| celery_stdout_stderr_separation = False |
| |
| [metrics] |
| |
| # StatsD (https://github.com/etsy/statsd) integration settings. |
| # If you want to avoid emitting all the available metrics, you can configure an |
| # allow list of prefixes (comma separated) to send only the metrics that start |
| # with the elements of the list (e.g: "scheduler,executor,dagrun") |
| metrics_allow_list = |
| |
| # If you want to avoid emitting all the available metrics, you can configure a |
| # block list of prefixes (comma separated) to filter out metrics that start with |
| # the elements of the list (e.g: "scheduler,executor,dagrun"). |
| # If metrics_allow_list and metrics_block_list are both configured, metrics_block_list is ignored. |
| metrics_block_list = |
| |
| # Enables sending metrics to StatsD. |
| statsd_on = False |
| statsd_host = localhost |
| statsd_port = 8125 |
| statsd_prefix = airflow |
| |
# A function that validates the StatsD stat name, applies changes to the stat name if necessary,
# and returns the transformed stat name.
| # |
| # The function should have the following signature: |
| # def func_name(stat_name: str) -> str: |
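#
# A minimal sketch (hypothetical module), e.g. normalizing dots in stat names:
#
#     def my_stat_name_handler(stat_name: str) -> str:
#         return stat_name.replace(".", "_")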
| stat_name_handler = |
| |
# To enable the Datadog integration to send airflow metrics.
| statsd_datadog_enabled = False |
| |
# List of datadog tags attached to all metrics (e.g. key1:value1,key2:value2)
| statsd_datadog_tags = |
| |
| # Set to False to disable metadata tags for some of the emitted metrics |
| statsd_datadog_metrics_tags = True |
| |
| # If you want to utilise your own custom StatsD client set the relevant |
| # module path below. |
| # Note: The module path must exist on your PYTHONPATH for Airflow to pick it up |
| # statsd_custom_client_path = |
| |
| # If you want to avoid sending all the available metrics tags to StatsD, |
| # you can configure a block list of prefixes (comma separated) to filter out metric tags |
| # that start with the elements of the list (e.g: "job_id,run_id") |
| # Example: statsd_disabled_tags = job_id,run_id,dag_id,task_id |
| statsd_disabled_tags = job_id,run_id |
| |
| # To enable sending Airflow metrics with StatsD-Influxdb tagging convention. |
| statsd_influxdb_enabled = False |
| |
| # Enables sending metrics to OpenTelemetry. |
| otel_on = False |
| otel_host = localhost |
| otel_port = 8889 |
| otel_prefix = airflow |
| otel_interval_milliseconds = 60000 |
| |
| # If True, all metrics are also emitted to the console. Defaults to False. |
| otel_debugging_on = False |
| |
| [secrets] |
| # Full class name of secrets backend to enable (will precede env vars and metastore in search path) |
| # Example: backend = airflow.providers.amazon.aws.secrets.systems_manager.SystemsManagerParameterStoreBackend |
| backend = |
| |
| # The backend_kwargs param is loaded into a dictionary and passed to __init__ of secrets backend class. |
| # See documentation for the secrets backend you are using. JSON is expected. |
| # Example for AWS Systems Manager ParameterStore: |
| # ``{{"connections_prefix": "/airflow/connections", "profile_name": "default"}}`` |
| backend_kwargs = |
| |
| [cli] |
| # In what way should the cli access the API. The LocalClient will use the |
| # database directly, while the json_client will use the api running on the |
| # webserver |
| api_client = airflow.api.client.local_client |
| |
| # If you set web_server_url_prefix, do NOT forget to append it here, ex: |
| # ``endpoint_url = http://localhost:8080/myroot`` |
| # So api will look like: ``http://localhost:8080/myroot/api/experimental/...`` |
| endpoint_url = http://localhost:8080 |
| |
| [debug] |
| # Used only with ``DebugExecutor``. If set to ``True`` DAG will fail with first |
| # failed task. Helpful for debugging purposes. |
| fail_fast = False |
| |
| [api] |
| # Enables the deprecated experimental API. Please note that these APIs do not have access control. |
| # The authenticated user has full access. |
| # |
| # .. warning:: |
| # |
| # This `Experimental REST API <https://airflow.readthedocs.io/en/latest/rest-api-ref.html>`__ is |
| # deprecated since version 2.0. Please consider using |
| # `the Stable REST API <https://airflow.readthedocs.io/en/latest/stable-rest-api-ref.html>`__. |
| # For more information on migration, see |
| # `RELEASE_NOTES.rst <https://github.com/apache/airflow/blob/main/RELEASE_NOTES.rst>`_ |
| enable_experimental_api = False |
| |
| # Comma separated list of auth backends to authenticate users of the API. See |
| # https://airflow.apache.org/docs/apache-airflow/stable/security/api.html for possible values. |
| # ("airflow.api.auth.backend.default" allows all requests for historic reasons) |
| auth_backends = airflow.api.auth.backend.session |
| |
# Used to set the maximum page limit for API requests. If the limit passed as a param
# is greater than the maximum page limit, it will be ignored and the maximum page limit
# value will be used as the limit.
| maximum_page_limit = 100 |
| |
# Used to set the default page limit when the limit param is zero or not provided in API
# requests. Otherwise, if a positive integer is passed as the limit in an API request, the
# smaller of the user-given limit and the maximum page limit is used as the limit.
| fallback_page_limit = 100 |
| |
# The intended audience for JWT token credentials used for authorization. This value must match
# on the client and server sides. If empty, audience will not be tested.
| # Example: google_oauth2_audience = project-id-random-value.apps.googleusercontent.com |
| google_oauth2_audience = |
| |
| # Path to Google Cloud Service Account key file (JSON). If omitted, authorization based on |
| # `the Application Default Credentials |
| # <https://cloud.google.com/docs/authentication/production#finding_credentials_automatically>`__ will |
| # be used. |
| # Example: google_key_path = /files/service-account-json |
| google_key_path = |
| |
| # Used in response to a preflight request to indicate which HTTP |
| # headers can be used when making the actual request. This header is |
| # the server side response to the browser's |
| # Access-Control-Request-Headers header. |
| access_control_allow_headers = |
| |
| # Specifies the method or methods allowed when accessing the resource. |
| access_control_allow_methods = |
| |
| # Indicates whether the response can be shared with requesting code from the given origins. |
| # Separate URLs with space. |
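# As an illustration, two trusted origins separated by a space (hypothetical hosts):
# access_control_allow_origins = http://localhost:8080 https://ui.example.com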
| access_control_allow_origins = |
| |
| [lineage] |
# What lineage backend to use
| backend = |
| |
| [openlineage] |
| |
| # This section applies settings for OpenLineage integration. |
# For backwards compatibility with `openlineage-python` one can still use an
# `openlineage.yml` file or `OPENLINEAGE_` environment variables. However, the
# configuration below takes precedence over those.
| # More in documentation - https://openlineage.io/docs/client/python#configuration. |
| # Set this to true if you don't want OpenLineage to emit events. |
| disabled = False |
| |
| # OpenLineage namespace |
| # Example: namespace = food_delivery |
| # namespace = |
| |
# Semicolon-separated paths to custom OpenLineage extractors.
| # Example: extractors = full.path.to.ExtractorClass;full.path.to.AnotherExtractorClass |
| extractors = |
| |
| # Path to YAML config. This provides backwards compatibility to pass config as |
| # `openlineage.yml` file. |
| config_path = |
| |
| # OpenLineage Client transport configuration. It should contain type |
| # and additional options per each type. |
| # |
| # Currently supported types are: |
| # |
| # * HTTP |
| # * Kafka |
| # * Console |
| # Example: transport = {{"type": "http", "url": "http://localhost:5000"}} |
| transport = |
| |
| # If disabled, OpenLineage events do not contain source code of particular |
| # operators, like PythonOperator. |
| # disable_source_code = |
| |
| [atlas] |
| sasl_enabled = False |
| host = |
| port = 21000 |
| username = |
| password = |
| |
| [operators] |
| # The default owner assigned to each new operator, unless |
| # provided explicitly or passed via ``default_args`` |
| default_owner = airflow |
| default_cpus = 1 |
| default_ram = 512 |
| default_disk = 512 |
| default_gpus = 0 |
| |
# Default queue that tasks get assigned to and that workers listen on.
| default_queue = default |
| |
# Whether to allow passing additional/unused arguments (args, kwargs) to the BaseOperator.
# If set to False, an exception will be thrown; otherwise only a console message will be displayed.
| allow_illegal_arguments = False |
| |
| [hive] |
| # Default mapreduce queue for HiveOperator tasks |
| default_hive_mapred_queue = |
| |
| # Template for mapred_job_name in HiveOperator, supports the following named parameters |
| # hostname, dag_id, task_id, execution_date |
| # mapred_job_name_template = |
| |
| [webserver] |
| # Path of webserver config file used for configuring the webserver parameters |
| config_file = {AIRFLOW_HOME}/webserver_config.py |
| |
| # The base url of your website as airflow cannot guess what domain or |
| # cname you are using. This is used in automated emails that |
| # airflow sends to point links to the right web server |
| base_url = http://localhost:8080 |
| |
| # Default timezone to display all dates in the UI, can be UTC, system, or |
| # any IANA timezone string (e.g. Europe/Amsterdam). If left empty the |
| # default value of core/default_timezone will be used |
| # Example: default_ui_timezone = America/New_York |
| default_ui_timezone = UTC |
| |
| # The ip specified when starting the web server |
| web_server_host = 0.0.0.0 |
| |
| # The port on which to run the web server |
| web_server_port = 8080 |
| |
# Path to the SSL certificate for the web server. When both the certificate and
# key are provided, SSL will be enabled. This does not change the web server port.
| web_server_ssl_cert = |
| |
# Path to the SSL key for the web server. When both the certificate and key are
# provided, SSL will be enabled. This does not change the web server port.
| web_server_ssl_key = |
| |
| # The type of backend used to store web session data, can be 'database' or 'securecookie' |
| # Example: session_backend = securecookie |
| session_backend = database |
| |
# Number of seconds the webserver waits before killing a gunicorn master that doesn't respond
| web_server_master_timeout = 120 |
| |
| # Number of seconds the gunicorn webserver waits before timing out on a worker |
| web_server_worker_timeout = 120 |
| |
| # Number of workers to refresh at a time. When set to 0, worker refresh is |
| # disabled. When nonzero, airflow periodically refreshes webserver workers by |
| # bringing up new ones and killing old ones. |
| worker_refresh_batch_size = 1 |
| |
| # Number of seconds to wait before refreshing a batch of workers. |
| worker_refresh_interval = 6000 |
| |
# If set to True, Airflow will track files in the plugins_folder directory. When it detects
# changes, it will reload gunicorn.
| reload_on_plugin_change = False |
| |
# Secret key used to run your flask app. It should be as random as possible. However, when running
# more than one instance of the webserver, make sure all of them use the same ``secret_key`` otherwise
| # one of them will error with "CSRF session token is missing". |
| # The webserver key is also used to authorize requests to Celery workers when logs are retrieved. |
| # The token generated using the secret key has a short expiry time though - make sure that time on |
| # ALL the machines that you run airflow components on is synchronized (for example using ntpd) |
| # otherwise you might get "forbidden" errors when the logs are accessed. |
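# One way to generate a sufficiently random key (any cryptographically random value
# works; this Python one-liner is just an illustration):
#
#     python -c "import secrets; print(secrets.token_hex(16))"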
| secret_key = {SECRET_KEY} |
| |
| # Number of workers to run the Gunicorn web server |
| workers = 4 |
| |
| # The worker class gunicorn should use. Choices include |
| # sync (default), eventlet, gevent. Note when using gevent you might also want to set the |
| # "_AIRFLOW_PATCH_GEVENT" environment variable to "1" to make sure gevent patching is done as |
| # early as possible. |
| worker_class = sync |
| |
# Log files for the gunicorn webserver. '-' means log to stdout.
| access_logfile = - |
| |
| # Log files for the gunicorn webserver. '-' means log to stderr. |
| error_logfile = - |
| |
| # Access log format for gunicorn webserver. |
| # default format is %%(h)s %%(l)s %%(u)s %%(t)s "%%(r)s" %%(s)s %%(b)s "%%(f)s" "%%(a)s" |
| # documentation - https://docs.gunicorn.org/en/stable/settings.html#access-log-format |
| access_logformat = |
| |
| # Expose the configuration file in the web server. Set to "non-sensitive-only" to show all values |
| # except those that have security implications. "True" shows all values. "False" hides the |
| # configuration completely. |
| expose_config = False |
| |
| # Expose hostname in the web server |
| expose_hostname = False |
| |
| # Expose stacktrace in the web server |
| expose_stacktrace = False |
| |
| # Default DAG view. Valid values are: ``grid``, ``graph``, ``duration``, ``gantt``, ``landing_times`` |
| dag_default_view = grid |
| |
| # Default DAG orientation. Valid values are: |
| # ``LR`` (Left->Right), ``TB`` (Top->Bottom), ``RL`` (Right->Left), ``BT`` (Bottom->Top) |
| dag_orientation = LR |
| |
# The amount of time (in secs) the webserver will wait for the initial handshake
# while fetching logs from another worker machine
| log_fetch_timeout_sec = 5 |
| |
| # Time interval (in secs) to wait before next log fetching. |
| log_fetch_delay_sec = 2 |
| |
| # Distance away from page bottom to enable auto tailing. |
| log_auto_tailing_offset = 30 |
| |
| # Animation speed for auto tailing log display. |
| log_animation_speed = 1000 |
| |
| # By default, the webserver shows paused DAGs. Flip this to hide paused |
| # DAGs by default |
| hide_paused_dags_by_default = False |
| |
| # Consistent page size across all listing views in the UI |
| page_size = 100 |
| |
| # Define the color of navigation bar |
| navbar_color = #fff |
| |
| # Default dagrun to show in UI |
| default_dag_run_display_number = 25 |
| |
| # Enable werkzeug ``ProxyFix`` middleware for reverse proxy |
| enable_proxy_fix = False |
| |
| # Number of values to trust for ``X-Forwarded-For``. |
| # More info: https://werkzeug.palletsprojects.com/en/0.16.x/middleware/proxy_fix/ |
| proxy_fix_x_for = 1 |
| |
| # Number of values to trust for ``X-Forwarded-Proto`` |
| proxy_fix_x_proto = 1 |
| |
| # Number of values to trust for ``X-Forwarded-Host`` |
| proxy_fix_x_host = 1 |
| |
| # Number of values to trust for ``X-Forwarded-Port`` |
| proxy_fix_x_port = 1 |
| |
| # Number of values to trust for ``X-Forwarded-Prefix`` |
| proxy_fix_x_prefix = 1 |
| |
| # Set secure flag on session cookie |
| cookie_secure = False |
| |
| # Set samesite policy on session cookie |
| cookie_samesite = Lax |
| |
| # Default setting for wrap toggle on DAG code and TI log views. |
| default_wrap = False |
| |
| # Allow the UI to be rendered in a frame |
| x_frame_enabled = True |
| |
| # Send anonymous user activity to your analytics tool |
| # choose from google_analytics, segment, or metarouter |
| # analytics_tool = |
| |
| # Unique ID of your account in the analytics tool |
| # analytics_id = |
| |
| # 'Recent Tasks' stats will show for old DagRuns if set |
| show_recent_stats_for_completed_runs = True |
| |
| # Update FAB permissions and sync security manager roles |
| # on webserver startup |
| update_fab_perms = True |
| |
# The UI cookie lifetime in minutes. User will be logged out from the UI after
# ``session_lifetime_minutes`` of inactivity
| session_lifetime_minutes = 43200 |
| |
| # Sets a custom page title for the DAGs overview page and site title for all pages |
| # instance_name = |
| |
| # Whether the custom page title for the DAGs overview page contains any Markup language |
| instance_name_has_markup = False |
| |
| # How frequently, in seconds, the DAG data will auto-refresh in graph or grid view |
| # when auto-refresh is turned on |
| auto_refresh_interval = 3 |
| |
| # Boolean for displaying warning for publicly viewable deployment |
| warn_deployment_exposure = True |
| |
| # Comma separated string of view events to exclude from dag audit view. |
| # All other events will be added minus the ones passed here. |
| # The audit logs in the db will not be affected by this parameter. |
| audit_view_excluded_events = gantt,landing_times,tries,duration,calendar,graph,grid,tree,tree_data |
| |
| # Comma separated string of view events to include in dag audit view. |
| # If passed, only these events will populate the dag audit view. |
| # The audit logs in the db will not be affected by this parameter. |
| # Example: audit_view_included_events = dagrun_cleared,failed |
| # audit_view_included_events = |
| |
| # Boolean for running SwaggerUI in the webserver. |
| enable_swagger_ui = True |
| |
| # Boolean for running Internal API in the webserver. |
| run_internal_api = False |
| |
| # Boolean for enabling rate limiting on authentication endpoints. |
| auth_rate_limited = True |
| |
| # Rate limit for authentication endpoints. |
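# The value uses rate-limit string notation as accepted by Flask-Limiter,
# e.g. ``10 per minute`` or ``5 per 40 second``.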
| auth_rate_limit = 5 per 40 second |
| |
| # The caching algorithm used by the webserver. Must be a valid hashlib function name. |
| # Example: caching_hash_method = sha256 |
| caching_hash_method = md5 |
| |
| [email] |
| |
# Configuration of the email backend and whether to
# send email alerts on retry or failure
| # Email backend to use |
| email_backend = airflow.utils.email.send_email_smtp |
| |
| # Email connection to use |
| email_conn_id = smtp_default |
| |
| # Whether email alerts should be sent when a task is retried |
| default_email_on_retry = True |
| |
# Whether email alerts should be sent when a task fails
| default_email_on_failure = True |
| |
| # File that will be used as the template for Email subject (which will be rendered using Jinja2). |
| # If not set, Airflow uses a base template. |
| # Example: subject_template = /path/to/my_subject_template_file |
| # subject_template = |
| |
| # File that will be used as the template for Email content (which will be rendered using Jinja2). |
| # If not set, Airflow uses a base template. |
| # Example: html_content_template = /path/to/my_html_content_template_file |
| # html_content_template = |
| |
| # Email address that will be used as sender address. |
| # It can either be raw email or the complete address in a format ``Sender Name <sender@email.com>`` |
| # Example: from_email = Airflow <airflow@example.com> |
| # from_email = |
| |
| [smtp] |
| |
# If you want airflow to send emails on retries and failures, and you want to use
# the airflow.utils.email.send_email_smtp function, you have to configure an
# SMTP server here
| smtp_host = localhost |
| smtp_starttls = True |
| smtp_ssl = False |
| # Example: smtp_user = airflow |
| # smtp_user = |
| # Example: smtp_password = airflow |
| # smtp_password = |
| smtp_port = 25 |
| smtp_mail_from = airflow@example.com |
| smtp_timeout = 30 |
| smtp_retry_limit = 5 |
| |
| [sentry] |
| |
| # Sentry (https://docs.sentry.io) integration. Here you can supply |
| # additional configuration options based on the Python platform. See: |
| # https://docs.sentry.io/error-reporting/configuration/?platform=python. |
| # Unsupported options: ``integrations``, ``in_app_include``, ``in_app_exclude``, |
| # ``ignore_errors``, ``before_breadcrumb``, ``transport``. |
| # Enable error reporting to Sentry |
| sentry_on = false |
| sentry_dsn = |
| |
| # Dotted path to a before_send function that the sentry SDK should be configured to use. |
| # before_send = |
| |
| [local_kubernetes_executor] |
| |
| # This section only applies if you are using the ``LocalKubernetesExecutor`` in |
| # ``[core]`` section above |
| # Define when to send a task to ``KubernetesExecutor`` when using ``LocalKubernetesExecutor``. |
| # When the queue of a task is the value of ``kubernetes_queue`` (default ``kubernetes``), |
| # the task is executed via ``KubernetesExecutor``, |
| # otherwise via ``LocalExecutor`` |
| kubernetes_queue = kubernetes |
| |
| [celery_kubernetes_executor] |
| |
| # This section only applies if you are using the ``CeleryKubernetesExecutor`` in |
| # ``[core]`` section above |
| # Define when to send a task to ``KubernetesExecutor`` when using ``CeleryKubernetesExecutor``. |
| # When the queue of a task is the value of ``kubernetes_queue`` (default ``kubernetes``), |
| # the task is executed via ``KubernetesExecutor``, |
| # otherwise via ``CeleryExecutor`` |
| kubernetes_queue = kubernetes |
| |
| [celery] |
| |
| # This section only applies if you are using the CeleryExecutor in |
| # ``[core]`` section above |
| # The app name that will be used by celery |
| celery_app_name = airflow.executors.celery_executor |
| |
| # The concurrency that will be used when starting workers with the |
| # ``airflow celery worker`` command. This defines the number of task instances that |
| # a worker will take, so size up your workers based on the resources on |
| # your worker box and the nature of your tasks |
| worker_concurrency = 16 |
| |
| # The maximum and minimum concurrency that will be used when starting workers with the |
| # ``airflow celery worker`` command (always keep minimum processes, but grow |
| # to maximum if necessary). Note the value should be max_concurrency,min_concurrency |
| # Pick these numbers based on resources on worker box and the nature of the task. |
| # If autoscale option is available, worker_concurrency will be ignored. |
| # http://docs.celeryproject.org/en/latest/reference/celery.bin.worker.html#cmdoption-celery-worker-autoscale |
| # Example: worker_autoscale = 16,12 |
| # worker_autoscale = |
| |
| # Used to increase the number of tasks that a worker prefetches which can improve performance. |
| # The number of processes multiplied by worker_prefetch_multiplier is the number of tasks |
| # that are prefetched by a worker. A value greater than 1 can result in tasks being unnecessarily |
| # blocked if there are multiple workers and one worker prefetches tasks that sit behind long |
| # running tasks while another worker has unutilized processes that are unable to process the already |
| # claimed blocked tasks. |
| # https://docs.celeryproject.org/en/stable/userguide/optimizing.html#prefetch-limits |
| worker_prefetch_multiplier = 1 |
| |
| # Specify if remote control of the workers is enabled. |
| # In some cases when the broker does not support remote control, Celery creates lots of |
| # ``.*reply-celery-pidbox`` queues. You can prevent this by setting this to false. |
| # However, with this disabled Flower won't work. |
| # https://docs.celeryq.dev/en/stable/getting-started/backends-and-brokers/index.html#broker-overview |
| worker_enable_remote_control = true |
| |
| # The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally |
| # a sqlalchemy database. Refer to the Celery documentation for more information. |
| broker_url = redis://redis:6379/0 |
| |
| # The Celery result_backend. When a job finishes, it needs to update the |
| # metadata of the job. Therefore it will post a message on a message bus, |
# or insert it into a database (depending on the backend)
| # This status is used by the scheduler to update the state of the task |
| # The use of a database is highly recommended |
| # When not specified, sql_alchemy_conn with a db+ scheme prefix will be used |
| # http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-result-backend-settings |
| # Example: result_backend = db+postgresql://postgres:airflow@postgres/airflow |
| # result_backend = |
| |
| # Optional configuration dictionary to pass to the Celery result backend SQLAlchemy engine. |
| # Example: result_backend_sqlalchemy_engine_options = {{"pool_recycle": 1800}} |
| result_backend_sqlalchemy_engine_options = |
| |
| # Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start |
| # it ``airflow celery flower``. This defines the IP that Celery Flower runs on |
| flower_host = 0.0.0.0 |
| |
| # The root URL for Flower |
| # Example: flower_url_prefix = /flower |
| flower_url_prefix = |
| |
| # This defines the port that Celery Flower runs on |
| flower_port = 5555 |
| |
| # Securing Flower with Basic Authentication |
| # Accepts user:password pairs separated by a comma |
| # Example: flower_basic_auth = user1:password1,user2:password2 |
| flower_basic_auth = |
| |
| # How many processes CeleryExecutor uses to sync task state. |
| # 0 means to use max(1, number of cores - 1) processes. |
| sync_parallelism = 0 |
| |
| # Import path for celery configuration options |
| celery_config_options = airflow.config_templates.default_celery.DEFAULT_CELERY_CONFIG |
| ssl_active = False |
| |
| # Path to the client key. |
| ssl_key = |
| |
| # Path to the client certificate. |
| ssl_cert = |
| |
| # Path to the CA certificate. |
| ssl_cacert = |
| |
| # Celery Pool implementation. |
| # Choices include: ``prefork`` (default), ``eventlet``, ``gevent`` or ``solo``. |
| # See: |
| # https://docs.celeryproject.org/en/latest/userguide/workers.html#concurrency |
| # https://docs.celeryproject.org/en/latest/userguide/concurrency/eventlet.html |
| pool = prefork |
| |
| # The number of seconds to wait before timing out ``send_task_to_executor`` or |
| # ``fetch_celery_task_state`` operations. |
| operation_timeout = 1.0 |
| |
| # Celery task will report its status as 'started' when the task is executed by a worker. |
| # This is used in Airflow to keep track of the running tasks and if a Scheduler is restarted |
| # or run in HA mode, it can adopt the orphan tasks launched by previous SchedulerJob. |
| task_track_started = True |
| |
# The maximum number of retries for publishing task messages to the broker when failing
| # due to ``AirflowTaskTimeout`` error before giving up and marking Task as failed. |
| task_publish_max_retries = 3 |
| |
| # Worker initialisation check to validate Metadata Database connection |
| worker_precheck = False |
| |
| [celery_broker_transport_options] |
| |
| # This section is for specifying options which can be passed to the |
| # underlying celery broker transport. See: |
| # http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_transport_options |
| # The visibility timeout defines the number of seconds to wait for the worker |
| # to acknowledge the task before the message is redelivered to another worker. |
| # Make sure to increase the visibility timeout to match the time of the longest |
| # ETA you're planning to use. |
| # visibility_timeout is only supported for Redis and SQS celery brokers. |
| # See: |
| # https://docs.celeryq.dev/en/stable/getting-started/backends-and-brokers/redis.html#visibility-timeout |
| # Example: visibility_timeout = 21600 |
| # visibility_timeout = |
| |
| # The sentinel_kwargs parameter allows passing additional options to the Sentinel client. |
| # In a typical scenario where Redis Sentinel is used as the broker and Redis servers are |
| # password-protected, the password needs to be passed through this parameter. Although its |
| # type is string, it is required to pass a string that conforms to the dictionary format. |
| # See: |
| # https://docs.celeryq.dev/en/stable/getting-started/backends-and-brokers/redis.html#configuration |
| # Example: sentinel_kwargs = {{"password": "password_for_redis_server"}} |
| # sentinel_kwargs = |
| |
| [dask] |
| |
| # This section only applies if you are using the DaskExecutor in |
| # [core] section above |
| # The IP address and port of the Dask cluster's scheduler. |
| cluster_address = 127.0.0.1:8786 |
| |
| # Path to a CA certificate file encoded in PEM format to access a secured Dask scheduler. |
| tls_ca = |
| |
| # Path to a certificate file for the client, encoded in PEM format. |
| tls_cert = |
| |
| # Path to a key file for the client, encoded in PEM format. |
| tls_key = |
| |
| [scheduler] |
| # Task instances listen for external kill signal (when you clear tasks |
| # from the CLI or the UI), this defines the frequency at which they should |
| # listen (in seconds). |
| job_heartbeat_sec = 5 |
| |
| # The scheduler constantly tries to trigger new tasks (look at the |
| # scheduler section in the docs for more information). This defines |
| # how often the scheduler should run (in seconds). |
| scheduler_heartbeat_sec = 5 |
| |
| # The number of times to try to schedule each DAG file |
| # -1 indicates unlimited number |
| num_runs = -1 |
| |
# Controls how long the scheduler will sleep between loops when there was nothing to do
# in the loop, i.e. if it scheduled something then it will start the next loop
# iteration straight away.
| scheduler_idle_sleep_time = 1 |
| |
| # Number of seconds after which a DAG file is parsed. The DAG file is parsed every |
| # ``min_file_process_interval`` number of seconds. Updates to DAGs are reflected after |
| # this interval. Keeping this number low will increase CPU usage. |
| min_file_process_interval = 30 |
| |
| # How often (in seconds) to check for stale DAGs (DAGs which are no longer present in |
| # the expected files) which should be deactivated, as well as datasets that are no longer |
| # referenced and should be marked as orphaned. |
| parsing_cleanup_interval = 60 |
| |
| # How long (in seconds) to wait after we have re-parsed a DAG file before deactivating stale |
| # DAGs (DAGs which are no longer present in the expected files). The reason why we need |
| # this threshold is to account for the time between when the file is parsed and when the |
| # DAG is loaded. The absolute maximum that this could take is `dag_file_processor_timeout`, |
| # but when you have a long timeout configured, it results in a significant delay in the |
| # deactivation of stale dags. |
| stale_dag_threshold = 50 |
| |
# How often (in seconds) to scan the DAGs directory for new files. Defaults to 5 minutes.
| dag_dir_list_interval = 300 |
| |
| # How often should stats be printed to the logs. Setting to 0 will disable printing stats |
| print_stats_interval = 30 |
| |
| # How often (in seconds) should pool usage stats be sent to StatsD (if statsd_on is enabled) |
| pool_metrics_interval = 5.0 |
| |
| # If the last scheduler heartbeat happened more than scheduler_health_check_threshold |
| # ago (in seconds), scheduler is considered unhealthy. |
| # This is used by the health check in the "/health" endpoint and in `airflow jobs check` CLI |
| # for SchedulerJob. |
| scheduler_health_check_threshold = 30 |
| |
| # When you start a scheduler, airflow starts a tiny web server |
| # subprocess to serve a health check if this is set to True |
| enable_health_check = False |
| |
| # When you start a scheduler, airflow starts a tiny web server |
| # subprocess to serve a health check on this port |
| scheduler_health_check_server_port = 8974 |
| |
| # How often (in seconds) should the scheduler check for orphaned tasks and SchedulerJobs |
| orphaned_tasks_check_interval = 300.0 |
| child_process_log_directory = {AIRFLOW_HOME}/logs/scheduler |
| |
# Local task jobs periodically heartbeat to the DB. If the job has
# not sent a heartbeat in this many seconds, the scheduler will mark the
# associated task instance as failed and will re-schedule the task.
| scheduler_zombie_task_threshold = 300 |
| |
| # How often (in seconds) should the scheduler check for zombie tasks. |
| zombie_detection_interval = 10.0 |
| |
# Turn off scheduler catchup by setting this to ``False``.
# Default behavior is unchanged and Command Line Backfills still work,
# but the scheduler will not do catchup if this is ``False``.
# It can also be overridden on a per-DAG basis in the
# DAG definition (``catchup``).
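# (e.g. ``DAG(dag_id="my_dag", catchup=False, ...)`` in the DAG file; the dag_id is illustrative)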
| catchup_by_default = True |
| |
# Setting this to True will make the first task instance of a task
# ignore the depends_on_past setting. A task instance will be considered
# the first task instance of a task when there is no task instance
# in the DB with an earlier execution_date, i.e. no manual marking of
# success will be needed for a newly added task to be scheduled.
| ignore_first_depends_on_past_by_default = True |
| |
| # This changes the batch size of queries in the scheduling main loop. |
# If this is too high, SQL query performance may be impacted by the
# complexity of the query predicate, and/or excessive locking.
| # Additionally, you may hit the maximum allowable query length for your db. |
| # Set this to 0 for no limit (not advised) |
| max_tis_per_query = 512 |
| |
| # Should the scheduler issue ``SELECT ... FOR UPDATE`` in relevant queries. |
| # If this is set to False then you should not run more than a single |
| # scheduler at once |
| use_row_level_locking = True |
| |
| # Max number of DAGs to create DagRuns for per scheduler loop. |
| max_dagruns_to_create_per_loop = 10 |
| |
| # How many DagRuns should a scheduler examine (and lock) when scheduling |
| # and queuing tasks. |
| max_dagruns_per_loop_to_schedule = 20 |
| |
| # Should the Task supervisor process perform a "mini scheduler" to attempt to schedule more tasks of the |
| # same DAG. Leaving this on will mean tasks in the same DAG execute quicker, but might starve out other |
# DAGs in some circumstances.
| schedule_after_task_execution = True |
| |
| # The scheduler reads dag files to extract the airflow modules that are going to be used, |
| # and imports them ahead of time to avoid having to re-do it for each parsing process. |
| # This flag can be set to False to disable this behavior in case an airflow module needs to be freshly |
| # imported each time (at the cost of increased DAG parsing time). |
| parsing_pre_import_modules = True |
| |
| # The scheduler can run multiple processes in parallel to parse dags. |
| # This defines how many processes will run. |
| parsing_processes = 2 |
| |
| # One of ``modified_time``, ``random_seeded_by_host`` and ``alphabetical``. |
| # The scheduler will list and sort the dag files to decide the parsing order. |
| # |
# * ``modified_time``: Sort by modified time of the files. This is useful at large scale to parse the
| # recently modified DAGs first. |
| # * ``random_seeded_by_host``: Sort randomly across multiple Schedulers but with same order on the |
| # same host. This is useful when running with Scheduler in HA mode where each scheduler can |
| # parse different DAG files. |
| # * ``alphabetical``: Sort by filename |
| file_parsing_sort_mode = modified_time |
| |
# Whether the dag processor is running as a standalone process or as a subprocess of a scheduler
# job.
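# When True, the DAG processor must be started separately, e.g. with the
# ``airflow dag-processor`` CLI command.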
| standalone_dag_processor = False |
| |
# Only applicable if `[scheduler]standalone_dag_processor` is true and callbacks are stored
# in the database. Sets the maximum number of callbacks that are fetched during a single loop.
| max_callbacks_per_loop = 20 |
| |
# Only applicable if `[scheduler]standalone_dag_processor` is true.
# Time in seconds after which DAGs that were not updated by the Dag Processor are deactivated.
| dag_stale_not_seen_duration = 600 |
| |
| # Turn off scheduler use of cron intervals by setting this to False. |
| # DAGs submitted manually in the web UI or with trigger_dag will still run. |
| use_job_schedule = True |
| |
# Allow externally triggered DagRuns for execution dates in the future.
# Only has an effect if schedule_interval is set to None in the DAG.
| allow_trigger_in_future = False |
| |
| # How often to check for expired trigger requests that have not run yet. |
| trigger_timeout_check_interval = 15 |
| |
| # Amount of time a task can be in the queued state before being retried or set to failed. |
| task_queued_timeout = 600.0 |
| |
| # How often to check for tasks that have been in the queued state for |
| # longer than `[scheduler] task_queued_timeout`. |
| task_queued_timeout_check_interval = 120.0 |
| |
| [triggerer] |
| # How many triggers a single Triggerer will run at once, by default. |
| default_capacity = 1000 |
| |
| [kerberos] |
| ccache = /tmp/airflow_krb5_ccache |
| |
# Gets augmented with the FQDN.
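# For example, a principal of ``airflow`` may effectively become
# ``airflow/host.example.com`` after augmentation (illustrative hostname).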
| principal = airflow |
| reinit_frequency = 3600 |
| kinit_path = kinit |
| keytab = airflow.keytab |
| |
# Allows disabling ticket forwardability.
| forwardable = True |
| |
# Allows removing the source IP from the token, which is useful when using the token behind a NATted Docker host.
| include_ip = True |
| |
| [elasticsearch] |
| # Elasticsearch host |
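# Example (hypothetical local cluster): host = localhost:9200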
| host = |
| |
# Format of the log_id, which is used to query for a given task's logs
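# With illustrative values (dag my_dag, task my_task, a scheduled run, an unmapped
# task, and try number 1), a log_id might render as:
# my_dag-my_task-scheduled__2022-01-01T00:00:00+00:00--1-1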
| log_id_template = {{dag_id}}-{{task_id}}-{{run_id}}-{{map_index}}-{{try_number}} |
| |
| # Used to mark the end of a log stream for a task |
| end_of_log_mark = end_of_log |
| |
| # Qualified URL for an elasticsearch frontend (like Kibana) with a template argument for log_id |
| # Code will construct log_id using the log_id template from the argument above. |
| # NOTE: scheme will default to https if one is not provided |
| # Example: frontend = http://localhost:5601/app/kibana#/discover?_a=(columns:!(message),query:(language:kuery,query:'log_id: "{{log_id}}"'),sort:!(log.offset,asc)) |
| frontend = |
| |
| # Write the task logs to the stdout of the worker, rather than the default files |
| write_stdout = False |
| |
| # Instead of the default log formatter, write the log lines as JSON |
| json_format = False |
| |
| # Log fields to also attach to the json output, if enabled |
| json_fields = asctime, filename, lineno, levelname, message |
| |
| # The field where host name is stored (normally either `host` or `host.name`) |
| host_field = host |
| |
| # The field where offset is stored (normally either `offset` or `log.offset`) |
| offset_field = offset |
| |
| # Comma separated list of index patterns to use when searching for logs (default: `_all`). |
| # Example: index_patterns = something-* |
| index_patterns = _all |
| |
| [elasticsearch_configs] |
| use_ssl = False |
| verify_certs = True |
| |
| [kubernetes_executor] |
| # Kwargs to override the default urllib3 Retry used in the kubernetes API client |
| # Example: api_client_retry_configuration = {{ "total": 3, "backoff_factor": 0.5 }} |
| api_client_retry_configuration = |
| |
| # Flag to control the information added to kubernetes executor logs for better traceability |
| logs_task_metadata = False |
| |
| # Path to the YAML pod file that forms the basis for KubernetesExecutor workers. |
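# Example (hypothetical path): pod_template_file = /opt/airflow/pod_templates/worker.yaml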
| pod_template_file = |
| |
| # The repository of the Kubernetes Image for the Worker to Run |
| worker_container_repository = |
| |
| # The tag of the Kubernetes Image for the Worker to Run |
| worker_container_tag = |
| |
| # The Kubernetes namespace where airflow workers should be created. Defaults to ``default`` |
| namespace = default |
| |
| # If True, all worker pods will be deleted upon termination |
| delete_worker_pods = True |
| |
| # If False (and delete_worker_pods is True), |
| # failed worker pods will not be deleted so users can investigate them. |
| # This only prevents removal of worker pods where the worker itself failed, |
| # not when the task it ran failed. |
| delete_worker_pods_on_failure = False |
| |
| # Number of Kubernetes Worker Pod creation calls per scheduler loop. |
| # Note that the current default of "1" will only launch a single pod |
# per heartbeat. It is HIGHLY recommended that users increase this
| # number to match the tolerance of their kubernetes cluster for |
| # better performance. |
| worker_pods_creation_batch_size = 1 |
| |
# Allows users to launch pods in multiple namespaces.
# This requires creating a cluster-role for the scheduler,
# or using the multi_namespace_mode_namespace_list configuration.
| multi_namespace_mode = False |
| |
# If multi_namespace_mode is True while the scheduler does not have a cluster-role,
# give the list of namespaces where the scheduler will schedule jobs.
# The scheduler needs to have the necessary permissions in these namespaces.
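# Example (hypothetical namespaces): multi_namespace_mode_namespace_list = airflow-team-a,airflow-team-b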
| multi_namespace_mode_namespace_list = |
| |
# Use the service account kubernetes gives to pods to connect to the kubernetes cluster.
| # It's intended for clients that expect to be running inside a pod running on kubernetes. |
| # It will raise an exception if called from a process not running in a kubernetes environment. |
| in_cluster = True |
| |
# When running with in_cluster=False, change the default cluster_context or config_file
# options to the Kubernetes client. Leave these blank to use the default behaviour, as ``kubectl`` does.
| # cluster_context = |
| |
| # Path to the kubernetes configfile to be used when ``in_cluster`` is set to False |
| # config_file = |
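# Example (hypothetical context name and path):
# cluster_context = my-cluster-context
# config_file = /home/airflow/.kube/config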
| |
# Keyword parameters to pass while calling the kubernetes client core_v1_api methods
# from the Kubernetes Executor, provided as a single line formatted JSON dictionary string.
# The list of supported params is similar for all core_v1_apis, hence a single config
# variable for all apis. See:
| # https://raw.githubusercontent.com/kubernetes-client/python/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/api/core_v1_api.py |
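# Example (an illustrative 60s connect/read timeout):
# kube_client_request_args = {{"_request_timeout": [60,60]}}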
| kube_client_request_args = |
| |
| # Optional keyword arguments to pass to the ``delete_namespaced_pod`` kubernetes client |
| # ``core_v1_api`` method when using the Kubernetes Executor. |
| # This should be an object and can contain any of the options listed in the ``v1DeleteOptions`` |
| # class defined here: |
| # https://github.com/kubernetes-client/python/blob/41f11a09995efcd0142e25946adc7591431bfb2f/kubernetes/client/models/v1_delete_options.py#L19 |
| # Example: delete_option_kwargs = {{"grace_period_seconds": 10}} |
| delete_option_kwargs = |
| |
# Enables the TCP keepalive mechanism. This prevents Kubernetes API requests from hanging
# indefinitely when an idle connection is timed out by services like cloud load balancers or firewalls.
| enable_tcp_keepalive = True |
| |
| # When the `enable_tcp_keepalive` option is enabled, TCP probes a connection that has |
| # been idle for `tcp_keep_idle` seconds. |
| tcp_keep_idle = 120 |
| |
# When the `enable_tcp_keepalive` option is enabled, if the Kubernetes API does not respond
# to a keepalive probe, TCP retransmits the probe after `tcp_keep_intvl` seconds.
| tcp_keep_intvl = 30 |
| |
# When the `enable_tcp_keepalive` option is enabled, if the Kubernetes API does not respond
# to a keepalive probe, TCP retransmits the probe `tcp_keep_cnt` number of times before
# a connection is considered to be broken.
| tcp_keep_cnt = 6 |
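# With the defaults above, an unresponsive connection is therefore detected after roughly
# `tcp_keep_idle` + `tcp_keep_cnt` * `tcp_keep_intvl` = 120 + 6 * 30 = 300 seconds of silence.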
| |
# Set this to False to skip verifying the SSL certificate of the Kubernetes python client.
| verify_ssl = True |
| |
# How often (in seconds) to check for task instances stuck in "queued" status without a pod.
| worker_pods_queued_check_interval = 60 |
| |
| # Path to a CA certificate to be used by the Kubernetes client to verify the server's SSL certificate. |
| ssl_ca_cert = |
| |
| [sensors] |
| # Sensor default timeout, 7 days by default (7 * 24 * 60 * 60). |
| default_timeout = 604800 |