Register kafka1.staging in staging kafka cluster
Only the top-level commit is to be reviewed.
This registers the new node in the kafka cluster, modifying kafka1.staging [1] and storage1.staging [2].
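Once this is deployed, the registration can be sanity-checked from one of the staging nodes. A minimal sketch, assuming the standard /opt/kafka layout visible in the diffs; the zookeeper chroot comes from zookeeper.connect in [2], while the listener port (9093) and the TLS client settings file are placeholders to adapt to the local setup:

# list the broker ids currently registered in zookeeper
$ /opt/kafka/bin/zookeeper-shell.sh journal1.internal.staging.swh.network:2181/kafka/softwareheritage ls /brokers/ids

# ask the new broker directly for its supported API versions
# (client.properties holding the truststore settings is hypothetical)
$ /opt/kafka/bin/kafka-broker-api-versions.sh \
    --bootstrap-server kafka1.internal.staging.swh.network:9093 \
    --command-config client.properties

If the node is properly registered, its broker id should show up in the zookeeper listing.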
[1]
octo-diff kafka1.staging
$ $SWH_PUPPET_ENVIRONMENT_HOME/bin/octocatalog-diff --to staging_update_kafka_cluster kafka1.internal.staging.swh.network
Found host kafka1.internal.staging.swh.network
Cloning into '/tmp/swh-ocd.8YCSmdHf/swh-site'...
done.
branch 'staging_update_kafka_cluster' set up to track 'origin/staging_update_kafka_cluster'.
Switched to a new branch 'staging_update_kafka_cluster'
Cloning into '/tmp/swh-ocd.8YCSmdHf/environments/production/data/private'...
done.
Cloning into '/tmp/swh-ocd.8YCSmdHf/environments/staging_update_kafka_cluster/data/private'...
done.
*** Running octocatalog-diff on host kafka1.internal.staging.swh.network
I, [2023-04-07T11:52:19.132801 #3278911]  INFO -- : Catalogs compiled for kafka1.internal.staging.swh.network
I, [2023-04-07T11:52:19.482795 #3278911]  INFO -- : Diffs computed for kafka1.internal.staging.swh.network
diff origin/production/kafka1.internal.staging.swh.network current/kafka1.internal.staging.swh.network
*******************************************
+ Anchor[zookeeper::end]
*******************************************
+ Anchor[zookeeper::install::begin]
*******************************************
+ Anchor[zookeeper::install::end]
*******************************************
+ Anchor[zookeeper::install::intermediate]
*******************************************
+ Anchor[zookeeper::start]
*******************************************
+ Exec[create /srv/kafka/logdir] =>
   parameters =>
     "command": "mkdir -p /srv/kafka/logdir",
     "creates": "/srv/kafka/logdir",
     "path": [ "/bin", "/usr/bin", "/sbin", "/usr/sbin" ]
*******************************************
+ Exec[kafka-reload-tls:EXTERNAL] =>
   parameters =>
     "command": "/opt/kafka/bin/kafka-configs.sh --bootstrap-server kafka1.intern...
     "refreshonly": true
*******************************************
+ Exec[kafka-reload-tls:INTERNAL] =>
   parameters =>
     "command": "/opt/kafka/bin/kafka-configs.sh --bootstrap-server kafka1.intern...
     "refreshonly": true
*******************************************
+ File[/etc/init.d/kafka] =>
   parameters =>
     "ensure": "absent"
*******************************************
+ File[/etc/ssl/certs/letsencrypt/kafka1.internal.staging.swh.network/cert.pem] =>
   parameters =>
     "ensure": "present",
     "group": "root",
     "mode": "0644",
     "owner": "root",
     "source": "puppet:///le_certs/kafka1.internal.staging.swh.network/cert.pem"
*******************************************
+ File[/etc/ssl/certs/letsencrypt/kafka1.internal.staging.swh.network/chain.pem] =>
   parameters =>
     "ensure": "present",
     "group": "root",
     "mode": "0644",
     "owner": "root",
     "source": "puppet:///le_certs/kafka1.internal.staging.swh.network/chain.pem"...
*******************************************
+ File[/etc/ssl/certs/letsencrypt/kafka1.internal.staging.swh.network/fullchain.pem] =>
   parameters =>
     "ensure": "present",
     "group": "root",
     "mode": "0644",
     "owner": "root",
     "source": "puppet:///le_certs/kafka1.internal.staging.swh.network/fullchain....
*******************************************
+ File[/etc/ssl/certs/letsencrypt/kafka1.internal.staging.swh.network/privkey.pem] =>
   parameters =>
     "ensure": "present",
     "group": "root",
     "mode": "0600",
     "owner": "root",
     "source": "puppet:///le_certs/kafka1.internal.staging.swh.network/privkey.pe...
*******************************************
+ File[/etc/ssl/certs/letsencrypt/kafka1.internal.staging.swh.network] =>
   parameters =>
     "ensure": "directory",
     "group": "root",
     "mode": "0755",
     "owner": "root"
*******************************************
+ File[/etc/ssl/certs/letsencrypt] =>
   parameters =>
     "ensure": "directory",
     "group": "root",
     "mode": "0755",
     "owner": "root",
     "purge": true,
     "recurse": true
*******************************************
+ File[/etc/systemd/system/kafka.service.d/exitcode.conf] =>
   parameters =>
     "content": "[Service]\nSuccessExitStatus=143\n",
     "ensure": "file",
     "group": "root",
     "mode": "0444",
     "notify": [ "Class[Systemd::Systemctl::Daemon_reload]" ],
     "owner": "root",
     "selinux_ignore_defaults": false,
     "show_diff": true
*******************************************
+ File[/etc/systemd/system/kafka.service.d/killmode.conf] =>
   parameters =>
     "content": "[Service]\nKillMode=mixed\n",
     "ensure": "file",
     "group": "root",
     "mode": "0444",
     "notify": [ "Class[Systemd::Systemctl::Daemon_reload]" ],
     "owner": "root",
     "selinux_ignore_defaults": false,
     "show_diff": true
*******************************************
+ File[/etc/systemd/system/kafka.service.d/restart.conf] =>
   parameters =>
     "content": "[Service]\nRestart=on-failure\nRestartSec=5\n",
     "ensure": "file",
     "group": "root",
     "mode": "0444",
     "notify": [ "Class[Systemd::Systemctl::Daemon_reload]" ],
     "owner": "root",
     "selinux_ignore_defaults": false,
     "show_diff": true
*******************************************
+ File[/etc/systemd/system/kafka.service.d/stop-timeout.conf] =>
   parameters =>
     "content": "[Service]\nTimeoutStopSec=infinity\n",
     "ensure": "file",
     "group": "root",
     "mode": "0444",
     "notify": [ "Class[Systemd::Systemctl::Daemon_reload]" ],
     "owner": "root",
     "selinux_ignore_defaults": false,
     "show_diff": true
*******************************************
+ File[/etc/systemd/system/kafka.service.d] =>
   parameters =>
     "ensure": "directory",
     "group": "root",
     "owner": "root",
     "purge": true,
     "recurse": true,
     "selinux_ignore_defaults": false
*******************************************
+ File[/etc/systemd/system/kafka.service] =>
   parameters =>
     "content": "[Unit]\nDescription=Apache Kafka server (broker)\nDocumentation=...
     "ensure": "file",
     "mode": "0644",
     "notify": [ "Service[kafka]" ]
*******************************************
+ File[/etc/zookeeper/conf/environment] =>
   parameters =>
     "content": "NAME=zookeeper\nZOOCFGDIR=/etc/zookeeper/conf\n\n# TODO this is ...
     "group": "zookeeper",
     "mode": "0644",
     "notify": [ "Service[zookeeper]" ],
     "owner": "zookeeper"
*******************************************
+ File[/etc/zookeeper/conf/log4j.properties] =>
   parameters =>
     "content": "# Copyright 2012 The Apache Software Foundation\n#\n# Licensed t...
     "group": "zookeeper",
     "mode": "0644",
     "notify": [ "Service[zookeeper]" ],
     "owner": "zookeeper"
*******************************************
+ File[/etc/zookeeper/conf/myid] =>
   parameters =>
     "content": "1\n",
     "ensure": "file",
     "group": "zookeeper",
     "mode": "0644",
     "notify": [ "Service[zookeeper]" ],
     "owner": "zookeeper"
*******************************************
+ File[/etc/zookeeper/conf/zoo.cfg] =>
   parameters =>
     "content": "# http://hadoop.apache.org/zookeeper/docs/current/zookeeperAdmin...
     "group": "zookeeper",
     "mode": "0644",
     "notify": [ "Service[zookeeper]" ],
     "owner": "zookeeper"
*******************************************
+ File[/etc/zookeeper/conf] =>
   parameters =>
     "ensure": "directory",
     "group": "zookeeper",
     "mode": "0644",
     "owner": "zookeeper",
     "recurse": true
*******************************************
+ File[/opt/kafka/config/log4j.properties] =>
   parameters =>
     "content": "# Licensed to the Apache Software Foundation (ASF) under one or ...
     "ensure": "file",
     "group": "kafka",
     "mode": "0644",
     "notify": "Service[kafka]",
     "owner": "kafka"
*******************************************
+ File[/opt/kafka/config/server.properties] =>
   parameters =>
     "content": "#\n# Note: This file is managed by Puppet.\n#\n# See: http://kaf...
     "ensure": "file",
     "group": "kafka",
     "mode": "0644",
     "notify": "Service[kafka]",
     "owner": "kafka"
*******************************************
+ File[/opt/prometheus-jmx-exporter/jmx_prometheus_javaagent-0.17.2.jar] =>
   parameters =>
     "ensure": "present",
     "group": "root",
     "owner": "root",
     "source": "https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_j...
*******************************************
+ File[/opt/prometheus-jmx-exporter/kafka.yml] =>
   parameters =>
     "content": "# Fetched from https://github.com/prometheus/jmx_exporter exampl...
     "group": "root",
     "mode": "0644",
     "owner": "root"
*******************************************
+ File[/opt/prometheus-jmx-exporter] =>
   parameters =>
     "ensure": "directory",
     "group": "root",
     "mode": "0644",
     "owner": "root"
*******************************************
+ File[/srv/kafka/logdir] =>
   parameters =>
     "ensure": "directory",
     "group": "kafka",
     "mode": "0750",
     "owner": "kafka"
*******************************************
+ File[/var/lib/zookeeper/myid] =>
   parameters =>
     "ensure": "link",
     "target": "/etc/zookeeper/conf/myid"
*******************************************
+ File[/var/lib/zookeeper] =>
   parameters =>
     "ensure": "directory",
     "group": "zookeeper",
     "mode": "0644",
     "owner": "zookeeper",
     "recurse": false
*******************************************
+ File[/var/log/zookeeper] =>
   parameters =>
     "ensure": "directory",
     "group": "zookeeper",
     "mode": "0644",
     "notify": [ "Service[zookeeper]" ],
     "owner": "zookeeper",
     "recurse": false
*******************************************
+ Group[zookeeper] =>
   parameters =>
     "ensure": "present",
     "system": false
*******************************************
+ Java_ks[kafka:broker] =>
   parameters =>
     "certificate": "/etc/ssl/certs/letsencrypt/kafka1.internal.staging.swh.netwo...
     "ensure": "latest",
     "name": "kafka1.internal.staging.swh.network",
     "notify": [ "Exec[kafka-reload-tls:EXTERNAL]", "Exec[kafka-reload-tls:INTERNAL]" ],
     "password": "HOmB5OVJGr9145aC",
     "private_key": "/etc/ssl/certs/letsencrypt/kafka1.internal.staging.swh.netwo...
     "target": "/opt/kafka/config/broker.ks",
     "trustcacerts": true
*******************************************
- Notify[Kafka broker misconfigured: found in 0 clusters: []]
*******************************************
- Notify[Zookeeper node misconfigured: found in 0 clusters: []]
*******************************************
+ Package[zookeeper] =>
   parameters =>
     "ensure": "present"
*******************************************
+ Package[zookeeperd] =>
   parameters =>
     "ensure": "present"
*******************************************
+ Profile::Letsencrypt::Certificate[kafka1.internal.staging.swh.network] =>
   parameters =>
     "basename": "kafka1.internal.staging.swh.network",
     "privkey_group": "root",
     "privkey_mode": "0600",
     "privkey_owner": "root",
     "source_cert": "kafka1.internal.staging.swh.network"
*******************************************
+ Profile::Prometheus::Export_scrape_config[kafka] =>
   parameters =>
     "job": "kafka",
     "labels": { "cluster": "rocquencourt_staging" },
     "scrape_interval": "1m",
     "scrape_timeout": "45s",
     "target": "192.168.130.201:7071"
*******************************************
+ Service[kafka] =>
   parameters =>
     "enable": true,
     "ensure": "running",
     "hasrestart": true,
     "hasstatus": true
*******************************************
+ Service[zookeeper] =>
   parameters =>
     "enable": true,
     "ensure": "running",
     "hasrestart": true,
     "hasstatus": true,
     "provider": "systemd"
*******************************************
+ Systemd::Dropin_file[kafka/exitcode.conf] =>
   parameters =>
     "content": "[Service]\nSuccessExitStatus=143\n",
     "daemon_reload": "lazy",
     "ensure": "present",
     "filename": "exitcode.conf",
     "group": "root",
     "mode": "0444",
     "owner": "root",
     "path": "/etc/systemd/system",
     "selinux_ignore_defaults": false,
     "show_diff": true,
     "unit": "kafka.service"
*******************************************
+ Systemd::Dropin_file[kafka/killmode.conf] =>
   parameters =>
     "content": "[Service]\nKillMode=mixed\n",
     "daemon_reload": "lazy",
     "ensure": "present",
     "filename": "killmode.conf",
     "group": "root",
     "mode": "0444",
     "owner": "root",
     "path": "/etc/systemd/system",
     "selinux_ignore_defaults": false,
     "show_diff": true,
     "unit": "kafka.service"
*******************************************
+ Systemd::Dropin_file[kafka/restart.conf] =>
   parameters =>
     "content": "[Service]\nRestart=on-failure\nRestartSec=5\n",
     "daemon_reload": "lazy",
     "ensure": "present",
     "filename": "restart.conf",
     "group": "root",
     "mode": "0444",
     "owner": "root",
     "path": "/etc/systemd/system",
     "selinux_ignore_defaults": false,
     "show_diff": true,
     "unit": "kafka.service"
*******************************************
+ Systemd::Dropin_file[kafka/stop-timeout.conf] =>
   parameters =>
     "content": "[Service]\nTimeoutStopSec=infinity\n",
     "daemon_reload": "lazy",
     "ensure": "present",
     "filename": "stop-timeout.conf",
     "group": "root",
     "mode": "0444",
     "owner": "root",
     "path": "/etc/systemd/system",
     "selinux_ignore_defaults": false,
     "show_diff": true,
     "unit": "kafka.service"
*******************************************
+ User[zookeeper] =>
   parameters =>
     "comment": "Zookeeper",
     "ensure": "present",
     "gid": "zookeeper",
     "home": "/var/lib/zookeeper",
     "shell": "/bin/false",
     "system": false
*******************************************
*** End octocatalog-diff on kafka1.internal.staging.swh.network
Found host kafka1.internal.staging.swh.network
Cloning into '/tmp/swh-ocd.H0HFEMna/swh-site'...
done.
branch 'staging_update_kafka_cluster' set up to track 'origin/staging_update_kafka_cluster'.
Switched to a new branch 'staging_update_kafka_cluster'
Cloning into '/tmp/swh-ocd.H0HFEMna/environments/production/data/private'...
done.
Cloning into '/tmp/swh-ocd.H0HFEMna/environments/staging_update_kafka_cluster/data/private'...
done.
*** Running octocatalog-diff on host kafka1.internal.staging.swh.network
I, [2023-03-28T17:00:42.124483 #1167123]  INFO -- : Catalogs compiled for kafka1.internal.staging.swh.network
I, [2023-03-28T17:00:42.569688 #1167123]  INFO -- : Diffs computed for kafka1.internal.staging.swh.network
diff origin/production/kafka1.internal.staging.swh.network current/kafka1.internal.staging.swh.network
- Anchor[zookeeper::end]
- Anchor[zookeeper::install::begin]
- Anchor[zookeeper::install::end]
- Anchor[zookeeper::install::intermediate]
- Anchor[zookeeper::start]
Apt::Setting[list-backports] =>
   parameters =>
     content =>
      @@ -1,3 +1,3 @@
       # This file is managed by Puppet. DO NOT EDIT.
       # backports
      -deb http://deb.debian.org/debian/ bullseye-backports main
      +deb http://deb.debian.org/debian/ bullseye-backports main contrib non-free
Apt::Setting[list-debian-security] =>
   parameters =>
     content =>
      @@ -1,3 +1,3 @@
       # This file is managed by Puppet. DO NOT EDIT.
       # debian-security
      -deb http://deb.debian.org/debian-security/ bullseye-security main
      +deb http://deb.debian.org/debian-security/ bullseye-security main contrib non-free
Apt::Setting[list-debian-updates] =>
   parameters =>
     content =>
      @@ -1,3 +1,3 @@
       # This file is managed by Puppet. DO NOT EDIT.
       # debian-updates
      -deb http://deb.debian.org/debian/ bullseye-updates main
      +deb http://deb.debian.org/debian/ bullseye-updates main contrib non-free
Apt::Setting[list-debian] =>
   parameters =>
     content =>
      @@ -1,3 +1,3 @@
       # This file is managed by Puppet. DO NOT EDIT.
       # debian
      -deb http://deb.debian.org/debian/ bullseye main
      +deb http://deb.debian.org/debian/ bullseye main contrib non-free
[2]
octo-diff storage1.staging
$ $SWH_PUPPET_ENVIRONMENT_HOME/bin/octocatalog-diff --to staging_update_kafka_cluster storage1.internal.staging.swh.network
Found host storage1.internal.staging.swh.network
Cloning into '/tmp/swh-ocd.5PXjPnfi/swh-site'...
done.
branch 'staging_update_kafka_cluster' set up to track 'origin/staging_update_kafka_cluster'.
Switched to a new branch 'staging_update_kafka_cluster'
Cloning into '/tmp/swh-ocd.5PXjPnfi/environments/production/data/private'...
done.
Cloning into '/tmp/swh-ocd.5PXjPnfi/environments/staging_update_kafka_cluster/data/private'...
done.
*** Running octocatalog-diff on host storage1.internal.staging.swh.network
I, [2023-04-07T11:52:29.209052 #3281210]  INFO -- : Catalogs compiled for storage1.internal.staging.swh.network
I, [2023-04-07T11:52:29.632775 #3281210]  INFO -- : Diffs computed for storage1.internal.staging.swh.network
diff origin/production/storage1.internal.staging.swh.network current/storage1.internal.staging.swh.network
*******************************************
File[/etc/softwareheritage/journal/backfill.yml] =>
   parameters =>
     content =>
      @@ -9,4 +9,5 @@
        brokers:
        - journal1.internal.staging.swh.network
      +  - journal2.internal.staging.swh.network
        prefix: swh.journal.objects
        client_id: swh.storage.journal_writer.storage1
*******************************************
File[/etc/softwareheritage/storage/indexer.yml] =>
   parameters =>
     content =>
      @@ -8,4 +8,5 @@
        brokers:
        - journal1.internal.staging.swh.network
      +  - journal2.internal.staging.swh.network
        prefix: swh.journal.indexed
        client_id: swh.idx_storage.journal_writer.storage1
*******************************************
File[/etc/softwareheritage/storage/storage.yml] =>
   parameters =>
     content =>
      @@ -11,4 +11,5 @@
        brokers:
        - journal1.internal.staging.swh.network
      +  - journal2.internal.staging.swh.network
        prefix: swh.journal.objects
        client_id: swh.storage.journal_writer.storage1
*******************************************
File[/etc/zookeeper/conf/zoo.cfg] =>
   parameters =>
     content =>
      @@ -31,4 +31,5 @@
       #server.2=zookeeper2:2888:3888
       #server.3=zookeeper3:2888:3888
      +server.1=kafka1.internal.staging.swh.network:2888:3888
       server.2=storage1.internal.staging.swh.network:2888:3888
       _
*******************************************
File[/opt/kafka/config/server.properties] =>
   parameters =>
     content =>
      @@ -34,5 +34,5 @@
       ssl.keystore.location=/opt/kafka/config/broker.ks
       ssl.keystore.password=LBXrf6suU4cedMtM
      -super.users=User:broker-storage1.internal.staging.swh.network;User:swh-admin-olasd;User:ANONYMOUS
      +super.users=User:broker-kafka1.internal.staging.swh.network;User:broker-storage1.internal.staging.swh.network;User:swh-admin-olasd;User:ANONYMOUS
       zookeeper.connect=journal1.internal.staging.swh.network:2181/kafka/softwareheritage
       zookeeper.session.timeout.ms=18000
*******************************************
*** End octocatalog-diff on storage1.internal.staging.swh.network
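The zoo.cfg change above also adds kafka1 to the zookeeper ensemble known to storage1. As a rough post-deployment check (client port 2181 taken from zookeeper.connect above; on recent zookeeper releases the srvr command may need to be allowed through 4lw.commands.whitelist), each member should report its mode (leader or follower):

$ echo srvr | nc kafka1.internal.staging.swh.network 2181
$ echo srvr | nc storage1.internal.staging.swh.network 2181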
Depends on !612 (merged)