author    Fabien Boucher <fboucher@redhat.com>    2020-03-16 10:34:44 +0100
committer Fabien Boucher <fboucher@redhat.com>    2020-03-16 10:34:48 +0100
commit    330e8cc98a6ac1f78fe51d72f371c3fc1ed91624 (patch)
tree      94dbefb5bff1bd23f9d5c9a1e8ec9450ab23c99f
parent    e1f915262f36efa0d4be741f60903028253b531d (diff)
Bump to last version
https://softwarefactory-project.io/r/#/c/17779/

Change-Id: Ic5fabdafc99b0c2f679afe2822fcd9ab52dfbbc6
-rw-r--r-- playbooks/chart-testing/pre.yaml | 5
-rw-r--r-- playbooks/chart-testing/run.yaml | 4
-rw-r--r-- playbooks/go/pre.yaml | 3
-rw-r--r-- playbooks/go/run.yaml | 4
-rw-r--r-- playbooks/helm/post.yaml | 3
-rw-r--r-- playbooks/helm/pre.yaml | 5
-rw-r--r-- playbooks/helm/run.yaml | 8
-rw-r--r-- playbooks/markdownlint/post.yaml | 4
-rw-r--r-- playbooks/markdownlint/pre.yaml | 5
-rw-r--r-- playbooks/markdownlint/run.yaml | 5
-rw-r--r-- playbooks/tox/molecule-vars/redhat-7.yaml | 6
-rw-r--r-- playbooks/tox/molecule-vars/redhat-8.yaml | 6
-rw-r--r-- playbooks/tox/pre-molecule.yaml | 44
-rw-r--r-- roles/add-build-sshkey/tasks/create-key-and-replace.yaml | 2
-rw-r--r-- roles/ara-report/tasks/main.yaml | 4
-rw-r--r-- roles/build-container-image/README.rst | 3
-rw-r--r-- roles/build-container-image/common.rst | 147
-rw-r--r-- roles/build-container-image/defaults/main.yaml | 2
-rw-r--r-- roles/build-container-image/tasks/build.yaml | 50
-rw-r--r-- roles/build-container-image/tasks/main.yaml | 42
-rw-r--r-- roles/build-container-image/tasks/push.yaml | 12
-rw-r--r-- roles/build-docker-image/common.rst | 11
-rw-r--r-- roles/build-docker-image/tasks/build.yaml | 55
-rw-r--r-- roles/build-docker-image/tasks/main.yaml | 19
-rw-r--r-- roles/build-releasenotes/tasks/main.yaml | 2
-rw-r--r-- roles/chart-testing/README.rst | 19
-rw-r--r-- roles/chart-testing/defaults/main.yaml | 2
-rw-r--r-- roles/chart-testing/tasks/main.yaml | 4
-rw-r--r-- roles/collect-container-logs/README.rst | 8
-rw-r--r-- roles/collect-container-logs/defaults/main.yaml | 1
-rw-r--r-- roles/collect-container-logs/tasks/main.yaml | 26
-rw-r--r-- roles/configure-mirrors/README.rst | 7
-rw-r--r-- roles/configure-mirrors/defaults/main.yaml | 3
-rw-r--r-- roles/configure-mirrors/handlers/main.yaml | 16
-rw-r--r-- roles/configure-mirrors/tasks/main.yaml | 5
-rw-r--r-- roles/configure-mirrors/tasks/mirror.yaml | 11
-rw-r--r-- roles/configure-mirrors/tasks/mirror/CentOS-7.yaml (renamed from roles/configure-mirrors/tasks/mirror/CentOS.yaml) | 4
-rw-r--r-- roles/configure-mirrors/tasks/mirror/CentOS-8.yaml | 29
-rw-r--r-- roles/configure-mirrors/tasks/mirror/Debian.yaml | 2
-rw-r--r-- roles/configure-mirrors/tasks/mirror/Fedora.yaml | 4
-rw-r--r-- roles/configure-mirrors/tasks/mirror/Suse.yaml | 10
-rw-r--r-- roles/configure-mirrors/tasks/mirror/Ubuntu.yaml | 2
-rw-r--r-- roles/configure-mirrors/templates/.pydistutils.cfg.j2 | 4
-rw-r--r-- roles/configure-mirrors/templates/apt/etc/apt/apt.conf.d/99unauthenticated.j2 (renamed from roles/configure-mirrors/templates/etc/apt/apt.conf.d/99unauthenticated.j2) | 0
-rw-r--r-- roles/configure-mirrors/templates/apt/etc/apt/sources.list.d/backports.list.j2 (renamed from roles/configure-mirrors/templates/etc/apt/sources.list.d/backports.list.j2) | 0
-rw-r--r-- roles/configure-mirrors/templates/apt/etc/apt/sources.list.d/default.list.j2 (renamed from roles/configure-mirrors/templates/etc/apt/sources.list.d/default.list.j2) | 0
-rw-r--r-- roles/configure-mirrors/templates/apt/etc/apt/sources.list.d/security.list.j2 (renamed from roles/configure-mirrors/templates/etc/apt/sources.list.d/security.list.j2) | 0
-rw-r--r-- roles/configure-mirrors/templates/apt/etc/apt/sources.list.d/updates.list.j2 (renamed from roles/configure-mirrors/templates/etc/apt/sources.list.d/updates.list.j2) | 0
-rw-r--r-- roles/configure-mirrors/templates/apt/etc/apt/sources.list.j2 (renamed from roles/configure-mirrors/templates/etc/apt/sources.list.j2) | 0
-rw-r--r-- roles/configure-mirrors/templates/centos7/etc/yum.repos.d/CentOS-Base.repo.j2 (renamed from roles/configure-mirrors/templates/etc/yum.repos.d/CentOS-Base.repo.j2) | 0
-rw-r--r-- roles/configure-mirrors/templates/centos7/etc/yum.repos.d/epel.repo.j2 (renamed from roles/configure-mirrors/templates/etc/yum.repos.d/epel.repo.j2) | 0
-rw-r--r-- roles/configure-mirrors/templates/centos8/etc/yum.repos.d/CentOS-AppStream.repo.j2 | 7
-rw-r--r-- roles/configure-mirrors/templates/centos8/etc/yum.repos.d/CentOS-Base.repo.j2 | 7
-rw-r--r-- roles/configure-mirrors/templates/centos8/etc/yum.repos.d/CentOS-Extras.repo.j2 | 7
-rw-r--r-- roles/configure-mirrors/templates/centos8/etc/yum.repos.d/CentOS-HA.repo.j2 | 7
-rw-r--r-- roles/configure-mirrors/templates/centos8/etc/yum.repos.d/CentOS-PowerTools.repo.j2 | 7
-rw-r--r-- roles/configure-mirrors/templates/centos8/etc/yum.repos.d/epel.repo.j2 | 21
-rw-r--r-- roles/configure-mirrors/templates/etc/pip.conf.j2 | 2
-rw-r--r-- roles/configure-mirrors/templates/fedora/etc/yum.repos.d/fedora-updates.repo.j2 (renamed from roles/configure-mirrors/templates/etc/yum.repos.d/fedora-updates.repo.j2) | 6
-rw-r--r-- roles/configure-mirrors/templates/fedora/etc/yum.repos.d/fedora.repo.j2 (renamed from roles/configure-mirrors/templates/etc/yum.repos.d/fedora.repo.j2) | 0
-rw-r--r-- roles/configure-mirrors/templates/suse/etc/zypp/repos.d/repo-oss.repo.j2 (renamed from roles/configure-mirrors/templates/etc/zypp/repos.d/repo-oss.repo.j2) | 0
-rw-r--r-- roles/configure-mirrors/templates/suse/etc/zypp/repos.d/repo-update.repo.j2 (renamed from roles/configure-mirrors/templates/etc/zypp/repos.d/repo-update.repo.j2) | 0
-rw-r--r-- roles/emit-job-header/tasks/main.yaml | 3
-rw-r--r-- roles/enable-netconsole/README.rst | 32
-rw-r--r-- roles/enable-netconsole/tasks/main.yaml | 82
-rw-r--r-- roles/ensure-bazelisk/README.rst | 34
-rw-r--r-- roles/ensure-bazelisk/defaults/main.yaml | 6
-rw-r--r-- roles/ensure-bazelisk/tasks/main.yaml | 25
-rw-r--r-- roles/ensure-chart-testing/README.rst | 7
-rw-r--r-- roles/ensure-chart-testing/defaults/main.yaml | 2
-rw-r--r-- roles/ensure-chart-testing/tasks/main.yaml | 29
-rw-r--r-- roles/ensure-helm/README.rst | 7
-rw-r--r-- roles/ensure-helm/defaults/main.yaml | 2
-rw-r--r-- roles/ensure-helm/tasks/main.yaml | 20
-rw-r--r-- roles/ensure-java/README.rst | 10
-rw-r--r-- roles/ensure-java/defaults/main.yaml | 1
-rw-r--r-- roles/ensure-java/tasks/main.yaml | 5
-rw-r--r-- roles/ensure-markdownlint/README.rst | 1
-rw-r--r-- roles/ensure-markdownlint/tasks/main.yaml | 4
-rw-r--r-- roles/ensure-tox/README.rst | 15
-rw-r--r-- roles/ensure-tox/defaults/main.yml | 2
-rw-r--r-- roles/ensure-tox/tasks/main.yaml | 32
-rw-r--r-- roles/fetch-javascript-tarball/README.rst | 8
-rw-r--r-- roles/fetch-javascript-tarball/defaults/main.yaml | 2
-rw-r--r-- roles/fetch-javascript-tarball/tasks/main.yaml | 30
-rw-r--r-- roles/fetch-markdownlint/README.rst | 9
-rw-r--r-- roles/fetch-markdownlint/defaults/main.yaml | 1
-rw-r--r-- roles/fetch-markdownlint/tasks/main.yaml | 12
-rw-r--r-- roles/fetch-output-openshift/README.rst | 26
-rw-r--r-- roles/fetch-output-openshift/defaults/main.yaml | 3
-rw-r--r-- roles/fetch-output-openshift/tasks/main.yaml | 29
-rw-r--r-- roles/fetch-output-openshift/tasks/rsync.yaml | 19
-rw-r--r-- roles/fetch-sphinx-tarball/tasks/html.yaml | 2
-rw-r--r-- roles/fetch-subunit-output/README.rst | 15
-rw-r--r-- roles/fetch-subunit-output/defaults/main.yaml | 3
-rw-r--r-- roles/fetch-subunit-output/tasks/main.yaml | 30
-rw-r--r-- roles/fetch-subunit-output/tasks/process.yaml | 28
-rw-r--r-- roles/fetch-tox-output/README.rst | 8
-rw-r--r-- roles/fetch-tox-output/defaults/main.yaml | 3
-rw-r--r-- roles/fetch-tox-output/tasks/main.yaml | 22
-rw-r--r-- roles/generate-zuul-manifest/README.rst | 6
-rw-r--r-- roles/generate-zuul-manifest/defaults/main.yaml | 1
-rw-r--r-- roles/generate-zuul-manifest/library/generate_manifest.py | 12
-rw-r--r-- roles/generate-zuul-manifest/library/test_generate_manifest.py | 9
-rw-r--r-- roles/generate-zuul-manifest/tasks/main.yaml | 1
-rw-r--r-- roles/go/README.rst | 24
-rw-r--r-- roles/go/defaults/main.yaml | 3
-rw-r--r-- roles/go/tasks/main.yaml | 13
-rw-r--r-- roles/helm-template/README.rst | 17
-rw-r--r-- roles/helm-template/defaults/main.yaml | 1
-rw-r--r-- roles/helm-template/tasks/main.yaml | 39
-rw-r--r-- roles/install-devstack/templates/local.conf.j2 | 1
-rw-r--r-- roles/install-docker/README.rst | 13
-rw-r--r-- roles/install-docker/defaults/main.yaml | 1
-rw-r--r-- roles/install-docker/tasks/docker-setup.yaml | 34
-rw-r--r-- roles/install-docker/tasks/main.yaml | 15
-rw-r--r-- roles/install-docker/tasks/upstream-package-installation.yaml | 16
-rw-r--r-- roles/install-go/README.rst | 17
-rw-r--r-- roles/install-go/defaults/main.yaml | 4
-rw-r--r-- roles/install-go/tasks/main.yaml | 39
-rw-r--r-- roles/install-go/vars/main.yaml | 6
-rw-r--r-- roles/install-javascript-packages/README.rst | 6
-rw-r--r-- roles/install-javascript-packages/defaults/main.yaml | 2
-rw-r--r-- roles/install-javascript-packages/tasks/main.yaml | 25
-rw-r--r-- roles/install-kubernetes/README.rst | 12
-rw-r--r-- roles/install-kubernetes/defaults/main.yaml | 2
-rw-r--r-- roles/install-kubernetes/tasks/crio.yaml | 40
-rw-r--r-- roles/install-kubernetes/tasks/minikube.yaml | 22
-rw-r--r-- roles/install-kubernetes/templates/resolv.conf.j2 | 3
-rw-r--r-- roles/install-openshift/tasks/main.yaml | 23
-rw-r--r-- roles/install-podman/README.rst | 4
-rw-r--r-- roles/install-podman/tasks/Ubuntu.yaml | 14
-rw-r--r-- roles/install-podman/tasks/default.yaml | 3
-rw-r--r-- roles/install-podman/tasks/main.yaml | 8
-rw-r--r-- roles/markdownlint/README.rst | 8
-rw-r--r-- roles/markdownlint/defaults/main.yaml | 1
-rw-r--r-- roles/markdownlint/tasks/main.yaml | 16
-rw-r--r-- roles/mirror-workspace-git-repos/tasks/main.yaml | 2
-rw-r--r-- roles/multi-node-bridge/tasks/common.yaml | 13
-rw-r--r-- roles/multi-node-bridge/templates/zuul-multi-node-bridge-ovs.repo.j2 | 8
-rw-r--r-- roles/multi-node-bridge/vars/CentOS.yaml | 7
-rw-r--r-- roles/pause-buildset-registry/README.rst | 10
-rw-r--r-- roles/pause-buildset-registry/tasks/main.yaml | 10
-rw-r--r-- roles/persistent-firewall/tasks/main.yaml | 10
-rw-r--r-- roles/persistent-firewall/tasks/persist/Debian.yaml | 2
-rw-r--r-- roles/persistent-firewall/tasks/persist/Suse.yaml | 26
-rw-r--r-- roles/persistent-firewall/tasks/persist/Ubuntu_trusty.yaml | 2
-rw-r--r-- roles/prepare-workspace-openshift/README.rst | 16
-rw-r--r-- roles/prepare-workspace-openshift/defaults/main.yaml | 1
-rw-r--r-- roles/prepare-workspace-openshift/tasks/main.yaml | 4
-rw-r--r-- roles/prepare-workspace-openshift/tasks/rsync.yaml | 17
-rw-r--r-- roles/promote-docker-image/tasks/main.yaml | 6
-rw-r--r-- roles/pull-from-intermediate-registry/tasks/main.yaml | 13
-rw-r--r-- roles/push-to-intermediate-registry/tasks/push-image.yaml | 6
-rw-r--r-- roles/run-buildset-registry/README.rst | 20
-rw-r--r-- roles/run-buildset-registry/defaults/main.yaml | 2
-rw-r--r-- roles/run-buildset-registry/tasks/main.yaml | 114
-rw-r--r-- roles/run-buildset-registry/templates/registry.yaml.j2 | 14
-rw-r--r-- roles/stage-output/README.rst | 7
-rw-r--r-- roles/stage-output/defaults/main.yaml | 2
-rw-r--r-- roles/test-upload-logs-swift/README.rst | 94
-rw-r--r-- roles/test-upload-logs-swift/__init__.py (renamed from roles/upload-afs/__init__.py) | 0
-rw-r--r-- roles/test-upload-logs-swift/defaults/main.yaml | 6
-rw-r--r-- roles/test-upload-logs-swift/library/__init__.py (renamed from roles/upload-afs/library/__init__.py) | 0
-rw-r--r-- roles/test-upload-logs-swift/library/delete_container.py | 65
-rw-r--r-- roles/test-upload-logs-swift/library/test-fixtures/artifacts/foo.tar.gz | bin 0 -> 115 bytes
-rw-r--r-- roles/test-upload-logs-swift/library/test-fixtures/artifacts/foo.tgz | bin 0 -> 115 bytes
-rw-r--r-- roles/test-upload-logs-swift/library/test-fixtures/download-logs-sample.sh | 84
-rw-r--r-- roles/test-upload-logs-swift/library/test-fixtures/links/controller/service_log.txt | 0
-rw-r--r-- roles/test-upload-logs-swift/library/test-fixtures/links/job-output.json | 1
-rw-r--r-- roles/test-upload-logs-swift/library/test-fixtures/links/symlink_loop/placeholder | 0
-rw-r--r-- roles/test-upload-logs-swift/library/test-fixtures/logs/controller/compressed.gz | bin 0 -> 31 bytes
-rw-r--r-- roles/test-upload-logs-swift/library/test-fixtures/logs/controller/cpu-load.svg | 3
-rw-r--r-- roles/test-upload-logs-swift/library/test-fixtures/logs/controller/journal.xz | bin 0 -> 32 bytes
-rw-r--r-- roles/test-upload-logs-swift/library/test-fixtures/logs/controller/service_log.txt | 0
-rw-r--r-- roles/test-upload-logs-swift/library/test-fixtures/logs/controller/subdir/foo::3.txt | 2
-rw-r--r-- roles/test-upload-logs-swift/library/test-fixtures/logs/controller/subdir/subdir.txt | 0
-rw-r--r-- roles/test-upload-logs-swift/library/test-fixtures/logs/controller/syslog | 0
-rw-r--r-- roles/test-upload-logs-swift/library/test-fixtures/logs/job-output.json | 1
-rw-r--r-- roles/test-upload-logs-swift/library/test-fixtures/logs/zuul-info/inventory.yaml | 0
-rw-r--r-- roles/test-upload-logs-swift/library/test-fixtures/logs/zuul-info/zuul-info.controller.txt | 0
-rw-r--r-- roles/test-upload-logs-swift/library/test_zuul_swift_upload.py | 397
-rwxr-xr-x roles/test-upload-logs-swift/library/zuul_swift_upload.py | 981
-rw-r--r-- roles/test-upload-logs-swift/tasks/main.yaml | 46
-rw-r--r-- roles/test-upload-logs-swift/templates/download-logs.sh.j2 | 57
-rw-r--r-- roles/tox/library/test-constraints.txt | 2
-rw-r--r-- roles/tox/library/test_tox_install_sibling_packages.py | 63
-rw-r--r-- roles/tox/library/tox_install_sibling_packages.py | 3
-rw-r--r-- roles/upload-afs-roots/README.rst | 26
-rw-r--r-- roles/upload-afs-roots/__init__.py | 0
-rw-r--r-- roles/upload-afs-roots/defaults/main.yaml (renamed from roles/upload-afs/defaults/main.yaml) | 0
-rw-r--r-- roles/upload-afs-roots/library/__init__.py | 0
-rw-r--r-- roles/upload-afs-roots/library/zuul_afs.py (renamed from roles/upload-afs/library/zuul_afs.py) | 0
-rw-r--r-- roles/upload-afs-roots/tasks/main.yaml (renamed from roles/upload-afs/tasks/main.yaml) | 0
-rw-r--r-- roles/upload-afs-synchronize/README.rst | 19
-rw-r--r-- roles/upload-afs-synchronize/defaults/main.yaml | 2
-rw-r--r-- roles/upload-afs-synchronize/tasks/main.yaml | 18
-rw-r--r-- roles/upload-afs/README.rst | 11
-rw-r--r-- roles/upload-docker-image/tasks/main.yaml | 6
-rw-r--r-- roles/upload-docker-image/tasks/push.yaml | 8
-rw-r--r-- roles/upload-logs-gcs/README.rst | 63
-rw-r--r-- roles/upload-logs-gcs/__init__.py | 0
-rw-r--r-- roles/upload-logs-gcs/defaults/main.yaml | 3
-rw-r--r-- roles/upload-logs-gcs/library/__init__.py | 0
-rw-r--r-- roles/upload-logs-gcs/library/test-fixtures/artifacts/foo.tar.gz | bin 0 -> 115 bytes
-rw-r--r-- roles/upload-logs-gcs/library/test-fixtures/artifacts/foo.tgz | bin 0 -> 115 bytes
-rw-r--r-- roles/upload-logs-gcs/library/test-fixtures/links/controller/service_log.txt | 0
-rw-r--r-- roles/upload-logs-gcs/library/test-fixtures/links/job-output.json | 1
-rw-r--r-- roles/upload-logs-gcs/library/test-fixtures/links/symlink_loop/placeholder | 0
-rw-r--r-- roles/upload-logs-gcs/library/test-fixtures/logs/controller/compressed.gz | bin 0 -> 31 bytes
-rw-r--r-- roles/upload-logs-gcs/library/test-fixtures/logs/controller/cpu-load.svg | 3
-rw-r--r-- roles/upload-logs-gcs/library/test-fixtures/logs/controller/journal.xz | bin 0 -> 32 bytes
-rw-r--r-- roles/upload-logs-gcs/library/test-fixtures/logs/controller/service_log.txt | 0
-rw-r--r-- roles/upload-logs-gcs/library/test-fixtures/logs/controller/subdir/foo::3.txt | 2
-rw-r--r-- roles/upload-logs-gcs/library/test-fixtures/logs/controller/subdir/subdir.txt | 0
-rw-r--r-- roles/upload-logs-gcs/library/test-fixtures/logs/controller/syslog | 0
-rw-r--r-- roles/upload-logs-gcs/library/test-fixtures/logs/job-output.json | 1
-rw-r--r-- roles/upload-logs-gcs/library/test-fixtures/logs/zuul-info/inventory.yaml | 0
-rw-r--r-- roles/upload-logs-gcs/library/test-fixtures/logs/zuul-info/zuul-info.controller.txt | 0
-rw-r--r-- roles/upload-logs-gcs/library/test_zuul_google_storage_upload.py | 406
-rwxr-xr-x roles/upload-logs-gcs/library/zuul_google_storage_upload.py | 862
-rw-r--r-- roles/upload-logs-gcs/tasks/main.yaml | 37
-rw-r--r-- roles/upload-logs-swift/library/test-fixtures/logs/controller/subdir/foo::3.txt | 2
-rw-r--r-- roles/upload-logs-swift/library/test_zuul_swift_upload.py | 47
-rwxr-xr-x roles/upload-logs-swift/library/zuul_swift_upload.py | 159
-rw-r--r-- roles/upload-logs/README.rst | 7
-rw-r--r-- roles/upload-logs/tasks/main.yaml | 14
-rw-r--r-- roles/upload-logs/vars/main.yaml | 1
-rw-r--r-- roles/use-buildset-registry/README.rst | 16
-rw-r--r-- roles/use-buildset-registry/__init__.py | 0
-rw-r--r-- roles/use-buildset-registry/defaults/main.yaml | 4
-rw-r--r-- roles/use-buildset-registry/library/__init__.py | 0
-rw-r--r-- roles/use-buildset-registry/library/modify_registries_conf.py | 77
-rw-r--r-- roles/use-buildset-registry/module_utils/pytoml.py | 551
-rw-r--r-- roles/use-buildset-registry/module_utils/remarshal.py | 418
-rw-r--r-- roles/use-buildset-registry/tasks/main.yaml | 54
-rw-r--r-- roles/use-buildset-registry/tasks/user-config.yaml | 10
-rw-r--r-- roles/use-buildset-registry/vars/CentOS.yaml | 2
-rw-r--r-- roles/use-buildset-registry/vars/default.yaml | 2
-rw-r--r-- roles/validate-host/library/zuul_debug_info.py | 4
-rw-r--r-- roles/validate-zone-db/tasks/find.yaml | 4
-rw-r--r-- roles/validate-zone-db/tasks/main.yaml | 4
-rw-r--r-- zuul.d/general-jobs.yaml | 1
-rw-r--r-- zuul.d/go-jobs.yaml | 92
-rw-r--r-- zuul.d/helm-jobs.yaml | 14
-rw-r--r-- zuul.d/js-jobs.yaml | 28
-rw-r--r-- zuul.d/python-jobs.yaml | 41
247 files changed, 6502 insertions, 337 deletions
diff --git a/playbooks/chart-testing/pre.yaml b/playbooks/chart-testing/pre.yaml
new file mode 100644
index 0000000..2a8a6b6
--- /dev/null
+++ b/playbooks/chart-testing/pre.yaml
@@ -0,0 +1,5 @@
+---
+- hosts: all
+ roles:
+ - ensure-helm
+ - ensure-chart-testing
\ No newline at end of file
diff --git a/playbooks/chart-testing/run.yaml b/playbooks/chart-testing/run.yaml
new file mode 100644
index 0000000..a544a27
--- /dev/null
+++ b/playbooks/chart-testing/run.yaml
@@ -0,0 +1,4 @@
+---
+- hosts: all
+ roles:
+ - chart-testing
diff --git a/playbooks/go/pre.yaml b/playbooks/go/pre.yaml
new file mode 100644
index 0000000..9ab00c0
--- /dev/null
+++ b/playbooks/go/pre.yaml
@@ -0,0 +1,3 @@
+- hosts: all
+ roles:
+ - install-go
diff --git a/playbooks/go/run.yaml b/playbooks/go/run.yaml
new file mode 100644
index 0000000..e129600
--- /dev/null
+++ b/playbooks/go/run.yaml
@@ -0,0 +1,4 @@
+- hosts: all
+ roles:
+ - revoke-sudo
+ - go
diff --git a/playbooks/helm/post.yaml b/playbooks/helm/post.yaml
new file mode 100644
index 0000000..5242b7b
--- /dev/null
+++ b/playbooks/helm/post.yaml
@@ -0,0 +1,3 @@
+- hosts: all
+ roles:
+ - collect-container-logs
diff --git a/playbooks/helm/pre.yaml b/playbooks/helm/pre.yaml
new file mode 100644
index 0000000..4394152
--- /dev/null
+++ b/playbooks/helm/pre.yaml
@@ -0,0 +1,5 @@
+- hosts: all
+ roles:
+ - role: clear-firewall
+ - role: install-kubernetes
+ - role: ensure-helm
diff --git a/playbooks/helm/run.yaml b/playbooks/helm/run.yaml
new file mode 100644
index 0000000..829ded9
--- /dev/null
+++ b/playbooks/helm/run.yaml
@@ -0,0 +1,8 @@
+- hosts: all
+ tasks:
+ - include_role:
+ name: helm-template
+ vars:
+ helm_release_name: "{{ item.key }}"
+ helm_chart: "{{ item.value }}"
+ loop: "{{ helm_charts | dict2items }}"
diff --git a/playbooks/markdownlint/post.yaml b/playbooks/markdownlint/post.yaml
new file mode 100644
index 0000000..90b0517
--- /dev/null
+++ b/playbooks/markdownlint/post.yaml
@@ -0,0 +1,4 @@
+- name: Get markdownlint report
+ hosts: all
+ roles:
+ - fetch-markdownlint
diff --git a/playbooks/markdownlint/pre.yaml b/playbooks/markdownlint/pre.yaml
new file mode 100644
index 0000000..c8180f9
--- /dev/null
+++ b/playbooks/markdownlint/pre.yaml
@@ -0,0 +1,5 @@
+- name: Ensure Markdownlint is installed via NPM
+ hosts: all
+ roles:
+ - install-nodejs
+ - ensure-markdownlint
diff --git a/playbooks/markdownlint/run.yaml b/playbooks/markdownlint/run.yaml
new file mode 100644
index 0000000..d0f51f2
--- /dev/null
+++ b/playbooks/markdownlint/run.yaml
@@ -0,0 +1,5 @@
+- name: Run markdownlint
+ hosts: all
+ roles:
+ - revoke-sudo
+ - markdownlint
diff --git a/playbooks/tox/molecule-vars/redhat-7.yaml b/playbooks/tox/molecule-vars/redhat-7.yaml
new file mode 100644
index 0000000..e4de3ed
--- /dev/null
+++ b/playbooks/tox/molecule-vars/redhat-7.yaml
@@ -0,0 +1,6 @@
+---
+
+_tox_molecule_packages:
+ - python-devel
+ - libselinux-python
+ - gcc
diff --git a/playbooks/tox/molecule-vars/redhat-8.yaml b/playbooks/tox/molecule-vars/redhat-8.yaml
new file mode 100644
index 0000000..37b996f
--- /dev/null
+++ b/playbooks/tox/molecule-vars/redhat-8.yaml
@@ -0,0 +1,6 @@
+---
+
+_tox_molecule_packages:
+ - python3-devel
+ - python3-libselinux
+ - gcc
diff --git a/playbooks/tox/pre-molecule.yaml b/playbooks/tox/pre-molecule.yaml
index 4c221dd..1c3a6be 100644
--- a/playbooks/tox/pre-molecule.yaml
+++ b/playbooks/tox/pre-molecule.yaml
@@ -1,29 +1,27 @@
- hosts: all
+ vars:
+ tox_molecule_packages: "{{ _tox_molecule_packages | default([]) }}"
tasks:
+ - name: Gather variables for each operating system
+ include_vars: "{{ item }}"
+ with_first_found:
+ - skip: true
+ files:
+ - "molecule-vars/{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yaml"
+ - "molecule-vars/{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yaml"
+ - "molecule-vars/{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yaml"
+ - "molecule-vars/{{ ansible_distribution | lower }}.yaml"
+ - "molecule-vars/{{ ansible_os_family | lower }}-{{ ansible_distribution_version.split('.')[0] }}.yaml"
+ - "molecule-vars/{{ ansible_os_family | lower }}.yaml"
+ tags:
+ - always
- # psutil->python-devel
- # psutil->gcc
- # ansible->selinux
- - name: install packages needed by molecule
- when: ansible_os_family == "RedHat" and ansible_lsb.major_release|int >= 8
+ - name: Install packages needed by molecule
become: true
package:
- name:
- - python2-devel
- - python2-libselinux
- - python3-devel
- - python3-libselinux
- - gcc
+ name: "{{ tox_molecule_packages }}"
+ when:
+ - (tox_molecule_packages | length) > 0
- - name: install packages needed by molecule
- when: ansible_os_family == "RedHat" and ansible_lsb.major_release|int < 8
- become: true
- package:
- name:
- - python-devel
- - libselinux-python
- - gcc
-
- - name: install docker
- include_role:
- name: install-docker
+ roles:
+ - role: install-docker
diff --git a/roles/add-build-sshkey/tasks/create-key-and-replace.yaml b/roles/add-build-sshkey/tasks/create-key-and-replace.yaml
index a336ccb..0a5f441 100644
--- a/roles/add-build-sshkey/tasks/create-key-and-replace.yaml
+++ b/roles/add-build-sshkey/tasks/create-key-and-replace.yaml
@@ -1,5 +1,5 @@
- name: Create Temp SSH key
- command: ssh-keygen -t rsa -b 1024 -N '' -C 'zuul-build-sshkey' -f {{ zuul_temp_ssh_key }}
+ command: ssh-keygen -t rsa -N '' -C 'zuul-build-sshkey' -f {{ zuul_temp_ssh_key }}
delegate_to: localhost
run_once: true
diff --git a/roles/ara-report/tasks/main.yaml b/roles/ara-report/tasks/main.yaml
index 4d708a4..d138cff 100644
--- a/roles/ara-report/tasks/main.yaml
+++ b/roles/ara-report/tasks/main.yaml
@@ -59,9 +59,9 @@
command: gzip --recursive --best {{ final_ara_report_path }}
when:
- ara_compress_html | bool
- - not ara_generated | skipped
+ - not ara_generated is skipped
- name: Return ARA report
- when: not ara_generated | skipped
+ when: not ara_generated is skipped
zuul_return:
data:
zuul:
diff --git a/roles/build-container-image/README.rst b/roles/build-container-image/README.rst
new file mode 100644
index 0000000..1cc166a
--- /dev/null
+++ b/roles/build-container-image/README.rst
@@ -0,0 +1,3 @@
+Build one or more container images.
+
+.. include:: ../../roles/build-container-image/common.rst
diff --git a/roles/build-container-image/common.rst b/roles/build-container-image/common.rst
new file mode 100644
index 0000000..b9a3d0e
--- /dev/null
+++ b/roles/build-container-image/common.rst
@@ -0,0 +1,147 @@
+This is one of a collection of roles which are designed to work
+together to build, upload, and promote container images in a gating
+context:
+
+* :zuul:role:`build-container-image`: Build the images.
+
+.. note:: Upload and promote roles are forthcoming.
+
+The :zuul:role:`build-container-image` role is designed to be used in
+`check` and `gate` pipelines and simply builds the images. It can be
+used to verify that the build functions, or it can be followed by the
+use of subsequent roles to upload the images to a registry.
+
+They all accept the same input data, principally a list of
+dictionaries representing the images to build. YAML anchors_ can be
+used to supply the same data to all three jobs.
+
+Use the :zuul:role:`install-docker` or :zuul:role:`install-podman`
+role to install Docker or Podman before using these roles.
+
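+For illustration, a minimal sketch of a job that uses the build role
+(the job name, playbook path, and image name here are hypothetical):
+
+.. code-block:: yaml
+
+   - job:
+       name: example-build-container-image
+       run: playbooks/container-image/run.yaml
+       vars:
+         container_images:
+           - context: .
+             repository: example/image
+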
+**Role Variables**
+
+.. zuul:rolevar:: zuul_work_dir
+ :default: {{ zuul.project.src_dir }}
+
+ The project directory. Serves as the base for
+ :zuul:rolevar:`build-container-image.container_images.context`.
+
+.. zuul:rolevar:: container_filename
+
+ The default container filename to use. Serves as the base for
+ :zuul:rolevar:`build-container-image.container_images.container_filename`.
+ This allows a global override of the container filename, for
+ example when building all images from different folders with
+ similarly named containerfiles.
+
+ If omitted, the default depends on the container command used.
+ Typically, this is ``Dockerfile`` for ``docker`` and
+ ``Containerfile`` (with a fallback on ``Dockerfile``) for
+ ``podman``.
+
+.. zuul:rolevar:: container_command
+ :default: podman
+
+ The command to use when building the image (e.g., ``docker``).
+
+.. zuul:rolevar:: container_registry_credentials
+ :type: dict
+
+ This is only required for the upload and promote roles. This is
+ expected to be a Zuul Secret in dictionary form. Each key is the
+ name of a registry, and its value a dictionary with information
+ about that registry.
+
+ Example:
+
+ .. code-block:: yaml
+
+ container_registry_credentials:
+ quay.io:
+ username: foo
+ password: bar
+
+ .. zuul:rolevar:: [registry name]
+ :type: dict
+
+ Information about a registry. The key is the registry name, and
+ its value a dict as follows:
+
+ .. zuul:rolevar:: username
+
+ The registry username.
+
+ .. zuul:rolevar:: password
+
+ The registry password.
+
+ .. zuul:rolevar:: repository
+
+ Optional; if supplied this is a regular expression which
+ restricts the repositories to which the image may be uploaded. The
+ following example allows projects to upload images to
+ repositories within an organization based on their own names::
+
+ repository: "^myorgname/{{ zuul.project.short_name }}.*"
+
+.. zuul:rolevar:: container_images
+ :type: list
+
+ A list of images to build. Each item in the list should have:
+
+ .. zuul:rolevar:: context
+
+ The build context; this should be a directory underneath
+ :zuul:rolevar:`build-container-image.zuul_work_dir`.
+
+ .. zuul:rolevar:: container_filename
+
+ The filename of the container file, present in the context
+ folder, used for building the image. Provide this if you are
+ using a non-standard filename for a specific image.
+
+ .. zuul:rolevar:: registry
+
+ The name of the target registry (e.g., ``quay.io``). Used by
+ the upload and promote roles.
+
+ .. zuul:rolevar:: repository
+
+ The name of the target repository in the registry for the image.
+ Supply this even if the image is not going to be uploaded (it
+ will be tagged with this in the local registry).
+
+ .. zuul:rolevar:: path
+
+ Optional: the directory that should be passed to the build
+ command. Useful for building images with a container file in
+ the context directory but a source repository elsewhere.
+
+ .. zuul:rolevar:: build_args
+ :type: list
+
+ Optional: a list of values to pass to the ``--build-arg``
+ parameter.
+
+ .. zuul:rolevar:: target
+
+ Optional: the target for a multi-stage build.
+
+ .. zuul:rolevar:: tags
+ :type: list
+ :default: ['latest']
+
+ A list of tags to be added to the image when promoted.
+
+ .. zuul:rolevar:: siblings
+ :type: list
+ :default: []
+
+ A list of sibling projects to be copied into
+ ``{{zuul_work_dir}}/.zuul-siblings``. This can be useful to
+ collect multiple projects to be installed within the same Docker
+ context. A ``--build-arg`` called ``ZUUL_SIBLINGS`` will be
+ added listing the sibling projects. Note that projects here must
+ be listed in ``required-projects``.
+
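+ For example, a hypothetical ``container_images`` entry that tags one
+ image twice:
+
+ .. code-block:: yaml
+
+    container_images:
+      - context: .
+        registry: quay.io
+        repository: myorgname/myimage
+        tags:
+          - latest
+          - stable
+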
+.. _anchors: https://yaml.org/spec/1.2/spec.html#&%20anchor//
diff --git a/roles/build-container-image/defaults/main.yaml b/roles/build-container-image/defaults/main.yaml
new file mode 100644
index 0000000..916550c
--- /dev/null
+++ b/roles/build-container-image/defaults/main.yaml
@@ -0,0 +1,2 @@
+zuul_work_dir: "{{ zuul.project.src_dir }}"
+container_command: podman
diff --git a/roles/build-container-image/tasks/build.yaml b/roles/build-container-image/tasks/build.yaml
new file mode 100644
index 0000000..b5e5d0b
--- /dev/null
+++ b/roles/build-container-image/tasks/build.yaml
@@ -0,0 +1,50 @@
+- name: Check sibling directory
+ stat:
+ path: '{{ zuul_work_dir }}/{{ item.context }}/.zuul-siblings'
+ register: _dot_zuul_siblings
+
+# This should have been cleaned up; multiple builds may specify
+# different siblings to include so we need to start fresh.
+- name: Check for clean build
+ assert:
+ that: not _dot_zuul_siblings.stat.exists
+
+- name: Create sibling source directory
+ file:
+ path: '{{ zuul_work_dir }}/{{ item.context }}/.zuul-siblings'
+ state: directory
+ mode: 0755
+ when: item.siblings is defined
+
+- name: Copy sibling source directories
+ command:
+ cmd: 'cp --parents -r {{ sibling }} /home/zuul/{{ zuul_work_dir }}/{{ item.context }}/.zuul-siblings'
+ chdir: '~/src'
+ loop: '{{ item.siblings }}'
+ loop_control:
+ loop_var: sibling
+ when: item.siblings is defined
+
+- name: Build a container image
+ command: >-
+ {{ container_command }} build {{ item.path | default('.') }} {% if containerfile %}-f {{ containerfile }}{% endif %}
+ {% if item.target | default(false) -%}
+ --target {{ item.target }}
+ {% endif -%}
+ {% for build_arg in item.build_args | default([]) -%}
+ --build-arg {{ build_arg }}
+ {% endfor -%}
+ {% if item.siblings | default(false) -%}
+ --build-arg "ZUUL_SIBLINGS={{ item.siblings | join(' ') }}"
+ {% endif -%}
+ {% for tag in item.tags | default(['latest']) -%}
+ --tag {{ item.repository }}:change_{{ zuul.change }}_{{ tag }}
+ --tag {{ item.repository }}:{{ tag }}
+ {% endfor -%}
+ args:
+ chdir: "{{ zuul_work_dir }}/{{ item.context }}"
+
+- name: Cleanup sibling source directory
+ file:
+ path: '{{ zuul_work_dir }}/{{ item.context }}/.zuul-siblings'
+ state: absent
diff --git a/roles/build-container-image/tasks/main.yaml b/roles/build-container-image/tasks/main.yaml
new file mode 100644
index 0000000..42dfd71
--- /dev/null
+++ b/roles/build-container-image/tasks/main.yaml
@@ -0,0 +1,42 @@
+# This can be removed if we add this functionality to Zuul directly
+- name: Load information from zuul_return
+ when: buildset_registry is not defined
+ set_fact:
+ buildset_registry: "{{ (lookup('file', zuul.executor.work_root + '/results.json') | from_json)['buildset_registry'] }}"
+ ignore_errors: true
+
+- name: Set container filename arg
+ set_fact:
+ containerfile: "{{ item.container_filename|default(container_filename|default('')) }}"
+
+- name: Build container images
+ include_tasks: build.yaml
+ loop: "{{ container_images }}"
+
+# Docker, and therefore skopeo and podman, don't understand docker
+# push [1234:5678::]:5000/image/path:tag so we set up /etc/hosts with
+# a registry alias name to support both IPv6 and IPv4.
+- name: Configure /etc/hosts for buildset_registry to workaround not understanding ipv6 addresses
+ become: yes
+ lineinfile:
+ path: /etc/hosts
+ state: present
+ regex: "^{{ buildset_registry.host }}\tzuul-jobs.buildset-registry$"
+ line: "{{ buildset_registry.host }}\tzuul-jobs.buildset-registry"
+ insertafter: EOF
+ when: buildset_registry is defined and buildset_registry.host | ipaddr
+- name: Set buildset_registry alias variable when using ip
+ set_fact:
+ buildset_registry_alias: zuul-jobs.buildset-registry
+ when: buildset_registry is defined and buildset_registry.host | ipaddr
+- name: Set buildset_registry alias variable when using name
+ set_fact:
+ buildset_registry_alias: "{{ buildset_registry.host }}"
+ when: buildset_registry is defined and not ( buildset_registry.host | ipaddr )
+# Push each image.
+- name: Push image to buildset registry
+ when: buildset_registry is defined
+ include_tasks: push.yaml
+ loop: "{{ container_images }}"
+ loop_control:
+ loop_var: image
diff --git a/roles/build-container-image/tasks/push.yaml b/roles/build-container-image/tasks/push.yaml
new file mode 100644
index 0000000..226c437
--- /dev/null
+++ b/roles/build-container-image/tasks/push.yaml
@@ -0,0 +1,12 @@
+- name: Tag image for buildset registry
+ command: >-
+ {{ container_command }} tag {{ image.repository }}:{{ image_tag }} {{ buildset_registry_alias }}:{{ buildset_registry.port }}/{{ image.repository }}:{{ image_tag }}
+ loop: "{{ image.tags | default(['latest']) }}"
+ loop_control:
+ loop_var: image_tag
+- name: Push tag to buildset registry
+ command: >-
+ {{ container_command }} push {{ buildset_registry_alias }}:{{ buildset_registry.port }}/{{ image.repository }}:{{ image_tag }}
+ loop: "{{ image.tags | default(['latest']) }}"
+ loop_control:
+ loop_var: image_tag
diff --git a/roles/build-docker-image/common.rst b/roles/build-docker-image/common.rst
index 95d37ad..8a5cc97 100644
--- a/roles/build-docker-image/common.rst
+++ b/roles/build-docker-image/common.rst
@@ -119,4 +119,15 @@ using this role.
A list of tags to be added to the image when promoted.
+ .. zuul:rolevar:: siblings
+ :type: list
+ :default: []
+
+ A list of sibling projects to be copied into
+ ``{{zuul_work_dir}}/.zuul-siblings``. This can be useful to
+ collect multiple projects to be installed within the same Docker
+ context. A ``--build-arg`` called ``ZUUL_SIBLINGS`` will be
+ added listing the sibling projects. Note that projects here must
+ be listed in ``required-projects``.
+
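+ For example, a hypothetical ``docker_images`` entry that copies one
+ sibling project into the build context:
+
+ .. code-block:: yaml
+
+    docker_images:
+      - context: .
+        repository: example/image
+        siblings:
+          - opendev.org/zuul/zuul-jobs
+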
.. _anchors: https://yaml.org/spec/1.2/spec.html#&%20anchor//
diff --git a/roles/build-docker-image/tasks/build.yaml b/roles/build-docker-image/tasks/build.yaml
new file mode 100644
index 0000000..cb6aa01
--- /dev/null
+++ b/roles/build-docker-image/tasks/build.yaml
@@ -0,0 +1,55 @@
+- name: Check sibling directory
+ stat:
+ path: '{{ zuul_work_dir }}/{{ item.context }}/.zuul-siblings'
+ register: _dot_zuul_siblings
+
+# This should have been cleaned up; multiple builds may specify
+# different siblings to include so we need to start fresh.
+- name: Check for clean build
+ assert:
+ that: not _dot_zuul_siblings.stat.exists
+
+- name: Create sibling source directory
+ file:
+ path: '{{ zuul_work_dir }}/{{ item.context }}/.zuul-siblings'
+ state: directory
+ mode: 0755
+ when: item.siblings is defined
+
+# NOTE(ianw): could use recursive copy: with remote_src, but it's
+# Ansible 2.8 only. Take the simple approach.
+- name: Copy sibling source directories
+ command:
+ cmd: 'cp --parents -r {{ sibling }} /home/zuul/{{ zuul_work_dir }}/{{ item.context }}/.zuul-siblings'
+ chdir: '~/src'
+ loop: '{{ item.siblings }}'
+ loop_control:
+ loop_var: sibling
+ when: item.siblings is defined
+
+- name: Build a docker image
+ command: >-
+ docker build {{ item.path | default('.') }} -f {{ item.dockerfile | default(docker_dockerfile) }}
+ {% if item.target | default(false) -%}
+ --target {{ item.target }}
+ {% endif -%}
+ {% for build_arg in item.build_args | default([]) -%}
+ --build-arg {{ build_arg }}
+ {% endfor -%}
+ {% if item.siblings | default(false) -%}
+ --build-arg "ZUUL_SIBLINGS={{ item.siblings | join(' ') }}"
+ {% endif -%}
+ {% for tag in item.tags | default(['latest']) -%}
+ {% if zuul.change | default(false) -%}
+ --tag {{ item.repository }}:change_{{ zuul.change }}_{{ tag }}
+ {% endif -%}
+ --tag {{ item.repository }}:{{ tag }}
+ {% endfor -%}
+ args:
+ chdir: "{{ zuul_work_dir }}/{{ item.context }}"
+
+- name: Cleanup sibling source directory
+ file:
+ path: '{{ zuul_work_dir }}/{{ item.context }}/.zuul-siblings'
+ state: absent
+
diff --git a/roles/build-docker-image/tasks/main.yaml b/roles/build-docker-image/tasks/main.yaml
index 4dceac0..c5d0898 100644
--- a/roles/build-docker-image/tasks/main.yaml
+++ b/roles/build-docker-image/tasks/main.yaml
@@ -4,22 +4,11 @@
set_fact:
buildset_registry: "{{ (lookup('file', zuul.executor.work_root + '/results.json') | from_json)['buildset_registry'] }}"
ignore_errors: true
-- name: Build a docker image
- command: >-
- docker build {{ item.path | default('.') }} -f {{ item.dockerfile | default(docker_dockerfile) }}
- {% if item.target | default(false) -%}
- --target {{ item.target }}
- {% endif -%}
- {% for build_arg in item.build_args | default([]) -%}
- --build-arg {{ build_arg }}
- {% endfor -%}
- {% for tag in item.tags | default(['latest']) -%}
- --tag {{ item.repository }}:change_{{ zuul.change }}_{{ tag }}
- --tag {{ item.repository }}:{{ tag }}
- {% endfor -%}
- args:
- chdir: "{{ zuul_work_dir }}/{{ item.context }}"
+
+- name: Build docker images
+ include_tasks: build.yaml
loop: "{{ docker_images }}"
+
# Docker doesn't understand docker push [1234:5678::]:5000/image/path:tag
# so we set up /etc/hosts with a registry alias name to support ipv6 and 4.
- name: Configure /etc/hosts for buildset_registry to workaround docker not understanding ipv6 addresses
diff --git a/roles/build-releasenotes/tasks/main.yaml b/roles/build-releasenotes/tasks/main.yaml
index 3e021b0..d6ca1b3 100644
--- a/roles/build-releasenotes/tasks/main.yaml
+++ b/roles/build-releasenotes/tasks/main.yaml
@@ -22,6 +22,7 @@
# Mapping of language codes to language names
declare -A LANG_NAME=(
+ ["cs"]="Czech"
["de"]="German"
["en_AU"]="English (Australian)"
["en_GB"]="English (United Kingdom)"
@@ -31,6 +32,7 @@
["it"]="Italian"
["ja"]="Japanese"
["ko_KR"]="Korean (South Korea)"
+ ["ne"]="Nepali"
["pt_BR"]="Portuguese (Brazil)"
["ru"]="Russian"
["tr_TR"]="Turkish (Turkey)"
diff --git a/roles/chart-testing/README.rst b/roles/chart-testing/README.rst
new file mode 100644
index 0000000..087776b
--- /dev/null
+++ b/roles/chart-testing/README.rst
@@ -0,0 +1,19 @@
+Run chart-testing (for Helm charts).
+
+**Role Variables**
+
+.. zuul:rolevar:: zuul_work_dir
+ :default: {{ zuul.project.src_dir }}
+
+ The location of the main working directory of the job.
+
+.. zuul:rolevar:: chart_testing_options
+ :default: --validate-maintainers=false --check-version-increment=false
+
+ Arguments passed to chart testing.
+
+ The defaults are suitable for a Zuul environment because
+ ``validate-maintainers`` requires a valid git remote (which is not
+ present in Zuul) and ``check-version-increment`` requires each commit
+ to have a new version; Zuul users are expected to set the version
+ when tagging/publishing a release.
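+
+For example, to re-enable maintainer validation while keeping the
+version-increment check disabled, a job could set (illustrative only):
+
+.. code-block:: yaml
+
+   vars:
+     chart_testing_options: --check-version-increment=false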
diff --git a/roles/chart-testing/defaults/main.yaml b/roles/chart-testing/defaults/main.yaml
new file mode 100644
index 0000000..ae1ea85
--- /dev/null
+++ b/roles/chart-testing/defaults/main.yaml
@@ -0,0 +1,2 @@
+zuul_work_dir: "{{ zuul.project.src_dir }}"
+chart_testing_options: --validate-maintainers=false --check-version-increment=false
diff --git a/roles/chart-testing/tasks/main.yaml b/roles/chart-testing/tasks/main.yaml
new file mode 100644
index 0000000..df33d3c
--- /dev/null
+++ b/roles/chart-testing/tasks/main.yaml
@@ -0,0 +1,4 @@
+- name: Run chart-testing
+ command: "ct lint {{ chart_testing_options }}"
+ args:
+ chdir: "{{ zuul_work_dir }}"
diff --git a/roles/collect-container-logs/README.rst b/roles/collect-container-logs/README.rst
new file mode 100644
index 0000000..52e452c
--- /dev/null
+++ b/roles/collect-container-logs/README.rst
@@ -0,0 +1,8 @@
+An Ansible role to collect all container logs.
+
+**Role Variables**
+
+.. zuul:rolevar:: container_command
+ :default: docker
+
+ The container runtime CLI command.
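+
+For example, an illustrative post-run play that collects logs from
+podman instead of docker:
+
+.. code-block:: yaml
+
+   - hosts: all
+     roles:
+       - role: collect-container-logs
+         vars:
+           container_command: podman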
diff --git a/roles/collect-container-logs/defaults/main.yaml b/roles/collect-container-logs/defaults/main.yaml
new file mode 100644
index 0000000..1d18111
--- /dev/null
+++ b/roles/collect-container-logs/defaults/main.yaml
@@ -0,0 +1 @@
+container_command: docker
\ No newline at end of file
diff --git a/roles/collect-container-logs/tasks/main.yaml b/roles/collect-container-logs/tasks/main.yaml
new file mode 100644
index 0000000..beb658e
--- /dev/null
+++ b/roles/collect-container-logs/tasks/main.yaml
@@ -0,0 +1,26 @@
+- name: List containers
+ command: "{{ container_command }} ps -a --format '{{ '{{ .Names }}' }}'"
+ register: docker_containers
+ ignore_errors: true
+
+- name: Create container log dir
+ file:
+ path: "{{ ansible_user_dir }}/zuul-output/logs/{{ container_command }}"
+ state: directory
+
+- name: Save container logs
+ loop: "{{ docker_containers.stdout_lines | default([]) }}"
+ # We can't use the default 'item' because roles may be used in
+ # higher level loops and 'item' could conflict in that case.
+ loop_control:
+ loop_var: zj_container_name
+ shell: "{{ container_command }} logs {{ zj_container_name }} &> {{ ansible_user_dir }}/zuul-output/logs/{{ container_command }}/{{ zj_container_name }}.txt"
+ args:
+ executable: /bin/bash
+ ignore_errors: true
+
+- name: Open container logs permissions
+ file:
+ dest: "{{ ansible_user_dir }}/zuul-output/logs/{{ container_command }}"
+ mode: u=rwX,g=rX,o=rX
+ recurse: yes
diff --git a/roles/configure-mirrors/README.rst b/roles/configure-mirrors/README.rst
index 4359993..d7d6570 100644
--- a/roles/configure-mirrors/README.rst
+++ b/roles/configure-mirrors/README.rst
@@ -7,10 +7,15 @@ An ansible role to configure services to use mirrors.
The base host for mirror servers.
+.. zuul:rolevar:: pypi_fqdn
+ :default: {{ mirror_fqdn }}
+
+ The base host for the PyPI mirror server.
+
.. zuul:rolevar:: pypi_mirror
URL to override the generated pypi mirror url based on
- :zuul:rolevar:`configure-mirrors.mirror_fqdn`.
+ :zuul:rolevar:`configure-mirrors.pypi_fqdn`.
.. zuul:rolevar:: set_apt_mirrors_trusted
:default: False
diff --git a/roles/configure-mirrors/defaults/main.yaml b/roles/configure-mirrors/defaults/main.yaml
index fc2bd7c..4a07e2c 100644
--- a/roles/configure-mirrors/defaults/main.yaml
+++ b/roles/configure-mirrors/defaults/main.yaml
@@ -1,4 +1,5 @@
mirror_fqdn: "{{ zuul_site_mirror_fqdn|default(omit) }}"
-pypi_mirror: "http://{{ mirror_fqdn }}/pypi/simple"
+pypi_fqdn: "{{ mirror_fqdn }}"
+pypi_mirror: "http://{{ pypi_fqdn }}/pypi/simple"
set_apt_mirrors_trusted: False
wheel_mirror: "http://{{ mirror_fqdn }}/wheel/{{ ansible_distribution | lower }}-{{ ansible_distribution_version }}-{{ ansible_architecture | lower }}"
diff --git a/roles/configure-mirrors/handlers/main.yaml b/roles/configure-mirrors/handlers/main.yaml
index 97aaf76..560ad78 100644
--- a/roles/configure-mirrors/handlers/main.yaml
+++ b/roles/configure-mirrors/handlers/main.yaml
@@ -7,19 +7,15 @@
tags:
- skip_ansible_lint
-- name: Update dnf cache
+- name: Update yum/dnf cache
become: yes
command: "{{ item }}"
+ args:
+ warn: false
with_items:
- - dnf clean all
- - dnf makecache
-
-- name: Update yum cache
- become: yes
- command: "{{ item }}"
- with_items:
- - yum clean all
- - yum makecache
+ - "{{ ansible_pkg_mgr }} clean all"
+ - "{{ ansible_pkg_mgr }} makecache -v"
+ # verbose is needed in order to make it possible to debug potential failures
- name: Update zypper cache
become: yes
diff --git a/roles/configure-mirrors/tasks/main.yaml b/roles/configure-mirrors/tasks/main.yaml
index 4f65d42..c98c53d 100644
--- a/roles/configure-mirrors/tasks/main.yaml
+++ b/roles/configure-mirrors/tasks/main.yaml
@@ -1,3 +1,8 @@
+- name: gather needed facts
+ when: ansible_pkg_mgr is not defined
+ setup:
+ gather_subset: pkg_mgr
+
- name: Set up infrastructure mirrors
include: mirror.yaml
when: mirror_fqdn is defined
diff --git a/roles/configure-mirrors/tasks/mirror.yaml b/roles/configure-mirrors/tasks/mirror.yaml
index f550da3..4271709 100644
--- a/roles/configure-mirrors/tasks/mirror.yaml
+++ b/roles/configure-mirrors/tasks/mirror.yaml
@@ -1,12 +1,12 @@
-- name: Install .pydistutils.cfg configuration in homedir
- template:
- dest: ~/.pydistutils.cfg
- mode: 0644
- src: .pydistutils.cfg.j2
+- name: Remove .pydistutils.cfg configuration in homedir
+ file:
+ path: ~/.pydistutils.cfg
+ state: absent
- name: Include OS-specific variables
include_vars: "{{ item }}"
with_first_found:
+ - "{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yaml"
- "{{ ansible_distribution }}.{{ ansible_architecture }}.yaml"
- "{{ ansible_distribution }}.yaml"
- "{{ ansible_os_family }}.yaml"
@@ -25,6 +25,7 @@
include: "{{ item }}"
static: no
with_first_found:
+ - "mirror/{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yaml"
- "mirror/{{ ansible_distribution }}.yaml"
- "mirror/{{ ansible_os_family }}.yaml"
- "mirror/default.yaml"
diff --git a/roles/configure-mirrors/tasks/mirror/CentOS.yaml b/roles/configure-mirrors/tasks/mirror/CentOS-7.yaml
index 68d6ba6..799147c 100644
--- a/roles/configure-mirrors/tasks/mirror/CentOS.yaml
+++ b/roles/configure-mirrors/tasks/mirror/CentOS-7.yaml
@@ -5,12 +5,12 @@
group: root
mode: 0644
owner: root
- src: "{{ item }}.j2"
+ src: "centos7/{{ item }}.j2"
with_items:
- etc/yum.repos.d/CentOS-Base.repo
- etc/yum.repos.d/epel.repo
notify:
- - Update yum cache
+ - Update yum/dnf cache
# http://dnf.readthedocs.io/en/latest/conf_ref.html#options-for-both-main-and-repo
# deltarpm is useful when the bottleneck is the network throughput.
diff --git a/roles/configure-mirrors/tasks/mirror/CentOS-8.yaml b/roles/configure-mirrors/tasks/mirror/CentOS-8.yaml
new file mode 100644
index 0000000..68d9a57
--- /dev/null
+++ b/roles/configure-mirrors/tasks/mirror/CentOS-8.yaml
@@ -0,0 +1,29 @@
+- name: Install CentOS 8 repository files
+ become: yes
+ template:
+ dest: "/{{ item }}"
+ group: root
+ mode: 0644
+ owner: root
+ src: "centos8/{{ item }}.j2"
+ with_items:
+ - etc/yum.repos.d/CentOS-AppStream.repo
+ - etc/yum.repos.d/CentOS-Base.repo
+ - etc/yum.repos.d/CentOS-HA.repo
+ - etc/yum.repos.d/CentOS-Extras.repo
+ - etc/yum.repos.d/CentOS-PowerTools.repo
+ - etc/yum.repos.d/epel.repo
+ notify:
+ - Update yum/dnf cache
+
+# http://dnf.readthedocs.io/en/latest/conf_ref.html#options-for-both-main-and-repo
+# deltarpm is useful when the bottleneck is the network throughput.
+# It also requires additional drpm packages to be hosted by the mirrors which
+# is not done by default.
+- name: Disable deltarpm
+ become: yes
+ ini_file:
+ path: /etc/dnf/dnf.conf
+ section: main
+ option: deltarpm
+ value: 0
diff --git a/roles/configure-mirrors/tasks/mirror/Debian.yaml b/roles/configure-mirrors/tasks/mirror/Debian.yaml
index 7d72205..169773f 100644
--- a/roles/configure-mirrors/tasks/mirror/Debian.yaml
+++ b/roles/configure-mirrors/tasks/mirror/Debian.yaml
@@ -5,7 +5,7 @@
group: root
mode: 0644
owner: root
- src: "{{ item }}.j2"
+ src: "apt/{{ item }}.j2"
with_items:
- etc/apt/sources.list.d/default.list
- etc/apt/sources.list.d/updates.list
diff --git a/roles/configure-mirrors/tasks/mirror/Fedora.yaml b/roles/configure-mirrors/tasks/mirror/Fedora.yaml
index 778c874..81eb5f4 100644
--- a/roles/configure-mirrors/tasks/mirror/Fedora.yaml
+++ b/roles/configure-mirrors/tasks/mirror/Fedora.yaml
@@ -5,12 +5,12 @@
group: root
mode: 0644
owner: root
- src: "{{ item }}.j2"
+ src: "fedora/{{ item }}.j2"
with_items:
- etc/yum.repos.d/fedora.repo
- etc/yum.repos.d/fedora-updates.repo
notify:
- - Update dnf cache
+ - Update yum/dnf cache
# http://dnf.readthedocs.io/en/latest/conf_ref.html#options-for-both-main-and-repo
# deltarpm is useful when the bottleneck is the network throughput.
diff --git a/roles/configure-mirrors/tasks/mirror/Suse.yaml b/roles/configure-mirrors/tasks/mirror/Suse.yaml
index 3df34a5..549ef44 100644
--- a/roles/configure-mirrors/tasks/mirror/Suse.yaml
+++ b/roles/configure-mirrors/tasks/mirror/Suse.yaml
@@ -1,12 +1,12 @@
- name: set zypper base package repository (openSUSE Leap)
set_fact:
opensuse_repo_baseurl: "{{ package_mirror }}/distribution/leap/$releasever/repo/oss/"
- when: not ansible_distribution | search("Tumbleweed")
+ when: ansible_distribution is not search("Tumbleweed")
- name: set zypper base package repository (openSUSE Tumbleweed)
set_fact:
opensuse_repo_baseurl: "{{ package_mirror }}/tumbleweed/repo/oss/"
- when: ansible_distribution | search("Tumbleweed")
+ when: ansible_distribution is search("Tumbleweed")
- name: Install Suse repository files
become: yes
@@ -15,7 +15,7 @@
group: root
mode: 0644
owner: root
- src: "{{ item }}.j2"
+ src: "suse/{{ item }}.j2"
with_items:
- etc/zypp/repos.d/repo-oss.repo
notify:
@@ -28,7 +28,7 @@
group: root
mode: 0644
owner: root
- src: etc/zypp/repos.d/repo-update.repo.j2
+ src: suse/etc/zypp/repos.d/repo-update.repo.j2
notify:
- Update zypper cache
- when: not ansible_distribution | search("Tumbleweed")
+ when: ansible_distribution is not search("Tumbleweed")
diff --git a/roles/configure-mirrors/tasks/mirror/Ubuntu.yaml b/roles/configure-mirrors/tasks/mirror/Ubuntu.yaml
index eb68a8c..8356f53 100644
--- a/roles/configure-mirrors/tasks/mirror/Ubuntu.yaml
+++ b/roles/configure-mirrors/tasks/mirror/Ubuntu.yaml
@@ -5,7 +5,7 @@
group: root
mode: 0644
owner: root
- src: "{{ item }}.j2"
+ src: "apt/{{ item }}.j2"
with_items:
- etc/apt/sources.list
- etc/apt/apt.conf.d/99unauthenticated
diff --git a/roles/configure-mirrors/templates/.pydistutils.cfg.j2 b/roles/configure-mirrors/templates/.pydistutils.cfg.j2
deleted file mode 100644
index 082ac44..0000000
--- a/roles/configure-mirrors/templates/.pydistutils.cfg.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-# {{ ansible_managed }}
-[easy_install]
-index_url = {{ pypi_mirror }}
-allow_hosts = {{ mirror_fqdn }}
diff --git a/roles/configure-mirrors/templates/etc/apt/apt.conf.d/99unauthenticated.j2 b/roles/configure-mirrors/templates/apt/etc/apt/apt.conf.d/99unauthenticated.j2
index 6bf00a0..6bf00a0 100644
--- a/roles/configure-mirrors/templates/etc/apt/apt.conf.d/99unauthenticated.j2
+++ b/roles/configure-mirrors/templates/apt/etc/apt/apt.conf.d/99unauthenticated.j2
diff --git a/roles/configure-mirrors/templates/etc/apt/sources.list.d/backports.list.j2 b/roles/configure-mirrors/templates/apt/etc/apt/sources.list.d/backports.list.j2
index 82a9787..82a9787 100644
--- a/roles/configure-mirrors/templates/etc/apt/sources.list.d/backports.list.j2
+++ b/roles/configure-mirrors/templates/apt/etc/apt/sources.list.d/backports.list.j2
diff --git a/roles/configure-mirrors/templates/etc/apt/sources.list.d/default.list.j2 b/roles/configure-mirrors/templates/apt/etc/apt/sources.list.d/default.list.j2
index ca3240d..ca3240d 100644
--- a/roles/configure-mirrors/templates/etc/apt/sources.list.d/default.list.j2
+++ b/roles/configure-mirrors/templates/apt/etc/apt/sources.list.d/default.list.j2
diff --git a/roles/configure-mirrors/templates/etc/apt/sources.list.d/security.list.j2 b/roles/configure-mirrors/templates/apt/etc/apt/sources.list.d/security.list.j2
index 9f85d59..9f85d59 100644
--- a/roles/configure-mirrors/templates/etc/apt/sources.list.d/security.list.j2
+++ b/roles/configure-mirrors/templates/apt/etc/apt/sources.list.d/security.list.j2
diff --git a/roles/configure-mirrors/templates/etc/apt/sources.list.d/updates.list.j2 b/roles/configure-mirrors/templates/apt/etc/apt/sources.list.d/updates.list.j2
index 155b230..155b230 100644
--- a/roles/configure-mirrors/templates/etc/apt/sources.list.d/updates.list.j2
+++ b/roles/configure-mirrors/templates/apt/etc/apt/sources.list.d/updates.list.j2
diff --git a/roles/configure-mirrors/templates/etc/apt/sources.list.j2 b/roles/configure-mirrors/templates/apt/etc/apt/sources.list.j2
index 8461d40..8461d40 100644
--- a/roles/configure-mirrors/templates/etc/apt/sources.list.j2
+++ b/roles/configure-mirrors/templates/apt/etc/apt/sources.list.j2
diff --git a/roles/configure-mirrors/templates/etc/yum.repos.d/CentOS-Base.repo.j2 b/roles/configure-mirrors/templates/centos7/etc/yum.repos.d/CentOS-Base.repo.j2
index 13ca67f..13ca67f 100644
--- a/roles/configure-mirrors/templates/etc/yum.repos.d/CentOS-Base.repo.j2
+++ b/roles/configure-mirrors/templates/centos7/etc/yum.repos.d/CentOS-Base.repo.j2
diff --git a/roles/configure-mirrors/templates/etc/yum.repos.d/epel.repo.j2 b/roles/configure-mirrors/templates/centos7/etc/yum.repos.d/epel.repo.j2
index 4c19390..4c19390 100644
--- a/roles/configure-mirrors/templates/etc/yum.repos.d/epel.repo.j2
+++ b/roles/configure-mirrors/templates/centos7/etc/yum.repos.d/epel.repo.j2
diff --git a/roles/configure-mirrors/templates/centos8/etc/yum.repos.d/CentOS-AppStream.repo.j2 b/roles/configure-mirrors/templates/centos8/etc/yum.repos.d/CentOS-AppStream.repo.j2
new file mode 100644
index 0000000..5941b4b
--- /dev/null
+++ b/roles/configure-mirrors/templates/centos8/etc/yum.repos.d/CentOS-AppStream.repo.j2
@@ -0,0 +1,7 @@
+# {{ ansible_managed }}
+[AppStream]
+name=CentOS-$releasever - AppStream
+baseurl={{ package_mirror }}/$releasever/AppStream/$basearch/os/
+gpgcheck=1
+enabled=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
\ No newline at end of file
diff --git a/roles/configure-mirrors/templates/centos8/etc/yum.repos.d/CentOS-Base.repo.j2 b/roles/configure-mirrors/templates/centos8/etc/yum.repos.d/CentOS-Base.repo.j2
new file mode 100644
index 0000000..1437657
--- /dev/null
+++ b/roles/configure-mirrors/templates/centos8/etc/yum.repos.d/CentOS-Base.repo.j2
@@ -0,0 +1,7 @@
+# {{ ansible_managed }}
+[BaseOS]
+name=CentOS-$releasever - Base
+baseurl={{ package_mirror }}/$releasever/BaseOS/$basearch/os/
+gpgcheck=1
+enabled=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
diff --git a/roles/configure-mirrors/templates/centos8/etc/yum.repos.d/CentOS-Extras.repo.j2 b/roles/configure-mirrors/templates/centos8/etc/yum.repos.d/CentOS-Extras.repo.j2
new file mode 100644
index 0000000..226f967
--- /dev/null
+++ b/roles/configure-mirrors/templates/centos8/etc/yum.repos.d/CentOS-Extras.repo.j2
@@ -0,0 +1,7 @@
+# {{ ansible_managed }}
+[extras]
+name=CentOS-$releasever - Extras
+baseurl={{ package_mirror }}/$releasever/extras/$basearch/os/
+gpgcheck=1
+enabled=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
diff --git a/roles/configure-mirrors/templates/centos8/etc/yum.repos.d/CentOS-HA.repo.j2 b/roles/configure-mirrors/templates/centos8/etc/yum.repos.d/CentOS-HA.repo.j2
new file mode 100644
index 0000000..9726d5a
--- /dev/null
+++ b/roles/configure-mirrors/templates/centos8/etc/yum.repos.d/CentOS-HA.repo.j2
@@ -0,0 +1,7 @@
+# {{ ansible_managed }}
+[HighAvailability]
+name=CentOS-$releasever - HA
+baseurl={{ package_mirror }}/$releasever/HighAvailability/$basearch/os/
+gpgcheck=1
+enabled=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
diff --git a/roles/configure-mirrors/templates/centos8/etc/yum.repos.d/CentOS-PowerTools.repo.j2 b/roles/configure-mirrors/templates/centos8/etc/yum.repos.d/CentOS-PowerTools.repo.j2
new file mode 100644
index 0000000..b3a2c1b
--- /dev/null
+++ b/roles/configure-mirrors/templates/centos8/etc/yum.repos.d/CentOS-PowerTools.repo.j2
@@ -0,0 +1,7 @@
+# {{ ansible_managed }}
+[PowerTools]
+name=CentOS-$releasever - PowerTools
+baseurl={{ package_mirror }}/$releasever/PowerTools/$basearch/os/
+gpgcheck=1
+enabled=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
diff --git a/roles/configure-mirrors/templates/centos8/etc/yum.repos.d/epel.repo.j2 b/roles/configure-mirrors/templates/centos8/etc/yum.repos.d/epel.repo.j2
new file mode 100644
index 0000000..ecf2064
--- /dev/null
+++ b/roles/configure-mirrors/templates/centos8/etc/yum.repos.d/epel.repo.j2
@@ -0,0 +1,21 @@
+# {{ ansible_managed }}
+[epel]
+name=Extra Packages for Enterprise Linux 8 - $basearch
+baseurl={{ epel_mirror }}/8/Everything/$basearch
+enabled=0
+gpgcheck=1
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-8
+
+[epel-debuginfo]
+name=Extra Packages for Enterprise Linux 8 - $basearch - Debug
+baseurl={{ epel_mirror }}/8/Everything/$basearch/debug
+enabled=0
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-8
+gpgcheck=1
+
+[epel-source]
+name=Extra Packages for Enterprise Linux 8 - $basearch - Source
+baseurl={{ epel_mirror }}/8/Everything/SRPMS
+enabled=0
+gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-8
+gpgcheck=1
diff --git a/roles/configure-mirrors/templates/etc/pip.conf.j2 b/roles/configure-mirrors/templates/etc/pip.conf.j2
index cd47f28..7545be9 100644
--- a/roles/configure-mirrors/templates/etc/pip.conf.j2
+++ b/roles/configure-mirrors/templates/etc/pip.conf.j2
@@ -2,5 +2,5 @@
[global]
timeout = 60
index-url = {{ pypi_mirror }}
-trusted-host = {{ mirror_fqdn }}
+trusted-host = {{ pypi_fqdn }}
extra-index-url = {{ wheel_mirror }}
diff --git a/roles/configure-mirrors/templates/etc/yum.repos.d/fedora-updates.repo.j2 b/roles/configure-mirrors/templates/fedora/etc/yum.repos.d/fedora-updates.repo.j2
index b1b6f79..64669fa 100644
--- a/roles/configure-mirrors/templates/etc/yum.repos.d/fedora-updates.repo.j2
+++ b/roles/configure-mirrors/templates/fedora/etc/yum.repos.d/fedora-updates.repo.j2
@@ -2,7 +2,7 @@
[updates]
name=Fedora $releasever - $basearch - Updates
failovermethod=priority
-{% if ansible_distribution_version | version_compare('28', '<') %}
+{% if ansible_distribution_version is version('28', '<') %}
baseurl={{ package_mirror }}/updates/$releasever/$basearch/
{% else %}
baseurl={{ package_mirror }}/updates/$releasever/Everything/$basearch/
@@ -18,7 +18,7 @@ skip_if_unavailable=False
[updates-debuginfo]
name=Fedora $releasever - $basearch - Updates - Debug
failovermethod=priority
-{% if ansible_distribution_version | version_compare('28', '<') %}
+{% if ansible_distribution_version is version('28', '<') %}
baseurl={{ package_mirror }}/updates/$releasever/$basearch/debug/
{% else %}
baseurl={{ package_mirror }}/updates/$releasever/Everything/$basearch/debug/tree/
@@ -34,7 +34,7 @@ skip_if_unavailable=False
[updates-source]
name=Fedora $releasever - Updates Source
failovermethod=priority
-{% if ansible_distribution_version | version_compare('28', '<') %}
+{% if ansible_distribution_version is version('28', '<') %}
baseurl={{ package_mirror }}/updates/$releasever/SRPMS/
{% else %}
baseurl={{ package_mirror }}/updates/$releasever/Everything/source/tree/
diff --git a/roles/configure-mirrors/templates/etc/yum.repos.d/fedora.repo.j2 b/roles/configure-mirrors/templates/fedora/etc/yum.repos.d/fedora.repo.j2
index 3abdca5..3abdca5 100644
--- a/roles/configure-mirrors/templates/etc/yum.repos.d/fedora.repo.j2
+++ b/roles/configure-mirrors/templates/fedora/etc/yum.repos.d/fedora.repo.j2
diff --git a/roles/configure-mirrors/templates/etc/zypp/repos.d/repo-oss.repo.j2 b/roles/configure-mirrors/templates/suse/etc/zypp/repos.d/repo-oss.repo.j2
index d345196..d345196 100644
--- a/roles/configure-mirrors/templates/etc/zypp/repos.d/repo-oss.repo.j2
+++ b/roles/configure-mirrors/templates/suse/etc/zypp/repos.d/repo-oss.repo.j2
diff --git a/roles/configure-mirrors/templates/etc/zypp/repos.d/repo-update.repo.j2 b/roles/configure-mirrors/templates/suse/etc/zypp/repos.d/repo-update.repo.j2
index c21dbda..c21dbda 100644
--- a/roles/configure-mirrors/templates/etc/zypp/repos.d/repo-update.repo.j2
+++ b/roles/configure-mirrors/templates/suse/etc/zypp/repos.d/repo-update.repo.j2
diff --git a/roles/emit-job-header/tasks/main.yaml b/roles/emit-job-header/tasks/main.yaml
index 2e35a3e..e1cf31e 100644
--- a/roles/emit-job-header/tasks/main.yaml
+++ b/roles/emit-job-header/tasks/main.yaml
@@ -19,6 +19,9 @@
{% if zuul_log_url is defined and zuul_log_path is defined %}
Log URL (when completed): {{ zuul_log_url }}/{{ zuul_log_path }}/
{% endif %}
+ {% if zuul.event_id is defined %}
+ Event ID: {{ zuul.event_id }}
+ {% endif %}
- name: Print node information
debug:
diff --git a/roles/enable-netconsole/README.rst b/roles/enable-netconsole/README.rst
new file mode 100644
index 0000000..194d885
--- /dev/null
+++ b/roles/enable-netconsole/README.rst
@@ -0,0 +1,32 @@
+Enable netconsole for a host
+
+This enables netconsole on a host to send kernel/dmesg logs to a
+remote host. This can be very useful if a node is experiencing a
+kernel oops or another form of unexpected disconnect where you cannot
+retrieve information via standard logging methods.
+
+The ``netconsole_remote_ip`` and ``netconsole_remote_port`` variables
+must be set. The remote host can capture the logs with a command like::
+
+ $ nc -v -u -l -p 6666 | tee console-output.log
+
+or::
+
+ $ socat udp-recv:6666 - | tee console-output.log
+
+One further trick is to send interesting data to ``/dev/kmsg``; this
+should make it across the netconsole even if the main interface has
+been disabled, e.g.::
+
+ $ ip addr | sudo tee /dev/kmsg
+
+
+**Role Variables**
+
+.. zuul:rolevar:: netconsole_remote_ip
+
+ The IP address of the remote host to send to.
+
+.. zuul:rolevar:: netconsole_remote_port
+
+ The port listening on the remote host.
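+
+A minimal sketch of invoking the role from a playbook; the address and
+port below are placeholder values, not defaults::
+
+ - hosts: all
+   roles:
+     - role: enable-netconsole
+       netconsole_remote_ip: 192.0.2.10
+       netconsole_remote_port: 6666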
diff --git a/roles/enable-netconsole/tasks/main.yaml b/roles/enable-netconsole/tasks/main.yaml
new file mode 100644
index 0000000..e46e064
--- /dev/null
+++ b/roles/enable-netconsole/tasks/main.yaml
@@ -0,0 +1,82 @@
+- name: Check remote IP set
+ assert:
+ that:
+ - netconsole_remote_ip is defined
+ - netconsole_remote_port is defined
+ fail_msg: "Must set remote host and port"
+
+- name: Register netconsole target var
+ set_fact:
+ nc_target: '/sys/kernel/config/netconsole/target1'
+
+- name: Everything needs root
+ become: yes
+ block:
+
+ # netconsole requires the device to send packets from and the
+ # destination MAC. This works for hosts on the same subnet, but the
+ # way to get packets out to the world is to specify the default gw as
+ # the remote destination.
+
+ - name: Get default gateway
+ shell: "ip route | grep default | awk '{print $3}'"
+ register: default_gw_cmd
+
+ - name: Save default gateway
+ set_fact:
+ default_gw: '{{ default_gw_cmd.stdout }}'
+
+ - name: Get default gateway MAC
+ shell: "arp {{ default_gw }} | grep {{ default_gw }} | awk '{print $3}'"
+ register: default_gw_cmd_mac
+
+ - name: Save default gateway MAC
+ set_fact:
+ default_gw_mac: '{{ default_gw_cmd_mac.stdout }}'
+
+ - name: Get default device
+ shell: "ip route | grep default | awk '{print $5}'"
+ register: default_gw_cmd_dev
+
+ - name: Save default device
+ set_fact:
+ default_gw_dev: '{{ default_gw_cmd_dev.stdout }}'
+
+ - name: Install configfs
+ command: modprobe configfs
+
+ - name: Install netconsole
+ command: modprobe netconsole
+
+ - name: Mount configfs
+ mount:
+ path: /sys/kernel/config
+ fstype: configfs
+ src: configfs
+ state: mounted
+
+ - name: Make netconsole target directory
+ file:
+ path: '{{ nc_target }}'
+ state: directory
+
+ - name: Configure gateway
+ shell: 'echo {{ default_gw_dev }} > {{ nc_target }}/dev_name'
+
+ - name: Configure gateway MAC
+ shell: 'echo {{ default_gw_mac }} > {{ nc_target }}/remote_mac'
+
+ - name: Configure remote IP
+ shell: 'echo {{ netconsole_remote_ip }} > {{ nc_target }}/remote_ip'
+
+ - name: Configure remote port
+ shell: 'echo {{ netconsole_remote_port }} > {{ nc_target }}/remote_port'
+
+ - name: Enable netconsole
+ shell: 'echo 1 > {{ nc_target }}/enabled'
+
+ - name: Turn up dmesg
+ command: dmesg -n 8
+
+ - name: Send a test message
+ shell: "echo 'netconsole enabled' > /dev/kmsg"
diff --git a/roles/ensure-bazelisk/README.rst b/roles/ensure-bazelisk/README.rst
new file mode 100644
index 0000000..102d8ee
--- /dev/null
+++ b/roles/ensure-bazelisk/README.rst
@@ -0,0 +1,34 @@
+Ensure that bazelisk is present.
+
+If bazelisk is already installed, this role does nothing. Otherwise,
+it downloads bazelisk from GitHub and installs it in the user's
+home directory by default.
+
+**Role Variables**
+
+.. zuul:rolevar:: bazelisk_version
+ :default: v1.3.0
+
+ Version of bazelisk to install.
+
+.. zuul:rolevar:: bazelisk_arch
+ :default: linux-amd64
+
+ Architecture to install.
+
+.. zuul:rolevar:: bazelisk_url
+ :default: https://github.com/bazelbuild/bazelisk/releases/download/{{ bazelisk_version }}/bazelisk-{{ bazelisk_arch }}
+
+ The URL from which to download bazelisk.
+
+.. zuul:rolevar:: bazelisk_executable
+ :default: bazelisk
+
+ The bazelisk executable. If this already exists, the
+ role will not perform any further actions.
+
+.. zuul:rolevar:: bazelisk_target
+ :default: "{{ ansible_user_dir }}/.local/bin/bazelisk"
+
+ Where to install bazelisk. If the role downloads bazelisk, it will
+ set :zuul:rolevar:`ensure-bazelisk.bazelisk_executable` to this value as well.
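+
+A minimal usage sketch; the version shown is only an illustrative
+override of the default::
+
+ - hosts: all
+   roles:
+     - role: ensure-bazelisk
+       bazelisk_version: v1.3.0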
diff --git a/roles/ensure-bazelisk/defaults/main.yaml b/roles/ensure-bazelisk/defaults/main.yaml
new file mode 100644
index 0000000..813baad
--- /dev/null
+++ b/roles/ensure-bazelisk/defaults/main.yaml
@@ -0,0 +1,6 @@
+bazelisk_version: v1.3.0
+bazelisk_arch: linux-amd64
+bazelisk_url: "https://github.com/bazelbuild/bazelisk/releases/download/{{ bazelisk_version }}/bazelisk-{{ bazelisk_arch }}"
+bazelisk_executable: bazelisk
+# If we have to download it, store it here:
+bazelisk_target: "{{ ansible_user_dir }}/.local/bin/bazelisk"
diff --git a/roles/ensure-bazelisk/tasks/main.yaml b/roles/ensure-bazelisk/tasks/main.yaml
new file mode 100644
index 0000000..efe48c3
--- /dev/null
+++ b/roles/ensure-bazelisk/tasks/main.yaml
@@ -0,0 +1,25 @@
+- name: Check if bazelisk is installed
+ command: bash -c "type -p {{ bazelisk_executable }}"
+ failed_when: false
+ register: bazelisk_installed
+
+- name: Ensure target directory exists
+ file:
+ state: directory
+ path: "{{ bazelisk_target | dirname }}"
+ when: bazelisk_installed.rc != 0
+
+- name: Download bazelisk
+ get_url:
+ url: "{{ bazelisk_url }}"
+ dest: "{{ bazelisk_target }}"
+ mode: '0755'
+ when: bazelisk_installed.rc != 0
+ register: bazelisk_downloaded
+
+# This will apply to further plays and playbooks
+- name: Set bazelisk_executable fact
+ set_fact:
+ bazelisk_executable: "{{ bazelisk_target }}"
+ cacheable: true
+ when: bazelisk_downloaded is changed
diff --git a/roles/ensure-chart-testing/README.rst b/roles/ensure-chart-testing/README.rst
new file mode 100644
index 0000000..60d0720
--- /dev/null
+++ b/roles/ensure-chart-testing/README.rst
@@ -0,0 +1,7 @@
+Ensure chart-testing is installed
+
+**Role Variables**
+
+.. zuul:rolevar:: chart_testing_version
+
+ Version of chart-testing to install
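+
+A minimal usage sketch, pinning the version for illustration::
+
+ - hosts: all
+   roles:
+     - role: ensure-chart-testing
+       chart_testing_version: 2.4.0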
diff --git a/roles/ensure-chart-testing/defaults/main.yaml b/roles/ensure-chart-testing/defaults/main.yaml
new file mode 100644
index 0000000..475c92b
--- /dev/null
+++ b/roles/ensure-chart-testing/defaults/main.yaml
@@ -0,0 +1,2 @@
+---
+chart_testing_version: 2.4.0 \ No newline at end of file
diff --git a/roles/ensure-chart-testing/tasks/main.yaml b/roles/ensure-chart-testing/tasks/main.yaml
new file mode 100644
index 0000000..75a843c
--- /dev/null
+++ b/roles/ensure-chart-testing/tasks/main.yaml
@@ -0,0 +1,29 @@
+---
+- name: Install Python dependencies
+ become: true
+ pip:
+ name:
+ - yamale==1.8.0
+ - yamllint==1.13.0
+
+- name: Install chart-testing
+ become: true
+ unarchive:
+ remote_src: true
+ src: "https://github.com/helm/chart-testing/releases/download/v{{ chart_testing_version }}/chart-testing_{{ chart_testing_version }}_linux_amd64.tar.gz"
+ dest: /usr/local/bin
+
+- name: Setup /etc/ct
+ become: true
+ file:
+ path: /etc/ct
+ state: directory
+
+- name: Install configuration files
+ become: true
+ get_url:
+ url: "https://raw.githubusercontent.com/helm/chart-testing/v{{ chart_testing_version }}/etc/{{ item }}"
+ dest: "/etc/ct/{{ item }}"
+ loop:
+ - chart_schema.yaml
+ - lintconf.yaml \ No newline at end of file
diff --git a/roles/ensure-helm/README.rst b/roles/ensure-helm/README.rst
new file mode 100644
index 0000000..cfb1889
--- /dev/null
+++ b/roles/ensure-helm/README.rst
@@ -0,0 +1,7 @@
+Ensure Helm is installed
+
+**Role Variables**
+
+.. zuul:rolevar:: helm_version
+
+ Version of Helm to install
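+
+A minimal usage sketch; the version pin is illustrative::
+
+ - hosts: all
+   roles:
+     - role: ensure-helm
+       helm_version: 2.16.1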
diff --git a/roles/ensure-helm/defaults/main.yaml b/roles/ensure-helm/defaults/main.yaml
new file mode 100644
index 0000000..e77eca7
--- /dev/null
+++ b/roles/ensure-helm/defaults/main.yaml
@@ -0,0 +1,2 @@
+---
+helm_version: 2.16.1 \ No newline at end of file
diff --git a/roles/ensure-helm/tasks/main.yaml b/roles/ensure-helm/tasks/main.yaml
new file mode 100644
index 0000000..b9c31a9
--- /dev/null
+++ b/roles/ensure-helm/tasks/main.yaml
@@ -0,0 +1,20 @@
+---
+- name: Download Helm
+ unarchive:
+ remote_src: true
+ src: "https://get.helm.sh/helm-v{{ helm_version }}-linux-amd64.tar.gz"
+ dest: /tmp
+
+- name: Install Helm
+ become: true
+ copy:
+ remote_src: true
+ src: /tmp/linux-amd64/helm
+ dest: /usr/local/bin/helm
+ mode: '0755'
+
+- name: Initialize Helm
+ shell: helm init --client-only
+ tags:
+ # NOTE(mnaser): The `helm` module does not support running init only.
+ - skip_ansible_lint \ No newline at end of file
diff --git a/roles/ensure-java/README.rst b/roles/ensure-java/README.rst
new file mode 100644
index 0000000..08eeb43
--- /dev/null
+++ b/roles/ensure-java/README.rst
@@ -0,0 +1,10 @@
+Ensure that Java is installed
+
+Installs the specified version of the JDK.
+
+**Role Variables**
+
+.. zuul:rolevar:: java_version
+ :default: 8
+
+ Version of Java to install.
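+
+For example, to install a different JDK (a sketch; assumes the
+distribution packages ``openjdk-11-jdk``)::
+
+ - hosts: all
+   roles:
+     - role: ensure-java
+       java_version: 11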
diff --git a/roles/ensure-java/defaults/main.yaml b/roles/ensure-java/defaults/main.yaml
new file mode 100644
index 0000000..9ed782d
--- /dev/null
+++ b/roles/ensure-java/defaults/main.yaml
@@ -0,0 +1 @@
+java_version: 8
diff --git a/roles/ensure-java/tasks/main.yaml b/roles/ensure-java/tasks/main.yaml
new file mode 100644
index 0000000..a0d4366
--- /dev/null
+++ b/roles/ensure-java/tasks/main.yaml
@@ -0,0 +1,5 @@
+- name: Install JDK
+ become: true
+ package:
+ name: openjdk-{{ java_version }}-jdk
+ state: present
diff --git a/roles/ensure-markdownlint/README.rst b/roles/ensure-markdownlint/README.rst
new file mode 100644
index 0000000..085b698
--- /dev/null
+++ b/roles/ensure-markdownlint/README.rst
@@ -0,0 +1 @@
+Ensure markdownlint-cli from NPM is installed.
diff --git a/roles/ensure-markdownlint/tasks/main.yaml b/roles/ensure-markdownlint/tasks/main.yaml
new file mode 100644
index 0000000..d45cb8e
--- /dev/null
+++ b/roles/ensure-markdownlint/tasks/main.yaml
@@ -0,0 +1,4 @@
+- name: Install markdownlint-cli
+ npm:
+ name: markdownlint-cli
+ path: ~/.markdownlint
diff --git a/roles/ensure-tox/README.rst b/roles/ensure-tox/README.rst
index 60e0cac..31a7a4a 100644
--- a/roles/ensure-tox/README.rst
+++ b/roles/ensure-tox/README.rst
@@ -2,3 +2,18 @@ Ensure tox is installed
If tox is not already installed, it will be installed via pip in the
user install directory (i.e., "pip install --user").
+
+**Role Variables**
+
+.. zuul:rolevar:: tox_executable
+ :default: ``tox``
+
+ Optional path to the tox executable.
+
+.. zuul:rolevar:: tox_prefer_python2
+ :default: ``true``
+
+ If tox is not detected, prefer to install tox under Python 2 instead of
+ Python 3. The default is currently ``true`` for compatibility reasons;
+ it will eventually be switched to ``false``. It's best to set this
+ explicitly to ``false`` when using this role.
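+
+A sketch of opting in to the Python 3 behavior::
+
+ - hosts: all
+   roles:
+     - role: ensure-tox
+       tox_prefer_python2: false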
diff --git a/roles/ensure-tox/defaults/main.yml b/roles/ensure-tox/defaults/main.yml
new file mode 100644
index 0000000..36efbb4
--- /dev/null
+++ b/roles/ensure-tox/defaults/main.yml
@@ -0,0 +1,2 @@
+tox_executable: tox
+tox_prefer_python2: true
diff --git a/roles/ensure-tox/tasks/main.yaml b/roles/ensure-tox/tasks/main.yaml
index b2b7487..ec44cc1 100644
--- a/roles/ensure-tox/tasks/main.yaml
+++ b/roles/ensure-tox/tasks/main.yaml
@@ -1,2 +1,32 @@
- name: Ensure tox is installed
- shell: type tox || pip install --user tox
+ shell: |
+ set -euo pipefail
+
+ {% if tox_prefer_python2 %}
+ if command -v pip; then
+ PIP=pip
+ elif command -v pip3; then
+ PIP=pip3
+ fi
+ {% else %}
+ if command -v pip3; then
+ PIP=pip3
+ elif command -v pip; then
+ PIP=pip
+ fi
+ {% endif %}
+
+ type tox || $PIP install --user tox
+ args:
+ executable: /bin/bash
+ register: result
+ changed_when: "'Successfully installed' in result.stdout"
+
+- name: Set tox_executable fact
+ set_fact:
+ tox_executable: "{{ ansible_user_dir }}/.local/bin/tox"
+ cacheable: true
+ when: result is changed
+
+- name: Output tox version
+ command: "{{ tox_executable }} --version"
diff --git a/roles/fetch-javascript-tarball/README.rst b/roles/fetch-javascript-tarball/README.rst
index 10df258..a09046a 100644
--- a/roles/fetch-javascript-tarball/README.rst
+++ b/roles/fetch-javascript-tarball/README.rst
@@ -6,3 +6,11 @@ Fetch a Javascript tarball back to be published.
:default: {{ zuul.project.src_dir }}
Directory to run npm in.
+
+.. zuul:rolevar:: zuul_use_fetch_output
+ :default: false
+
+ Whether to synchronize files to the executor work dir, or to copy them
+ into the output directory on the test instance.
+ When set to false, the role synchronizes the files to the executor.
+ When set to true, the job needs to use the fetch-output role later.
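+
+A sketch of enabling the fetch-output workflow for this role::
+
+ - hosts: all
+   roles:
+     - role: fetch-javascript-tarball
+       zuul_use_fetch_output: true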
diff --git a/roles/fetch-javascript-tarball/defaults/main.yaml b/roles/fetch-javascript-tarball/defaults/main.yaml
index 9739eb1..3de4191 100644
--- a/roles/fetch-javascript-tarball/defaults/main.yaml
+++ b/roles/fetch-javascript-tarball/defaults/main.yaml
@@ -1 +1,3 @@
zuul_work_dir: "{{ zuul.project.src_dir }}"
+zuul_output_dir: "{{ ansible_user_dir }}/zuul-output"
+zuul_use_fetch_output: "{{ zuul_site_use_fetch_output|default(false) }}"
diff --git a/roles/fetch-javascript-tarball/tasks/main.yaml b/roles/fetch-javascript-tarball/tasks/main.yaml
index 20bafcc..4906115 100644
--- a/roles/fetch-javascript-tarball/tasks/main.yaml
+++ b/roles/fetch-javascript-tarball/tasks/main.yaml
@@ -1,23 +1,25 @@
- name: Rename tarball for uploading
shell: |
- mkdir -p dist
- cp *.tgz dist/{{ zuul.project.short_name }}-{{ project_ver }}.tar.gz
- cp *.tgz dist/{{ zuul.project.short_name }}-latest.tar.gz
+ mkdir -p {{ zuul_output_dir }}/artifacts
+ cp *.tgz {{ zuul_output_dir }}/artifacts/{{ zuul.project.short_name }}-{{ project_ver }}.tar.gz
+ cp *.tgz {{ zuul_output_dir }}/artifacts/{{ zuul.project.short_name }}-latest.tar.gz
args:
chdir: "{{ zuul_work_dir }}"
tags:
# Ignore ANSIBLE0007: No need to use file module instead of mkdir
- skip_ansible_lint
-- name: Ensure artifacts directory exists
- file:
- path: "{{ zuul.executor.work_root }}/artifacts"
- state: directory
- delegate_to: localhost
+- block:
+ - name: Ensure artifacts directory exists
+ file:
+ path: "{{ zuul.executor.work_root }}/artifacts"
+ state: directory
+ delegate_to: localhost
-- name: Collect artifacts
- synchronize:
- dest: "{{ zuul.executor.work_root }}/artifacts/"
- mode: pull
- src: "{{ zuul_work_dir }}/dist/"
- verify_host: true
+ - name: Collect artifacts
+ synchronize:
+ dest: "{{ zuul.executor.work_root }}/artifacts/"
+ mode: pull
+ src: "{{ zuul_output_dir }}/artifacts/"
+ verify_host: true
+ when: not zuul_use_fetch_output
diff --git a/roles/fetch-markdownlint/README.rst b/roles/fetch-markdownlint/README.rst
new file mode 100644
index 0000000..2f3a253
--- /dev/null
+++ b/roles/fetch-markdownlint/README.rst
@@ -0,0 +1,9 @@
+Collect output from a markdownlint run. Assumes a single repository
+and a single node.
+
+**Role Variables**
+
+.. zuul:rolevar:: zuul_work_dir
+ :default: {{ zuul.project.src_dir }}
+
+ The location of the main working directory of the job.
diff --git a/roles/fetch-markdownlint/defaults/main.yaml b/roles/fetch-markdownlint/defaults/main.yaml
new file mode 100644
index 0000000..9739eb1
--- /dev/null
+++ b/roles/fetch-markdownlint/defaults/main.yaml
@@ -0,0 +1 @@
+zuul_work_dir: "{{ zuul.project.src_dir }}"
diff --git a/roles/fetch-markdownlint/tasks/main.yaml b/roles/fetch-markdownlint/tasks/main.yaml
new file mode 100644
index 0000000..5ad52b1
--- /dev/null
+++ b/roles/fetch-markdownlint/tasks/main.yaml
@@ -0,0 +1,12 @@
+- name: Check whether markdownlint.txt exists
+ register: stat_mdl_txt
+ stat:
+ path: "{{ zuul_work_dir }}/markdownlint.txt"
+
+- name: Store on executor
+ when: stat_mdl_txt.stat.exists
+ synchronize:
+ mode: pull
+ src: "{{ zuul_work_dir }}/markdownlint.txt"
+ dest: "{{ zuul.executor.log_root }}/markdownlint.txt"
+ verify_host: true
diff --git a/roles/fetch-output-openshift/README.rst b/roles/fetch-output-openshift/README.rst
new file mode 100644
index 0000000..a7ae6b4
--- /dev/null
+++ b/roles/fetch-output-openshift/README.rst
@@ -0,0 +1,26 @@
+Collect output from build pods
+
+This role can be used instead of the :zuul:role:`fetch-output` role when the
+synchronize module doesn't work with the kubectl connection.
+
+This role requires the origin client ``oc`` to be installed.
+
+**Role Variables**
+
+.. zuul:rolevar:: zuul_output_dir
+ :default: {{ ansible_user_dir }}/zuul-output
+
+ Base directory for collecting job output.
+
+.. zuul:rolevar:: openshift_pods
+ :default: {{ zuul.resources }}
+
+ A dictionary mapping pod names to pod information for the pods to
+ fetch the output from.
+
+.. zuul:rolevar:: zuul_log_verbose
+ :default: false
+
+ The synchronize task in this role outputs a lot of information. By
+ default, no_log is set to avoid overwhelming a reader of the logs.
+ Set this to true to disable that behavior if it becomes necessary
+ to debug this role.
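+
+A minimal sketch of a post-run playbook using this role; it assumes
+the pod resources come from the job's ``zuul.resources``::
+
+ - hosts: all
+   roles:
+     - role: fetch-output-openshift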
diff --git a/roles/fetch-output-openshift/defaults/main.yaml b/roles/fetch-output-openshift/defaults/main.yaml
new file mode 100644
index 0000000..14e08d3
--- /dev/null
+++ b/roles/fetch-output-openshift/defaults/main.yaml
@@ -0,0 +1,3 @@
+openshift_pods: "{{ zuul.resources }}"
+zuul_output_dir: "{{ ansible_user_dir }}/zuul-output"
+zuul_log_verbose: false
diff --git a/roles/fetch-output-openshift/tasks/main.yaml b/roles/fetch-output-openshift/tasks/main.yaml
new file mode 100644
index 0000000..28ba3b3
--- /dev/null
+++ b/roles/fetch-output-openshift/tasks/main.yaml
@@ -0,0 +1,29 @@
+- name: Set log path for multiple nodes
+ set_fact:
+ log_path: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}"
+ when: groups['all'] | length > 1
+
+- name: Set log path for single node
+ set_fact:
+ log_path: "{{ zuul.executor.log_root }}"
+ when: log_path is not defined
+
+- name: Ensure local output dirs
+ delegate_to: localhost
+ file:
+ path: "{{ item }}"
+ state: directory
+ with_items:
+ - "{{ log_path }}"
+ - "{{ log_path }}/npm"
+ - "{{ zuul.executor.work_root }}/artifacts"
+ - "{{ zuul.executor.work_root }}/docs"
+
+- include_tasks: rsync.yaml
+ when: item.1.pod is defined
+ loop: "{{ openshift_pods.items()|list }}"
+ run_once: true
+
+- name: Remove empty directory
+ command: find "{{ zuul.executor.work_root }}" -empty -type d -delete
+ delegate_to: localhost
diff --git a/roles/fetch-output-openshift/tasks/rsync.yaml b/roles/fetch-output-openshift/tasks/rsync.yaml
new file mode 100644
index 0000000..394fafa
--- /dev/null
+++ b/roles/fetch-output-openshift/tasks/rsync.yaml
@@ -0,0 +1,19 @@
+---
+- name: Fetch zuul-output from the pod
+ command: >
+ oc --context "{{ item.1.context }}"
+ --namespace "{{ item.1.namespace }}"
+ rsync -q --progress=false
+ {{ item.1.pod }}:{{ output.src }}/
+ {{ output.dst }}/
+ no_log: "{{ not zuul_log_verbose }}"
+ delegate_to: localhost
+ loop:
+ - src: "{{ zuul_output_dir }}/logs"
+ dst: "{{ log_path }}"
+ - src: "{{ zuul_output_dir }}/artifacts"
+ dst: "{{ zuul.executor.work_root }}/artifacts"
+ - src: "{{ zuul_output_dir }}/docs"
+ dst: "{{ zuul.executor.work_root }}/docs"
+ loop_control:
+ loop_var: output
diff --git a/roles/fetch-sphinx-tarball/tasks/html.yaml b/roles/fetch-sphinx-tarball/tasks/html.yaml
index 73c2a27..3ef5593 100644
--- a/roles/fetch-sphinx-tarball/tasks/html.yaml
+++ b/roles/fetch-sphinx-tarball/tasks/html.yaml
@@ -5,7 +5,7 @@
register: html_archive
- name: Archive HTML
- command: "tar -f {{ html_archive.path }} -C {{ zuul_work_dir }}/{{ sphinx_build_dir }}/html -cj ."
+ command: "tar -f {{ html_archive.path }} -C {{ zuul_work_dir }}/{{ sphinx_build_dir }}/html --exclude=.doctrees -cj ."
args:
warn: false
diff --git a/roles/fetch-subunit-output/README.rst b/roles/fetch-subunit-output/README.rst
index 283b1d7..61add6f 100644
--- a/roles/fetch-subunit-output/README.rst
+++ b/roles/fetch-subunit-output/README.rst
@@ -7,6 +7,21 @@ Collect subunit outputs
Directory to work in. It has to be a fully qualified path.
+.. zuul:rolevar:: fetch_subunit_output_additional_dirs
+ :default: []
+
+ List of additional directories which contain subunit files
+ to collect. The content of zuul_work_dir is always checked,
+ so it should not be added here.
+
.. zuul:rolevar:: tox_envlist
tox environment that was used to run the tests originally.
+
+.. zuul:rolevar:: zuul_use_fetch_output
+ :default: false
+
+ Whether to synchronize files to the executor work dir, or to copy them
+ into the output directory on the test instance.
+ When set to false, the role synchronizes the files to the executor.
+ When set to true, the job needs to use the fetch-output role later.
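+
+A sketch of collecting subunit files from an extra directory; the path
+is illustrative::
+
+ - hosts: all
+   roles:
+     - role: fetch-subunit-output
+       fetch_subunit_output_additional_dirs:
+         - /var/tmp/extra-tests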
diff --git a/roles/fetch-subunit-output/defaults/main.yaml b/roles/fetch-subunit-output/defaults/main.yaml
index 8cc97ff..0b04778 100644
--- a/roles/fetch-subunit-output/defaults/main.yaml
+++ b/roles/fetch-subunit-output/defaults/main.yaml
@@ -1,3 +1,6 @@
---
tox_envlist: ""
+fetch_subunit_output_additional_dirs: []
zuul_work_dir: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}"
+zuul_output_dir: "{{ ansible_user_dir }}/zuul-output"
+zuul_use_fetch_output: "{{ zuul_site_use_fetch_output|default(false) }}"
diff --git a/roles/fetch-subunit-output/tasks/main.yaml b/roles/fetch-subunit-output/tasks/main.yaml
index 496ed7d..741b5a2 100644
--- a/roles/fetch-subunit-output/tasks/main.yaml
+++ b/roles/fetch-subunit-output/tasks/main.yaml
@@ -9,11 +9,37 @@
- testr_command.rc == 0
- testr_command.stdout_lines
block:
+ - name: Get the list of directories with subunit files
+ set_fact:
+ all_subunit_dirs: "{{ [ zuul_work_dir ] + fetch_subunit_output_additional_dirs }}"
+
+ # Using an independent target file instead of sending the output
+ # to zuul_work_dir prevents issues related to zuul_work_dir being
+ # a relative path, which may happen despite what the documentation
+ # of this role claims.
+ - name: Create a temporary file to store the subunit stream
+ tempfile:
+ state: file
+ prefix: subunit.
+ register: temp_subunit_file
- name: Generate subunit file
shell:
- cmd: "{{ testr_command.stdout_lines[0] }} last --subunit > ./testrepository.subunit"
- chdir: "{{ zuul_work_dir }}"
+ cmd: "{{ testr_command.stdout_lines[0] }} last --subunit >>{{ temp_subunit_file.path }}"
+ chdir: "{{ item }}"
+ loop: "{{ all_subunit_dirs }}"
+
+ - name: Copy the combined subunit file to the zuul work directory
+ copy:
+ src: "{{ temp_subunit_file.path }}"
+ dest: "{{ zuul_work_dir }}/testrepository.subunit"
+ remote_src: yes
+
+ - name: Remove the temporary file
+ file:
+ name: "{{ temp_subunit_file.path }}"
+ state: absent
+ ignore_errors: true
- name: Process and fetch subunit results
include: process.yaml
diff --git a/roles/fetch-subunit-output/tasks/process.yaml b/roles/fetch-subunit-output/tasks/process.yaml
index 71edd0a..e9becfc 100644
--- a/roles/fetch-subunit-output/tasks/process.yaml
+++ b/roles/fetch-subunit-output/tasks/process.yaml
@@ -16,15 +16,31 @@
- testrepository.subunit
register: subunit_files
-- name: Compress subunit files
- archive:
- path: "{{ item.path }}"
- with_items: "{{ subunit_files.files }}"
-
- name: Collect test-results
synchronize:
dest: "{{ zuul.executor.log_root }}"
mode: pull
- src: "{{ item.path }}.gz"
+ src: "{{ item.path }}"
verify_host: true
with_items: "{{ subunit_files.files }}"
+ when: not zuul_use_fetch_output
+
+- name: Copy test-results
+ copy:
+ dest: "{{ zuul_output_dir }}/logs/"
+ src: "{{ item.path }}"
+ remote_src: true
+ with_items: "{{ subunit_files.files }}"
+ when: zuul_use_fetch_output
+
+- name: Return artifact to Zuul
+ zuul_return:
+ data:
+ zuul:
+ artifacts:
+ - name: "Unit Test Report"
+ url: "testr_results.html"
+ metadata:
+ type: unit_test_report
+ when: "'testr_results.html' in item.path"
+ with_items: "{{ subunit_files.files }}"
diff --git a/roles/fetch-tox-output/README.rst b/roles/fetch-tox-output/README.rst
index 7fe6f12..a20fb1f 100644
--- a/roles/fetch-tox-output/README.rst
+++ b/roles/fetch-tox-output/README.rst
@@ -16,3 +16,11 @@ Collect log output from a tox build
:default: {{ zuul.project.src_dir }}
Directory tox was run in.
+
+.. zuul:rolevar:: zuul_use_fetch_output
+ :default: false
+
+ Whether to synchronize files to the executor work dir, or to copy them
+ into the output directory on the test instance.
+ When set to false, the role synchronizes the files to the executor.
+ When set to true, the job needs to use the fetch-output role later.
diff --git a/roles/fetch-tox-output/defaults/main.yaml b/roles/fetch-tox-output/defaults/main.yaml
index dddc1c8..cce0be9 100644
--- a/roles/fetch-tox-output/defaults/main.yaml
+++ b/roles/fetch-tox-output/defaults/main.yaml
@@ -5,3 +5,6 @@ tox_envlist: venv
tox_executable: tox
zuul_work_dir: "{{ zuul.project.src_dir }}"
+
+zuul_output_dir: "{{ ansible_user_dir }}/zuul-output"
+zuul_use_fetch_output: "{{ zuul_site_use_fetch_output|default(false) }}"
diff --git a/roles/fetch-tox-output/tasks/main.yaml b/roles/fetch-tox-output/tasks/main.yaml
index 9dfb1bf..c6540cd 100644
--- a/roles/fetch-tox-output/tasks/main.yaml
+++ b/roles/fetch-tox-output/tasks/main.yaml
@@ -14,6 +14,12 @@
state: directory
delegate_to: localhost
+- name: Ensure zuul-output tox dir
+ file:
+ path: "{{ zuul_output_dir }}/logs/tox"
+ state: directory
+ when: zuul_use_fetch_output
+
- name: Set envlist fact
set_fact:
envlist: "{{ tox_envlist.split(',') }}"
@@ -24,12 +30,23 @@
args:
chdir: "{{ zuul_work_dir }}"
register: tox_environments
- when: tox_envlist is not defined or tox_envlist == 'all'
+ when: tox_envlist is not defined or tox_envlist|lower == 'all'
- name: Set envlist fact
set_fact:
envlist: "{{ tox_environments.stdout_lines }}"
- when: tox_envlist is not defined or tox_envlist == 'all'
+ when: tox_envlist is not defined or tox_envlist|lower == 'all'
+
+- name: Copy tox logs
+ copy:
+ dest: "{{ zuul_output_dir }}/logs/tox/"
+ src: "{{ zuul_work_dir }}/.tox/{{ item }}/log/"
+ remote_src: true
+ with_items: "{{ envlist }}"
+ # some tox runs may not create a virtualenv and thus have
+ # no .tox/env directory
+ failed_when: false
+ when: zuul_use_fetch_output
- name: Collect tox logs
synchronize:
@@ -41,3 +58,4 @@
# some tox runs may not create a virtualenv and thus have
# no ./tox/env directory
failed_when: false
+ when: not zuul_use_fetch_output
diff --git a/roles/generate-zuul-manifest/README.rst b/roles/generate-zuul-manifest/README.rst
index b61c0c1..6c6ee47 100644
--- a/roles/generate-zuul-manifest/README.rst
+++ b/roles/generate-zuul-manifest/README.rst
@@ -26,3 +26,9 @@ display logs from a build.
:default: zuul_manifest
The artifact type to return to Zuul.
+
+.. zuul:rolevar:: generate_zuul_manifest_index_links
+ :default: False
+
+ If True, the Zuul dashboard will link to "index.html" for directory
+ entries; if False, it will link to the bare directory.
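+
+For example, to link directory entries to their index pages (a sketch;
+the role normally runs against the executor's log root)::
+
+ - hosts: localhost
+   roles:
+     - role: generate-zuul-manifest
+       generate_zuul_manifest_index_links: true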
diff --git a/roles/generate-zuul-manifest/defaults/main.yaml b/roles/generate-zuul-manifest/defaults/main.yaml
index 01f26d7..a2d0c7c 100644
--- a/roles/generate-zuul-manifest/defaults/main.yaml
+++ b/roles/generate-zuul-manifest/defaults/main.yaml
@@ -2,3 +2,4 @@ generate_zuul_manifest_root: "{{ zuul.executor.log_root }}"
generate_zuul_manifest_filename: "zuul-manifest.json"
generate_zuul_manifest_output: "{{ zuul.executor.log_root }}/{{ generate_zuul_manifest_filename }}"
generate_zuul_manifest_type: "zuul_manifest"
+generate_zuul_manifest_index_links: False
diff --git a/roles/generate-zuul-manifest/library/generate_manifest.py b/roles/generate-zuul-manifest/library/generate_manifest.py
index 5518e53..81fed2c 100644
--- a/roles/generate-zuul-manifest/library/generate_manifest.py
+++ b/roles/generate-zuul-manifest/library/generate_manifest.py
@@ -87,10 +87,11 @@ def walk(root, original_root=None):
return data
-def run(root_path, output):
+def run(root_path, output, index_links):
data = walk(root_path, root_path)
with open(output, 'w') as f:
- f.write(json.dumps({'tree': data}))
+ f.write(json.dumps({'tree': data,
+ 'index_links': index_links}))
def ansible_main():
@@ -98,11 +99,12 @@ def ansible_main():
argument_spec=dict(
root=dict(type='path'),
output=dict(type='path'),
+ index_links=dict(type='bool', default=False),
)
)
p = module.params
- run(p.get('root'), p.get('output'))
+ run(p.get('root'), p.get('output'), p.get('index_links'))
module.exit_json(changed=True)
@@ -117,13 +119,15 @@ def cli_main():
help='Root of upload directory')
parser.add_argument('output',
help='Output file path')
+ parser.add_argument('--index-links', action='store_true',
+ help='Link to index.html instead of dirs')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
- run(args.root, args.output)
+ run(args.root, args.output, args.index_links)
if __name__ == '__main__':
diff --git a/roles/generate-zuul-manifest/library/test_generate_manifest.py b/roles/generate-zuul-manifest/library/test_generate_manifest.py
index a239523..69ce4a1 100644
--- a/roles/generate-zuul-manifest/library/test_generate_manifest.py
+++ b/roles/generate-zuul-manifest/library/test_generate_manifest.py
@@ -43,17 +43,10 @@ class SymlinkFixture(fixtures.Fixture):
]
def _setUp(self):
- self._cleanup()
for (src, target) in self.links:
path = os.path.join(FIXTURE_DIR, 'links', src)
os.symlink(target, path)
- self.addCleanup(self._cleanup)
-
- def _cleanup(self):
- for (src, target) in self.links:
- path = os.path.join(FIXTURE_DIR, 'links', src)
- if os.path.exists(path):
- os.unlink(path)
+ self.addCleanup(os.unlink, path)
class TestFileList(testtools.TestCase):
diff --git a/roles/generate-zuul-manifest/tasks/main.yaml b/roles/generate-zuul-manifest/tasks/main.yaml
index eedcda4..0934c9c 100644
--- a/roles/generate-zuul-manifest/tasks/main.yaml
+++ b/roles/generate-zuul-manifest/tasks/main.yaml
@@ -2,6 +2,7 @@
generate_manifest:
root: "{{ generate_zuul_manifest_root }}"
output: "{{ generate_zuul_manifest_output }}"
+ index_links: "{{ generate_zuul_manifest_index_links }}"
- name: Return Zuul manifest URL to Zuul
zuul_return:
diff --git a/roles/go/README.rst b/roles/go/README.rst
new file mode 100644
index 0000000..8fc8359
--- /dev/null
+++ b/roles/go/README.rst
@@ -0,0 +1,24 @@
+Run a go command in a source directory. Assumes the appropriate version
+of go has already been installed.
+
+**Role Variables**
+
+.. zuul:rolevar:: go_command
+
+ Go command to run. This parameter is mandatory.
+ Examples are "test", "run", or "build".
+
+.. zuul:rolevar:: go_package_dir
+
+ Directory of the affected go package.
+
+.. zuul:rolevar:: go_bin_path
+ :default: {{ go_install_dir }}/go/bin
+
+ Path to the go bin directory.
+
+.. zuul:rolevar:: zuul_work_dir
+ :default: {{ zuul.project.src_dir }}
+
+ Directory to run go in.
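+
+A sketch of running the tests of one package; the package path is
+illustrative::
+
+ - hosts: all
+   roles:
+     - role: go
+       go_command: test
+       go_package_dir: pkg/util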
diff --git a/roles/go/defaults/main.yaml b/roles/go/defaults/main.yaml
new file mode 100644
index 0000000..50f8d8d
--- /dev/null
+++ b/roles/go/defaults/main.yaml
@@ -0,0 +1,3 @@
+zuul_work_dir: "{{ zuul.project.src_dir }}"
+go_install_dir: "/usr/local"
+go_bin_path: "{{ go_install_dir }}/go/bin"
diff --git a/roles/go/tasks/main.yaml b/roles/go/tasks/main.yaml
new file mode 100644
index 0000000..a5b3dea
--- /dev/null
+++ b/roles/go/tasks/main.yaml
@@ -0,0 +1,13 @@
+- name: Require go_command variable
+ fail:
+ msg: go_command is required for this role
+ when: go_command is not defined
+
+- name: Run go command
+ command: >-
+ go {{ go_command }}
+ {% if go_package_dir is defined %}'./{{ go_package_dir }}'{% endif %}
+ args:
+ chdir: "{{ zuul_work_dir }}"
+ environment:
+ PATH: "{{ ansible_env.PATH }}:{{ go_bin_path }}"
diff --git a/roles/helm-template/README.rst b/roles/helm-template/README.rst
new file mode 100644
index 0000000..4717b0b
--- /dev/null
+++ b/roles/helm-template/README.rst
@@ -0,0 +1,17 @@
+Run Helm by templating the chart. This assumes that a Kubernetes cluster
+is already set up and that the Helm executable is installed.
+
+**Role Variables**
+
+.. zuul:rolevar:: helm_release_name
+
+ Helm release name (mandatory)
+
+.. zuul:rolevar:: helm_chart
+
+ Directory of the Helm chart.
+
+.. zuul:rolevar:: zuul_work_dir
+ :default: {{ zuul.project.src_dir }}
+
+ Directory in which to run helm.
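+
+A minimal sketch; the release name and chart directory are
+illustrative::
+
+ - hosts: all
+   roles:
+     - role: helm-template
+       helm_release_name: zuul
+       helm_chart: charts/my-app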
diff --git a/roles/helm-template/defaults/main.yaml b/roles/helm-template/defaults/main.yaml
new file mode 100644
index 0000000..9739eb1
--- /dev/null
+++ b/roles/helm-template/defaults/main.yaml
@@ -0,0 +1 @@
+zuul_work_dir: "{{ zuul.project.src_dir }}"
diff --git a/roles/helm-template/tasks/main.yaml b/roles/helm-template/tasks/main.yaml
new file mode 100644
index 0000000..77e0443
--- /dev/null
+++ b/roles/helm-template/tasks/main.yaml
@@ -0,0 +1,39 @@
+- name: Install dependencies
+ command: "helm dep up {{ helm_chart }}"
+ args:
+ chdir: "{{ zuul_work_dir }}"
+
+- name: Print templated charts
+ command: "helm template -n zuul {{ helm_chart }}"
+ args:
+ chdir: "{{ zuul_work_dir }}"
+
+- name: Deploy templated charts
+ shell: |
+ set -o pipefail
+ helm template -n {{ helm_release_name }} {{ helm_chart }} | kubectl apply -f-
+ args:
+ executable: /bin/bash
+ chdir: "{{ zuul_work_dir }}"
+
+# NOTE(mnaser): When a StatefulSet is deployed, it creates the pods one
+# by one, which means the `kubectl wait` can race if it
+# is run before the other pods are created. We instead
+# check all the StatefulSets here manually and then use
+# the second check below as a confirmation.
+- name: Wait for all StatefulSets to become ready
+ block:
+ - name: Retrieve all StatefulSets
+ command: kubectl get statefulset -o name
+ register: _statefulsets
+
+ - name: Ensure the number of ready replicas matches the replicas
+ shell: kubectl get {{ item }} -ogo-template='{{ '{{' }}eq .status.replicas .status.readyReplicas{{ '}}' }}'
+ register: _is_ready
+ until: _is_ready.stdout == 'true'
+ retries: 60
+ delay: 5
+ loop: "{{ _statefulsets.stdout_lines }}"
+
+- name: Wait for all pods to become ready
+ command: kubectl wait --for=condition=Ready --timeout=120s pod --all
diff --git a/roles/install-devstack/templates/local.conf.j2 b/roles/install-devstack/templates/local.conf.j2
index c2ac2c3..d880223 100644
--- a/roles/install-devstack/templates/local.conf.j2
+++ b/roles/install-devstack/templates/local.conf.j2
@@ -13,3 +13,4 @@ IPV4_ADDRS_SAFE_TO_USE=10.1.0.0/20
FLOATING_RANGE=172.24.5.0/24
PUBLIC_NETWORK_GATEWAY=172.24.5.1
LIBVIRT_TYPE=qemu
+USE_PYTHON3=True \ No newline at end of file
diff --git a/roles/install-docker/README.rst b/roles/install-docker/README.rst
index a83e3ad..0ffdd98 100644
--- a/roles/install-docker/README.rst
+++ b/roles/install-docker/README.rst
@@ -18,6 +18,12 @@ An ansible role to install docker and configure it to use mirrors if available.
By default this role adds repositories to install docker from upstream
docker. Set this to False to use the docker that comes with the distro.
+.. zuul:rolevar:: docker_compose_install
+ :default: False
+
+ This role does not install docker-compose by default, but you can use
+ this setting to install docker-compose as well.
+
.. zuul:rolevar:: docker_update_channel
:default: stable
@@ -75,3 +81,10 @@ An ansible role to install docker and configure it to use mirrors if available.
based on :zuul:rolevar:`install-docker.docker_download_fqdn`. When this
option is unset, the role will use distro specific variables which are
loaded at the time of execution.
+
+.. zuul:rolevar:: docker_userland_proxy
+ :type: bool
+
+ Set to false to disable the docker userland proxy. This variable is useful
+ when docker is causing routing problems, such as when a kubernetes deployment
+ is unable to reach its own service.
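+
+For example, to disable the userland proxy (a sketch)::
+
+ - hosts: all
+   roles:
+     - role: install-docker
+       docker_userland_proxy: false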
diff --git a/roles/install-docker/defaults/main.yaml b/roles/install-docker/defaults/main.yaml
index ab0d074..3e73d4b 100644
--- a/roles/install-docker/defaults/main.yaml
+++ b/roles/install-docker/defaults/main.yaml
@@ -4,6 +4,7 @@ docker_group: docker
# The default option will return an empty list which
# allows folks to override these lists as they see fit.
docker_distro_packages: "{{ _docker_distro_packages | default([]) }}"
+docker_compose_install: false
docker_upstream_distro_packages: "{{ _docker_upstream_distro_packages | default([]) }}"
docker_upstream_distro_required_packages: "{{ _docker_upstream_distro_required_packages | default([]) }}"
docker_update_channel: stable
diff --git a/roles/install-docker/tasks/docker-setup.yaml b/roles/install-docker/tasks/docker-setup.yaml
index dbc3ecc..e398c70 100644
--- a/roles/install-docker/tasks/docker-setup.yaml
+++ b/roles/install-docker/tasks/docker-setup.yaml
@@ -12,5 +12,37 @@
- "{{ docker_group }}"
append: yes
+- name: Update docker daemon configuration
+ when: docker_userland_proxy is defined
+ block:
+ - name: Check if docker daemon configuration exists
+ stat:
+ path: /etc/docker/daemon.json
+ register: docker_config_stat
+ - name: Load docker daemon configuration
+ when: docker_config_stat.stat.exists
+ slurp:
+ path: /etc/docker/daemon.json
+ register: docker_config
+ - name: Parse docker daemon configuration
+ when: docker_config_stat.stat.exists
+ set_fact:
+ docker_config: "{{ docker_config.content | b64decode | from_json }}"
+ - name: Set default docker daemon configuration
+ when: not docker_config_stat.stat.exists
+ set_fact:
+ docker_config: {}
+ - name: Add registry to docker daemon configuration
+ vars:
+ new_config:
+ userland-proxy: "{{ docker_userland_proxy }}"
+ set_fact:
+ docker_config: "{{ docker_config | combine(new_config) }}"
+ - name: Save docker daemon configuration
+ copy:
+ content: "{{ docker_config | to_nice_json }}"
+ dest: /etc/docker/daemon.json
+ become: true
+
- name: Reset ssh connection to pick up docker group
- meta: reset_connection \ No newline at end of file
+ meta: reset_connection
diff --git a/roles/install-docker/tasks/main.yaml b/roles/install-docker/tasks/main.yaml
index 86598e7..be1ff7e 100644
--- a/roles/install-docker/tasks/main.yaml
+++ b/roles/install-docker/tasks/main.yaml
@@ -57,6 +57,13 @@
- include_tasks: "docker-{{ (use_upstream_docker | bool) | ternary('upstream', 'distro') }}.yaml"
+- name: Install docker-compose
+ become: true
+ package:
+ name: docker-compose
+ state: present
+ when: docker_compose_install | bool
+
- name: Flush handlers before role exit
meta: flush_handlers
@@ -65,3 +72,11 @@
changed_when: false
args:
warn: no
+
+- name: Get version details
+ command: docker version
+ register: docker_installed_version
+
+- name: Dump installed docker details
+ debug:
+ msg: '{{ docker_installed_version }}'
diff --git a/roles/install-docker/tasks/upstream-package-installation.yaml b/roles/install-docker/tasks/upstream-package-installation.yaml
index 7e83127..0f22d1e 100644
--- a/roles/install-docker/tasks/upstream-package-installation.yaml
+++ b/roles/install-docker/tasks/upstream-package-installation.yaml
@@ -1,8 +1,20 @@
---
-
-- name: Install docker
+# The package/dnf modules do not support the `--nobest` option, which is
+# needed for installing docker-ce on CentOS 8.
+- name: Install upstream docker using package
+ when: not (ansible_os_family == 'RedHat' and ansible_distribution_major_version == '8')
package:
name: "{{ docker_upstream_distro_packages }}"
state: present
update_cache: yes
notify: Restart docker
+
+- name: Install upstream docker using shell
+ when: ansible_os_family == 'RedHat' and ansible_distribution_major_version == '8'
+ shell:
+ cmd: |
+ dnf install --nobest -y {{ docker_upstream_distro_packages | join(' ') }}
+ warn: false
+ register: result
+ changed_when: "'Complete!' in result.stdout"
+ notify: Restart docker
diff --git a/roles/install-go/README.rst b/roles/install-go/README.rst
new file mode 100644
index 0000000..169d3a7
--- /dev/null
+++ b/roles/install-go/README.rst
@@ -0,0 +1,17 @@
+Install go
+
+**Role Variables**
+
+.. zuul:rolevar:: go_install_dir
+ :default: /usr/local
+
+ Directory to install go in.
+
+.. zuul:rolevar:: go_version
+ :default: 1.13.5
+
+.. zuul:rolevar:: go_os
+ :default: {{ ansible_system | lower }}
+
+.. zuul:rolevar:: go_arch
+ :default: amd64 / 386
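+
+A sketch of pinning a specific go version::
+
+ - hosts: all
+   roles:
+     - role: install-go
+       go_version: "1.13.5"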
diff --git a/roles/install-go/defaults/main.yaml b/roles/install-go/defaults/main.yaml
new file mode 100644
index 0000000..8f4386f
--- /dev/null
+++ b/roles/install-go/defaults/main.yaml
@@ -0,0 +1,4 @@
+go_version: "1.13.5"
+go_os: "{{ ansible_system | lower }}"
+go_arch: "{{ go_arch_translation[ansible_architecture] }}"
+go_install_dir: "/usr/local"
diff --git a/roles/install-go/tasks/main.yaml b/roles/install-go/tasks/main.yaml
new file mode 100644
index 0000000..1b22842
--- /dev/null
+++ b/roles/install-go/tasks/main.yaml
@@ -0,0 +1,39 @@
+- name: Check installed go version
+ command: go version
+ register: go_installed_version
+ environment:
+ PATH: "{{ ansible_env.PATH }}:{{ go_install_dir }}/go/bin"
+ ignore_errors: yes
+
+- name: Skip if correct version of go is installed
+ meta: end_host
+ when:
+ - go_installed_version.rc == 0
+ - go_version == (go_installed_version.stdout|regex_replace(go_version_pattern, '\\g<version>'))
+ - go_os == (go_installed_version.stdout|regex_replace(go_version_pattern, '\\g<os>'))
+ - go_arch == (go_installed_version.stdout|regex_replace(go_version_pattern, '\\g<arch>'))
+
+- name: Create temp directory
+ tempfile:
+ state: directory
+ register: go_archive_tempdir
+
+- name: Get archive checksum
+ uri:
+ url: "https://dl.google.com/go/go{{ go_version }}.{{ go_os }}-{{ go_arch }}.tar.gz.sha256"
+ return_content: true
+ register: go_archive_checksum
+
+
+ get_url:
+ url: "https://dl.google.com/go/go{{ go_version }}.{{ go_os }}-{{ go_arch }}.tar.gz"
+ dest: "{{ go_archive_tempdir.path }}/go{{ go_version }}.{{ go_os }}-{{ go_arch }}.tar.gz"
+ checksum: "sha256:{{ go_archive_checksum.content }}"
+
+- name: Install go
+ unarchive:
+ src: "{{ go_archive_tempdir.path }}/go{{ go_version }}.{{ go_os }}-{{ go_arch }}.tar.gz"
+ dest: "{{ go_install_dir }}"
+ remote_src: yes
+ become: true
diff --git a/roles/install-go/vars/main.yaml b/roles/install-go/vars/main.yaml
new file mode 100644
index 0000000..c99ce7b
--- /dev/null
+++ b/roles/install-go/vars/main.yaml
@@ -0,0 +1,6 @@
+go_arch_translation:
+ amd64: amd64
+ x86_64: amd64
+ i386: 386
+
+go_version_pattern: ^go version go(?P<version>.*?) (?P<os>.*?)/(?P<arch>.*?)$
diff --git a/roles/install-javascript-packages/README.rst b/roles/install-javascript-packages/README.rst
index 96bb947..529c9df 100644
--- a/roles/install-javascript-packages/README.rst
+++ b/roles/install-javascript-packages/README.rst
@@ -6,3 +6,9 @@ Install javascript dependencies needed for a project
:default: {{ zuul.project.src_dir }}
The directory to work in.
+
+.. zuul:rolevar:: tox_constraints_file
+
+ Path to a pip constraints file. Will set the
+ ``UPPER_CONSTRAINTS_FILE`` environment variable. Useful if npm
+ ``postinstall`` runs tox.
diff --git a/roles/install-javascript-packages/defaults/main.yaml b/roles/install-javascript-packages/defaults/main.yaml
index 9739eb1..79e5a2e 100644
--- a/roles/install-javascript-packages/defaults/main.yaml
+++ b/roles/install-javascript-packages/defaults/main.yaml
@@ -1 +1,3 @@
+npm_environment:
+ DISPLAY: ':99'
zuul_work_dir: "{{ zuul.project.src_dir }}"
diff --git a/roles/install-javascript-packages/tasks/main.yaml b/roles/install-javascript-packages/tasks/main.yaml
index 3c56b89..70f3443 100644
--- a/roles/install-javascript-packages/tasks/main.yaml
+++ b/roles/install-javascript-packages/tasks/main.yaml
@@ -1,3 +1,25 @@
+- name: Check to see if the constraints file exists
+ stat:
+ path: "{{ tox_constraints_file }}"
+ get_checksum: false
+ get_mime: false
+ get_md5: false
+ register: stat_results
+ when: tox_constraints_file is defined
+
+- name: Fail if constraints file is missing
+ when: tox_constraints_file is defined and not stat_results.stat.exists
+ fail:
+ msg: tox_constraints_file is defined but was not found
+
+- name: Record file location
+ set_fact:
+ tox_constraints_env:
+ TOX_CONSTRAINTS_FILE: "{{ tox_constraints_file }}"
+ # Backward compatibility, to be removed
+ UPPER_CONSTRAINTS_FILE: "{{ tox_constraints_file }}"
+ when: tox_constraints_file is defined
+
- name: Check for yarn.lock file
stat:
path: "{{ zuul_work_dir }}/yarn.lock"
@@ -16,8 +38,7 @@
- name: Install npm dependencies
command: npm install --verbose
- environment:
- DISPLAY: ':99'
+ environment: "{{ npm_environment|combine(tox_constraints_env|default({})) }}"
args:
chdir: "{{ zuul_work_dir }}"
when: not yarn_lock.stat.exists
diff --git a/roles/install-kubernetes/README.rst b/roles/install-kubernetes/README.rst
index a487989..aaf75a1 100644
--- a/roles/install-kubernetes/README.rst
+++ b/roles/install-kubernetes/README.rst
@@ -11,3 +11,15 @@ An ansible role to install kubernetes.
:default: latest
The version of Minikube to install.
+
+.. zuul:rolevar:: minikube_dns_resolvers
+ :default: []
+
+ List of DNS resolvers to configure in k8s. Use this to override the
+ resolvers that are found by default.
+
+.. zuul:rolevar:: kubernetes_runtime
+ :default: docker
+
+ Which kubernetes runtime to use; values are ``docker`` or
+ ``cri-o``.
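+
+A sketch of selecting the cri-o runtime with an explicit DNS resolver;
+the resolver address is a placeholder::
+
+ - hosts: all
+   roles:
+     - role: install-kubernetes
+       kubernetes_runtime: cri-o
+       minikube_dns_resolvers:
+         - 1.1.1.1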
diff --git a/roles/install-kubernetes/defaults/main.yaml b/roles/install-kubernetes/defaults/main.yaml
index 6040df0..d9cc6f4 100644
--- a/roles/install-kubernetes/defaults/main.yaml
+++ b/roles/install-kubernetes/defaults/main.yaml
@@ -1,2 +1,4 @@
install_kubernetes_with_cluster: True
minikube_version: latest
+minikube_dns_resolvers: []
+kubernetes_runtime: docker
diff --git a/roles/install-kubernetes/tasks/crio.yaml b/roles/install-kubernetes/tasks/crio.yaml
new file mode 100644
index 0000000..bede49e
--- /dev/null
+++ b/roles/install-kubernetes/tasks/crio.yaml
@@ -0,0 +1,40 @@
+- name: Add project atomic PPA
+ apt_repository:
+ repo: ppa:projectatomic/ppa
+ become: true
+- name: Install packages
+ package:
+ name:
+ - cri-o-1.15
+ - containernetworking-plugins
+ - podman
+ - cri-tools
+ state: present
+ become: true
+- name: Fix conmon symlink
+ file:
+ src: /usr/bin/conmon
+ dest: /usr/libexec/crio/conmon
+ owner: root
+ group: root
+ state: link
+ become: true
+- name: Fix missing registries.conf
+ # See: https://github.com/containers/podman.io/issues/127
+ copy:
+ content: |
+ [registries.search]
+ registries = []
+ [registries.insecure]
+ registries = []
+ [registries.block]
+ registries = []
+ dest: /etc/containers/registries.conf
+ become: true
+- name: Set crio cgroup driver
+ ini_file:
+ path: /etc/crio/crio.conf
+ section: crio.runtime
+ option: cgroup_manager
+ value: '"cgroupfs"'
+ become: true
diff --git a/roles/install-kubernetes/tasks/minikube.yaml b/roles/install-kubernetes/tasks/minikube.yaml
index 706cc4c..a05d324 100644
--- a/roles/install-kubernetes/tasks/minikube.yaml
+++ b/roles/install-kubernetes/tasks/minikube.yaml
@@ -14,6 +14,10 @@
include_role:
name: install-docker
+- name: Install crio
+ when: kubernetes_runtime == 'cri-o'
+ include_tasks: crio.yaml
+
- name: Create .kube directory
file:
path: "{{ ansible_user_dir }}/.kube"
@@ -32,9 +36,25 @@
state: directory
mode: 0755
+- name: Default args
+ set_fact:
+ minikube_args: ""
+
+- name: Configure dns options if set
+ block:
+ - name: Write resolv.conf
+ template:
+ src: resolv.conf.j2
+ dest: "{{ ansible_user_dir }}/.minikube/k8s_resolv.conf"
+ mode: "0444"
+ - name: Set extra kube settings
+ set_fact:
+ minikube_args: "--extra-config=kubelet.resolv-conf={{ ansible_user_dir }}/.minikube/k8s_resolv.conf"
+ when: minikube_dns_resolvers
+
- name: Start Minikube
become: yes
- command: /tmp/minikube --vm-driver=none start
+ command: "/tmp/minikube start --v=7 --vm-driver=none --container-runtime={{ kubernetes_runtime }} {{ minikube_args }}"
environment:
MINIKUBE_WANTUPDATENOTIFICATION: false
MINIKUBE_WANTREPORTERRORPROMPT: false
diff --git a/roles/install-kubernetes/templates/resolv.conf.j2 b/roles/install-kubernetes/templates/resolv.conf.j2
new file mode 100644
index 0000000..657fc0d
--- /dev/null
+++ b/roles/install-kubernetes/templates/resolv.conf.j2
@@ -0,0 +1,3 @@
+{% for x in minikube_dns_resolvers %}
+nameserver {{ x }}
+{% endfor %}
diff --git a/roles/install-openshift/tasks/main.yaml b/roles/install-openshift/tasks/main.yaml
index d897efa..a36d229 100644
--- a/roles/install-openshift/tasks/main.yaml
+++ b/roles/install-openshift/tasks/main.yaml
@@ -25,6 +25,20 @@
state: absent
become: yes
+- name: Ensure "docker" group exists
+ become: true
+ group:
+ name: docker
+ state: present
+
+- name: Add user to docker group
+ become: true
+ user:
+ name: "{{ ansible_user }}"
+ groups:
+ - docker
+ append: yes
+
- name: Start docker service
service:
name: docker
@@ -41,3 +55,12 @@
- origin-pod
- origin
become: yes
+
+- name: Set group ownership of docker socket
+ become: true
+ file:
+ path: /var/run/docker.sock
+ group: docker
+
+- name: Reset ssh connection to pick up docker group
+ meta: reset_connection
diff --git a/roles/install-podman/README.rst b/roles/install-podman/README.rst
new file mode 100644
index 0000000..0103e63
--- /dev/null
+++ b/roles/install-podman/README.rst
@@ -0,0 +1,4 @@
+Install podman container manager
+
+**Role Variables**
+
diff --git a/roles/install-podman/tasks/Ubuntu.yaml b/roles/install-podman/tasks/Ubuntu.yaml
new file mode 100644
index 0000000..4725573
--- /dev/null
+++ b/roles/install-podman/tasks/Ubuntu.yaml
@@ -0,0 +1,14 @@
+- name: Install projectatomic PPA
+ apt_repository:
+ repo: 'ppa:projectatomic/ppa'
+ state: present
+ become: yes
+
+- name: Install podman
+ package:
+ name:
+ - podman
+ - uidmap
+ - slirp4netns
+ state: present
+ become: yes
diff --git a/roles/install-podman/tasks/default.yaml b/roles/install-podman/tasks/default.yaml
new file mode 100644
index 0000000..43657ee
--- /dev/null
+++ b/roles/install-podman/tasks/default.yaml
@@ -0,0 +1,3 @@
+- name: Not implemented
+ fail:
+ msg: Role not implemented on this platform yet
diff --git a/roles/install-podman/tasks/main.yaml b/roles/install-podman/tasks/main.yaml
new file mode 100644
index 0000000..6c8da73
--- /dev/null
+++ b/roles/install-podman/tasks/main.yaml
@@ -0,0 +1,8 @@
+- name: Find distribution installation
+ include: "{{ item }}"
+ static: no
+ with_first_found:
+ - "{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yaml"
+ - "{{ ansible_distribution }}.yaml"
+ - "{{ ansible_os_family }}.yaml"
+ - "default.yaml"
diff --git a/roles/markdownlint/README.rst b/roles/markdownlint/README.rst
new file mode 100644
index 0000000..b8d8615
--- /dev/null
+++ b/roles/markdownlint/README.rst
@@ -0,0 +1,8 @@
+Run markdownlint against all markdown files in the given project.
+
+**Role Variables**
+
+.. zuul:rolevar:: zuul_work_dir
+ :default: {{ zuul.project.src_dir }}
+
+ Directory to search for markdown files in.
diff --git a/roles/markdownlint/defaults/main.yaml b/roles/markdownlint/defaults/main.yaml
new file mode 100644
index 0000000..9739eb1
--- /dev/null
+++ b/roles/markdownlint/defaults/main.yaml
@@ -0,0 +1 @@
+zuul_work_dir: "{{ zuul.project.src_dir }}"
diff --git a/roles/markdownlint/tasks/main.yaml b/roles/markdownlint/tasks/main.yaml
new file mode 100644
index 0000000..9f58e6b
--- /dev/null
+++ b/roles/markdownlint/tasks/main.yaml
@@ -0,0 +1,16 @@
+- name: Find all .md files
+ find:
+ paths: "{{ zuul_work_dir }}"
+ pattern: "*.md"
+ register: markdown_find
+
+- name: Run markdownlint
+ shell: |
+ set -o pipefail
+ set -e
+ ~/.markdownlint/node_modules/.bin/markdownlint {{ item|relpath(zuul_work_dir) }} 2>&1 | tee -a markdownlint.txt
+ args:
+ chdir: "{{ zuul_work_dir }}"
+ executable: /bin/bash
+ with_items: "{{ markdown_find.files|map(attribute='path')|list }}"
+ changed_when: false
diff --git a/roles/mirror-workspace-git-repos/tasks/main.yaml b/roles/mirror-workspace-git-repos/tasks/main.yaml
index 4fe069d..41cefa9 100644
--- a/roles/mirror-workspace-git-repos/tasks/main.yaml
+++ b/roles/mirror-workspace-git-repos/tasks/main.yaml
@@ -29,6 +29,8 @@
shell: |
# Reset is needed because we pushed to a non-bare repo
git reset --hard
+ # Clean is needed because we pushed to a non-bare repo
+ git clean -xdf
# Undo the config setting we did above
git config --local --unset receive.denyCurrentBranch
# checkout the branch matching the branch set up by the executor
diff --git a/roles/multi-node-bridge/tasks/common.yaml b/roles/multi-node-bridge/tasks/common.yaml
index 4b9a492..8fda29f 100644
--- a/roles/multi-node-bridge/tasks/common.yaml
+++ b/roles/multi-node-bridge/tasks/common.yaml
@@ -40,7 +40,18 @@
become: yes
package:
name: "{{ ovs_package }}"
- state: installed
+ state: present
+ when:
+ - ansible_distribution != 'Gentoo'
+
+- name: Install openvswitch (Gentoo)
+ become: yes
+ package:
+ name: "{{ ovs_package }}"
+ state: present
+ jobs: 8
+ when:
+ - ansible_distribution == 'Gentoo'
- name: Ensure openvswitch is started
become: yes
diff --git a/roles/multi-node-bridge/templates/zuul-multi-node-bridge-ovs.repo.j2 b/roles/multi-node-bridge/templates/zuul-multi-node-bridge-ovs.repo.j2
index 796111b..eab30dc 100644
--- a/roles/multi-node-bridge/templates/zuul-multi-node-bridge-ovs.repo.j2
+++ b/roles/multi-node-bridge/templates/zuul-multi-node-bridge-ovs.repo.j2
@@ -12,8 +12,14 @@ enabled=1
gpgkey=file:///tmp/RPM-GPG-KEY-CentOS-SIG-Cloud
{% elif ansible_distribution == 'RedHat' and ansible_distribution_major_version|int >= 8 %}
[RDO-RHEL8-deps]
-name=RedHat Openstack deps repo
+name=RedHat OpenStack deps repo
baseurl=https://trunk.rdoproject.org/rhel8-master/deps/latest/
gpgcheck=0
enabled=1
+{% elif ansible_distribution == 'CentOS' and ansible_distribution_major_version|int >= 8 %}
+[RDO-CentOS8-deps]
+name=CentOS OpenStack deps repo
+baseurl=https://trunk.rdoproject.org/centos8-master/deps/latest/
+gpgcheck=0
+enabled=1
{% endif %}
diff --git a/roles/multi-node-bridge/vars/CentOS.yaml b/roles/multi-node-bridge/vars/CentOS.yaml
index 1961892..6f26a79 100644
--- a/roles/multi-node-bridge/vars/CentOS.yaml
+++ b/roles/multi-node-bridge/vars/CentOS.yaml
@@ -1,3 +1,8 @@
---
-ovs_package: "openvswitch"
+ovs_package: >-
+ {% if ansible_distribution_major_version|int >= 8 -%}
+ rhosp-openvswitch
+ {%- else -%}
+ openvswitch
+ {%- endif %}
ovs_service: "openvswitch"
diff --git a/roles/pause-buildset-registry/README.rst b/roles/pause-buildset-registry/README.rst
new file mode 100644
index 0000000..695bb88
--- /dev/null
+++ b/roles/pause-buildset-registry/README.rst
@@ -0,0 +1,10 @@
+Pause a buildset registry
+
+Utility role to pause a job providing a buildset registry.
+
+**Role Variables**
+
+.. zuul:rolevar:: buildset_registry
+
+ Location of an external buildset registry. If this is defined,
+ the job will not pause.
diff --git a/roles/pause-buildset-registry/tasks/main.yaml b/roles/pause-buildset-registry/tasks/main.yaml
new file mode 100644
index 0000000..e7c1a26
--- /dev/null
+++ b/roles/pause-buildset-registry/tasks/main.yaml
@@ -0,0 +1,10 @@
+# If buildset_registry is defined, that means a parent job is running it;
+# only if it is not defined does it mean that we are running it. If we
+# are running it, pause the job so that child jobs will automatically
+# use it.
+- name: Pause the job
+ when: buildset_registry is not defined
+ zuul_return:
+ data:
+ zuul:
+ pause: true
diff --git a/roles/persistent-firewall/tasks/main.yaml b/roles/persistent-firewall/tasks/main.yaml
index 984cf4e..ea22b4e 100644
--- a/roles/persistent-firewall/tasks/main.yaml
+++ b/roles/persistent-firewall/tasks/main.yaml
@@ -1,16 +1,22 @@
- name: List current ipv4 rules
become: yes
- command: iptables-save
+ # Using shell to try to debug why this task sometimes returns -13 when run
+ shell: iptables-save
changed_when: false
failed_when: false
register: iptables_rules
+ tags:
+ - skip_ansible_lint
- name: List current ipv6 rules
become: yes
- command: ip6tables-save
+ # Using shell to try to debug why this task sometimes returns -13 when run
+ shell: ip6tables-save
changed_when: false
failed_when: false
register: ip6tables_rules
+ tags:
+ - skip_ansible_lint
- name: Configure persistent iptables rules
include: "{{ item }}"
diff --git a/roles/persistent-firewall/tasks/persist/Debian.yaml b/roles/persistent-firewall/tasks/persist/Debian.yaml
index 8f4a04c..ecf6ce4 100644
--- a/roles/persistent-firewall/tasks/persist/Debian.yaml
+++ b/roles/persistent-firewall/tasks/persist/Debian.yaml
@@ -2,7 +2,7 @@
become: yes
package:
name: iptables-persistent
- state: installed
+ state: present
- name: Persist ipv4 rules
become: yes
diff --git a/roles/persistent-firewall/tasks/persist/Suse.yaml b/roles/persistent-firewall/tasks/persist/Suse.yaml
index c37b46a..d3d3a8f 100644
--- a/roles/persistent-firewall/tasks/persist/Suse.yaml
+++ b/roles/persistent-firewall/tasks/persist/Suse.yaml
@@ -10,27 +10,11 @@
content: "{{ ip6tables_rules.stdout }}"
dest: "/etc/sysconfig/ip6tables"
-- name: Set up SuSEfirewall2 custom rules to be loaded
- become: yes
- replace:
- path: /etc/sysconfig/SuSEfirewall2
- regexp: '^FW_CUSTOMRULES=.*$'
- replace: 'FW_CUSTOMRULES="/etc/sysconfig/scripts/SuSEfirewall2-custom"'
-
-- name: Configure SuSEfirewall2 to restore saved rules on restart
+- name: Configure rc.local to restore saved rules on restart
become: yes
blockinfile:
- path: /etc/sysconfig/scripts/SuSEfirewall2-custom
- insertafter: EOF
+ path: /etc/init.d/boot.local
+ insertbefore: "exit 0"
content: |
- fw_custom_after_finished() {
- /usr/sbin/iptables-restore /etc/sysconfig/iptables
- /usr/sbin/ip6tables-restore /etc/sysconfig/ip6tables
- }
-
-- name: Ensure SuSEfirewall2 is started
- become: yes
- service:
- name: SuSEfirewall2
- state: started
- enabled: yes
+ iptables-restore /etc/sysconfig/iptables
+ ip6tables-restore /etc/sysconfig/ip6tables
diff --git a/roles/persistent-firewall/tasks/persist/Ubuntu_trusty.yaml b/roles/persistent-firewall/tasks/persist/Ubuntu_trusty.yaml
index a5cea2d..b5dff46 100644
--- a/roles/persistent-firewall/tasks/persist/Ubuntu_trusty.yaml
+++ b/roles/persistent-firewall/tasks/persist/Ubuntu_trusty.yaml
@@ -2,7 +2,7 @@
become: yes
package:
name: iptables-persistent
- state: installed
+ state: present
- name: Persist ipv4 rules
become: yes
diff --git a/roles/prepare-workspace-openshift/README.rst b/roles/prepare-workspace-openshift/README.rst
new file mode 100644
index 0000000..46057f4
--- /dev/null
+++ b/roles/prepare-workspace-openshift/README.rst
@@ -0,0 +1,16 @@
+Prepare remote workspaces in OpenShift
+
+This role can be used instead of the :zuul:role:`prepare-workspace`
+role when the synchronize module doesn't work with the kubectl
+connection. It copies the prepared source repos to each pod's cwd
+using the ``oc rsync`` command.
+
+This role is intended to run once before any other role in a Zuul job.
+This role requires the origin-clients to be installed.
+
+**Role Variables**
+
+.. zuul:rolevar:: openshift_pods
+ :default: {{ zuul.resources }}
+
+ A dictionary mapping pod names to pod information for the pods to
+ copy the sources to.
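A minimal sketch of a pre-run playbook using this role, assuming the
default ``openshift_pods`` value is sufficient:

  - hosts: all
    roles:
      - role: prepare-workspace-openshift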
diff --git a/roles/prepare-workspace-openshift/defaults/main.yaml b/roles/prepare-workspace-openshift/defaults/main.yaml
new file mode 100644
index 0000000..fa94895
--- /dev/null
+++ b/roles/prepare-workspace-openshift/defaults/main.yaml
@@ -0,0 +1 @@
+openshift_pods: "{{ zuul.resources }}"
diff --git a/roles/prepare-workspace-openshift/tasks/main.yaml b/roles/prepare-workspace-openshift/tasks/main.yaml
new file mode 100644
index 0000000..0d6d50b
--- /dev/null
+++ b/roles/prepare-workspace-openshift/tasks/main.yaml
@@ -0,0 +1,4 @@
+---
+- include_tasks: rsync.yaml
+ when: item.1.pod is defined
+ loop: "{{ openshift_pods.items()|list }}"
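Each ``item`` here is a ``(name, info)`` pair produced by ``.items()``, so
``item.1.pod`` in the rsync tasks refers to the pod inside the resource
description. A hypothetical ``openshift_pods`` entry might look like:

  openshift_pods:
    worker:                        # hypothetical resource name
      context: default/cluster/zuul
      namespace: zuul-jobs
      pod: worker-pod-abc123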
diff --git a/roles/prepare-workspace-openshift/tasks/rsync.yaml b/roles/prepare-workspace-openshift/tasks/rsync.yaml
new file mode 100644
index 0000000..c90c4ed
--- /dev/null
+++ b/roles/prepare-workspace-openshift/tasks/rsync.yaml
@@ -0,0 +1,17 @@
+---
+- name: Create src directory
+ command: >
+ oc --context "{{ item.1.context }}"
+ --namespace "{{ item.1.namespace }}"
+ exec {{ item.1.pod }} mkdir src
+ delegate_to: localhost
+
+- name: Copy src repos to the pod
+ command: >
+ oc --context "{{ item.1.context }}"
+ --namespace "{{ item.1.namespace }}"
+ rsync -q --progress=false
+ {{ zuul.executor.src_root }}/
+ {{ item.1.pod }}:src/
+ no_log: true
+ delegate_to: localhost
diff --git a/roles/promote-docker-image/tasks/main.yaml b/roles/promote-docker-image/tasks/main.yaml
index 9f1d030..4116f3f 100644
--- a/roles/promote-docker-image/tasks/main.yaml
+++ b/roles/promote-docker-image/tasks/main.yaml
@@ -1,10 +1,12 @@
- name: Verify repository names
when: |
docker_credentials.repository is defined
- and not item.repository | regex_search(docker_credentials.repository)
+ and not zj_image.repository | regex_search(docker_credentials.repository)
loop: "{{ docker_images }}"
+ loop_control:
+ loop_var: zj_image
fail:
- msg: "{{ item.repository }} not permitted by {{ docker_credentials.repository }}"
+ msg: "{{ zj_image.repository }} not permitted by {{ docker_credentials.repository }}"
# This is used by the delete tasks
- name: Get dockerhub JWT token
no_log: true
diff --git a/roles/pull-from-intermediate-registry/tasks/main.yaml b/roles/pull-from-intermediate-registry/tasks/main.yaml
index 4623a63..236bba1 100644
--- a/roles/pull-from-intermediate-registry/tasks/main.yaml
+++ b/roles/pull-from-intermediate-registry/tasks/main.yaml
@@ -74,18 +74,25 @@
mode: 0600
# Pull the images
+
+# To support usage with both docker and podman, the buildset registry
+# keeps "docker.io" entries un-namespaced, while entries for any other
+# registry are namespaced. Therefore, if we see docker.io in the
+# repository name, we strip it here.
- name: Pull artifacts from intermediate registry
block:
- name: Pull artifacts from intermediate registry
command: >-
skopeo --insecure-policy copy
- {{ item.url }}
- docker://127.0.0.1:{{ socat_port }}/{{ item.metadata.repository }}:{{ item.metadata.tag }}
+ {{ zj_zuul_artifact.url }}
+ docker://127.0.0.1:{{ socat_port }}/{{ zj_zuul_artifact.metadata.repository | regex_replace('^docker\.io/(.*)', '\1') }}:{{ zj_zuul_artifact.metadata.tag }}
retries: 3
register: result
until: result is success
- when: "'metadata' in item and item.metadata.type | default('') == 'container_image'"
+ when: "'metadata' in zj_zuul_artifact and zj_zuul_artifact.metadata.type | default('') == 'container_image'"
loop: "{{ zuul.artifacts | default([]) }}"
+ loop_control:
+ loop_var: zj_zuul_artifact
always:
- name: Remove docker user config
command: "shred ~/.docker/config.json"
diff --git a/roles/push-to-intermediate-registry/tasks/push-image.yaml b/roles/push-to-intermediate-registry/tasks/push-image.yaml
index 44f76a5..c2bc307 100644
--- a/roles/push-to-intermediate-registry/tasks/push-image.yaml
+++ b/roles/push-to-intermediate-registry/tasks/push-image.yaml
@@ -1,7 +1,11 @@
+# To support usage with both docker and podman, the buildset registry
+# keeps "docker.io" entries un-namespaced, while entries for any other
+# registry are namespaced. Therefore, if we see docker.io in the
+# repository name, we strip it here.
- name: Push tag to intermediate registry
command: >-
skopeo --insecure-policy copy
- docker://127.0.0.1:{{ socat_port }}/{{ image.repository }}:{{ image_tag }}
+ docker://127.0.0.1:{{ socat_port }}/{{ image.repository | regex_replace('^docker\.io/(.*)', '\1') }}:{{ image_tag }}
docker://{{ intermediate_registry.host | ipwrap }}:{{ intermediate_registry.port }}/{{ image.repository }}:{{ zuul.build }}_{{ image_tag }}
retries: 3
register: result
diff --git a/roles/run-buildset-registry/README.rst b/roles/run-buildset-registry/README.rst
index 4f93764..4f9f91b 100644
--- a/roles/run-buildset-registry/README.rst
+++ b/roles/run-buildset-registry/README.rst
@@ -1,9 +1,8 @@
-Runs a docker registry for the use of this buildset.
+Runs a container registry for the use of this buildset.
This may be used for a single job running on a single node, or it may
be used at the root of a job graph so that multiple jobs running for a
-single change can share the registry. Two registry endpoints are
-provided -- one is a local registry, the second is an upstream proxy.
+single change can share the registry.
**Role Variables**
@@ -12,6 +11,17 @@ provided -- one is a local registry, the second is an upstream proxy.
Path for the registry volumes.
+.. zuul:rolevar:: buildset_registry_port
+ :default: 5000
+
+ The port on which the registry should listen.
+
+.. zuul:rolevar:: container_command
+ :default: docker
+
+ The command to use to run the registry container (e.g., ``podman``).
+
+
**Return Values**
.. zuul:rolevar:: buildset_registry
@@ -26,10 +36,6 @@ provided -- one is a local registry, the second is an upstream proxy.
The port on which the registry is listening.
- .. zuul:rolevar:: proxy_port
-
- The port on which the proxy is listening.
-
.. zuul:rolevar:: username
The username used to access the registry via HTTP basic auth.
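For illustration, a job consuming these return values might log in to the
registry as follows (a sketch only; it assumes docker is available on the
node and that the role has already exported ``buildset_registry``):

  - name: Log in to the buildset registry
    command: >-
      docker login
      --username {{ buildset_registry.username }}
      --password {{ buildset_registry.password }}
      {{ buildset_registry.host }}:{{ buildset_registry.port }}
    no_log: true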
diff --git a/roles/run-buildset-registry/defaults/main.yaml b/roles/run-buildset-registry/defaults/main.yaml
index 37c0730..7c24e65 100644
--- a/roles/run-buildset-registry/defaults/main.yaml
+++ b/roles/run-buildset-registry/defaults/main.yaml
@@ -1 +1,3 @@
buildset_registry_root: "{{ ansible_user_dir }}/buildset_registry"
+buildset_registry_port: 5000
+container_command: docker
diff --git a/roles/run-buildset-registry/tasks/main.yaml b/roles/run-buildset-registry/tasks/main.yaml
index 3a5291f..11504c4 100644
--- a/roles/run-buildset-registry/tasks/main.yaml
+++ b/roles/run-buildset-registry/tasks/main.yaml
@@ -2,105 +2,67 @@
become: yes
package:
name:
- - python-docker
- - python-openssl
+ - openssl
- python-passlib
- - python-bcrypt
+ - socat
state: present
- when: "'python3' not in ansible_python_interpreter"
+ when: ansible_python_version is version('3', '<')
- name: Install packages
become: yes
package:
name:
- - python3-docker
- - python3-openssl
+ - openssl
- python3-passlib
- - python3-bcrypt
+ - socat
state: present
- when: "'python3' in ansible_python_interpreter"
-- name: Ensure Docker registry volume directories exists
+ when: ansible_python_version is version('3', '>=')
+- name: Ensure registry volume directories exist
file:
state: directory
- path: "{{ buildset_registry_root }}/{{ item }}"
+ path: "{{ buildset_registry_root }}/{{ zj_dir }}"
loop:
- - certs
- - auth
-- name: Generate registry password
+ - tls
+ - conf
+ loop_control:
+ loop_var: zj_dir
+- name: Generate registry secrets
set_fact:
registry_password: "{{ lookup('password', '/dev/null') }}"
-- name: Write htpassword file
- htpasswd:
- create: true
- crypt_scheme: bcrypt
- path: "{{ buildset_registry_root }}/auth/htpasswd"
- name: "zuul"
- password: "{{ registry_password }}"
-- name: Generate a TLS key for the Docker registry
- openssl_privatekey:
- path: "{{ buildset_registry_root }}/certs/domain.key"
-- name: Generate a TLS CSR for the Docker registry
- openssl_csr:
- path: "{{ buildset_registry_root }}/certs/domain.csr"
- privatekey_path: "{{ buildset_registry_root }}/certs/domain.key"
- common_name: "{{ ansible_host }}"
- subject_alt_name: "DNS:zuul-jobs.buildset-registry,DNS:{{ ansible_host }},IP:{{ ansible_host }},IP:127.0.0.1"
-- name: Generate a TLS cert for the Docker registry
- openssl_certificate:
- path: "{{ buildset_registry_root }}/certs/domain.crt"
- csr_path: "{{ buildset_registry_root }}/certs/domain.csr"
- privatekey_path: "{{ buildset_registry_root }}/certs/domain.key"
- provider: selfsigned
- register: generated_cert
+ registry_secret: "{{ lookup('password', '/dev/null') }}"
+- name: Write registry config
+ template:
+ src: registry.yaml.j2
+ dest: "{{ buildset_registry_root }}/conf/registry.yaml"
+- name: Generate a TLS key for the registry
+ command: "openssl req -x509 -newkey rsa:2048 -keyout {{ buildset_registry_root }}/tls/cert.key -out {{ buildset_registry_root }}/tls/cert.pem -days 365 -nodes -subj '/C=US/ST=California/L=Oakland/O=Company Name/OU=Org/CN={{ ansible_host }}' -addext 'subjectAltName = DNS:zuul-jobs.buildset-registry,DNS:{{ ansible_host }},IP:{{ ansible_host }},IP:127.0.0.1'"
- name: Read TLS certificate
slurp:
- src: "{{ generated_cert.filename }}"
+ src: "{{ buildset_registry_root }}/tls/cert.pem"
register: certificate
- name: Decode TLS certificate
set_fact:
certificate: "{{ certificate.content | b64decode }}"
-- name: Start a docker registry
- docker_container:
- name: buildset_registry
- image: registry:2
- state: started
- restart_policy: always
- ports:
- - "5000:5000"
- env:
- REGISTRY_HTTP_TLS_CERTIFICATE: /certs/domain.crt
- REGISTRY_HTTP_TLS_KEY: /certs/domain.key
- REGISTRY_AUTH: htpasswd
- REGISTRY_AUTH_HTPASSWD_PATH: /auth/htpasswd
- REGISTRY_AUTH_HTPASSWD_REALM: Registry Realm
- volumes:
- - "{{ buildset_registry_root }}/certs:/certs"
- - "{{ buildset_registry_root }}/auth:/auth"
-- name: Start a docker proxy
- docker_container:
- name: buildset_proxy
- image: registry:2
- state: started
- restart_policy: always
- ports:
- - "5001:5000"
- env:
- REGISTRY_HTTP_TLS_CERTIFICATE: /certs/domain.crt
- REGISTRY_HTTP_TLS_KEY: /certs/domain.key
- REGISTRY_AUTH: htpasswd
- REGISTRY_AUTH_HTPASSWD_PATH: /auth/htpasswd
- REGISTRY_AUTH_HTPASSWD_REALM: Registry Realm
- REGISTRY_PROXY_REMOTEURL: https://registry-1.docker.io
- REGISTRY_PROXY_USERNAME: ''
- REGISTRY_PROXY_PASSWORD: ''
- volumes:
- - "{{ buildset_registry_root }}/certs:/certs"
- - "{{ buildset_registry_root }}/auth:/auth"
+- name: Start the buildset registry
+ command: >-
+ {{ container_command }} run -d
+ --name="{{ (buildset_registry_port == 5000) | ternary('buildset_registry', 'buildset_registry_' + buildset_registry_port|string) }}"
+ --restart=always
+ --publish="1{{ buildset_registry_port }}:5000"
+ --volume="{{ buildset_registry_root }}/tls:/tls"
+ --volume="{{ buildset_registry_root }}/conf:/conf"
+ docker.io/zuul/zuul-registry:latest
+
+# Start a socat tunnel to the buildset registry to work around
+# https://github.com/containers/libpod/issues/4311
+# in case we're using podman.
+- name: Start socat to work around https://github.com/containers/libpod/issues/4311
+ shell: "socat -d -d TCP6-LISTEN:{{ buildset_registry_port }},fork TCP:127.0.0.1:1{{ buildset_registry_port }} 2> {{ buildset_registry_root }}/socat_port &"
+
- name: Set registry information fact
set_fact:
buildset_registry:
host: "{{ ansible_host }}"
- port: 5000
- proxy_port: 5001
+ port: "{{ buildset_registry_port }}"
username: zuul
password: "{{ registry_password }}"
cert: "{{ certificate }}"
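A worked example of the port prefixing above, assuming the default
``buildset_registry_port`` of 5000:

  # container publishes host port 15000 -> container port 5000
  # socat listens on [::]:5000 and forwards to 127.0.0.1:15000,
  # so clients still reach the registry on the advertised port 5000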
diff --git a/roles/run-buildset-registry/templates/registry.yaml.j2 b/roles/run-buildset-registry/templates/registry.yaml.j2
new file mode 100644
index 0000000..81667eb
--- /dev/null
+++ b/roles/run-buildset-registry/templates/registry.yaml.j2
@@ -0,0 +1,14 @@
+registry:
+ address: '::'
+ port: 5000
+ public-url: 'https://{{ ansible_host | ipwrap }}:{{ buildset_registry_port }}'
+ tls-cert: /tls/cert.pem
+ tls-key: /tls/cert.key
+ secret: "{{ registry_secret }}"
+ users:
+ - name: zuul
+ pass: "{{ registry_password }}"
+ access: write
+ storage:
+ driver: filesystem
+ root: /storage
diff --git a/roles/stage-output/README.rst b/roles/stage-output/README.rst
index df74640..8b6fb59 100644
--- a/roles/stage-output/README.rst
+++ b/roles/stage-output/README.rst
@@ -60,6 +60,11 @@ intended to be used before output fetching in a base job's post-playbook.
zuul.conf --(staged as)--> zuul_conf.txt
.. zuul:rolevar:: stage_compress_logs
- :default: True
+ :default: False
When True, files staged as logs will be compressed individually.
+ Note this option is deprecated: the final log storage should control
+ whether contents are compressed. Certain services, such as Swift, may
+ serve compressed files (for example ``.tar.gz`` tarballs) uncompressed,
+ even when you want them delivered compressed.
diff --git a/roles/stage-output/defaults/main.yaml b/roles/stage-output/defaults/main.yaml
index 61c8c89..c9bf207 100644
--- a/roles/stage-output/defaults/main.yaml
+++ b/roles/stage-output/defaults/main.yaml
@@ -1,3 +1,3 @@
stage_dir: "{{ ansible_user_dir }}"
extensions_to_txt:
-stage_compress_logs: true
+stage_compress_logs: false
diff --git a/roles/test-upload-logs-swift/README.rst b/roles/test-upload-logs-swift/README.rst
new file mode 100644
index 0000000..239a70e
--- /dev/null
+++ b/roles/test-upload-logs-swift/README.rst
@@ -0,0 +1,94 @@
+THIS IS FOR TESTING ONLY
+
+Upload logs to a swift container
+
+This uploads logs to an OpenStack Object Store (Swift) container.
+
+**Role Variables**
+
+.. zuul:rolevar:: zuul_site_upload_logs
+ :default: true
+
+ Controls when logs are uploaded. true, the default, means always
+ upload logs. false means never upload logs. 'failure' means to only
+ upload logs when the job has failed.
+
+ .. note:: Intended to be set by admins via site-variables.
+
+.. zuul:rolevar:: zuul_log_cloud_config
+
+ Complex argument which contains the cloud configuration in
+ os-cloud-config (clouds.yaml) format. It is expected that this
+ argument comes from a `Secret`.
+
+.. zuul:rolevar:: zuul_log_partition
+ :default: false
+
+ If set to true, then the first component of the log path will be
+ removed from the object name and added to the container name, so
+ that logs for different changes are distributed across a large
+ number of containers.
+
+.. zuul:rolevar:: zuul_log_container
+ :default: logs
+
+ This role will create containers which do not already exist. If
+ partitioning is not enabled, this is the name of the container
+ which will be used. If partitioning is enabled, then this will be
+ used as the prefix for the container name which will be separated
+ from the partition name by an underscore. For example, "logs_42"
+ would be the container name for partition 42.
+
+ Note that you will want to set this to a value that uniquely
+ identifies your Zuul installation if you are using a shared object
+ store that requires globally unique container names, for example a
+ public cloud whose Swift API is provided by Ceph.
+
+.. zuul:rolevar:: zuul_log_container_public
+ :default: true
+
+ If the container is created, should it be created with global read
+ ACLs. If the container already exists, it will not be modified.
+
+.. zuul:rolevar:: zuul_log_delete_after
+ :default: 15552000
+
+ Number of seconds after upload at which objects are deleted. The
+ default is 6 months (15552000 seconds); if set to 0, X-Delete-After
+ will not be set.
+
+.. zuul:rolevar:: zuul_log_path
+ :default: Generated by the role `set-zuul-log-path-fact`
+
+ Prepend this path to the object names when uploading.
+
+.. zuul:rolevar:: zuul_log_create_indexes
+ :default: true
+
+ Whether to create ``index.html`` files with directory indexes. If set
+ to false, Swift containers can be marked with a ``Web-Listings=true``
+ property to activate Swift's own directory indexing.
+
+.. zuul:rolevar:: zuul_log_path_shard_build
+ :default: False
+
+ This var is consumed by the set-zuul-log-path-fact role, which
+ upload-logs-swift calls into. If you set this, log paths will be
+ prefixed with the first three characters of the build UUID, which
+ improves log file sharding.
+
+ More details can be found at
+ :zuul:rolevar:`set-zuul-log-path-fact.zuul_log_path_shard_build`.
+
+.. zuul:rolevar:: zuul_log_include_download_script
+ :default: False
+
+ Generate a script from ``zuul_log_download_template`` in the root
+ directory of the uploaded logs to facilitate easy bulk download.
+
+.. zuul:rolevar:: zuul_log_download_template
+ :default: templates/download-logs.sh.j2
+
+ Path to template file if ``zuul_log_include_download_script`` is
+ set. See the sample file for parameters available to the template.
+ The file will be placed in the root of the uploaded logs (with
+ ``.j2`` suffix removed).
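A minimal post-run sketch using this role; the secret name
``log_cloud_secret`` is an assumption for illustration:

  - hosts: localhost
    roles:
      - role: test-upload-logs-swift
        zuul_log_cloud_config: "{{ log_cloud_secret.clouds }}"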
diff --git a/roles/upload-afs/__init__.py b/roles/test-upload-logs-swift/__init__.py
index e69de29..e69de29 100644
--- a/roles/upload-afs/__init__.py
+++ b/roles/test-upload-logs-swift/__init__.py
diff --git a/roles/test-upload-logs-swift/defaults/main.yaml b/roles/test-upload-logs-swift/defaults/main.yaml
new file mode 100644
index 0000000..816d521
--- /dev/null
+++ b/roles/test-upload-logs-swift/defaults/main.yaml
@@ -0,0 +1,6 @@
+zuul_log_partition: false
+zuul_log_container: logs
+zuul_log_container_public: true
+zuul_log_create_indexes: true
+zuul_log_include_download_script: true
+zuul_log_download_template: '{{ role_path }}/templates/download-logs.sh.j2' \ No newline at end of file
diff --git a/roles/upload-afs/library/__init__.py b/roles/test-upload-logs-swift/library/__init__.py
index e69de29..e69de29 100644
--- a/roles/upload-afs/library/__init__.py
+++ b/roles/test-upload-logs-swift/library/__init__.py
diff --git a/roles/test-upload-logs-swift/library/delete_container.py b/roles/test-upload-logs-swift/library/delete_container.py
new file mode 100644
index 0000000..31e768f
--- /dev/null
+++ b/roles/test-upload-logs-swift/library/delete_container.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+#
+# Copyright 2019 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import openstack
+import requests
+import logging
+import os
+
+logging.basicConfig(level=logging.INFO)
+# logging.getLogger("requests").setLevel(logging.DEBUG)
+# logging.getLogger("keystoneauth").setLevel(logging.INFO)
+# logging.getLogger("stevedore").setLevel(logging.INFO)
+logging.captureWarnings(True)
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Delete a swift container"
+ )
+ parser.add_argument('cloud',
+ help='Name of the cloud to use when uploading')
+ parser.add_argument('container',
+ help='Name of the container to use when uploading')
+
+ args = parser.parse_args()
+
+ cloud = openstack.connect(cloud=args.cloud)
+
+ sess = cloud.config.get_session()
+ adapter = requests.adapters.HTTPAdapter(pool_maxsize=100)
+ sess.mount('https://', adapter)
+
+ container = cloud.get_container(args.container)
+ print('Found container', container)
+ print()
+ for x in cloud.object_store.objects(args.container):
+ print('Delete object', x.name)
+ if x.name == '/':
+ endpoint = cloud.object_store.get_endpoint()
+ container = os.path.join(endpoint, args.container)
+ cloud.session.delete(container + '//')
+ else:
+ cloud.object_store.delete_object(x)
+
+ print()
+ print('Delete container', container)
+ cloud.object_store.delete_container(args.container)
+
+
+if __name__ == "__main__":
+ main()
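Note the helper above is a standalone script rather than an Ansible module;
a hypothetical cleanup task could invoke it directly (the cloud and
container names are placeholders):

  - name: Delete the test container
    command: >-
      python3 {{ role_path }}/library/delete_container.py
      my-cloud logs_42
    delegate_to: localhost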
diff --git a/roles/test-upload-logs-swift/library/test-fixtures/artifacts/foo.tar.gz b/roles/test-upload-logs-swift/library/test-fixtures/artifacts/foo.tar.gz
new file mode 100644
index 0000000..9b1579d
--- /dev/null
+++ b/roles/test-upload-logs-swift/library/test-fixtures/artifacts/foo.tar.gz
Binary files differ
diff --git a/roles/test-upload-logs-swift/library/test-fixtures/artifacts/foo.tgz b/roles/test-upload-logs-swift/library/test-fixtures/artifacts/foo.tgz
new file mode 100644
index 0000000..ca9fccb
--- /dev/null
+++ b/roles/test-upload-logs-swift/library/test-fixtures/artifacts/foo.tgz
Binary files differ
diff --git a/roles/test-upload-logs-swift/library/test-fixtures/download-logs-sample.sh b/roles/test-upload-logs-swift/library/test-fixtures/download-logs-sample.sh
new file mode 100644
index 0000000..335d939
--- /dev/null
+++ b/roles/test-upload-logs-swift/library/test-fixtures/download-logs-sample.sh
@@ -0,0 +1,84 @@
+#!/bin/bash
+
+# Download all logs
+
+#
+# To use this file
+#
+# curl "http://fakebaseurl.com/download-logs.sh" | bash
+#
+# Logs will be copied in a temporary directory as described in the
+# output. Set DOWNLOAD_DIR to an empty directory if you wish to
+# override this.
+#
+
+BASE_URL=http://fakebaseurl.com
+
+function log {
+ echo "$(date -Iseconds) | $@"
+}
+
+function save_file {
+ local file="$1"
+
+ curl -s --compressed --create-dirs -o "${file}" "${BASE_URL}/${file}"
+
+ # Using --compressed we will send an Accept-Encoding: gzip header
+ # and the data will come to us across the network compressed.
+ # However, sometimes things like OpenStack's log server will send
+ # .gz files (as stored on its disk) uncompressed, so we check if
+ # this really looks like an ASCII file and rename for clarity.
+ if [[ "${file}" == *.gz ]]; then
+ local type=$(file "${file}")
+ if [[ "${type}" =~ "ASCII text" ]] || [[ "${type}" =~ "Unicode text" ]]; then
+ local new_name=${file%.gz}
+ log "Renaming to ${new_name}"
+ mv "${file}" "${new_name}"
+ fi
+ fi
+
+}
+
+if [[ -z "${DOWNLOAD_DIR}" ]]; then
+ DOWNLOAD_DIR=$(mktemp -d --tmpdir zuul-logs.XXXXXX)
+fi
+log "Saving logs to ${DOWNLOAD_DIR}"
+
+pushd "${DOWNLOAD_DIR}" > /dev/null
+
+
+
+log "Getting ${BASE_URL}/job-output.json [ 0001 / 0010 ]"
+save_file "job-output.json"
+
+log "Getting ${BASE_URL}/controller/compressed.gz [ 0002 / 0010 ]"
+save_file "controller/compressed.gz"
+
+log "Getting ${BASE_URL}/controller/cpu-load.svg [ 0003 / 0010 ]"
+save_file "controller/cpu-load.svg"
+
+log "Getting ${BASE_URL}/controller/journal.xz [ 0004 / 0010 ]"
+save_file "controller/journal.xz"
+
+log "Getting ${BASE_URL}/controller/service_log.txt [ 0005 / 0010 ]"
+save_file "controller/service_log.txt"
+
+log "Getting ${BASE_URL}/controller/syslog [ 0006 / 0010 ]"
+save_file "controller/syslog"
+
+log "Getting ${BASE_URL}/controller/subdir/foo::3.txt [ 0007 / 0010 ]"
+save_file "controller/subdir/foo::3.txt"
+
+log "Getting ${BASE_URL}/controller/subdir/subdir.txt [ 0008 / 0010 ]"
+save_file "controller/subdir/subdir.txt"
+
+log "Getting ${BASE_URL}/zuul-info/inventory.yaml [ 0009 / 0010 ]"
+save_file "zuul-info/inventory.yaml"
+
+log "Getting ${BASE_URL}/zuul-info/zuul-info.controller.txt [ 0010 / 0010 ]"
+save_file "zuul-info/zuul-info.controller.txt"
+
+
+popd >/dev/null
+
+log "Download complete!" \ No newline at end of file
diff --git a/roles/test-upload-logs-swift/library/test-fixtures/links/controller/service_log.txt b/roles/test-upload-logs-swift/library/test-fixtures/links/controller/service_log.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/roles/test-upload-logs-swift/library/test-fixtures/links/controller/service_log.txt
diff --git a/roles/test-upload-logs-swift/library/test-fixtures/links/job-output.json b/roles/test-upload-logs-swift/library/test-fixtures/links/job-output.json
new file mode 100644
index 0000000..c8cd7e9
--- /dev/null
+++ b/roles/test-upload-logs-swift/library/test-fixtures/links/job-output.json
@@ -0,0 +1 @@
+{"test": "foo"}
diff --git a/roles/test-upload-logs-swift/library/test-fixtures/links/symlink_loop/placeholder b/roles/test-upload-logs-swift/library/test-fixtures/links/symlink_loop/placeholder
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/roles/test-upload-logs-swift/library/test-fixtures/links/symlink_loop/placeholder
diff --git a/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/compressed.gz b/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/compressed.gz
new file mode 100644
index 0000000..4dc3bad
--- /dev/null
+++ b/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/compressed.gz
Binary files differ
diff --git a/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/cpu-load.svg b/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/cpu-load.svg
new file mode 100644
index 0000000..01a940a
--- /dev/null
+++ b/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/cpu-load.svg
@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<svg>
+</svg>
diff --git a/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/journal.xz b/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/journal.xz
new file mode 100644
index 0000000..ea28d9e
--- /dev/null
+++ b/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/journal.xz
Binary files differ
diff --git a/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/service_log.txt b/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/service_log.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/service_log.txt
diff --git a/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/subdir/foo::3.txt b/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/subdir/foo::3.txt
new file mode 100644
index 0000000..384ce7d
--- /dev/null
+++ b/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/subdir/foo::3.txt
@@ -0,0 +1,2 @@
+This is a plain text file with a funny name.
+The index links should escape the :'s.
diff --git a/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/subdir/subdir.txt b/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/subdir/subdir.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/subdir/subdir.txt
diff --git a/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/syslog b/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/syslog
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/roles/test-upload-logs-swift/library/test-fixtures/logs/controller/syslog
diff --git a/roles/test-upload-logs-swift/library/test-fixtures/logs/job-output.json b/roles/test-upload-logs-swift/library/test-fixtures/logs/job-output.json
new file mode 100644
index 0000000..c8cd7e9
--- /dev/null
+++ b/roles/test-upload-logs-swift/library/test-fixtures/logs/job-output.json
@@ -0,0 +1 @@
+{"test": "foo"}
diff --git a/roles/test-upload-logs-swift/library/test-fixtures/logs/zuul-info/inventory.yaml b/roles/test-upload-logs-swift/library/test-fixtures/logs/zuul-info/inventory.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/roles/test-upload-logs-swift/library/test-fixtures/logs/zuul-info/inventory.yaml
diff --git a/roles/test-upload-logs-swift/library/test-fixtures/logs/zuul-info/zuul-info.controller.txt b/roles/test-upload-logs-swift/library/test-fixtures/logs/zuul-info/zuul-info.controller.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/roles/test-upload-logs-swift/library/test-fixtures/logs/zuul-info/zuul-info.controller.txt
diff --git a/roles/test-upload-logs-swift/library/test_zuul_swift_upload.py b/roles/test-upload-logs-swift/library/test_zuul_swift_upload.py
new file mode 100644
index 0000000..6577c31
--- /dev/null
+++ b/roles/test-upload-logs-swift/library/test_zuul_swift_upload.py
@@ -0,0 +1,397 @@
+# Copyright (C) 2018 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import testtools
+import time
+import stat
+import fixtures
+
+from bs4 import BeautifulSoup
+from .zuul_swift_upload import FileList, Indexer, FileDetail
+
+
+FIXTURE_DIR = os.path.join(os.path.dirname(__file__),
+ 'test-fixtures')
+
+
+class SymlinkFixture(fixtures.Fixture):
+ links = [
+ ('bad_symlink', '/etc'),
+ ('bad_symlink_file', '/etc/issue'),
+ ('good_symlink', 'controller'),
+ ('recursive_symlink', '.'),
+ ('symlink_file', 'job-output.json'),
+ ('symlink_loop_a', 'symlink_loop'),
+ ('symlink_loop/symlink_loop_b', '..'),
+ ]
+
+ def _setUp(self):
+ for (src, target) in self.links:
+ path = os.path.join(FIXTURE_DIR, 'links', src)
+ os.symlink(target, path)
+ self.addCleanup(os.unlink, path)
+
+
+class TestFileList(testtools.TestCase):
+
+ def assert_files(self, result, files):
+ self.assertEqual(len(result), len(files))
+ for expected, received in zip(files, result):
+ self.assertEqual(expected[0], received.relative_path)
+ if expected[0] and expected[0][-1] == '/':
+ efilename = os.path.split(
+ os.path.dirname(expected[0]))[1] + '/'
+ else:
+ efilename = os.path.split(expected[0])[1]
+ self.assertEqual(efilename, received.filename)
+ if received.folder:
+ if received.full_path is not None and expected[0] != '':
+ self.assertTrue(os.path.isdir(received.full_path))
+ else:
+ self.assertTrue(os.path.isfile(received.full_path))
+ self.assertEqual(expected[1], received.mimetype)
+ self.assertEqual(expected[2], received.encoding)
+
+ def find_file(self, file_list, path):
+ for f in file_list:
+ if f.relative_path == path:
+ return f
+
+ def test_single_dir_trailing_slash(self):
+ '''Test a single directory with a trailing slash'''
+
+ with FileList() as fl:
+ fl.add(os.path.join(FIXTURE_DIR, 'logs/'))
+ self.assert_files(fl, [
+ ('', 'application/directory', None),
+ ('controller', 'application/directory', None),
+ ('zuul-info', 'application/directory', None),
+ ('job-output.json', 'application/json', None),
+ ('controller/subdir', 'application/directory', None),
+ ('controller/compressed.gz', 'text/plain', 'gzip'),
+ ('controller/cpu-load.svg', 'image/svg+xml', None),
+ ('controller/journal.xz', 'text/plain', 'xz'),
+ ('controller/service_log.txt', 'text/plain', None),
+ ('controller/syslog', 'text/plain', None),
+ ('controller/subdir/foo::3.txt', 'text/plain', None),
+ ('controller/subdir/subdir.txt', 'text/plain', None),
+ ('zuul-info/inventory.yaml', 'text/plain', None),
+ ('zuul-info/zuul-info.controller.txt', 'text/plain', None),
+ ])
+
+ def test_single_dir(self):
+ '''Test a single directory without a trailing slash'''
+ with FileList() as fl:
+ fl.add(os.path.join(FIXTURE_DIR, 'logs'))
+ self.assert_files(fl, [
+ ('', 'application/directory', None),
+ ('logs', 'application/directory', None),
+ ('logs/controller', 'application/directory', None),
+ ('logs/zuul-info', 'application/directory', None),
+ ('logs/job-output.json', 'application/json', None),
+ ('logs/controller/subdir', 'application/directory', None),
+ ('logs/controller/compressed.gz', 'text/plain', 'gzip'),
+ ('logs/controller/cpu-load.svg', 'image/svg+xml', None),
+ ('logs/controller/journal.xz', 'text/plain', 'xz'),
+ ('logs/controller/service_log.txt', 'text/plain', None),
+ ('logs/controller/syslog', 'text/plain', None),
+ ('logs/controller/subdir/foo::3.txt', 'text/plain', None),
+ ('logs/controller/subdir/subdir.txt', 'text/plain', None),
+ ('logs/zuul-info/inventory.yaml', 'text/plain', None),
+ ('logs/zuul-info/zuul-info.controller.txt',
+ 'text/plain', None),
+ ])
+
+ def test_single_file(self):
+ '''Test a single file'''
+ with FileList() as fl:
+ fl.add(os.path.join(FIXTURE_DIR,
+ 'logs/zuul-info/inventory.yaml'))
+ self.assert_files(fl, [
+ ('', 'application/directory', None),
+ ('inventory.yaml', 'text/plain', None),
+ ])
+
+ def test_symlinks(self):
+ '''Test symlinks'''
+ with FileList() as fl:
+ self.useFixture(SymlinkFixture())
+ fl.add(os.path.join(FIXTURE_DIR, 'links/'))
+ self.assert_files(fl, [
+ ('', 'application/directory', None),
+ ('controller', 'application/directory', None),
+ ('good_symlink', 'application/directory', None),
+ ('recursive_symlink', 'application/directory', None),
+ ('symlink_loop', 'application/directory', None),
+ ('symlink_loop_a', 'application/directory', None),
+ ('job-output.json', 'application/json', None),
+ ('symlink_file', 'text/plain', None),
+ ('controller/service_log.txt', 'text/plain', None),
+ ('symlink_loop/symlink_loop_b', 'application/directory', None),
+ ('symlink_loop/placeholder', 'text/plain', None),
+ ])
+
+ def test_index_files(self):
+ '''Test index generation'''
+ with FileList() as fl:
+ fl.add(os.path.join(FIXTURE_DIR, 'logs'))
+ ix = Indexer(fl)
+ ix.make_indexes()
+
+ self.assert_files(fl, [
+ ('', 'application/directory', None),
+ ('index.html', 'text/html', None),
+ ('logs', 'application/directory', None),
+ ('logs/controller', 'application/directory', None),
+ ('logs/zuul-info', 'application/directory', None),
+ ('logs/job-output.json', 'application/json', None),
+ ('logs/index.html', 'text/html', None),
+ ('logs/controller/subdir', 'application/directory', None),
+ ('logs/controller/compressed.gz', 'text/plain', 'gzip'),
+ ('logs/controller/cpu-load.svg', 'image/svg+xml', None),
+ ('logs/controller/journal.xz', 'text/plain', 'xz'),
+ ('logs/controller/service_log.txt', 'text/plain', None),
+ ('logs/controller/syslog', 'text/plain', None),
+ ('logs/controller/index.html', 'text/html', None),
+ ('logs/controller/subdir/foo::3.txt', 'text/plain', None),
+ ('logs/controller/subdir/subdir.txt', 'text/plain', None),
+ ('logs/controller/subdir/index.html', 'text/html', None),
+ ('logs/zuul-info/inventory.yaml', 'text/plain', None),
+ ('logs/zuul-info/zuul-info.controller.txt',
+ 'text/plain', None),
+ ('logs/zuul-info/index.html', 'text/html', None),
+ ])
+
+ top_index = self.find_file(fl, 'index.html')
+ page = open(top_index.full_path).read()
+ page = BeautifulSoup(page, 'html.parser')
+ rows = page.find_all('tr')[1:]
+
+ self.assertEqual(len(rows), 1)
+
+ self.assertEqual(rows[0].find('a').get('href'), 'logs/')
+ self.assertEqual(rows[0].find('a').text, 'logs/')
+
+ subdir_index = self.find_file(
+ fl, 'logs/controller/subdir/index.html')
+ page = open(subdir_index.full_path).read()
+ page = BeautifulSoup(page, 'html.parser')
+ rows = page.find_all('tr')[1:]
+ self.assertEqual(rows[0].find('a').get('href'), '../')
+ self.assertEqual(rows[0].find('a').text, '../')
+
+ # Test proper escaping of files with funny names
+ self.assertEqual(rows[1].find('a').get('href'), 'foo%3A%3A3.txt')
+ self.assertEqual(rows[1].find('a').text, 'foo::3.txt')
+ # Test files without escaping
+ self.assertEqual(rows[2].find('a').get('href'), 'subdir.txt')
+ self.assertEqual(rows[2].find('a').text, 'subdir.txt')
+
+ def test_index_files_trailing_slash(self):
+ '''Test index generation with a trailing slash'''
+ with FileList() as fl:
+ fl.add(os.path.join(FIXTURE_DIR, 'logs/'))
+ ix = Indexer(fl)
+ ix.make_indexes()
+
+ self.assert_files(fl, [
+ ('', 'application/directory', None),
+ ('controller', 'application/directory', None),
+ ('zuul-info', 'application/directory', None),
+ ('job-output.json', 'application/json', None),
+ ('index.html', 'text/html', None),
+ ('controller/subdir', 'application/directory', None),
+ ('controller/compressed.gz', 'text/plain', 'gzip'),
+ ('controller/cpu-load.svg', 'image/svg+xml', None),
+ ('controller/journal.xz', 'text/plain', 'xz'),
+ ('controller/service_log.txt', 'text/plain', None),
+ ('controller/syslog', 'text/plain', None),
+ ('controller/index.html', 'text/html', None),
+ ('controller/subdir/foo::3.txt', 'text/plain', None),
+ ('controller/subdir/subdir.txt', 'text/plain', None),
+ ('controller/subdir/index.html', 'text/html', None),
+ ('zuul-info/inventory.yaml', 'text/plain', None),
+ ('zuul-info/zuul-info.controller.txt', 'text/plain', None),
+ ('zuul-info/index.html', 'text/html', None),
+ ])
+
+ top_index = self.find_file(fl, 'index.html')
+ page = open(top_index.full_path).read()
+ page = BeautifulSoup(page, 'html.parser')
+ rows = page.find_all('tr')[1:]
+
+ self.assertEqual(len(rows), 3)
+
+ self.assertEqual(rows[0].find('a').get('href'), 'controller/')
+ self.assertEqual(rows[0].find('a').text, 'controller/')
+
+ self.assertEqual(rows[1].find('a').get('href'), 'zuul-info/')
+ self.assertEqual(rows[1].find('a').text, 'zuul-info/')
+
+ subdir_index = self.find_file(fl, 'controller/subdir/index.html')
+ page = open(subdir_index.full_path).read()
+ page = BeautifulSoup(page, 'html.parser')
+ rows = page.find_all('tr')[1:]
+ self.assertEqual(rows[0].find('a').get('href'), '../')
+ self.assertEqual(rows[0].find('a').text, '../')
+
+ # Test proper escaping of files with funny names
+ self.assertEqual(rows[1].find('a').get('href'), 'foo%3A%3A3.txt')
+ self.assertEqual(rows[1].find('a').text, 'foo::3.txt')
+ # Test files without escaping
+ self.assertEqual(rows[2].find('a').get('href'), 'subdir.txt')
+ self.assertEqual(rows[2].find('a').text, 'subdir.txt')
+
+ def test_topdir_parent_link(self):
+ '''Test index generation creates topdir parent link'''
+ with FileList() as fl:
+ fl.add(os.path.join(FIXTURE_DIR, 'logs/'))
+ ix = Indexer(fl)
+ ix.make_indexes(
+ create_parent_links=True,
+ create_topdir_parent_link=True)
+
+ self.assert_files(fl, [
+ ('', 'application/directory', None),
+ ('controller', 'application/directory', None),
+ ('zuul-info', 'application/directory', None),
+ ('job-output.json', 'application/json', None),
+ ('index.html', 'text/html', None),
+ ('controller/subdir', 'application/directory', None),
+ ('controller/compressed.gz', 'text/plain', 'gzip'),
+ ('controller/cpu-load.svg', 'image/svg+xml', None),
+ ('controller/journal.xz', 'text/plain', 'xz'),
+ ('controller/service_log.txt', 'text/plain', None),
+ ('controller/syslog', 'text/plain', None),
+ ('controller/index.html', 'text/html', None),
+ ('controller/subdir/foo::3.txt', 'text/plain', None),
+ ('controller/subdir/subdir.txt', 'text/plain', None),
+ ('controller/subdir/index.html', 'text/html', None),
+ ('zuul-info/inventory.yaml', 'text/plain', None),
+ ('zuul-info/zuul-info.controller.txt', 'text/plain', None),
+ ('zuul-info/index.html', 'text/html', None),
+ ])
+
+ top_index = self.find_file(fl, 'index.html')
+ page = open(top_index.full_path).read()
+ page = BeautifulSoup(page, 'html.parser')
+ rows = page.find_all('tr')[1:]
+
+ self.assertEqual(len(rows), 4)
+
+ self.assertEqual(rows[0].find('a').get('href'), '../')
+ self.assertEqual(rows[0].find('a').text, '../')
+
+ self.assertEqual(rows[1].find('a').get('href'), 'controller/')
+ self.assertEqual(rows[1].find('a').text, 'controller/')
+
+ self.assertEqual(rows[2].find('a').get('href'), 'zuul-info/')
+ self.assertEqual(rows[2].find('a').text, 'zuul-info/')
+
+ subdir_index = self.find_file(fl, 'controller/subdir/index.html')
+ page = open(subdir_index.full_path).read()
+ page = BeautifulSoup(page, 'html.parser')
+ rows = page.find_all('tr')[1:]
+ self.assertEqual(rows[0].find('a').get('href'), '../')
+ self.assertEqual(rows[0].find('a').text, '../')
+
+ # Test proper escaping of files with funny names
+ self.assertEqual(rows[1].find('a').get('href'), 'foo%3A%3A3.txt')
+ self.assertEqual(rows[1].find('a').text, 'foo::3.txt')
+ # Test files without escaping
+ self.assertEqual(rows[2].find('a').get('href'), 'subdir.txt')
+ self.assertEqual(rows[2].find('a').text, 'subdir.txt')
+
+ def test_no_parent_links(self):
+ '''Test index generation without parent links'''
+ with FileList() as fl:
+ fl.add(os.path.join(FIXTURE_DIR, 'logs/'))
+ ix = Indexer(fl)
+ ix.make_indexes(
+ create_parent_links=False,
+ create_topdir_parent_link=False)
+
+ self.assert_files(fl, [
+ ('', 'application/directory', None),
+ ('controller', 'application/directory', None),
+ ('zuul-info', 'application/directory', None),
+ ('job-output.json', 'application/json', None),
+ ('index.html', 'text/html', None),
+ ('controller/subdir', 'application/directory', None),
+ ('controller/compressed.gz', 'text/plain', 'gzip'),
+ ('controller/cpu-load.svg', 'image/svg+xml', None),
+ ('controller/journal.xz', 'text/plain', 'xz'),
+ ('controller/service_log.txt', 'text/plain', None),
+ ('controller/syslog', 'text/plain', None),
+ ('controller/index.html', 'text/html', None),
+ ('controller/subdir/foo::3.txt', 'text/plain', None),
+ ('controller/subdir/subdir.txt', 'text/plain', None),
+ ('controller/subdir/index.html', 'text/html', None),
+ ('zuul-info/inventory.yaml', 'text/plain', None),
+ ('zuul-info/zuul-info.controller.txt', 'text/plain', None),
+ ('zuul-info/index.html', 'text/html', None),
+ ])
+
+ top_index = self.find_file(fl, 'index.html')
+ page = open(top_index.full_path).read()
+ page = BeautifulSoup(page, 'html.parser')
+ rows = page.find_all('tr')[1:]
+
+ self.assertEqual(len(rows), 3)
+
+ self.assertEqual(rows[0].find('a').get('href'), 'controller/')
+ self.assertEqual(rows[0].find('a').text, 'controller/')
+
+ self.assertEqual(rows[1].find('a').get('href'), 'zuul-info/')
+ self.assertEqual(rows[1].find('a').text, 'zuul-info/')
+
+ subdir_index = self.find_file(fl, 'controller/subdir/index.html')
+ page = open(subdir_index.full_path).read()
+ page = BeautifulSoup(page, 'html.parser')
+ rows = page.find_all('tr')[1:]
+
+ # Test proper escaping of files with funny names
+ self.assertEqual(rows[0].find('a').get('href'), 'foo%3A%3A3.txt')
+ self.assertEqual(rows[0].find('a').text, 'foo::3.txt')
+ # Test files without escaping
+ self.assertEqual(rows[1].find('a').get('href'), 'subdir.txt')
+ self.assertEqual(rows[1].find('a').text, 'subdir.txt')
+
+
+class TestFileDetail(testtools.TestCase):
+
+ def test_get_file_detail(self):
+ '''Test files info'''
+ path = os.path.join(FIXTURE_DIR, 'logs/job-output.json')
+ file_detail = FileDetail(path, '')
+ path_stat = os.stat(path)
+ self.assertEqual(
+ time.gmtime(path_stat[stat.ST_MTIME]),
+ file_detail.last_modified)
+ self.assertEqual(16, file_detail.size)
+
+ def test_get_file_detail_missing_file(self):
+ '''Test files that go missing during a walk'''
+
+ file_detail = FileDetail('missing/file/that/we/cant/find', '')
+
+ self.assertEqual(time.gmtime(0), file_detail.last_modified)
+ self.assertEqual(0, file_detail.size)
diff --git a/roles/test-upload-logs-swift/library/zuul_swift_upload.py b/roles/test-upload-logs-swift/library/zuul_swift_upload.py
new file mode 100755
index 0000000..100d8e6
--- /dev/null
+++ b/roles/test-upload-logs-swift/library/zuul_swift_upload.py
@@ -0,0 +1,981 @@
+#!/usr/bin/env python3
+#
+# Copyright 2014 Rackspace Australia
+# Copyright 2018 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+"""
+Utility to upload files to swift
+"""
+
+import argparse
+import gzip
+import io
+import logging
+import mimetypes
+import os
+import jinja2
+try:
+ import queue as queuelib
+except ImportError:
+ import Queue as queuelib
+import shutil
+import stat
+import sys
+import tempfile
+import threading
+import time
+import traceback
+try:
+ import urllib.parse as urlparse
+except ImportError:
+ import urllib as urlparse
+import zlib
+import collections
+
+import openstack
+import requests
+import requests.exceptions
+import requestsexceptions
+import keystoneauth1.exceptions
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+ # Python 3.3+
+ from collections.abc import Sequence
+except ImportError:
+ from collections import Sequence
+
+mimetypes.init()
+mimetypes.add_type('text/plain', '.yaml')
+
+MAX_UPLOAD_THREADS = 24
+POST_ATTEMPTS = 3
+
+# Map mime types to apache icons
+APACHE_MIME_ICON_MAP = {
+ '_default': 'unknown.png',
+ 'application/gzip': 'compressed.png',
+ 'application/directory': 'folder.png',
+ 'text/html': 'text.png',
+ 'text/plain': 'text.png',
+}
+
+# Map mime types to apache icons
+APACHE_FILE_ICON_MAP = {
+ '..': 'back.png',
+}
+
+# These icon files are from the Apache project and are in the public
+# domain.
+ICON_IMAGES = {
+ 'back.png': 'iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAMAAAD3n0w0AAAAElBMVEX/'
+ '///M//+ZmZlmZmYzMzMAAACei5rnAAAAAnRSTlP/AOW3MEoAAABWSURB'
+ 'VHjabdBBCgAhDEPRRpv7X3kwEMsQ//IRRC08urjRHbha5VLFUsVSxVI9'
+ 'lmDh5hMpHD6n0EgoiZG0DNINpnWlcVXaRix76e1/8dddcL6nG0Ri9gHj'
+ 'tgSXKYeLBgAAAABJRU5ErkJggg==',
+ 'compressed.png': 'iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAMAAAD3n0w0AAADAFBM'
+ 'VEX//////8z//5n//2b//zP//wD/zP//zMz/zJn/zGb/zDP/zAD/'
+ 'mf//mcz/mZn/mWb/mTP/mQD/Zv//Zsz/Zpn/Zmb/ZjP/ZgD/M///'
+ 'M8z/M5n/M2b/MzP/MwD/AP//AMz/AJn/AGb/ADP/AADM///M/8zM'
+ '/5nM/2bM/zPM/wDMzP/MzMzMzJnMzGbMzDPMzADMmf/MmczMmZnM'
+ 'mWbMmTPMmQDMZv/MZszMZpnMZmbMZjPMZgDMM//MM8zMM5nMM2bM'
+ 'MzPMMwDMAP/MAMzMAJnMAGbMADPMAACZ//+Z/8yZ/5mZ/2aZ/zOZ'
+ '/wCZzP+ZzMyZzJmZzGaZzDOZzACZmf+ZmcyZmZmZmWaZmTOZmQCZ'
+ 'Zv+ZZsyZZpmZZmaZZjOZZgCZM/+ZM8yZM5mZM2aZMzOZMwCZAP+Z'
+ 'AMyZAJmZAGaZADOZAABm//9m/8xm/5lm/2Zm/zNm/wBmzP9mzMxm'
+ 'zJlmzGZmzDNmzABmmf9mmcxmmZlmmWZmmTNmmQBmZv9mZsxmZplm'
+ 'ZmZmZjNmZgBmM/9mM8xmM5lmM2ZmMzNmMwBmAP9mAMxmAJlmAGZm'
+ 'ADNmAAAz//8z/8wz/5kz/2Yz/zMz/wAzzP8zzMwzzJkzzGYzzDMz'
+ 'zAAzmf8zmcwzmZkzmWYzmTMzmQAzZv8zZswzZpkzZmYzZjMzZgAz'
+ 'M/8zM8wzM5kzM2YzMzMzMwAzAP8zAMwzAJkzAGYzADMzAAAA//8A'
+ '/8wA/5kA/2YA/zMA/wAAzP8AzMwAzJkAzGYAzDMAzAAAmf8AmcwA'
+ 'mZkAmWYAmTMAmQAAZv8AZswAZpkAZmYAZjMAZgAAM/8AM8wAM5kA'
+ 'M2YAMzMAMwAAAP8AAMwAAJkAAGYAADPuAADdAAC7AACqAACIAAB3'
+ 'AABVAABEAAAiAAARAAAA7gAA3QAAuwAAqgAAiAAAdwAAVQAARAAA'
+ 'IgAAEQAAAO4AAN0AALsAAKoAAIgAAHcAAFUAAEQAACIAABHu7u7d'
+ '3d27u7uqqqqIiIh3d3dVVVVEREQiIiIREREAAAD7CIKZAAAAJXRS'
+ 'TlP///////////////////////////////////////////////8A'
+ 'P89CTwAAAGtJREFUeNp9z9ENgDAIhOEOco+dybVuEXasFMRDY/x5'
+ '+xJCO6Znu6kSx7BhXyjtKBWWNlwW88Loid7hFRKBXiIYCMfMEYUQ'
+ 'QohC3CjFA5nIjqx1CqlDLGR/EhM5O06yvin0ftGOyIS7lV14AsQN'
+ 'aR7rMEBYAAAAAElFTkSuQmCC',
+ 'folder.png': 'iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAMAAAD3n0w0AAAAElBMVEX/'
+ '////zJnM//+ZZjMzMzMAAADCEvqoAAAAA3RSTlP//wDXyg1BAAAASElE'
+ 'QVR42s3KQQ6AQAhDUaXt/a/sQDrRJu7c+NmQB0e99B3lnqjT6cYx6zSI'
+ 'bV40n3D7psYMoBoz4w8/EdNYQsbGEjNxYSljXTEsA9O1pLTvAAAAAElF'
+ 'TkSuQmCC',
+ 'text.png': 'iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAMAAAD3n0w0AAAAD1BMVEX/'
+ '///M//+ZmZkzMzMAAABVsTOVAAAAAnRSTlP/AOW3MEoAAABISURBVHja'
+ 'tcrRCgAgCENRbf7/N7dKomGvngjhMsPLD4NdMPwia438NRIyxsaL/XQZ'
+ 'hyxpkC6zyjLXGVXnkhqWJWIIrOgeinECLlUCjBCqNQoAAAAASUVORK5C'
+ 'YII=',
+ 'unknown.png': 'iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAMAAAD3n0w0AAAAD1BMVEX/'
+ '///M//+ZmZkzMzMAAABVsTOVAAAAAnRSTlP/AOW3MEoAAABYSURBVHja'
+ 'ncvRDoAgDEPRruX/v1kmNHPBxMTLyzgD6FmsILg56g2hQnJkOco4yZhq'
+ 'tN5nYd5Zq0LsHblwxwP9GTCWsaGtoelANKzOlz/RfaLYUmLE6E28ALlN'
+ 'AupSdoFsAAAAAElFTkSuQmCC'}
+
+
+# Begin vendored code
+# This code is licensed under the Public Domain/CC0 and comes from
+# https://github.com/leenr/gzip-stream/blob/master/gzip_stream.py
+# Code was modified:
+# removed type annotations to support python2.
+# removed use of *, somearg for positional anonymous args.
+# Default compression level to 9.
+
+class GZIPCompressedStream(io.RawIOBase):
+ def __init__(self, stream, compression_level=9):
+ assert 1 <= compression_level <= 9
+
+ self._compression_level = compression_level
+ self._stream = stream
+
+ self._compressed_stream = io.BytesIO()
+ self._compressor = gzip.GzipFile(
+ mode='wb',
+ fileobj=self._compressed_stream,
+ compresslevel=compression_level
+ )
+
+ # because of the GZIP header written by `GzipFile.__init__`:
+ self._compressed_stream.seek(0)
+
+ @property
+ def compression_level(self):
+ return self._compression_level
+
+ @property
+ def stream(self):
+ return self._stream
+
+ def readable(self):
+ return True
+
+ def _read_compressed_into(self, b):
+ buf = self._compressed_stream.read(len(b))
+ b[:len(buf)] = buf
+ return len(buf)
+
+ def readinto(self, b):
+ b = memoryview(b)
+
+ offset = 0
+ size = len(b)
+ while offset < size:
+ offset += self._read_compressed_into(b[offset:])
+ if offset < size:
+ # self._compressed_buffer now empty
+ if self._compressor.closed:
+ # nothing to compress anymore
+ break
+ # compress next bytes
+ self._read_n_compress(size)
+
+ return offset
+
+ def _read_n_compress(self, size):
+ assert size > 0
+
+ data = self._stream.read(size)
+
+ # rewind buffer to the start to free up memory
+ # (because anything currently in the buffer should be already
+ # streamed off the object)
+ self._compressed_stream.seek(0)
+ self._compressed_stream.truncate(0)
+
+ if data:
+ self._compressor.write(data)
+ else:
+ # this will write final data (will flush zlib with Z_FINISH)
+ self._compressor.close()
+
+ # rewind to the buffer start
+ self._compressed_stream.seek(0)
+
+ def __repr__(self):
+ return (
+ '{self.__class__.__name__}('
+ '{self.stream!r}, '
+ 'compression_level={self.compression_level!r}'
+ ')'
+ ).format(self=self)
+
+# End vendored code
+
+
+def get_mime_icon(mime, filename=''):
+ icon = (APACHE_FILE_ICON_MAP.get(filename) or
+ APACHE_MIME_ICON_MAP.get(mime) or
+ APACHE_MIME_ICON_MAP['_default'])
+ return "data:image/png;base64,%s" % ICON_IMAGES[icon]
+
+
+def get_cloud(cloud):
+ if isinstance(cloud, dict):
+ config = openstack.config.loader.OpenStackConfig().get_one(**cloud)
+ return openstack.connection.Connection(config=config)
+ else:
+ return openstack.connect(cloud=cloud)
+
+
+def retry_function(func):
+ for attempt in range(1, POST_ATTEMPTS + 1):
+ try:
+ return func()
+ except Exception:
+ if attempt >= POST_ATTEMPTS:
+ raise
+ else:
+ logging.exception("Error on attempt %d" % attempt)
+ time.sleep(attempt * 10)
+
+
+def sizeof_fmt(num, suffix='B'):
+ # From http://stackoverflow.com/questions/1094841/
+ # reusable-library-to-get-human-readable-version-of-file-size
+ for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
+ if abs(num) < 1024.0:
+ return "%3.1f%s%s" % (num, unit, suffix)
+ num /= 1024.0
+ return "%.1f%s%s" % (num, 'Y', suffix)
+
+
+class FileDetail():
+ """
+ Used to generate indexes with links or as the file path
+ to push to swift.
+ """
+
+ def __init__(self, full_path, relative_path,
+ filename=None, is_index=False):
+ """
+ Args:
+ full_path (str): The absolute path to the file on disk.
+ relative_path (str): The relative path from the artifacts source
+ used for links.
+ filename (str): An optional alternate filename in links.
+ is_index (bool): Is this file an index
+ """
+ # Define FileNotFoundError for compatibility with python2,
+ # where it does not exist
+ try:
+ FileNotFoundError # noqa: F823
+ except NameError:
+ FileNotFoundError = OSError
+
+ self.full_path = full_path
+ if filename is None:
+ self.filename = os.path.basename(full_path)
+ else:
+ self.filename = filename
+ self.relative_path = relative_path
+ self.is_index = is_index
+
+ if self.full_path and os.path.isfile(self.full_path):
+ mime_guess, encoding = mimetypes.guess_type(self.full_path)
+ self.mimetype = mime_guess if mime_guess else 'text/plain'
+ self.encoding = encoding
+ self.folder = False
+ else:
+ self.mimetype = 'application/directory'
+ self.encoding = None
+ self.folder = True
+ try:
+ st = os.stat(self.full_path)
+ self.last_modified = time.gmtime(st[stat.ST_MTIME])
+ self.size = st[stat.ST_SIZE]
+ except (FileNotFoundError, TypeError):
+ self.last_modified = time.gmtime(0)
+ self.size = 0
+
+ def __repr__(self):
+ t = 'Folder' if self.folder else 'File'
+ return '<%s %s%s>' % (t, self.relative_path,
+ ' (index)' if self.is_index else '')
+
+
+class FileList(Sequence):
+ '''A collection of FileDetail objects
+
+ This is a list-like group of FileDetail objects, intended to be
+ used as a context manager around the upload process.
+ '''
+ def __init__(self):
+ self.file_list = []
+ self.file_list.append(FileDetail(None, '', ''))
+ self.tempdirs = []
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ for tempdir in self.tempdirs:
+ shutil.rmtree(tempdir)
+
+ def __getitem__(self, item):
+ return self.file_list.__getitem__(item)
+
+ def __len__(self):
+ return self.file_list.__len__()
+
+ def get_tempdir(self):
+ '''Get a temporary directory
+
+ Returns path to a private temporary directory which will be
+ cleaned on exit
+ '''
+ tempdir = tempfile.mkdtemp(prefix='s-u-l-tmp')
+ self.tempdirs.append(tempdir)
+ return tempdir
+
+ @staticmethod
+ def _path_in_tree(root, path):
+ full_path = os.path.realpath(os.path.abspath(
+ os.path.expanduser(path)))
+ if not full_path.startswith(root):
+ logging.debug("Skipping path outside root: %s" % (path,))
+ return False
+ return True
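+
+    # Example (hypothetical paths): with root '/data/logs',
+    # '/data/logs/job-output.txt' is inside the tree, while
+    # '/data/logs/../../etc/passwd' resolves outside it and is skipped.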
+
+ def add(self, file_path):
+ """
+ Generate a list of files to upload to swift. Recurses through
+ directories
+ """
+
+ # file_list: A list of FileDetails to push to swift
+ file_list = []
+
+ if os.path.isfile(file_path):
+ relative_path = os.path.basename(file_path)
+ file_list.append(FileDetail(file_path, relative_path))
+ elif os.path.isdir(file_path):
+ original_root = os.path.realpath(os.path.abspath(
+ os.path.expanduser(file_path)))
+
+ parent_dir = os.path.dirname(file_path)
+ if not file_path.endswith('/'):
+ filename = os.path.basename(file_path)
+ full_path = file_path
+ relative_name = os.path.relpath(full_path, parent_dir)
+ file_list.append(FileDetail(full_path, relative_name,
+ filename))
+ # TODO: this will copy the result of symlinked files, but
+ # it won't follow directory symlinks. If we add that, we
+ # should ensure that we don't loop.
+ for path, folders, files in os.walk(file_path):
+                # Sort folders and files in-place so that we walk them
+                # in order.
+ files.sort(key=lambda x: x.lower())
+ folders.sort(key=lambda x: x.lower())
+ # relative_path: The path between the given directory
+ # and the one being currently walked.
+ relative_path = os.path.relpath(path, parent_dir)
+
+ for filename in folders:
+ full_path = os.path.join(path, filename)
+ if not self._path_in_tree(original_root, full_path):
+ continue
+ relative_name = os.path.relpath(full_path, parent_dir)
+ file_list.append(FileDetail(full_path, relative_name,
+ filename))
+
+ for filename in files:
+ full_path = os.path.join(path, filename)
+ if not self._path_in_tree(original_root, full_path):
+ continue
+ relative_name = os.path.relpath(full_path, parent_dir)
+ file_detail = FileDetail(full_path, relative_name)
+ file_list.append(file_detail)
+
+ self.file_list += file_list
+
+
+class Indexer():
+ """Index a FileList
+
+ Functions to generate indexes and other collated data for a
+ FileList
+
+ - make_indexes() : make index.html in folders
+ - make_download_script() : make a script to download all logs
+ """
+ def __init__(self, file_list):
+ '''
+ Args:
+ file_list (FileList): A FileList object with all files
+ to be indexed.
+ '''
+ assert isinstance(file_list, FileList)
+ self.file_list = file_list
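+
+    # Minimal usage sketch (the path is hypothetical):
+    #
+    #   with FileList() as fl:
+    #       fl.add('/tmp/logs/')
+    #       Indexer(fl).make_indexes()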
+
+    def _make_index_file(self, folder_links, title, tempdir, append_footer):
+        """Writes an index into a file for pushing"""
+        for file_details in folder_links:
+            # Do not generate an index file if one exists already.
+            # This may be the case when uploading other machine generated
+            # content like python coverage info.
+            if self.index_filename == file_details.filename:
+                return
+        index_content = self._generate_log_index(
+            folder_links, title, append_footer)
+        index_path = os.path.join(tempdir, self.index_filename)
+        # Use a context manager so the file is flushed and closed
+        # before it is uploaded.
+        with open(index_path, 'w') as fd:
+            fd.write(index_content)
+        return index_path
+
+ def _generate_log_index(self, folder_links, title, append_footer):
+ """Create an index of logfiles and links to them"""
+
+ output = '<html><head><title>%s</title></head><body>\n' % title
+ output += '<h1>%s</h1>\n' % title
+ output += '<table><tr><th></th><th>Name</th><th>Last Modified</th>'
+ output += '<th>Size</th></tr>'
+
+ file_details_to_append = None
+ for file_details in folder_links:
+ output += '<tr>'
+ output += (
+ '<td><img alt="[ ]" title="%(m)s" src="%(i)s"></img></td>' % ({
+ 'm': file_details.mimetype,
+ 'i': get_mime_icon(file_details.mimetype,
+ file_details.filename),
+ }))
+ filename = file_details.filename
+ if file_details.folder:
+ filename += '/'
+ output += '<td><a href="%s">%s</a></td>' % (
+ urlparse.quote(filename),
+ filename)
+ output += '<td>%s</td>' % time.asctime(
+ file_details.last_modified)
+ size = sizeof_fmt(file_details.size, suffix='')
+ output += '<td style="text-align: right">%s</td>' % size
+ output += '</tr>\n'
+
+ if (append_footer and
+ append_footer in file_details.filename):
+ file_details_to_append = file_details
+
+ output += '</table>'
+
+ if file_details_to_append:
+ output += '<br /><hr />'
+ try:
+ with open(file_details_to_append.full_path, 'r') as f:
+ output += f.read()
+ except IOError:
+ logging.exception("Error opening file for appending")
+
+ output += '</body></html>\n'
+ return output
+
+ def make_indexes(self, create_parent_links=True,
+ create_topdir_parent_link=False,
+ append_footer='index_footer.html'):
+ '''Make index.html files
+
+        Iterate the file list and create index.html files for folders
+
+ Args:
+ create_parent_links (bool): Create parent links
+ create_topdir_parent_link (bool): Create topdir parent link
+ append_footer (str): Filename of a footer to append to each
+ generated page
+
+ Return:
+            No value; self.file_list is updated in place
+ '''
+ self.index_filename = 'index.html'
+
+ folders = collections.OrderedDict()
+ for f in self.file_list:
+ if f.folder:
+ folders[f.relative_path] = []
+ folder = os.path.dirname(os.path.dirname(
+ f.relative_path + '/'))
+ if folder == '/':
+ folder = ''
+ else:
+ folder = os.path.dirname(f.relative_path)
+ folders[folder].append(f)
+
+ indexes = {}
+ parent_file_detail = FileDetail(None, '..', '..')
+ for folder, files in folders.items():
+ # Don't add the pseudo-top-directory
+ if files and files[0].full_path is None:
+ files = files[1:]
+ if create_topdir_parent_link:
+ files = [parent_file_detail] + files
+ elif create_parent_links:
+ files = [parent_file_detail] + files
+
+ # Do generate a link to the parent directory
+ full_path = self._make_index_file(files, 'Index of %s' % (folder,),
+ self.file_list.get_tempdir(),
+ append_footer)
+
+ if full_path:
+ filename = os.path.basename(full_path)
+ relative_name = os.path.join(folder, filename)
+ indexes[folder] = FileDetail(full_path, relative_name,
+ is_index=True)
+
+ # This appends the index file at the end of the group of files
+ # for each directory.
+ new_list = []
+ last_dirname = None
+ for f in reversed(list(self.file_list)):
+ if f.folder:
+ relative_path = f.relative_path + '/'
+ else:
+ relative_path = f.relative_path
+ dirname = os.path.dirname(relative_path)
+ if dirname == '/':
+ dirname = ''
+ if dirname != last_dirname:
+ index = indexes.pop(dirname, None)
+ if index:
+ new_list.append(index)
+ last_dirname = dirname
+ new_list.append(f)
+ new_list.reverse()
+ self.file_list.file_list = new_list
+
+ def make_download_script(self, base_url, download_template):
+ '''Make a download script from template
+
+        Note: since this needs the base_url, it really only makes sense
+        to call it after the Uploader() is initialised.
+
+ Args:
+ base_url (str): The base URL to prefix
+ download_template (str): Path to a jinja2 template
+
+ Return:
+ None; a file with the same name as the template (stripped of
+ .j2 if present) is added to self.file_list for upload.
+ '''
+ # Prune the list to just be files, no indexes (this should run
+ # before indexing anyway)
+ download_files = [f for f in self.file_list
+ if not f.folder and not f.is_index]
+ output_filename = os.path.basename(download_template[:-3]
+ if download_template.endswith('.j2')
+ else download_template)
+ output = os.path.join(self.file_list.get_tempdir(), output_filename)
+
+ with open(download_template) as f, open(output, 'wb') as output:
+ logging.debug("Writing template %s" % output.name)
+ template = jinja2.Template(f.read())
+ rendered = template.stream(
+ base_url=base_url.rstrip('/'),
+ # jinja wants unicode input
+ file_list=[to_text(f.relative_path) for f in download_files])
+ rendered.dump(output, encoding='utf-8')
+
+ download_script = FileDetail(output.name, output_filename)
+ self.file_list.file_list.append(download_script)
+
+
+class GzipFilter():
+ chunk_size = 16384
+
+ def __init__(self, infile):
+ self.gzipfile = GZIPCompressedStream(infile)
+ self.done = False
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.done:
+ self.gzipfile.close()
+ raise StopIteration()
+ data = self.gzipfile.read(self.chunk_size)
+ if not data:
+ self.done = True
+ return data
+
+
+class DeflateFilter():
+ chunk_size = 16384
+
+ def __init__(self, infile):
+ self.infile = infile
+ self.encoder = zlib.compressobj()
+ self.done = False
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.done:
+ raise StopIteration()
+ ret = b''
+ while True:
+ data = self.infile.read(self.chunk_size)
+ if data:
+ ret = self.encoder.compress(data)
+ if ret:
+ break
+ else:
+ self.done = True
+ ret = self.encoder.flush()
+ break
+ return ret
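+
+
+# Usage sketch: both filters wrap a binary file object and yield
+# compressed chunks, suitable as a streaming request body
+# (the filename below is hypothetical):
+#
+#   with open('job-output.txt', 'rb') as f:
+#       for chunk in GzipFilter(f):
+#           pass  # each chunk is up to chunk_size bytes of gzip data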
+
+
+class Uploader():
+ def __init__(self, cloud, container, prefix=None, delete_after=None,
+ public=True, dry_run=False):
+
+ self.dry_run = dry_run
+ if dry_run:
+ self.url = 'http://dry-run-url.com/a/path/'
+ return
+
+ self.cloud = cloud
+ self.container = container
+ self.prefix = prefix or ''
+ self.delete_after = delete_after
+
+ sess = self.cloud.config.get_session()
+ adapter = requests.adapters.HTTPAdapter(pool_maxsize=100)
+ sess.mount('https://', adapter)
+
+ # If we're in Rackspace, there's some non-standard stuff we
+ # need to do to get the public endpoint.
+ try:
+ cdn_endpoint = self.cloud.session.auth.get_endpoint(
+ self.cloud.session, service_type='rax:object-cdn',
+ region_name=self.cloud.config.region_name,
+ interface=self.cloud.config.interface)
+ cdn_url = os.path.join(cdn_endpoint, self.container)
+ except keystoneauth1.exceptions.catalog.EndpointNotFound:
+ cdn_url = None
+
+ # We retry here because sometimes we get HTTP 401 errors in rax.
+ # They seem to happen infrequently (on the order of once a day across
+ # all jobs) so a retry is likely to work.
+ container = retry_function(
+ lambda: self.cloud.get_container(self.container))
+ if not container:
+ retry_function(
+ lambda: self.cloud.create_container(
+ name=self.container, public=public))
+ headers = {'X-Container-Meta-Web-Index': 'index.html',
+ 'X-Container-Meta-Access-Control-Allow-Origin': '*'}
+ retry_function(
+ lambda: self.cloud.update_container(
+ name=self.container,
+ headers=headers))
+ # 'X-Container-Meta-Web-Listings': 'true'
+
+ # The ceph radosgw swift implementation requires an
+ # index.html at the root in order for any other indexes to
+ # work.
+ index_headers = {'access-control-allow-origin': '*'}
+ retry_function(
+ lambda: self.cloud.create_object(self.container,
+ name='index.html',
+ data='',
+ content_type='text/html',
+ **index_headers))
+
+ # Enable the CDN in rax
+ if cdn_url:
+ retry_function(lambda: self.cloud.session.put(cdn_url))
+
+ if cdn_url:
+ endpoint = retry_function(
+ lambda: self.cloud.session.head(
+ cdn_url).headers['X-Cdn-Ssl-Uri'])
+ container = endpoint
+ else:
+ endpoint = self.cloud.object_store.get_endpoint()
+ container = os.path.join(endpoint, self.container)
+
+ self.url = os.path.join(container, self.prefix)
+
+ def upload(self, file_list):
+ """Spin up thread pool to upload to swift"""
+
+ if self.dry_run:
+ return
+
+ num_threads = min(len(file_list), MAX_UPLOAD_THREADS)
+ threads = []
+ queue = queuelib.Queue()
+ # add items to queue
+ for f in file_list:
+ queue.put(f)
+
+ for x in range(num_threads):
+ t = threading.Thread(target=self.post_thread, args=(queue,))
+ threads.append(t)
+ t.start()
+
+ for t in threads:
+ t.join()
+
+ def post_thread(self, queue):
+ while True:
+ try:
+ file_detail = queue.get_nowait()
+ logging.debug("%s: processing job %s",
+ threading.current_thread(),
+ file_detail)
+ retry_function(lambda: self._post_file(file_detail))
+ except requests.exceptions.RequestException:
+ # Do our best to attempt to upload all the files
+ logging.exception("Error posting file after multiple attempts")
+ continue
+ except IOError:
+ # Do our best to attempt to upload all the files
+ logging.exception("Error opening file")
+ continue
+ except queuelib.Empty:
+ # No more work to do
+ return
+
+ @staticmethod
+ def _is_text_type(mimetype):
+ # We want to compress all text types.
+ if mimetype.startswith('text/'):
+ return True
+
+        # Also compress types that typically contain text but are not
+        # a text subtype.
+ compress_types = [
+ 'application/json',
+ 'image/svg+xml',
+ ]
+ if mimetype in compress_types:
+ return True
+ return False
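+
+    # Examples: 'text/plain' and 'application/json' compress;
+    # 'image/png' and 'application/octet-stream' do not.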
+
+ def _post_file(self, file_detail):
+ relative_path = os.path.join(self.prefix, file_detail.relative_path)
+ headers = {}
+ if self.delete_after:
+ headers['x-delete-after'] = str(self.delete_after)
+ headers['content-type'] = file_detail.mimetype
+ # This is required for Rackspace CDN
+ headers['access-control-allow-origin'] = '*'
+
+ if not file_detail.folder:
+ if (file_detail.encoding is None and
+ self._is_text_type(file_detail.mimetype)):
+ headers['content-encoding'] = 'gzip'
+ data = GzipFilter(open(file_detail.full_path, 'rb'))
+ else:
+ if (not file_detail.filename.endswith(".gz") and
+ file_detail.encoding):
+ # Don't apply gzip encoding to files that we receive as
+ # already gzipped. The reason for this is swift will
+ # serve this back to users as an uncompressed file if they
+ # don't set an accept-encoding that includes gzip. This
+ # can cause problems when the desired file state is
+ # compressed as with .tar.gz tarballs.
+ headers['content-encoding'] = file_detail.encoding
+ data = open(file_detail.full_path, 'rb')
+ else:
+ data = ''
+ relative_path = relative_path.rstrip('/')
+ if relative_path == '':
+ relative_path = '/'
+ self.cloud.create_object(self.container,
+ name=relative_path,
+ data=data,
+ **headers)
+
+
+def run(cloud, container, files,
+ indexes=True, parent_links=True, topdir_parent_link=False,
+ partition=False, footer='index_footer.html', delete_after=15552000,
+ prefix=None, public=True, dry_run=False, download_template=''):
+
+ if prefix:
+ prefix = prefix.lstrip('/')
+ if partition and prefix:
+ parts = prefix.split('/')
+ if len(parts) > 1:
+ container += '_' + parts[0]
+ prefix = '/'.join(parts[1:])
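+
+    # Worked example (hypothetical values): with container 'logs' and
+    # prefix '42/817642/3', partitioning gives container 'logs_42' and
+    # prefix '817642/3'.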
+
+ # Create the objects to make sure the arguments are sound.
+ with FileList() as file_list:
+ # Scan the files.
+ for file_path in files:
+ file_list.add(file_path)
+
+ # Upload.
+ uploader = Uploader(cloud, container, prefix, delete_after,
+ public, dry_run)
+
+ indexer = Indexer(file_list)
+
+ # (Possibly) make download script
+ if download_template:
+ indexer.make_download_script(uploader.url, download_template)
+
+ # (Possibly) make indexes.
+ if indexes:
+ indexer.make_indexes(create_parent_links=parent_links,
+ create_topdir_parent_link=topdir_parent_link,
+ append_footer=footer)
+
+ logging.debug("List of files prepared to upload:")
+ for x in file_list:
+ logging.debug(x)
+
+ uploader.upload(file_list)
+ return uploader.url
+
+
+def ansible_main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ cloud=dict(required=True, type='raw'),
+ container=dict(required=True, type='str'),
+ files=dict(required=True, type='list'),
+ partition=dict(type='bool', default=False),
+ indexes=dict(type='bool', default=True),
+ parent_links=dict(type='bool', default=True),
+ topdir_parent_link=dict(type='bool', default=False),
+ public=dict(type='bool', default=True),
+ footer=dict(type='str'),
+ delete_after=dict(type='int'),
+ prefix=dict(type='str'),
+ download_template=dict(type='str'),
+ )
+ )
+
+ p = module.params
+ cloud = get_cloud(p.get('cloud'))
+ try:
+ url = run(cloud, p.get('container'), p.get('files'),
+ indexes=p.get('indexes'),
+ parent_links=p.get('parent_links'),
+ topdir_parent_link=p.get('topdir_parent_link'),
+ partition=p.get('partition'),
+ footer=p.get('footer'),
+ delete_after=p.get('delete_after', 15552000),
+ prefix=p.get('prefix'),
+ public=p.get('public'),
+ download_template=p.get('download_template'))
+ except (keystoneauth1.exceptions.http.HttpError,
+ requests.exceptions.RequestException):
+ s = "Error uploading to %s.%s" % (cloud.name, cloud.config.region_name)
+ logging.exception(s)
+ s += "\n" + traceback.format_exc()
+ module.fail_json(
+ changed=False,
+ msg=s,
+ cloud=cloud.name,
+ region_name=cloud.config.region_name)
+ module.exit_json(changed=True,
+ url=url)
+
+
+def cli_main():
+ parser = argparse.ArgumentParser(
+ description="Upload files to swift"
+ )
+ parser.add_argument('--verbose', action='store_true',
+ help='show debug information')
+ parser.add_argument('--no-indexes', action='store_true',
+ help='do not generate any indexes at all')
+ parser.add_argument('--no-parent-links', action='store_true',
+ help='do not include links back to a parent dir')
+ parser.add_argument('--create-topdir-parent-link', action='store_true',
+ help='include a link in the root directory of the '
+ 'files to the parent directory which may be the '
+ 'index of all results')
+ parser.add_argument('--no-public', action='store_true',
+ help='do not create the container as public')
+ parser.add_argument('--partition', action='store_true',
+ help='partition the prefix into multiple containers')
+ parser.add_argument('--append-footer', default='index_footer.html',
+ help='when generating an index, if the given file is '
+ 'present in a directory, append it to the index '
+ '(set to "none" to disable)')
+ parser.add_argument('--delete-after', default=15552000,
+ help='Number of seconds to delete object after '
+ 'upload. Default is 6 months (15552000 seconds) '
+ 'and if set to 0 X-Delete-After will not be set',
+ type=int)
+ parser.add_argument('--download-template', default='',
+ help='Path to a Jinja2 template that will be filled '
+ 'out to create an automatic download script')
+ parser.add_argument('--prefix',
+ help='Prepend this path to the object names when '
+ 'uploading')
+ parser.add_argument('--dry-run', action='store_true',
+ help='do not attempt to create containers or upload, '
+ 'useful with --verbose for debugging')
+ parser.add_argument('cloud',
+ help='Name of the cloud to use when uploading')
+ parser.add_argument('container',
+ help='Name of the container to use when uploading')
+ parser.add_argument('files', nargs='+',
+ help='the file(s) to upload with recursive glob '
+ 'matching when supplied as a string')
+
+ args = parser.parse_args()
+
+ if args.verbose:
+ logging.basicConfig(level=logging.DEBUG)
+ # Set requests log level accordingly
+ logging.getLogger("requests").setLevel(logging.DEBUG)
+ # logging.getLogger("keystoneauth").setLevel(logging.INFO)
+ # logging.getLogger("stevedore").setLevel(logging.INFO)
+ logging.captureWarnings(True)
+
+ append_footer = args.append_footer
+ if append_footer.lower() == 'none':
+ append_footer = None
+
+ url = run(get_cloud(args.cloud), args.container, args.files,
+ indexes=not args.no_indexes,
+ parent_links=not args.no_parent_links,
+ topdir_parent_link=args.create_topdir_parent_link,
+ partition=args.partition,
+ footer=append_footer,
+ delete_after=args.delete_after,
+ prefix=args.prefix,
+ public=not args.no_public,
+ dry_run=args.dry_run,
+ download_template=args.download_template)
+ print(url)
+
+
+if __name__ == '__main__':
+ # Avoid unactionable warnings
+ requestsexceptions.squelch_warnings(
+ requestsexceptions.InsecureRequestWarning)
+
+ if sys.stdin.isatty():
+ cli_main()
+ else:
+ ansible_main()
diff --git a/roles/test-upload-logs-swift/tasks/main.yaml b/roles/test-upload-logs-swift/tasks/main.yaml
new file mode 100644
index 0000000..4dc6c5a
--- /dev/null
+++ b/roles/test-upload-logs-swift/tasks/main.yaml
@@ -0,0 +1,46 @@
+- name: Set zuul-log-path fact
+ include_role:
+ name: set-zuul-log-path-fact
+ when: zuul_log_path is not defined
+
+# Always upload (true), never upload (false) or only on failure ('failure')
+- when: zuul_site_upload_logs | default(true) | bool or
+ (zuul_site_upload_logs == 'failure' and not zuul_success | bool)
+ block:
+ # Use chmod instead of file because ansible 2.5 file with recurse and
+ # follow can't really handle symlinks to .
+ - name: Ensure logs are readable before uploading
+ delegate_to: localhost
+ command: "chmod -R u=rwX,g=rX,o=rX {{ zuul.executor.log_root }}/"
+ # ANSIBLE0007 chmod used in place of argument mode to file
+ tags:
+ - skip_ansible_lint
+
+ - name: Set download template
+ set_fact:
+ download_template: "{{ zuul_log_download_template }}"
+ when:
+ - zuul_log_include_download_script
+
+ - name: Upload logs to swift
+ delegate_to: localhost
+ zuul_swift_upload:
+ cloud: "{{ zuul_log_cloud_config }}"
+ partition: "{{ zuul_log_partition }}"
+ container: "{{ zuul_log_container }}"
+ public: "{{ zuul_log_container_public }}"
+ prefix: "{{ zuul_log_path }}"
+ indexes: "{{ zuul_log_create_indexes }}"
+ files:
+ - "{{ zuul.executor.log_root }}/"
+ delete_after: "{{ zuul_log_delete_after | default(omit) }}"
+ download_template: "{{ download_template | default(omit) }}"
+ register: upload_results
+
+- name: Return log URL to Zuul
+ delegate_to: localhost
+ zuul_return:
+ data:
+ zuul:
+ log_url: "{{ upload_results.url }}/"
+ when: upload_results is defined
diff --git a/roles/test-upload-logs-swift/templates/download-logs.sh.j2 b/roles/test-upload-logs-swift/templates/download-logs.sh.j2
new file mode 100644
index 0000000..9a13f17
--- /dev/null
+++ b/roles/test-upload-logs-swift/templates/download-logs.sh.j2
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+# Download all logs
+
+#
+# To use this file
+#
+# curl "{{ base_url }}/download-logs.sh" | bash
+#
+# Logs will be copied into a temporary directory as described in the
+# output. Set DOWNLOAD_DIR to an empty directory if you wish to
+# override this.
+#
+
+BASE_URL={{ base_url }}
+
+function log {
+ echo "$(date -Iseconds) | $@"
+}
+
+function save_file {
+ local file="$1"
+
+ curl -s --compressed --create-dirs -o "${file}" "${BASE_URL}/${file}"
+
+ # Using --compressed we will send an Accept-Encoding: gzip header
+ # and the data will come to us across the network compressed.
+ # However, sometimes things like OpenStack's log server will send
+ # .gz files (as stored on its disk) uncompressed, so we check if
+ # this really looks like an ASCII file and rename for clarity.
+ if [[ "${file}" == *.gz ]]; then
+ local type=$(file "${file}")
+ if [[ "${type}" =~ "ASCII text" ]] || [[ "${type}" =~ "Unicode text" ]]; then
+ local new_name=${file%.gz}
+ log "Renaming to ${new_name}"
+ mv "${file}" "${new_name}"
+ fi
+ fi
+
+}
+
+if [[ -z "${DOWNLOAD_DIR}" ]]; then
+ DOWNLOAD_DIR=$(mktemp -d --tmpdir zuul-logs.XXXXXX)
+fi
+log "Saving logs to ${DOWNLOAD_DIR}"
+
+pushd "${DOWNLOAD_DIR}" > /dev/null
+
+{% set total_files = file_list | length %}
+{% for file in file_list %}
+log "Getting ${BASE_URL}/{{ '%-80s'|format(file) }} [ {{ '%04d'|format(loop.index) }} / {{ '%04d'|format(total_files) }} ]"
+save_file "{{ file }}"
+{% endfor %}
+
+popd >/dev/null
+
+log "Download complete!"
diff --git a/roles/tox/library/test-constraints.txt b/roles/tox/library/test-constraints.txt
new file mode 100644
index 0000000..10e6706
--- /dev/null
+++ b/roles/tox/library/test-constraints.txt
@@ -0,0 +1,2 @@
+requests===2.18.4
+doesnotexistonpypi===0.0.1
diff --git a/roles/tox/library/test_tox_install_sibling_packages.py b/roles/tox/library/test_tox_install_sibling_packages.py
new file mode 100644
index 0000000..b74697c
--- /dev/null
+++ b/roles/tox/library/test_tox_install_sibling_packages.py
@@ -0,0 +1,63 @@
+# Copyright (C) 2019 VEXXHOST, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+import testtools
+
+from .tox_install_sibling_packages import get_installed_packages
+from .tox_install_sibling_packages import write_new_constraints_file
+
+
+class TestToxInstallSiblingPackages(testtools.TestCase):
+ def test_get_installed_packages(self):
+ # NOTE(mnaser): Given that we run our tests inside Tox, we can
+ # leverage the tox virtual environment we use in
+ # unit tests instead of mocking up everything.
+ pkgs = get_installed_packages(sys.executable)
+
+ # NOTE(mnaser): requests should be installed in this virtualenv
+ # but this might fail later if we stop adding requests
+ # in the unit tests.
+ self.assertIn("requests", pkgs)
+
+ def test_write_new_constraints_file(self):
+ # NOTE(mnaser): Given that we run our tests inside Tox, we can
+ # leverage the tox virtual environment we use in
+ # unit tests instead of mocking up everything.
+ pkgs = get_installed_packages(sys.executable)
+
+ # NOTE(mnaser): requests should be installed in this virtualenv
+ # but this might fail later if we stop adding requests
+ # in the unit tests.
+ test_constraints = os.path.join(os.path.dirname(__file__),
+ 'test-constraints.txt')
+ constraints = write_new_constraints_file(test_constraints, pkgs)
+
+ def cleanup_constraints_file():
+ if os.path.exists(constraints):
+ os.unlink(constraints)
+ self.addCleanup(cleanup_constraints_file)
+
+ self.assertTrue(os.path.exists(constraints))
+ with open(constraints) as f:
+ s = f.read()
+ self.assertNotIn("requests", s)
+ self.assertIn("doesnotexistonpypi", s)
diff --git a/roles/tox/library/tox_install_sibling_packages.py b/roles/tox/library/tox_install_sibling_packages.py
index abec207..915186c 100644
--- a/roles/tox/library/tox_install_sibling_packages.py
+++ b/roles/tox/library/tox_install_sibling_packages.py
@@ -121,7 +121,8 @@ def get_installed_packages(tox_python):
def write_new_constraints_file(constraints, packages):
- with tempfile.NamedTemporaryFile(delete=False) as constraints_file:
+ with tempfile.NamedTemporaryFile(mode='w', delete=False) \
+ as constraints_file:
constraints_lines = open(constraints, 'r').read().split('\n')
for line in constraints_lines:
package_name = line.split('===')[0]
diff --git a/roles/upload-afs-roots/README.rst b/roles/upload-afs-roots/README.rst
new file mode 100644
index 0000000..819f381
--- /dev/null
+++ b/roles/upload-afs-roots/README.rst
@@ -0,0 +1,26 @@
+Copy contents from ``{{ zuul.executor.work_root }}/artifacts/`` to AFS
+
+This is intended for documentation publishing; it deletes files on the
+target that do not exist in the source content.
+
+Before the job rsyncs the build into its final location, it must first
+create a list of directories that should not be deleted. This way if
+an entire directory is removed from a document, it will still be
+removed from the website, but directories which are themselves roots
+of other documents (for example, the stein branch) are not removed. A
+marker file, called `.root-marker`, at the root of each such directory
+will accomplish this; therefore each build job should also ensure that
+it leaves such a marker file at the root of its build. The job will
+find each of those in the destination hierarchy and add their
+containing directories to a list of directories to exclude from
+rsyncing.
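+
+A minimal sketch of how such an exclusion list could be derived from
+the marker files (illustrative only; the role's actual implementation
+may differ)::
+
+   import os
+
+   def find_excluded_roots(afs_target):
+       """Directories containing a .root-marker, relative to target."""
+       excluded = []
+       for root, dirs, files in os.walk(afs_target):
+           if '.root-marker' in files and root != afs_target:
+               excluded.append(os.path.relpath(root, afs_target))
+       return excluded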
+
+**Role Variables**
+
+.. zuul:rolevar:: afs_source
+
+ Path to local source directory.
+
+.. zuul:rolevar:: afs_target
+
+ Target path in AFS (should begin with '/afs/...').
diff --git a/roles/upload-afs-roots/__init__.py b/roles/upload-afs-roots/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/roles/upload-afs-roots/__init__.py
diff --git a/roles/upload-afs/defaults/main.yaml b/roles/upload-afs-roots/defaults/main.yaml
index 199870f..199870f 100644
--- a/roles/upload-afs/defaults/main.yaml
+++ b/roles/upload-afs-roots/defaults/main.yaml
diff --git a/roles/upload-afs-roots/library/__init__.py b/roles/upload-afs-roots/library/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/roles/upload-afs-roots/library/__init__.py
diff --git a/roles/upload-afs/library/zuul_afs.py b/roles/upload-afs-roots/library/zuul_afs.py
index 56f1e14..56f1e14 100644
--- a/roles/upload-afs/library/zuul_afs.py
+++ b/roles/upload-afs-roots/library/zuul_afs.py
diff --git a/roles/upload-afs/tasks/main.yaml b/roles/upload-afs-roots/tasks/main.yaml
index 3b01008..3b01008 100644
--- a/roles/upload-afs/tasks/main.yaml
+++ b/roles/upload-afs-roots/tasks/main.yaml
diff --git a/roles/upload-afs-synchronize/README.rst b/roles/upload-afs-synchronize/README.rst
new file mode 100644
index 0000000..c2c902f
--- /dev/null
+++ b/roles/upload-afs-synchronize/README.rst
@@ -0,0 +1,19 @@
+Copy contents from ``{{ zuul.executor.work_root }}/artifacts/`` to AFS
+
+**Role Variables**
+
+.. zuul:rolevar:: afs_source
+
+ Path to local source directory.
+
+.. zuul:rolevar:: afs_target
+
+ Target path in AFS (should begin with '/afs/...').
+
+.. zuul:rolevar:: afs_copy_only
+ :default: True
+
+   If set to `false`, this will specify `--delete-after` to remove
+   files on the remote side that do not exist on the copying side.
+   When set to `true`, the role acts as a regular additive copy and
+   will not remove any remote files.
diff --git a/roles/upload-afs-synchronize/defaults/main.yaml b/roles/upload-afs-synchronize/defaults/main.yaml
new file mode 100644
index 0000000..0fd7b56
--- /dev/null
+++ b/roles/upload-afs-synchronize/defaults/main.yaml
@@ -0,0 +1,2 @@
+afs_source: "{{ zuul.executor.work_root }}/artifacts/"
+afs_copy_only: true
diff --git a/roles/upload-afs-synchronize/tasks/main.yaml b/roles/upload-afs-synchronize/tasks/main.yaml
new file mode 100644
index 0000000..492b47f
--- /dev/null
+++ b/roles/upload-afs-synchronize/tasks/main.yaml
@@ -0,0 +1,18 @@
+- name: Precreate AFS target directory
+ # rsync will create the target directory but not its parent directories
+ # which may not yet exist
+ file:
+ path: "{{ afs_target }}"
+ state: directory
+- name: Upload contents to AFS
+ synchronize:
+ src: "{{ afs_source }}"
+ dest: "{{ afs_target }}"
+ # NOTE(ianw): you can't set group permissions on AFS, hence we
+ # don't set owner specifically.
+ archive: false
+ perms: true
+ times: true
+ recursive: true
+    # Always pass --safe-links; add --delete-after only when
+    # afs_copy_only is false.
+    rsync_opts: '{{ ["--safe-links"] + (["--delete-after"] if not afs_copy_only else []) }}'
+
diff --git a/roles/upload-afs/README.rst b/roles/upload-afs/README.rst
deleted file mode 100644
index d684e0f..0000000
--- a/roles/upload-afs/README.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-Copy contents from ``{{ zuul.executor.work_root }}/artifacts/`` to AFS
-
-**Role Variables**
-
-.. zuul:rolevar:: afs_source
-
- Path to local source directory.
-
-.. zuul:rolevar:: afs_target
-
- Target path in AFS (should begin with '/afs/...').
diff --git a/roles/upload-docker-image/tasks/main.yaml b/roles/upload-docker-image/tasks/main.yaml
index 1549090..5052e3c 100644
--- a/roles/upload-docker-image/tasks/main.yaml
+++ b/roles/upload-docker-image/tasks/main.yaml
@@ -1,10 +1,12 @@
- name: Verify repository names
when: |
docker_credentials.repository is defined
- and not item.repository | regex_search(docker_credentials.repository)
+ and not zj_image.repository | regex_search(docker_credentials.repository)
loop: "{{ docker_images }}"
+ loop_control:
+ loop_var: zj_image
fail:
- msg: "{{ item.repository }} not permitted by {{ docker_credentials.repository }}"
+ msg: "{{ zj_image.repository }} not permitted by {{ docker_credentials.repository }}"
- name: Log in to dockerhub
command: "docker login -u {{ docker_credentials.username }} -p {{ docker_credentials.password }}"
no_log: true
diff --git a/roles/upload-docker-image/tasks/push.yaml b/roles/upload-docker-image/tasks/push.yaml
index 878a8bb..150782a 100644
--- a/roles/upload-docker-image/tasks/push.yaml
+++ b/roles/upload-docker-image/tasks/push.yaml
@@ -1,5 +1,9 @@
- name: Upload tag to dockerhub
- command: "docker push {{ image.repository }}:change_{{ zuul.change }}_{{ image_tag }}"
+ command: "docker push {{ image.repository }}:change_{{ zuul.change }}_{{ zj_image_tag }}"
loop: "{{ image.tags | default(['latest']) }}"
loop_control:
- loop_var: image_tag
+ loop_var: zj_image_tag
+ register: result
+ until: result.rc == 0
+ retries: 3
+ delay: 30
diff --git a/roles/upload-logs-gcs/README.rst b/roles/upload-logs-gcs/README.rst
new file mode 100644
index 0000000..0b1e355
--- /dev/null
+++ b/roles/upload-logs-gcs/README.rst
@@ -0,0 +1,63 @@
+Upload logs to Google Cloud Storage
+
+Before using this role, create at least one bucket and set up
+appropriate access controls or lifecycle events. This role will not
+automatically create buckets (though it will configure CORS policies).
+
+This role requires the ``google-cloud-storage`` Python package to be
+installed in the Ansible environment on the Zuul executor. It uses
+Google Cloud Application Default Credentials.
+
+**Role Variables**
+
+.. zuul:rolevar:: zuul_site_upload_logs
+ :default: true
+
+ Controls when logs are uploaded. true, the default, means always
+ upload logs. false means never upload logs. 'failure' means to only
+ upload logs when the job has failed.
+
+ .. note:: Intended to be set by admins via site-variables.
+
+.. zuul:rolevar:: zuul_log_partition
+ :default: false
+
+ If set to true, then the first component of the log path will be
+ removed from the object name and added to the bucket name, so that
+ logs for different changes are distributed across a large number of
+ buckets.
+
+.. zuul:rolevar:: zuul_log_container
+
+ This role *will not* create buckets which do not already exist. If
+ partitioning is not enabled, this is the name of the bucket which
+ will be used. If partitioning is enabled, then this will be used
+ as the prefix for the bucket name which will be separated from the
+ partition name by an underscore. For example, "logs_42" would be
+ the bucket name for partition 42.
+
+ Note that you will want to set this to a value that uniquely
+ identifies your Zuul installation.
+
+.. zuul:rolevar:: zuul_log_path
+ :default: Generated by the role `set-zuul-log-path-fact`
+
+ Prepend this path to the object names when uploading.
+
+.. zuul:rolevar:: zuul_log_create_indexes
+ :default: true
+
+ Whether to create `index.html` files with directory indexes. If set
+ to false, Swift containers can be marked with a `Web-Listings=true`
+ property to activate Swift's own directory indexing.
+
+.. zuul:rolevar:: zuul_log_path_shard_build
+ :default: false
+
+   This variable is consumed by the `set-zuul-log-path-fact` role,
+   which upload-logs-gcs calls into. If you set this, log paths will
+   be prefixed with the first three characters of the build UUID,
+   which improves log file sharding.
+
+ More details can be found at
+ :zuul:rolevar:`set-zuul-log-path-fact.zuul_log_path_shard_build`.
diff --git a/roles/upload-logs-gcs/__init__.py b/roles/upload-logs-gcs/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/roles/upload-logs-gcs/__init__.py
diff --git a/roles/upload-logs-gcs/defaults/main.yaml b/roles/upload-logs-gcs/defaults/main.yaml
new file mode 100644
index 0000000..4d7c551
--- /dev/null
+++ b/roles/upload-logs-gcs/defaults/main.yaml
@@ -0,0 +1,3 @@
+zuul_log_partition: false
+zuul_log_container_public: true
+zuul_log_create_indexes: true
diff --git a/roles/upload-logs-gcs/library/__init__.py b/roles/upload-logs-gcs/library/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/roles/upload-logs-gcs/library/__init__.py
diff --git a/roles/upload-logs-gcs/library/test-fixtures/artifacts/foo.tar.gz b/roles/upload-logs-gcs/library/test-fixtures/artifacts/foo.tar.gz
new file mode 100644
index 0000000..9b1579d
--- /dev/null
+++ b/roles/upload-logs-gcs/library/test-fixtures/artifacts/foo.tar.gz
Binary files differ
diff --git a/roles/upload-logs-gcs/library/test-fixtures/artifacts/foo.tgz b/roles/upload-logs-gcs/library/test-fixtures/artifacts/foo.tgz
new file mode 100644
index 0000000..ca9fccb
--- /dev/null
+++ b/roles/upload-logs-gcs/library/test-fixtures/artifacts/foo.tgz
Binary files differ
diff --git a/roles/upload-logs-gcs/library/test-fixtures/links/controller/service_log.txt b/roles/upload-logs-gcs/library/test-fixtures/links/controller/service_log.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/roles/upload-logs-gcs/library/test-fixtures/links/controller/service_log.txt
diff --git a/roles/upload-logs-gcs/library/test-fixtures/links/job-output.json b/roles/upload-logs-gcs/library/test-fixtures/links/job-output.json
new file mode 100644
index 0000000..c8cd7e9
--- /dev/null
+++ b/roles/upload-logs-gcs/library/test-fixtures/links/job-output.json
@@ -0,0 +1 @@
+{"test": "foo"}
diff --git a/roles/upload-logs-gcs/library/test-fixtures/links/symlink_loop/placeholder b/roles/upload-logs-gcs/library/test-fixtures/links/symlink_loop/placeholder
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/roles/upload-logs-gcs/library/test-fixtures/links/symlink_loop/placeholder
diff --git a/roles/upload-logs-gcs/library/test-fixtures/logs/controller/compressed.gz b/roles/upload-logs-gcs/library/test-fixtures/logs/controller/compressed.gz
new file mode 100644
index 0000000..4dc3bad
--- /dev/null
+++ b/roles/upload-logs-gcs/library/test-fixtures/logs/controller/compressed.gz
Binary files differ
diff --git a/roles/upload-logs-gcs/library/test-fixtures/logs/controller/cpu-load.svg b/roles/upload-logs-gcs/library/test-fixtures/logs/controller/cpu-load.svg
new file mode 100644
index 0000000..01a940a
--- /dev/null
+++ b/roles/upload-logs-gcs/library/test-fixtures/logs/controller/cpu-load.svg
@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<svg>
+</svg>
diff --git a/roles/upload-logs-gcs/library/test-fixtures/logs/controller/journal.xz b/roles/upload-logs-gcs/library/test-fixtures/logs/controller/journal.xz
new file mode 100644
index 0000000..ea28d9e
--- /dev/null
+++ b/roles/upload-logs-gcs/library/test-fixtures/logs/controller/journal.xz
Binary files differ
diff --git a/roles/upload-logs-gcs/library/test-fixtures/logs/controller/service_log.txt b/roles/upload-logs-gcs/library/test-fixtures/logs/controller/service_log.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/roles/upload-logs-gcs/library/test-fixtures/logs/controller/service_log.txt
diff --git a/roles/upload-logs-gcs/library/test-fixtures/logs/controller/subdir/foo::3.txt b/roles/upload-logs-gcs/library/test-fixtures/logs/controller/subdir/foo::3.txt
new file mode 100644
index 0000000..384ce7d
--- /dev/null
+++ b/roles/upload-logs-gcs/library/test-fixtures/logs/controller/subdir/foo::3.txt
@@ -0,0 +1,2 @@
+This is a plain text file with a funny name.
+The index links should escape the :'s.
diff --git a/roles/upload-logs-gcs/library/test-fixtures/logs/controller/subdir/subdir.txt b/roles/upload-logs-gcs/library/test-fixtures/logs/controller/subdir/subdir.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/roles/upload-logs-gcs/library/test-fixtures/logs/controller/subdir/subdir.txt
diff --git a/roles/upload-logs-gcs/library/test-fixtures/logs/controller/syslog b/roles/upload-logs-gcs/library/test-fixtures/logs/controller/syslog
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/roles/upload-logs-gcs/library/test-fixtures/logs/controller/syslog
diff --git a/roles/upload-logs-gcs/library/test-fixtures/logs/job-output.json b/roles/upload-logs-gcs/library/test-fixtures/logs/job-output.json
new file mode 100644
index 0000000..c8cd7e9
--- /dev/null
+++ b/roles/upload-logs-gcs/library/test-fixtures/logs/job-output.json
@@ -0,0 +1 @@
+{"test": "foo"}
diff --git a/roles/upload-logs-gcs/library/test-fixtures/logs/zuul-info/inventory.yaml b/roles/upload-logs-gcs/library/test-fixtures/logs/zuul-info/inventory.yaml
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/roles/upload-logs-gcs/library/test-fixtures/logs/zuul-info/inventory.yaml
diff --git a/roles/upload-logs-gcs/library/test-fixtures/logs/zuul-info/zuul-info.controller.txt b/roles/upload-logs-gcs/library/test-fixtures/logs/zuul-info/zuul-info.controller.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/roles/upload-logs-gcs/library/test-fixtures/logs/zuul-info/zuul-info.controller.txt
diff --git a/roles/upload-logs-gcs/library/test_zuul_google_storage_upload.py b/roles/upload-logs-gcs/library/test_zuul_google_storage_upload.py
new file mode 100644
index 0000000..ed5c556
--- /dev/null
+++ b/roles/upload-logs-gcs/library/test_zuul_google_storage_upload.py
@@ -0,0 +1,406 @@
+# Copyright (C) 2018-2019 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+#
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import testtools
+import time
+import stat
+import fixtures
+
+from bs4 import BeautifulSoup
+from .zuul_google_storage_upload import FileList, Indexer, FileDetail
+
+
+FIXTURE_DIR = os.path.join(os.path.dirname(__file__),
+ 'test-fixtures')
+
+
+class SymlinkFixture(fixtures.Fixture):
+ links = [
+ ('bad_symlink', '/etc'),
+ ('bad_symlink_file', '/etc/issue'),
+ ('good_symlink', 'controller'),
+ ('recursive_symlink', '.'),
+ ('symlink_file', 'job-output.json'),
+ ('symlink_loop_a', 'symlink_loop'),
+ ('symlink_loop/symlink_loop_b', '..'),
+ ]
+
+ def _setUp(self):
+ for (src, target) in self.links:
+ path = os.path.join(FIXTURE_DIR, 'links', src)
+ os.symlink(target, path)
+ self.addCleanup(os.unlink, path)
+
+
+class TestFileList(testtools.TestCase):
+
+ def assert_files(self, result, files):
+ self.assertEqual(len(result), len(files))
+ for expected, received in zip(files, result):
+ self.assertEqual(expected[0], received.relative_path)
+ if expected[0] and expected[0][-1] == '/':
+ efilename = os.path.split(
+ os.path.dirname(expected[0]))[1] + '/'
+ else:
+ efilename = os.path.split(expected[0])[1]
+ self.assertEqual(efilename, received.filename)
+ if received.folder:
+ if received.full_path is not None and expected[0] != '':
+ self.assertTrue(os.path.isdir(received.full_path))
+ else:
+ self.assertTrue(os.path.isfile(received.full_path))
+ self.assertEqual(expected[1], received.mimetype)
+ self.assertEqual(expected[2], received.encoding)
+
+ def find_file(self, file_list, path):
+ for f in file_list:
+ if f.relative_path == path:
+ return f
+
+ def test_single_dir_trailing_slash(self):
+ '''Test a single directory with a trailing slash'''
+
+ with FileList() as fl:
+ fl.add(os.path.join(FIXTURE_DIR, 'logs/'))
+ self.assert_files(fl, [
+ ('', 'application/directory', None),
+ ('controller', 'application/directory', None),
+ ('zuul-info', 'application/directory', None),
+ ('job-output.json', 'application/json', None),
+ ('controller/subdir', 'application/directory', None),
+ ('controller/compressed.gz', 'text/plain', 'gzip'),
+ ('controller/cpu-load.svg', 'image/svg+xml', None),
+ ('controller/journal.xz', 'text/plain', 'xz'),
+ ('controller/service_log.txt', 'text/plain', None),
+ ('controller/syslog', 'text/plain', None),
+ ('controller/subdir/foo::3.txt', 'text/plain', None),
+ ('controller/subdir/subdir.txt', 'text/plain', None),
+ ('zuul-info/inventory.yaml', 'text/plain', None),
+ ('zuul-info/zuul-info.controller.txt', 'text/plain', None),
+ ])
+
+ def test_single_dir(self):
+ '''Test a single directory without a trailing slash'''
+ with FileList() as fl:
+ fl.add(os.path.join(FIXTURE_DIR, 'logs'))
+ self.assert_files(fl, [
+ ('', 'application/directory', None),
+ ('logs', 'application/directory', None),
+ ('logs/controller', 'application/directory', None),
+ ('logs/zuul-info', 'application/directory', None),
+ ('logs/job-output.json', 'application/json', None),
+ ('logs/controller/subdir', 'application/directory', None),
+ ('logs/controller/compressed.gz', 'text/plain', 'gzip'),
+ ('logs/controller/cpu-load.svg', 'image/svg+xml', None),
+ ('logs/controller/journal.xz', 'text/plain', 'xz'),
+ ('logs/controller/service_log.txt', 'text/plain', None),
+ ('logs/controller/syslog', 'text/plain', None),
+ ('logs/controller/subdir/foo::3.txt', 'text/plain', None),
+ ('logs/controller/subdir/subdir.txt', 'text/plain', None),
+ ('logs/zuul-info/inventory.yaml', 'text/plain', None),
+ ('logs/zuul-info/zuul-info.controller.txt',
+ 'text/plain', None),
+ ])
+
+ def test_single_file(self):
+ '''Test a single file'''
+ with FileList() as fl:
+ fl.add(os.path.join(FIXTURE_DIR,
+ 'logs/zuul-info/inventory.yaml'))
+ self.assert_files(fl, [
+ ('', 'application/directory', None),
+ ('inventory.yaml', 'text/plain', None),
+ ])
+
+ def test_symlinks(self):
+ '''Test symlinks'''
+ with FileList() as fl:
+ self.useFixture(SymlinkFixture())
+ fl.add(os.path.join(FIXTURE_DIR, 'links/'))
+ self.assert_files(fl, [
+ ('', 'application/directory', None),
+ ('controller', 'application/directory', None),
+ ('good_symlink', 'application/directory', None),
+ ('recursive_symlink', 'application/directory', None),
+ ('symlink_loop', 'application/directory', None),
+ ('symlink_loop_a', 'application/directory', None),
+ ('job-output.json', 'application/json', None),
+ ('symlink_file', 'text/plain', None),
+ ('controller/service_log.txt', 'text/plain', None),
+ ('symlink_loop/symlink_loop_b', 'application/directory', None),
+ ('symlink_loop/placeholder', 'text/plain', None),
+ ])
+
+ def test_index_files(self):
+ '''Test index generation'''
+ with FileList() as fl:
+ fl.add(os.path.join(FIXTURE_DIR, 'logs'))
+ ix = Indexer(fl)
+ ix.make_indexes()
+
+ self.assert_files(fl, [
+ ('', 'application/directory', None),
+ ('index.html', 'text/html', None),
+ ('logs', 'application/directory', None),
+ ('logs/controller', 'application/directory', None),
+ ('logs/zuul-info', 'application/directory', None),
+ ('logs/job-output.json', 'application/json', None),
+ ('logs/index.html', 'text/html', None),
+ ('logs/controller/subdir', 'application/directory', None),
+ ('logs/controller/compressed.gz', 'text/plain', 'gzip'),
+ ('logs/controller/cpu-load.svg', 'image/svg+xml', None),
+ ('logs/controller/journal.xz', 'text/plain', 'xz'),
+ ('logs/controller/service_log.txt', 'text/plain', None),
+ ('logs/controller/syslog', 'text/plain', None),
+ ('logs/controller/index.html', 'text/html', None),
+ ('logs/controller/subdir/foo::3.txt', 'text/plain', None),
+ ('logs/controller/subdir/subdir.txt', 'text/plain', None),
+ ('logs/controller/subdir/index.html', 'text/html', None),
+ ('logs/zuul-info/inventory.yaml', 'text/plain', None),
+ ('logs/zuul-info/zuul-info.controller.txt',
+ 'text/plain', None),
+ ('logs/zuul-info/index.html', 'text/html', None),
+ ])
+
+ top_index = self.find_file(fl, 'index.html')
+ page = open(top_index.full_path).read()
+ page = BeautifulSoup(page, 'html.parser')
+ rows = page.find_all('tr')[1:]
+
+ self.assertEqual(len(rows), 1)
+
+ self.assertEqual(rows[0].find('a').get('href'), 'logs/index.html')
+ self.assertEqual(rows[0].find('a').text, 'logs/')
+
+ subdir_index = self.find_file(
+ fl, 'logs/controller/subdir/index.html')
+ page = open(subdir_index.full_path).read()
+ page = BeautifulSoup(page, 'html.parser')
+ rows = page.find_all('tr')[1:]
+ self.assertEqual(rows[0].find('a').get('href'), '../index.html')
+ self.assertEqual(rows[0].find('a').text, '../')
+
+ # Test proper escaping of files with funny names
+ self.assertEqual(rows[1].find('a').get('href'), 'foo%3A%3A3.txt')
+ self.assertEqual(rows[1].find('a').text, 'foo::3.txt')
+ # Test files without escaping
+ self.assertEqual(rows[2].find('a').get('href'), 'subdir.txt')
+ self.assertEqual(rows[2].find('a').text, 'subdir.txt')
+
+ def test_index_files_trailing_slash(self):
+ '''Test index generation with a trailing slash'''
+ with FileList() as fl:
+ fl.add(os.path.join(FIXTURE_DIR, 'logs/'))
+ ix = Indexer(fl)
+ ix.make_indexes()
+
+ self.assert_files(fl, [
+ ('', 'application/directory', None),
+ ('controller', 'application/directory', None),
+ ('zuul-info', 'application/directory', None),
+ ('job-output.json', 'application/json', None),
+ ('index.html', 'text/html', None),
+ ('controller/subdir', 'application/directory', None),
+ ('controller/compressed.gz', 'text/plain', 'gzip'),
+ ('controller/cpu-load.svg', 'image/svg+xml', None),
+ ('controller/journal.xz', 'text/plain', 'xz'),
+ ('controller/service_log.txt', 'text/plain', None),
+ ('controller/syslog', 'text/plain', None),
+ ('controller/index.html', 'text/html', None),
+ ('controller/subdir/foo::3.txt', 'text/plain', None),
+ ('controller/subdir/subdir.txt', 'text/plain', None),
+ ('controller/subdir/index.html', 'text/html', None),
+ ('zuul-info/inventory.yaml', 'text/plain', None),
+ ('zuul-info/zuul-info.controller.txt', 'text/plain', None),
+ ('zuul-info/index.html', 'text/html', None),
+ ])
+
+ top_index = self.find_file(fl, 'index.html')
+ page = open(top_index.full_path).read()
+ page = BeautifulSoup(page, 'html.parser')
+ rows = page.find_all('tr')[1:]
+
+ self.assertEqual(len(rows), 3)
+
+ self.assertEqual(rows[0].find('a').get('href'),
+ 'controller/index.html')
+ self.assertEqual(rows[0].find('a').text, 'controller/')
+
+ self.assertEqual(rows[1].find('a').get('href'),
+ 'zuul-info/index.html')
+ self.assertEqual(rows[1].find('a').text, 'zuul-info/')
+
+ subdir_index = self.find_file(fl, 'controller/subdir/index.html')
+ page = open(subdir_index.full_path).read()
+ page = BeautifulSoup(page, 'html.parser')
+ rows = page.find_all('tr')[1:]
+ self.assertEqual(rows[0].find('a').get('href'), '../index.html')
+ self.assertEqual(rows[0].find('a').text, '../')
+
+ # Test proper escaping of files with funny names
+ self.assertEqual(rows[1].find('a').get('href'), 'foo%3A%3A3.txt')
+ self.assertEqual(rows[1].find('a').text, 'foo::3.txt')
+ # Test files without escaping
+ self.assertEqual(rows[2].find('a').get('href'), 'subdir.txt')
+ self.assertEqual(rows[2].find('a').text, 'subdir.txt')
+
+ def test_topdir_parent_link(self):
+ '''Test index generation creates topdir parent link'''
+ with FileList() as fl:
+ fl.add(os.path.join(FIXTURE_DIR, 'logs/'))
+ ix = Indexer(fl)
+ ix.make_indexes(
+ create_parent_links=True,
+ create_topdir_parent_link=True)
+
+ self.assert_files(fl, [
+ ('', 'application/directory', None),
+ ('controller', 'application/directory', None),
+ ('zuul-info', 'application/directory', None),
+ ('job-output.json', 'application/json', None),
+ ('index.html', 'text/html', None),
+ ('controller/subdir', 'application/directory', None),
+ ('controller/compressed.gz', 'text/plain', 'gzip'),
+ ('controller/cpu-load.svg', 'image/svg+xml', None),
+ ('controller/journal.xz', 'text/plain', 'xz'),
+ ('controller/service_log.txt', 'text/plain', None),
+ ('controller/syslog', 'text/plain', None),
+ ('controller/index.html', 'text/html', None),
+ ('controller/subdir/foo::3.txt', 'text/plain', None),
+ ('controller/subdir/subdir.txt', 'text/plain', None),
+ ('controller/subdir/index.html', 'text/html', None),
+ ('zuul-info/inventory.yaml', 'text/plain', None),
+ ('zuul-info/zuul-info.controller.txt', 'text/plain', None),
+ ('zuul-info/index.html', 'text/html', None),
+ ])
+
+ top_index = self.find_file(fl, 'index.html')
+ page = open(top_index.full_path).read()
+ page = BeautifulSoup(page, 'html.parser')
+ rows = page.find_all('tr')[1:]
+
+ self.assertEqual(len(rows), 4)
+
+ self.assertEqual(rows[0].find('a').get('href'),
+ '../index.html')
+ self.assertEqual(rows[0].find('a').text, '../')
+
+ self.assertEqual(rows[1].find('a').get('href'),
+ 'controller/index.html')
+ self.assertEqual(rows[1].find('a').text, 'controller/')
+
+ self.assertEqual(rows[2].find('a').get('href'),
+ 'zuul-info/index.html')
+ self.assertEqual(rows[2].find('a').text, 'zuul-info/')
+
+ subdir_index = self.find_file(fl, 'controller/subdir/index.html')
+ page = open(subdir_index.full_path).read()
+ page = BeautifulSoup(page, 'html.parser')
+ rows = page.find_all('tr')[1:]
+ self.assertEqual(rows[0].find('a').get('href'), '../index.html')
+ self.assertEqual(rows[0].find('a').text, '../')
+
+ # Test proper escaping of files with funny names
+ self.assertEqual(rows[1].find('a').get('href'), 'foo%3A%3A3.txt')
+ self.assertEqual(rows[1].find('a').text, 'foo::3.txt')
+ # Test files without escaping
+ self.assertEqual(rows[2].find('a').get('href'), 'subdir.txt')
+ self.assertEqual(rows[2].find('a').text, 'subdir.txt')
+
+ def test_no_parent_links(self):
+        '''Test index generation without parent links'''
+ with FileList() as fl:
+ fl.add(os.path.join(FIXTURE_DIR, 'logs/'))
+ ix = Indexer(fl)
+ ix.make_indexes(
+ create_parent_links=False,
+ create_topdir_parent_link=False)
+
+ self.assert_files(fl, [
+ ('', 'application/directory', None),
+ ('controller', 'application/directory', None),
+ ('zuul-info', 'application/directory', None),
+ ('job-output.json', 'application/json', None),
+ ('index.html', 'text/html', None),
+ ('controller/subdir', 'application/directory', None),
+ ('controller/compressed.gz', 'text/plain', 'gzip'),
+ ('controller/cpu-load.svg', 'image/svg+xml', None),
+ ('controller/journal.xz', 'text/plain', 'xz'),
+ ('controller/service_log.txt', 'text/plain', None),
+ ('controller/syslog', 'text/plain', None),
+ ('controller/index.html', 'text/html', None),
+ ('controller/subdir/foo::3.txt', 'text/plain', None),
+ ('controller/subdir/subdir.txt', 'text/plain', None),
+ ('controller/subdir/index.html', 'text/html', None),
+ ('zuul-info/inventory.yaml', 'text/plain', None),
+ ('zuul-info/zuul-info.controller.txt', 'text/plain', None),
+ ('zuul-info/index.html', 'text/html', None),
+ ])
+
+ top_index = self.find_file(fl, 'index.html')
+ page = open(top_index.full_path).read()
+ page = BeautifulSoup(page, 'html.parser')
+ rows = page.find_all('tr')[1:]
+
+ self.assertEqual(len(rows), 3)
+
+ self.assertEqual(rows[0].find('a').get('href'),
+ 'controller/index.html')
+ self.assertEqual(rows[0].find('a').text,
+ 'controller/')
+
+ self.assertEqual(rows[1].find('a').get('href'),
+ 'zuul-info/index.html')
+ self.assertEqual(rows[1].find('a').text,
+ 'zuul-info/')
+
+ subdir_index = self.find_file(fl, 'controller/subdir/index.html')
+ page = open(subdir_index.full_path).read()
+ page = BeautifulSoup(page, 'html.parser')
+ rows = page.find_all('tr')[1:]
+
+ # Test proper escaping of files with funny names
+ self.assertEqual(rows[0].find('a').get('href'), 'foo%3A%3A3.txt')
+ self.assertEqual(rows[0].find('a').text, 'foo::3.txt')
+ # Test files without escaping
+ self.assertEqual(rows[1].find('a').get('href'), 'subdir.txt')
+ self.assertEqual(rows[1].find('a').text, 'subdir.txt')
+
+
+class TestFileDetail(testtools.TestCase):
+
+ def test_get_file_detail(self):
+ '''Test files info'''
+ path = os.path.join(FIXTURE_DIR, 'logs/job-output.json')
+ file_detail = FileDetail(path, '')
+ path_stat = os.stat(path)
+ self.assertEqual(
+ time.gmtime(path_stat[stat.ST_MTIME]),
+ file_detail.last_modified)
+ self.assertEqual(16, file_detail.size)
+
+ def test_get_file_detail_missing_file(self):
+ '''Test files that go missing during a walk'''
+
+ file_detail = FileDetail('missing/file/that/we/cant/find', '')
+
+ self.assertEqual(time.gmtime(0), file_detail.last_modified)
+ self.assertEqual(0, file_detail.size)
diff --git a/roles/upload-logs-gcs/library/zuul_google_storage_upload.py b/roles/upload-logs-gcs/library/zuul_google_storage_upload.py
new file mode 100755
index 0000000..774a786
--- /dev/null
+++ b/roles/upload-logs-gcs/library/zuul_google_storage_upload.py
@@ -0,0 +1,862 @@
+#!/usr/bin/env python3
+#
+# Copyright 2014 Rackspace Australia
+# Copyright 2018-2019 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+"""
+Utility to upload files to Google Cloud Storage
+"""
+
+import argparse
+import datetime
+import gzip
+import io
+import json
+import logging
+import mimetypes
+import os
+try:
+ import queue as queuelib
+except ImportError:
+ import Queue as queuelib
+import shutil
+import stat
+import sys
+import tempfile
+import threading
+import time
+try:
+ import urllib.parse as urlparse
+except ImportError:
+ import urllib as urlparse
+import zlib
+import collections
+
+from google.cloud import storage
+import google.auth.compute_engine.credentials as gce_cred
+
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+ # Python 3.3+
+ from collections.abc import Sequence
+except ImportError:
+ from collections import Sequence
+
+mimetypes.init()
+mimetypes.add_type('text/plain', '.yaml')
+
+MAX_UPLOAD_THREADS = 24
+POST_ATTEMPTS = 3
+
+# Map mime types to apache icons
+APACHE_MIME_ICON_MAP = {
+ '_default': 'unknown.png',
+ 'application/gzip': 'compressed.png',
+ 'application/directory': 'folder.png',
+ 'text/html': 'text.png',
+ 'text/plain': 'text.png',
+}
+
+# Map mime types to apache icons
+APACHE_FILE_ICON_MAP = {
+ '..': 'back.png',
+}
+
+# These icon files are from the Apache project and are in the public
+# domain.
+ICON_IMAGES = {
+ 'back.png': 'iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAMAAAD3n0w0AAAAElBMVEX/'
+ '///M//+ZmZlmZmYzMzMAAACei5rnAAAAAnRSTlP/AOW3MEoAAABWSURB'
+ 'VHjabdBBCgAhDEPRRpv7X3kwEMsQ//IRRC08urjRHbha5VLFUsVSxVI9'
+ 'lmDh5hMpHD6n0EgoiZG0DNINpnWlcVXaRix76e1/8dddcL6nG0Ri9gHj'
+ 'tgSXKYeLBgAAAABJRU5ErkJggg==',
+ 'compressed.png': 'iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAMAAAD3n0w0AAADAFBM'
+ 'VEX//////8z//5n//2b//zP//wD/zP//zMz/zJn/zGb/zDP/zAD/'
+ 'mf//mcz/mZn/mWb/mTP/mQD/Zv//Zsz/Zpn/Zmb/ZjP/ZgD/M///'
+ 'M8z/M5n/M2b/MzP/MwD/AP//AMz/AJn/AGb/ADP/AADM///M/8zM'
+ '/5nM/2bM/zPM/wDMzP/MzMzMzJnMzGbMzDPMzADMmf/MmczMmZnM'
+ 'mWbMmTPMmQDMZv/MZszMZpnMZmbMZjPMZgDMM//MM8zMM5nMM2bM'
+ 'MzPMMwDMAP/MAMzMAJnMAGbMADPMAACZ//+Z/8yZ/5mZ/2aZ/zOZ'
+ '/wCZzP+ZzMyZzJmZzGaZzDOZzACZmf+ZmcyZmZmZmWaZmTOZmQCZ'
+ 'Zv+ZZsyZZpmZZmaZZjOZZgCZM/+ZM8yZM5mZM2aZMzOZMwCZAP+Z'
+ 'AMyZAJmZAGaZADOZAABm//9m/8xm/5lm/2Zm/zNm/wBmzP9mzMxm'
+ 'zJlmzGZmzDNmzABmmf9mmcxmmZlmmWZmmTNmmQBmZv9mZsxmZplm'
+ 'ZmZmZjNmZgBmM/9mM8xmM5lmM2ZmMzNmMwBmAP9mAMxmAJlmAGZm'
+ 'ADNmAAAz//8z/8wz/5kz/2Yz/zMz/wAzzP8zzMwzzJkzzGYzzDMz'
+ 'zAAzmf8zmcwzmZkzmWYzmTMzmQAzZv8zZswzZpkzZmYzZjMzZgAz'
+ 'M/8zM8wzM5kzM2YzMzMzMwAzAP8zAMwzAJkzAGYzADMzAAAA//8A'
+ '/8wA/5kA/2YA/zMA/wAAzP8AzMwAzJkAzGYAzDMAzAAAmf8AmcwA'
+ 'mZkAmWYAmTMAmQAAZv8AZswAZpkAZmYAZjMAZgAAM/8AM8wAM5kA'
+ 'M2YAMzMAMwAAAP8AAMwAAJkAAGYAADPuAADdAAC7AACqAACIAAB3'
+ 'AABVAABEAAAiAAARAAAA7gAA3QAAuwAAqgAAiAAAdwAAVQAARAAA'
+ 'IgAAEQAAAO4AAN0AALsAAKoAAIgAAHcAAFUAAEQAACIAABHu7u7d'
+ '3d27u7uqqqqIiIh3d3dVVVVEREQiIiIREREAAAD7CIKZAAAAJXRS'
+ 'TlP///////////////////////////////////////////////8A'
+ 'P89CTwAAAGtJREFUeNp9z9ENgDAIhOEOco+dybVuEXasFMRDY/x5'
+ '+xJCO6Znu6kSx7BhXyjtKBWWNlwW88Loid7hFRKBXiIYCMfMEYUQ'
+ 'QohC3CjFA5nIjqx1CqlDLGR/EhM5O06yvin0ftGOyIS7lV14AsQN'
+ 'aR7rMEBYAAAAAElFTkSuQmCC',
+ 'folder.png': 'iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAMAAAD3n0w0AAAAElBMVEX/'
+ '////zJnM//+ZZjMzMzMAAADCEvqoAAAAA3RSTlP//wDXyg1BAAAASElE'
+ 'QVR42s3KQQ6AQAhDUaXt/a/sQDrRJu7c+NmQB0e99B3lnqjT6cYx6zSI'
+ 'bV40n3D7psYMoBoz4w8/EdNYQsbGEjNxYSljXTEsA9O1pLTvAAAAAElF'
+ 'TkSuQmCC',
+ 'text.png': 'iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAMAAAD3n0w0AAAAD1BMVEX/'
+ '///M//+ZmZkzMzMAAABVsTOVAAAAAnRSTlP/AOW3MEoAAABISURBVHja'
+ 'tcrRCgAgCENRbf7/N7dKomGvngjhMsPLD4NdMPwia438NRIyxsaL/XQZ'
+ 'hyxpkC6zyjLXGVXnkhqWJWIIrOgeinECLlUCjBCqNQoAAAAASUVORK5C'
+ 'YII=',
+ 'unknown.png': 'iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAMAAAD3n0w0AAAAD1BMVEX/'
+ '///M//+ZmZkzMzMAAABVsTOVAAAAAnRSTlP/AOW3MEoAAABYSURBVHja'
+ 'ncvRDoAgDEPRruX/v1kmNHPBxMTLyzgD6FmsILg56g2hQnJkOco4yZhq'
+ 'tN5nYd5Zq0LsHblwxwP9GTCWsaGtoelANKzOlz/RfaLYUmLE6E28ALlN'
+ 'AupSdoFsAAAAAElFTkSuQmCC'}
+
+
+# Begin vendored code
+# This code is licensed under the Public Domain/CC0 and comes from
+# https://github.com/leenr/gzip-stream/blob/master/gzip_stream.py
+# Code was modified:
+# removed type annotations to support python2.
+# removed use of *, somearg for positional anonymous args.
+# Default compression level to 9.
+
+class GZIPCompressedStream(io.RawIOBase):
+ def __init__(self, stream, compression_level=9):
+ assert 1 <= compression_level <= 9
+
+ self._compression_level = compression_level
+ self._stream = stream
+
+ self._compressed_stream = io.BytesIO()
+ self._compressor = gzip.GzipFile(
+ mode='wb',
+ fileobj=self._compressed_stream,
+ compresslevel=compression_level
+ )
+
+ # because of the GZIP header written by `GzipFile.__init__`:
+ self._compressed_stream.seek(0)
+ self.count = 0
+
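+ # The google-cloud-storage client may call tell() on the stream it
+ # uploads, so count the compressed bytes handed out by read() and
+ # report that as the position of this otherwise unseekable stream.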
+ def read(self, length):
+ r = super().read(length)
+ self.count += len(r)
+ return r
+
+ def tell(self):
+ return self.count
+
+ @property
+ def compression_level(self):
+ return self._compression_level
+
+ @property
+ def stream(self):
+ return self._stream
+
+ def readable(self):
+ return True
+
+ def _read_compressed_into(self, b):
+ buf = self._compressed_stream.read(len(b))
+ b[:len(buf)] = buf
+ return len(buf)
+
+ def readinto(self, b):
+ b = memoryview(b)
+
+ offset = 0
+ size = len(b)
+ while offset < size:
+ offset += self._read_compressed_into(b[offset:])
+ if offset < size:
+ # self._compressed_buffer now empty
+ if self._compressor.closed:
+ # nothing to compress anymore
+ break
+ # compress next bytes
+ self._read_n_compress(size)
+
+ return offset
+
+ def _read_n_compress(self, size):
+ assert size > 0
+
+ data = self._stream.read(size)
+
+ # rewind buffer to the start to free up memory
+ # (because anything currently in the buffer should be already
+ # streamed off the object)
+ self._compressed_stream.seek(0)
+ self._compressed_stream.truncate(0)
+
+ if data:
+ self._compressor.write(data)
+ else:
+ # this will write final data (will flush zlib with Z_FINISH)
+ self._compressor.close()
+
+ # rewind to the buffer start
+ self._compressed_stream.seek(0)
+
+ def __repr__(self):
+ return (
+ '{self.__class__.__name__}('
+ '{self.stream!r}, '
+ 'compression_level={self.compression_level!r}'
+ ')'
+ ).format(self=self)
+
+# End vendored code
+
+
+def get_mime_icon(mime, filename=''):
+ icon = (APACHE_FILE_ICON_MAP.get(filename) or
+ APACHE_MIME_ICON_MAP.get(mime) or
+ APACHE_MIME_ICON_MAP['_default'])
+ return "data:image/png;base64,%s" % ICON_IMAGES[icon]
+
+
+def retry_function(func):
+ for attempt in range(1, POST_ATTEMPTS + 1):
+ try:
+ return func()
+ except Exception:
+ if attempt >= POST_ATTEMPTS:
+ raise
+ else:
+ logging.exception("Error on attempt %d" % attempt)
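+ # Linear backoff: wait 10s after the first failure, 20s after
+ # the second, and so on.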
+ time.sleep(attempt * 10)
+
+
+def sizeof_fmt(num, suffix='B'):
+ # From http://stackoverflow.com/questions/1094841/
+ # reusable-library-to-get-human-readable-version-of-file-size
+ for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
+ if abs(num) < 1024.0:
+ return "%3.1f%s%s" % (num, unit, suffix)
+ num /= 1024.0
+ return "%.1f%s%s" % (num, 'Y', suffix)
+
+
+class Credentials(gce_cred.Credentials):
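+ # Loads a bearer token from a JSON file containing 'access_token'
+ # and 'expires_in' keys, re-reading the file each time the client
+ # asks for a refresh.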
+ def __init__(self, path):
+ self._path = path
+ self.refresh(None)
+
+ def refresh(self, request):
+ with open(self._path) as f:
+ data = json.loads(f.read())
+ self.token = data['access_token']
+ self.expiry = (datetime.datetime.utcnow() +
+ datetime.timedelta(seconds=data['expires_in']))
+
+
+class FileDetail():
+ """
+ Describes a single file on disk; used to generate index links and
+ as the file path to push to storage.
+ """
+
+ def __init__(self, full_path, relative_path, filename=None):
+ """
+ Args:
+ full_path (str): The absolute path to the file on disk.
+ relative_path (str): The relative path from the artifacts source
+ used for links.
+ filename (str): An optional alternate filename in links.
+ """
+ # Alias FileNotFoundError to OSError for Python 2 compatibility
+ try:
+ FileNotFoundError # noqa: F823
+ except NameError:
+ FileNotFoundError = OSError
+
+ self.full_path = full_path
+ if filename is None:
+ self.filename = os.path.basename(full_path)
+ else:
+ self.filename = filename
+ self.relative_path = relative_path
+
+ if self.full_path and os.path.isfile(self.full_path):
+ mime_guess, encoding = mimetypes.guess_type(self.full_path)
+ self.mimetype = mime_guess if mime_guess else 'text/plain'
+ self.encoding = encoding
+ self.folder = False
+ else:
+ self.mimetype = 'application/directory'
+ self.encoding = None
+ self.folder = True
+ try:
+ st = os.stat(self.full_path)
+ self.last_modified = time.gmtime(st[stat.ST_MTIME])
+ self.size = st[stat.ST_SIZE]
+ except (FileNotFoundError, TypeError):
+ self.last_modified = time.gmtime(0)
+ self.size = 0
+
+ def __repr__(self):
+ t = 'Folder' if self.folder else 'File'
+ return '<%s %s>' % (t, self.relative_path)
+
+
+class FileList(Sequence):
+ '''A collection of FileDetail objects
+
+ This is a list-like group of FileDetail objects, intended to be
+ used as a context manager around the upload process.
+ '''
+ def __init__(self):
+ self.file_list = []
+ self.file_list.append(FileDetail(None, '', ''))
+ self.tempdirs = []
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ for tempdir in self.tempdirs:
+ shutil.rmtree(tempdir)
+
+ def __getitem__(self, item):
+ return self.file_list.__getitem__(item)
+
+ def __len__(self):
+ return self.file_list.__len__()
+
+ def get_tempdir(self):
+ '''Get a temporary directory
+
+ Returns path to a private temporary directory which will be
+ cleaned on exit
+ '''
+ tempdir = tempfile.mkdtemp(prefix='s-u-l-tmp')
+ self.tempdirs.append(tempdir)
+ return tempdir
+
+ @staticmethod
+ def _path_in_tree(root, path):
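+ # Resolve symlinks and relative components, then require the real
+ # path to remain under the tree root so that symlinks cannot pull
+ # files from outside the upload tree into the file list.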
+ full_path = os.path.realpath(os.path.abspath(
+ os.path.expanduser(path)))
+ if not full_path.startswith(root):
+ logging.debug("Skipping path outside root: %s" % (path,))
+ return False
+ return True
+
+ def add(self, file_path):
+ """
+ Generate a list of files to upload to storage. Recurses through
+ directories
+ """
+
+ # file_list: A list of FileDetails to push to storage
+ file_list = []
+
+ if os.path.isfile(file_path):
+ relative_path = os.path.basename(file_path)
+ file_list.append(FileDetail(file_path, relative_path))
+ elif os.path.isdir(file_path):
+ original_root = os.path.realpath(os.path.abspath(
+ os.path.expanduser(file_path)))
+
+ parent_dir = os.path.dirname(file_path)
+ if not file_path.endswith('/'):
+ filename = os.path.basename(file_path)
+ full_path = file_path
+ relative_name = os.path.relpath(full_path, parent_dir)
+ file_list.append(FileDetail(full_path, relative_name,
+ filename))
+ # TODO: this will copy the result of symlinked files, but
+ # it won't follow directory symlinks. If we add that, we
+ # should ensure that we don't loop.
+ for path, folders, files in os.walk(file_path):
+ # Sort folders and files in-place so that we recurse in order.
+ files.sort(key=lambda x: x.lower())
+ folders.sort(key=lambda x: x.lower())
+ # relative_path: The path between the given directory
+ # and the one being currently walked.
+ relative_path = os.path.relpath(path, parent_dir)
+
+ for filename in folders:
+ full_path = os.path.join(path, filename)
+ if not self._path_in_tree(original_root, full_path):
+ continue
+ relative_name = os.path.relpath(full_path, parent_dir)
+ file_list.append(FileDetail(full_path, relative_name,
+ filename))
+
+ for filename in files:
+ full_path = os.path.join(path, filename)
+ if not self._path_in_tree(original_root, full_path):
+ continue
+ relative_name = os.path.relpath(full_path, parent_dir)
+ file_detail = FileDetail(full_path, relative_name)
+ file_list.append(file_detail)
+
+ self.file_list += file_list
+
+
+class Indexer():
+ """Index a FileList
+
+ Functions to generate indexes and other collated data for a
+ FileList
+
+ - make_indexes() : make index.html in folders
+ """
+ def __init__(self, file_list):
+ '''
+ Args:
+ file_list (FileList): A FileList object with all files
+ to be indexed.
+ '''
+ assert isinstance(file_list, FileList)
+ self.file_list = file_list
+
+ def _make_index_file(self, folder_links, title, tempdir, append_footer):
+ """Writes an index into a file for pushing"""
+ for file_details in folder_links:
+ # Do not generate an index file if one exists already.
+ # This may be the case when uploading other machine generated
+ # content like python coverage info.
+ if self.index_filename == file_details.filename:
+ return
+ index_content = self._generate_log_index(
+ folder_links, title, append_footer)
+ index_file = os.path.join(tempdir, self.index_filename)
+ with open(index_file, 'w') as fd:
+ fd.write(index_content)
+ return index_file
+
+ def _generate_log_index(self, folder_links, title, append_footer):
+ """Create an index of logfiles and links to them"""
+
+ output = '<html><head><title>%s</title></head><body>\n' % title
+ output += '<h1>%s</h1>\n' % title
+ output += '<table><tr><th></th><th>Name</th><th>Last Modified</th>'
+ output += '<th>Size</th></tr>'
+
+ file_details_to_append = None
+ for file_details in folder_links:
+ output += '<tr>'
+ output += (
+ '<td><img alt="[ ]" title="%(m)s" src="%(i)s"></td>' % ({
+ 'm': file_details.mimetype,
+ 'i': get_mime_icon(file_details.mimetype,
+ file_details.filename),
+ }))
+ filename = file_details.filename
+ link_filename = filename
+ if file_details.folder:
+ filename += '/'
+ link_filename += '/index.html'
+ output += '<td><a href="%s">%s</a></td>' % (
+ urlparse.quote(link_filename),
+ filename)
+ output += '<td>%s</td>' % time.asctime(
+ file_details.last_modified)
+ size = sizeof_fmt(file_details.size, suffix='')
+ output += '<td style="text-align: right">%s</td>' % size
+ output += '</tr>\n'
+
+ if (append_footer and
+ append_footer in file_details.filename):
+ file_details_to_append = file_details
+
+ output += '</table>'
+
+ if file_details_to_append:
+ output += '<br /><hr />'
+ try:
+ with open(file_details_to_append.full_path, 'r') as f:
+ output += f.read()
+ except IOError:
+ logging.exception("Error opening file for appending")
+
+ output += '</body></html>\n'
+ return output
+
+ def make_indexes(self, create_parent_links=True,
+ create_topdir_parent_link=False,
+ append_footer='index_footer.html'):
+ '''Make index.html files
+
+ Iterate the file list and create index.html files for folders
+
+ Args:
+ create_parent_links (bool): Create parent links
+ create_topdir_parent_link (bool): Create topdir parent link
+ append_footer (str): Filename of a footer to append to each
+ generated page
+
+ Returns:
+ No value; self.file_list is updated in place.
+ '''
+ self.index_filename = 'index.html'
+
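+ # Map each folder to the files it contains. A folder is recorded
+ # twice: once as a key of its own (so it gets an index file) and
+ # once as an entry in its parent folder's list.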
+ folders = collections.OrderedDict()
+ for f in self.file_list:
+ if f.folder:
+ folders[f.relative_path] = []
+ folder = os.path.dirname(os.path.dirname(
+ f.relative_path + '/'))
+ if folder == '/':
+ folder = ''
+ else:
+ folder = os.path.dirname(f.relative_path)
+ folders[folder].append(f)
+
+ indexes = {}
+ parent_file_detail = FileDetail(None, '..', '..')
+ for folder, files in folders.items():
+ # Don't add the pseudo-top-directory
+ if files and files[0].full_path is None:
+ files = files[1:]
+ if create_topdir_parent_link:
+ files = [parent_file_detail] + files
+ elif create_parent_links:
+ files = [parent_file_detail] + files
+
+ # Generate the index file for this folder
+ full_path = self._make_index_file(files, 'Index of %s' % (folder,),
+ self.file_list.get_tempdir(),
+ append_footer)
+
+ if full_path:
+ filename = os.path.basename(full_path)
+ relative_name = os.path.join(folder, filename)
+ indexes[folder] = FileDetail(full_path, relative_name)
+
+ # This appends the index file at the end of the group of files
+ # for each directory.
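+ # Walking the list in reverse makes it easy to spot the first
+ # entry of each directory group and insert that directory's
+ # index file just before it.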
+ new_list = []
+ last_dirname = None
+ for f in reversed(list(self.file_list)):
+ if f.folder:
+ relative_path = f.relative_path + '/'
+ else:
+ relative_path = f.relative_path
+ dirname = os.path.dirname(relative_path)
+ if dirname == '/':
+ dirname = ''
+ if dirname != last_dirname:
+ index = indexes.pop(dirname, None)
+ if index:
+ new_list.append(index)
+ last_dirname = dirname
+ new_list.append(f)
+ new_list.reverse()
+ self.file_list.file_list = new_list
+
+
+class GzipFilter():
+ chunk_size = 16384
+
+ def __init__(self, infile):
+ self.gzipfile = GZIPCompressedStream(infile)
+ self.done = False
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.done:
+ self.gzipfile.close()
+ raise StopIteration()
+ data = self.gzipfile.read(self.chunk_size)
+ if not data:
+ self.done = True
+ return data
+
+
+class DeflateFilter():
+ chunk_size = 16384
+
+ def __init__(self, infile):
+ self.infile = infile
+ self.encoder = zlib.compressobj()
+ self.done = False
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.done:
+ raise StopIteration()
+ ret = b''
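+ # zlib buffers internally, so compress() may return b'' for a
+ # given chunk; keep feeding input until output appears or EOF
+ # lets us flush the compressor.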
+ while True:
+ data = self.infile.read(self.chunk_size)
+ if data:
+ ret = self.encoder.compress(data)
+ if ret:
+ break
+ else:
+ self.done = True
+ ret = self.encoder.flush()
+ break
+ return ret
+
+
+class Uploader():
+ def __init__(self, client, container, prefix=None,
+ dry_run=False):
+
+ self.dry_run = dry_run
+ if dry_run:
+ self.url = 'http://dry-run-url.com/a/path/'
+ return
+
+ self.client = client
+ self.prefix = prefix or ''
+
+ self.bucket = client.bucket(container)
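+ # Make the bucket web-friendly: allow cross-origin GET/HEAD and
+ # serve index.html as the main page so generated indexes resolve.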
+ cors = [{
+ 'method': ['GET', 'HEAD'],
+ 'origin': ['*']
+ }]
+ self.bucket.cors = cors
+ self.bucket.website = {"mainPageSuffix": "index.html"}
+ self.bucket.update()
+
+ self.url = os.path.join('https://storage.googleapis.com/',
+ container, self.prefix)
+
+ def upload(self, file_list):
+ """Spin up thread pool to upload to storage"""
+
+ if self.dry_run:
+ return
+
+ num_threads = min(len(file_list), MAX_UPLOAD_THREADS)
+ threads = []
+ queue = queuelib.Queue()
+ # add items to queue
+ for f in file_list:
+ queue.put(f)
+
+ for x in range(num_threads):
+ t = threading.Thread(target=self.post_thread, args=(queue,))
+ threads.append(t)
+ t.start()
+
+ for t in threads:
+ t.join()
+
+ def post_thread(self, queue):
+ while True:
+ try:
+ file_detail = queue.get_nowait()
+ logging.debug("%s: processing job %s",
+ threading.current_thread(),
+ file_detail)
+ retry_function(lambda: self._post_file(file_detail))
+ except IOError:
+ # Do our best to attempt to upload all the files
+ logging.exception("Error opening file")
+ continue
+ except queuelib.Empty:
+ # No more work to do
+ return
+
+ @staticmethod
+ def _is_text_type(mimetype):
+ # We want to compress all text types.
+ if mimetype.startswith('text/'):
+ return True
+
+ # Also compress types that typically contain text but are not a
+ # text/* subtype.
+ compress_types = [
+ 'application/json',
+ 'image/svg+xml',
+ ]
+ if mimetype in compress_types:
+ return True
+ return False
+
+ def _post_file(self, file_detail):
+ relative_path = os.path.join(self.prefix, file_detail.relative_path)
+ content_encoding = None
+
+ if not file_detail.folder:
+ if (file_detail.encoding is None and
+ self._is_text_type(file_detail.mimetype)):
+ content_encoding = 'gzip'
+ data = GZIPCompressedStream(open(file_detail.full_path, 'rb'))
+ else:
+ if (not file_detail.filename.endswith(".gz") and
+ file_detail.encoding):
+ # Don't apply gzip encoding to files that we receive as
+ # already gzipped. The reason for this is storage will
+ # serve this back to users as an uncompressed file if they
+ # don't set an accept-encoding that includes gzip. This
+ # can cause problems when the desired file state is
+ # compressed as with .tar.gz tarballs.
+ content_encoding = file_detail.encoding
+ data = open(file_detail.full_path, 'rb')
+ else:
+ data = ''
+ relative_path = relative_path.rstrip('/')
+ if relative_path == '':
+ relative_path = '/'
+ blob = self.bucket.blob(relative_path)
+ if content_encoding:
+ blob.content_encoding = content_encoding
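+ # File-like payloads (plain files and GZIPCompressedStream) expose
+ # tell() and are streamed with upload_from_file; the empty-string
+ # placeholder used for folders goes through upload_from_string.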
+ if hasattr(data, 'tell'):
+ upload = blob.upload_from_file
+ else:
+ upload = blob.upload_from_string
+ upload(data, content_type=file_detail.mimetype)
+
+
+def run(container, files,
+ indexes=True, parent_links=True, topdir_parent_link=False,
+ partition=False, footer='index_footer.html',
+ prefix=None, dry_run=False, credentials_file=None):
+
+ if credentials_file:
+ cred = Credentials(credentials_file)
+ client = storage.Client(credentials=cred)
+ else:
+ client = storage.Client()
+
+ if prefix:
+ prefix = prefix.lstrip('/')
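+ # With partitioning, the first component of the prefix is folded
+ # into the container name: container 'logs' with prefix 'ab/cd/ef'
+ # becomes container 'logs_ab' with prefix 'cd/ef'.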
+ if partition and prefix:
+ parts = prefix.split('/')
+ if len(parts) > 1:
+ container += '_' + parts[0]
+ prefix = '/'.join(parts[1:])
+
+ # Create the objects to make sure the arguments are sound.
+ with FileList() as file_list:
+ # Scan the files.
+ for file_path in files:
+ file_list.add(file_path)
+
+ indexer = Indexer(file_list)
+
+ # (Possibly) make indexes.
+ if indexes:
+ indexer.make_indexes(create_parent_links=parent_links,
+ create_topdir_parent_link=topdir_parent_link,
+ append_footer=footer)
+
+ logging.debug("List of files prepared to upload:")
+ for x in file_list:
+ logging.debug(x)
+
+ # Upload.
+ uploader = Uploader(client, container, prefix, dry_run)
+ uploader.upload(file_list)
+ return uploader.url
+
+
+def ansible_main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ container=dict(required=True, type='str'),
+ files=dict(required=True, type='list'),
+ partition=dict(type='bool', default=False),
+ indexes=dict(type='bool', default=True),
+ parent_links=dict(type='bool', default=True),
+ topdir_parent_link=dict(type='bool', default=False),
+ footer=dict(type='str'),
+ prefix=dict(type='str'),
+ credentials_file=dict(type='str'),
+ )
+ )
+
+ p = module.params
+ url = run(p.get('container'), p.get('files'),
+ indexes=p.get('indexes'),
+ parent_links=p.get('parent_links'),
+ topdir_parent_link=p.get('topdir_parent_link'),
+ partition=p.get('partition'),
+ footer=p.get('footer'),
+ prefix=p.get('prefix'),
+ credentials_file=p.get('credentials_file'))
+ module.exit_json(changed=True,
+ url=url)
+
+
+def cli_main():
+ parser = argparse.ArgumentParser(
+ description="Upload files to Google Cloud Storage"
+ )
+ parser.add_argument('--verbose', action='store_true',
+ help='show debug information')
+ parser.add_argument('--no-indexes', action='store_true',
+ help='do not generate any indexes at all')
+ parser.add_argument('--no-parent-links', action='store_true',
+ help='do not include links back to a parent dir')
+ parser.add_argument('--create-topdir-parent-link', action='store_true',
+ help='include a link in the root directory of the '
+ 'files to the parent directory which may be the '
+ 'index of all results')
+ parser.add_argument('--partition', action='store_true',
+ help='partition the prefix into multiple containers')
+ parser.add_argument('--append-footer', default='index_footer.html',
+ help='when generating an index, if the given file is '
+ 'present in a directory, append it to the index '
+ '(set to "none" to disable)')
+ parser.add_argument('--prefix',
+ help='Prepend this path to the object names when '
+ 'uploading')
+ parser.add_argument('--dry-run', action='store_true',
+ help='do not attempt to create containers or upload, '
+ 'useful with --verbose for debugging')
+ parser.add_argument('--credentials_file',
+ help='A file with Google cloud credentials')
+ parser.add_argument('container',
+ help='Name of the container to use when uploading')
+ parser.add_argument('files', nargs='+',
+ help='the file(s) to upload with recursive glob '
+ 'matching when supplied as a string')
+
+ args = parser.parse_args()
+
+ if args.verbose:
+ logging.basicConfig(level=logging.DEBUG)
+ logging.captureWarnings(True)
+
+ append_footer = args.append_footer
+ if append_footer.lower() == 'none':
+ append_footer = None
+
+ url = run(args.container, args.files,
+ indexes=not args.no_indexes,
+ parent_links=not args.no_parent_links,
+ topdir_parent_link=args.create_topdir_parent_link,
+ partition=args.partition,
+ footer=append_footer,
+ prefix=args.prefix,
+ dry_run=args.dry_run,
+ credentials_file=args.credentials_file)
+ print(url)
+
+
+if __name__ == '__main__':
+ if sys.stdin.isatty():
+ cli_main()
+ else:
+ ansible_main()
diff --git a/roles/upload-logs-gcs/tasks/main.yaml b/roles/upload-logs-gcs/tasks/main.yaml
new file mode 100644
index 0000000..53746e9
--- /dev/null
+++ b/roles/upload-logs-gcs/tasks/main.yaml
@@ -0,0 +1,37 @@
+- name: Set zuul-log-path fact
+ include_role:
+ name: set-zuul-log-path-fact
+ when: zuul_log_path is not defined
+
+# Always upload (true), never upload (false) or only on failure ('failure')
+- when: zuul_site_upload_logs | default(true) | bool or
+ (zuul_site_upload_logs == 'failure' and not zuul_success | bool)
+ block:
+ # Use chmod instead of file because ansible 2.5 file with recurse and
+ # follow can't really handle symlinks to .
+ - name: Ensure logs are readable before uploading
+ delegate_to: localhost
+ command: "chmod -R u=rwX,g=rX,o=rX {{ zuul.executor.log_root }}/"
+ # ANSIBLE0007 chmod used in place of argument mode to file
+ tags:
+ - skip_ansible_lint
+
+ - name: Upload logs to Google Cloud Storage
+ delegate_to: localhost
+ zuul_google_storage_upload:
+ partition: "{{ zuul_log_partition }}"
+ container: "{{ zuul_log_container }}"
+ prefix: "{{ zuul_log_path }}"
+ indexes: "{{ zuul_log_create_indexes }}"
+ credentials_file: "{{ zuul_log_credentials_file }}"
+ files:
+ - "{{ zuul.executor.log_root }}/"
+ register: upload_results
+
+- name: Return log URL to Zuul
+ delegate_to: localhost
+ zuul_return:
+ data:
+ zuul:
+ log_url: "{{ upload_results.url }}/"
+ when: upload_results is defined
diff --git a/roles/upload-logs-swift/library/test-fixtures/logs/controller/subdir/foo::3.txt b/roles/upload-logs-swift/library/test-fixtures/logs/controller/subdir/foo::3.txt
new file mode 100644
index 0000000..384ce7d
--- /dev/null
+++ b/roles/upload-logs-swift/library/test-fixtures/logs/controller/subdir/foo::3.txt
@@ -0,0 +1,2 @@
+This is a plain text file with a funny name.
+The index links should escape the :'s.
diff --git a/roles/upload-logs-swift/library/test_zuul_swift_upload.py b/roles/upload-logs-swift/library/test_zuul_swift_upload.py
index 075fa9e..6577c31 100644
--- a/roles/upload-logs-swift/library/test_zuul_swift_upload.py
+++ b/roles/upload-logs-swift/library/test_zuul_swift_upload.py
@@ -44,17 +44,10 @@ class SymlinkFixture(fixtures.Fixture):
]
def _setUp(self):
- self._cleanup()
for (src, target) in self.links:
path = os.path.join(FIXTURE_DIR, 'links', src)
os.symlink(target, path)
- self.addCleanup(self._cleanup)
-
- def _cleanup(self):
- for (src, target) in self.links:
- path = os.path.join(FIXTURE_DIR, 'links', src)
- if os.path.exists(path):
- os.unlink(path)
+ self.addCleanup(os.unlink, path)
class TestFileList(testtools.TestCase):
@@ -98,6 +91,7 @@ class TestFileList(testtools.TestCase):
('controller/journal.xz', 'text/plain', 'xz'),
('controller/service_log.txt', 'text/plain', None),
('controller/syslog', 'text/plain', None),
+ ('controller/subdir/foo::3.txt', 'text/plain', None),
('controller/subdir/subdir.txt', 'text/plain', None),
('zuul-info/inventory.yaml', 'text/plain', None),
('zuul-info/zuul-info.controller.txt', 'text/plain', None),
@@ -119,6 +113,7 @@ class TestFileList(testtools.TestCase):
('logs/controller/journal.xz', 'text/plain', 'xz'),
('logs/controller/service_log.txt', 'text/plain', None),
('logs/controller/syslog', 'text/plain', None),
+ ('logs/controller/subdir/foo::3.txt', 'text/plain', None),
('logs/controller/subdir/subdir.txt', 'text/plain', None),
('logs/zuul-info/inventory.yaml', 'text/plain', None),
('logs/zuul-info/zuul-info.controller.txt',
@@ -176,6 +171,7 @@ class TestFileList(testtools.TestCase):
('logs/controller/service_log.txt', 'text/plain', None),
('logs/controller/syslog', 'text/plain', None),
('logs/controller/index.html', 'text/html', None),
+ ('logs/controller/subdir/foo::3.txt', 'text/plain', None),
('logs/controller/subdir/subdir.txt', 'text/plain', None),
('logs/controller/subdir/index.html', 'text/html', None),
('logs/zuul-info/inventory.yaml', 'text/plain', None),
@@ -202,8 +198,12 @@ class TestFileList(testtools.TestCase):
self.assertEqual(rows[0].find('a').get('href'), '../')
self.assertEqual(rows[0].find('a').text, '../')
- self.assertEqual(rows[1].find('a').get('href'), 'subdir.txt')
- self.assertEqual(rows[1].find('a').text, 'subdir.txt')
+ # Test proper escaping of files with funny names
+ self.assertEqual(rows[1].find('a').get('href'), 'foo%3A%3A3.txt')
+ self.assertEqual(rows[1].find('a').text, 'foo::3.txt')
+ # Test files without escaping
+ self.assertEqual(rows[2].find('a').get('href'), 'subdir.txt')
+ self.assertEqual(rows[2].find('a').text, 'subdir.txt')
def test_index_files_trailing_slash(self):
'''Test index generation with a trailing slash'''
@@ -225,6 +225,7 @@ class TestFileList(testtools.TestCase):
('controller/service_log.txt', 'text/plain', None),
('controller/syslog', 'text/plain', None),
('controller/index.html', 'text/html', None),
+ ('controller/subdir/foo::3.txt', 'text/plain', None),
('controller/subdir/subdir.txt', 'text/plain', None),
('controller/subdir/index.html', 'text/html', None),
('zuul-info/inventory.yaml', 'text/plain', None),
@@ -252,8 +253,12 @@ class TestFileList(testtools.TestCase):
self.assertEqual(rows[0].find('a').get('href'), '../')
self.assertEqual(rows[0].find('a').text, '../')
- self.assertEqual(rows[1].find('a').get('href'), 'subdir.txt')
- self.assertEqual(rows[1].find('a').text, 'subdir.txt')
+ # Test proper escaping of files with funny names
+ self.assertEqual(rows[1].find('a').get('href'), 'foo%3A%3A3.txt')
+ self.assertEqual(rows[1].find('a').text, 'foo::3.txt')
+ # Test files without escaping
+ self.assertEqual(rows[2].find('a').get('href'), 'subdir.txt')
+ self.assertEqual(rows[2].find('a').text, 'subdir.txt')
def test_topdir_parent_link(self):
'''Test index generation creates topdir parent link'''
@@ -277,6 +282,7 @@ class TestFileList(testtools.TestCase):
('controller/service_log.txt', 'text/plain', None),
('controller/syslog', 'text/plain', None),
('controller/index.html', 'text/html', None),
+ ('controller/subdir/foo::3.txt', 'text/plain', None),
('controller/subdir/subdir.txt', 'text/plain', None),
('controller/subdir/index.html', 'text/html', None),
('zuul-info/inventory.yaml', 'text/plain', None),
@@ -307,8 +313,12 @@ class TestFileList(testtools.TestCase):
self.assertEqual(rows[0].find('a').get('href'), '../')
self.assertEqual(rows[0].find('a').text, '../')
- self.assertEqual(rows[1].find('a').get('href'), 'subdir.txt')
- self.assertEqual(rows[1].find('a').text, 'subdir.txt')
+ # Test proper escaping of files with funny names
+ self.assertEqual(rows[1].find('a').get('href'), 'foo%3A%3A3.txt')
+ self.assertEqual(rows[1].find('a').text, 'foo::3.txt')
+ # Test files without escaping
+ self.assertEqual(rows[2].find('a').get('href'), 'subdir.txt')
+ self.assertEqual(rows[2].find('a').text, 'subdir.txt')
def test_no_parent_links(self):
'''Test index generation creates topdir parent link'''
@@ -332,6 +342,7 @@ class TestFileList(testtools.TestCase):
('controller/service_log.txt', 'text/plain', None),
('controller/syslog', 'text/plain', None),
('controller/index.html', 'text/html', None),
+ ('controller/subdir/foo::3.txt', 'text/plain', None),
('controller/subdir/subdir.txt', 'text/plain', None),
('controller/subdir/index.html', 'text/html', None),
('zuul-info/inventory.yaml', 'text/plain', None),
@@ -357,8 +368,12 @@ class TestFileList(testtools.TestCase):
page = BeautifulSoup(page, 'html.parser')
rows = page.find_all('tr')[1:]
- self.assertEqual(rows[0].find('a').get('href'), 'subdir.txt')
- self.assertEqual(rows[0].find('a').text, 'subdir.txt')
+ # Test proper escaping of files with funny names
+ self.assertEqual(rows[0].find('a').get('href'), 'foo%3A%3A3.txt')
+ self.assertEqual(rows[0].find('a').text, 'foo::3.txt')
+ # Test files without escaping
+ self.assertEqual(rows[1].find('a').get('href'), 'subdir.txt')
+ self.assertEqual(rows[1].find('a').text, 'subdir.txt')
class TestFileDetail(testtools.TestCase):
diff --git a/roles/upload-logs-swift/library/zuul_swift_upload.py b/roles/upload-logs-swift/library/zuul_swift_upload.py
index 196bc75..485f4fa 100755
--- a/roles/upload-logs-swift/library/zuul_swift_upload.py
+++ b/roles/upload-logs-swift/library/zuul_swift_upload.py
@@ -25,6 +25,8 @@ Utility to upload files to swift
"""
import argparse
+import gzip
+import io
import logging
import mimetypes
import os
@@ -39,6 +41,10 @@ import tempfile
import threading
import time
import traceback
+try:
+ import urllib.parse as urlparse
+except ImportError:
+ import urllib as urlparse
import zlib
import collections
@@ -127,6 +133,95 @@ ICON_IMAGES = {
'AupSdoFsAAAAAElFTkSuQmCC'}
+# Begin vendored code
+# This code is licensed under the Public Domain/CC0 and comes from
+# https://github.com/leenr/gzip-stream/blob/master/gzip_stream.py
+# Code was modified:
+# removed type annotations to support python2.
+# removed use of *, somearg for positional anonymous args.
+# Default compression level to 9.
+
+class GZIPCompressedStream(io.RawIOBase):
+ def __init__(self, stream, compression_level=9):
+ assert 1 <= compression_level <= 9
+
+ self._compression_level = compression_level
+ self._stream = stream
+
+ self._compressed_stream = io.BytesIO()
+ self._compressor = gzip.GzipFile(
+ mode='wb',
+ fileobj=self._compressed_stream,
+ compresslevel=compression_level
+ )
+
+ # because of the GZIP header written by `GzipFile.__init__`:
+ self._compressed_stream.seek(0)
+
+ @property
+ def compression_level(self):
+ return self._compression_level
+
+ @property
+ def stream(self):
+ return self._stream
+
+ def readable(self):
+ return True
+
+ def _read_compressed_into(self, b):
+ buf = self._compressed_stream.read(len(b))
+ b[:len(buf)] = buf
+ return len(buf)
+
+ def readinto(self, b):
+ b = memoryview(b)
+
+ offset = 0
+ size = len(b)
+ while offset < size:
+ offset += self._read_compressed_into(b[offset:])
+ if offset < size:
+ # self._compressed_buffer now empty
+ if self._compressor.closed:
+ # nothing to compress anymore
+ break
+ # compress next bytes
+ self._read_n_compress(size)
+
+ return offset
+
+ def _read_n_compress(self, size):
+ assert size > 0
+
+ data = self._stream.read(size)
+
+ # rewind buffer to the start to free up memory
+ # (because anything currently in the buffer should be already
+ # streamed off the object)
+ self._compressed_stream.seek(0)
+ self._compressed_stream.truncate(0)
+
+ if data:
+ self._compressor.write(data)
+ else:
+ # this will write final data (will flush zlib with Z_FINISH)
+ self._compressor.close()
+
+ # rewind to the buffer start
+ self._compressed_stream.seek(0)
+
+ def __repr__(self):
+ return (
+ '{self.__class__.__name__}('
+ '{self.stream!r}, '
+ 'compression_level={self.compression_level!r}'
+ ')'
+ ).format(self=self)
+
+# End vendored code
+
+
def get_mime_icon(mime, filename=''):
icon = (APACHE_FILE_ICON_MAP.get(filename) or
APACHE_MIME_ICON_MAP.get(mime) or
@@ -360,8 +455,9 @@ class Indexer():
filename = file_details.filename
if file_details.folder:
filename += '/'
- output += '<td><a href="%s">%s</a></td>' % (filename,
- filename)
+ output += '<td><a href="%s">%s</a></td>' % (
+ urlparse.quote(filename),
+ filename)
output += '<td>%s</td>' % time.asctime(
file_details.last_modified)
size = sizeof_fmt(file_details.size, suffix='')
@@ -458,6 +554,26 @@ class Indexer():
self.file_list.file_list = new_list
+class GzipFilter():
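+ # Iterator that yields gzip-compressed chunks of the wrapped file,
+ # letting the swift client stream the upload without holding the
+ # whole compressed body in memory.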
+ chunk_size = 16384
+
+ def __init__(self, infile):
+ self.gzipfile = GZIPCompressedStream(infile)
+ self.done = False
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.done:
+ self.gzipfile.close()
+ raise StopIteration()
+ data = self.gzipfile.read(self.chunk_size)
+ if not data:
+ self.done = True
+ return data
+
+
class DeflateFilter():
chunk_size = 16384
@@ -488,7 +604,13 @@ class DeflateFilter():
class Uploader():
def __init__(self, cloud, container, prefix=None, delete_after=None,
- public=True):
+ public=True, dry_run=False):
+
+ self.dry_run = dry_run
+ if dry_run:
+ self.url = 'http://dry-run-url.com/a/path/'
+ return
+
self.cloud = cloud
self.container = container
self.prefix = prefix or ''
@@ -509,7 +631,12 @@ class Uploader():
except keystoneauth1.exceptions.catalog.EndpointNotFound:
cdn_url = None
- if not self.cloud.get_container(self.container):
+ # We retry here because sometimes we get HTTP 401 errors in rax.
+ # They seem to happen infrequently (on the order of once a day across
+ # all jobs) so a retry is likely to work.
+ container = retry_function(
+ lambda: self.cloud.get_container(self.container))
+ if not container:
retry_function(
lambda: self.cloud.create_container(
name=self.container, public=public))
@@ -549,6 +676,10 @@ class Uploader():
def upload(self, file_list):
"""Spin up thread pool to upload to swift"""
+
+ if self.dry_run:
+ return
+
num_threads = min(len(file_list), MAX_UPLOAD_THREADS)
threads = []
queue = queuelib.Queue()
@@ -612,10 +743,17 @@ class Uploader():
if not file_detail.folder:
if (file_detail.encoding is None and
self._is_text_type(file_detail.mimetype)):
- headers['content-encoding'] = 'deflate'
- data = DeflateFilter(open(file_detail.full_path, 'rb'))
+ headers['content-encoding'] = 'gzip'
+ data = GzipFilter(open(file_detail.full_path, 'rb'))
else:
- if file_detail.encoding:
+ if (not file_detail.filename.endswith(".gz") and
+ file_detail.encoding):
+ # Don't apply gzip encoding to files that we receive as
+ # already gzipped. The reason for this is swift will
+ # serve this back to users as an uncompressed file if they
+ # don't set an accept-encoding that includes gzip. This
+ # can cause problems when the desired file state is
+ # compressed as with .tar.gz tarballs.
headers['content-encoding'] = file_detail.encoding
data = open(file_detail.full_path, 'rb')
else:
@@ -660,14 +798,9 @@ def run(cloud, container, files,
for x in file_list:
logging.debug(x)
- # Do no connect to swift or do any uploading in a dry run
- if dry_run:
- # No URL is known, so return nothing
- return
-
# Upload.
uploader = Uploader(cloud, container, prefix, delete_after,
- public)
+ public, dry_run)
uploader.upload(file_list)
return uploader.url
diff --git a/roles/upload-logs/README.rst b/roles/upload-logs/README.rst
index 7df56e3..26c4689 100644
--- a/roles/upload-logs/README.rst
+++ b/roles/upload-logs/README.rst
@@ -29,6 +29,13 @@ description of the site_logs secret in this example post-run playbook:
The root path to the logs on the logserver.
+.. zuul:rolevar:: zuul_log_compress
+ :default: false
+
+ When enabled, the console logs Zuul produces will be compressed
+ before uploading. You may need additional configuration for your web
+ server to view these files.
+
.. zuul:rolevar:: zuul_log_verbose
:default: false
diff --git a/roles/upload-logs/tasks/main.yaml b/roles/upload-logs/tasks/main.yaml
index cd2b46a..e692f99 100644
--- a/roles/upload-logs/tasks/main.yaml
+++ b/roles/upload-logs/tasks/main.yaml
@@ -40,8 +40,9 @@
with_items:
- job-output.txt
- job-output.json
+ when: zuul_log_compress | bool
- - name: Upload console log and json output
+ - name: Upload compressed console log and json output
synchronize:
src: "{{ zuul.executor.log_root }}/{{ item }}.gz"
dest: "{{ zuul_logserver_root }}/{{ zuul_log_path }}/{{ item }}.gz"
@@ -49,6 +50,17 @@
with_items:
- job-output.txt
- job-output.json
+ when: zuul_log_compress | bool
+
+ - name: Upload console log and json output
+ synchronize:
+ src: "{{ zuul.executor.log_root }}/{{ item }}"
+ dest: "{{ zuul_logserver_root }}/{{ zuul_log_path }}/{{ item }}"
+ verify_host: true
+ with_items:
+ - job-output.txt
+ - job-output.json
+ when: not zuul_log_compress | bool
- name: Return log URL to Zuul
delegate_to: localhost
diff --git a/roles/upload-logs/vars/main.yaml b/roles/upload-logs/vars/main.yaml
index c3a3be7..8848aee 100644
--- a/roles/upload-logs/vars/main.yaml
+++ b/roles/upload-logs/vars/main.yaml
@@ -1,2 +1,3 @@
zuul_logserver_root: /srv/static/logs
+zuul_log_compress: false
zuul_log_verbose: false
diff --git a/roles/use-buildset-registry/README.rst b/roles/use-buildset-registry/README.rst
index 8c93942..beabcb3 100644
--- a/roles/use-buildset-registry/README.rst
+++ b/roles/use-buildset-registry/README.rst
@@ -17,10 +17,6 @@ Use this role on any host which should use the buildset registry.
The port on which the registry is listening.
- .. zuul:rolevar:: proxy_port
-
- The port on which the registry proxy is listening.
-
.. zuul:rolevar:: username
The username used to access the registry via HTTP basic auth.
@@ -39,3 +35,15 @@ Use this role on any host which should use the buildset registry.
The system user to configure to use the docker registry. The
docker configuration file for this user will be updated. By
default, the user Ansible is running as.
+
+.. zuul:rolevar:: buildset_registry_namespaces
+ :default: ['docker.io', 'quay.io', 'gcr.io']
+
+ The namespaces that the buildset registry supports. The buildset
+ registry will be consulted first for images in these namespaces.
+ Any others will be fetched only from their upstream sources.
+
+ Add any local or third-party registries necessary here.
+
+ The default may change in the future as more general-purpose public
+ registries become known.
diff --git a/roles/use-buildset-registry/__init__.py b/roles/use-buildset-registry/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/roles/use-buildset-registry/__init__.py
diff --git a/roles/use-buildset-registry/defaults/main.yaml b/roles/use-buildset-registry/defaults/main.yaml
new file mode 100644
index 0000000..137d009
--- /dev/null
+++ b/roles/use-buildset-registry/defaults/main.yaml
@@ -0,0 +1,4 @@
+buildset_registry_namespaces:
+ - docker.io
+ - quay.io
+ - gcr.io
diff --git a/roles/use-buildset-registry/library/__init__.py b/roles/use-buildset-registry/library/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/roles/use-buildset-registry/library/__init__.py
diff --git a/roles/use-buildset-registry/library/modify_registries_conf.py b/roles/use-buildset-registry/library/modify_registries_conf.py
new file mode 100644
index 0000000..904240f
--- /dev/null
+++ b/roles/use-buildset-registry/library/modify_registries_conf.py
@@ -0,0 +1,77 @@
+# Copyright 2019 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils import remarshal
+
+
+def get_location(prefix, location):
+ # To support usage with both docker and podman, the buildset
+ # registry keeps "docker.io" entries un-namespaced.
+ if prefix == 'docker.io':
+ return location
+ else:
+ return location + '/' + prefix
+
+
+def ansible_main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ path=dict(required=True, type='path'),
+ buildset_registry=dict(type='raw'),
+ buildset_registry_alias=dict(type='str'),
+ namespaces=dict(type='raw'),
+ )
+ )
+ p = module.params
+ location = '%s:%s' % (p['buildset_registry_alias'],
+ p['buildset_registry']['port'])
+
+ if os.path.exists(p['path']):
+ with open(p['path'], 'rb') as f:
+ input_data = f.read()
+ data = remarshal.decode('toml', input_data, True)
+ else:
+ data = {}
+
+ unseen = set(p['namespaces'])
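+ # Prefixes already present in registries.conf get the buildset
+ # registry prepended as their first mirror; any configured
+ # namespace not yet present gets a new entry that consults the
+ # buildset registry first and falls back to the upstream location.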
+ if 'registry' not in data:
+ data['registry'] = []
+ for reg in data['registry']:
+ if reg['prefix'] in unseen:
+ unseen.remove(reg['prefix'])
+ else:
+ continue
+ mirrors = reg.setdefault('mirror', [])
+ mirrors.insert(0, {
+ 'location': get_location(reg['prefix'], location)})
+ for prefix in unseen:
+ mirrors = [{'location': get_location(prefix, location)},
+ {'location': prefix}]
+ reg = {'prefix': prefix,
+ 'location': prefix,
+ 'mirror': mirrors}
+ data['registry'].append(reg)
+
+ output_data = remarshal.encode_toml(data, True)
+ with open(p['path'], 'wb') as f:
+ f.write(output_data.encode('utf8'))
+
+ module.exit_json(changed=True, data=data)
+
+
+if __name__ == '__main__':
+ ansible_main()
diff --git a/roles/use-buildset-registry/module_utils/pytoml.py b/roles/use-buildset-registry/module_utils/pytoml.py
new file mode 100644
index 0000000..97ff02f
--- /dev/null
+++ b/roles/use-buildset-registry/module_utils/pytoml.py
@@ -0,0 +1,551 @@
+# No-notice MIT License
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+# Originally from:
+# https://github.com/avakar/pytoml
+
+from __future__ import unicode_literals
+import datetime
+import io
+import math
+import re
+import string
+import sys
+
+try:
+ from pathlib import PurePath as _path_types
+except ImportError:
+ _path_types = ()
+
+if sys.version_info[0] == 3:
+ long = int
+ unicode = str
+
+class TomlError(RuntimeError):
+ def __init__(self, message, line, col, filename):
+ RuntimeError.__init__(self, message, line, col, filename)
+ self.message = message
+ self.line = line
+ self.col = col
+ self.filename = filename
+
+ def __str__(self):
+ return '{}({}, {}): {}'.format(self.filename, self.line, self.col, self.message)
+
+ def __repr__(self):
+ return 'TomlError({!r}, {!r}, {!r}, {!r})'.format(self.message, self.line, self.col, self.filename)
+
+rfc3339_re = re.compile(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(\.\d+)?(?:Z|([+-]\d{2}):(\d{2}))')
+
+def parse_rfc3339(v):
+ m = rfc3339_re.match(v)
+ if not m or m.group(0) != v:
+ return None
+ return parse_rfc3339_re(m)
+
+def parse_rfc3339_re(m):
+ r = map(int, m.groups()[:6])
+ if m.group(7):
+ micro = float(m.group(7))
+ else:
+ micro = 0
+
+ if m.group(8):
+ g = int(m.group(8), 10) * 60 + int(m.group(9), 10)
+ tz = _TimeZone(datetime.timedelta(0, g * 60))
+ else:
+ tz = _TimeZone(datetime.timedelta(0, 0))
+
+ y, m, d, H, M, S = r
+ return datetime.datetime(y, m, d, H, M, S, int(micro * 1000000), tz)
+
+
+def format_rfc3339(v):
+ offs = v.utcoffset()
+ offs = int(offs.total_seconds()) // 60 if offs is not None else 0
+
+ if offs == 0:
+ suffix = 'Z'
+ else:
+ if offs > 0:
+ suffix = '+'
+ else:
+ suffix = '-'
+ offs = -offs
+ suffix = '{0}{1:02}:{2:02}'.format(suffix, offs // 60, offs % 60)
+
+ if v.microsecond:
+ return v.strftime('%Y-%m-%dT%H:%M:%S.%f') + suffix
+ else:
+ return v.strftime('%Y-%m-%dT%H:%M:%S') + suffix
+
+class _TimeZone(datetime.tzinfo):
+ def __init__(self, offset):
+ self._offset = offset
+
+ def utcoffset(self, dt):
+ return self._offset
+
+ def dst(self, dt):
+ return None
+
+ def tzname(self, dt):
+ m = self._offset.total_seconds() // 60
+ if m < 0:
+ res = '-'
+ m = -m
+ else:
+ res = '+'
+ h = m // 60
+ m = m - h * 60
+ return '{}{:.02}{:.02}'.format(res, h, m)
+
+if sys.version_info[0] == 2:
+ _chr = unichr
+else:
+ _chr = chr
+
+def load(fin, translate=lambda t, x, v: v, object_pairs_hook=dict):
+ return loads(fin.read(), translate=translate, object_pairs_hook=object_pairs_hook, filename=getattr(fin, 'name', repr(fin)))
+
+def loads(s, filename='<string>', translate=lambda t, x, v: v, object_pairs_hook=dict):
+ if isinstance(s, bytes):
+ s = s.decode('utf-8')
+
+ s = s.replace('\r\n', '\n')
+
+ root = object_pairs_hook()
+ tables = object_pairs_hook()
+ scope = root
+
+ src = _Source(s, filename=filename)
+ ast = _p_toml(src, object_pairs_hook=object_pairs_hook)
+
+ def error(msg):
+ raise TomlError(msg, pos[0], pos[1], filename)
+
+ def process_value(v, object_pairs_hook):
+ kind, text, value, pos = v
+ if kind == 'array':
+ if value and any(k != value[0][0] for k, t, v, p in value[1:]):
+ error('array-type-mismatch')
+ value = [process_value(item, object_pairs_hook=object_pairs_hook) for item in value]
+ elif kind == 'table':
+ value = object_pairs_hook([(k, process_value(value[k], object_pairs_hook=object_pairs_hook)) for k in value])
+ return translate(kind, text, value)
+
+ for kind, value, pos in ast:
+ if kind == 'kv':
+ k, v = value
+ if k in scope:
+ error('duplicate_keys. Key "{0}" was used more than once.'.format(k))
+ scope[k] = process_value(v, object_pairs_hook=object_pairs_hook)
+ else:
+ is_table_array = (kind == 'table_array')
+ cur = tables
+ for name in value[:-1]:
+ if isinstance(cur.get(name), list):
+ d, cur = cur[name][-1]
+ else:
+ d, cur = cur.setdefault(name, (None, object_pairs_hook()))
+
+ scope = object_pairs_hook()
+ name = value[-1]
+ if name not in cur:
+ if is_table_array:
+ cur[name] = [(scope, object_pairs_hook())]
+ else:
+ cur[name] = (scope, object_pairs_hook())
+ elif isinstance(cur[name], list):
+ if not is_table_array:
+ error('table_type_mismatch')
+ cur[name].append((scope, object_pairs_hook()))
+ else:
+ if is_table_array:
+ error('table_type_mismatch')
+ old_scope, next_table = cur[name]
+ if old_scope is not None:
+ error('duplicate_tables')
+ cur[name] = (scope, next_table)
+
+ def merge_tables(scope, tables):
+ if scope is None:
+ scope = object_pairs_hook()
+ for k in tables:
+ if k in scope:
+ error('key_table_conflict')
+ v = tables[k]
+ if isinstance(v, list):
+ scope[k] = [merge_tables(sc, tbl) for sc, tbl in v]
+ else:
+ scope[k] = merge_tables(v[0], v[1])
+ return scope
+
+ return merge_tables(root, tables)
+
+class _Source:
+ def __init__(self, s, filename=None):
+ self.s = s
+ self._pos = (1, 1)
+ self._last = None
+ self._filename = filename
+ self.backtrack_stack = []
+
+ def last(self):
+ return self._last
+
+ def pos(self):
+ return self._pos
+
+ def fail(self):
+ return self._expect(None)
+
+ def consume_dot(self):
+ if self.s:
+ self._last = self.s[0]
+ self.s = self.s[1:]
+ self._advance(self._last)
+ return self._last
+ return None
+
+ def expect_dot(self):
+ return self._expect(self.consume_dot())
+
+ def consume_eof(self):
+ if not self.s:
+ self._last = ''
+ return True
+ return False
+
+ def expect_eof(self):
+ return self._expect(self.consume_eof())
+
+ def consume(self, s):
+ if self.s.startswith(s):
+ self.s = self.s[len(s):]
+ self._last = s
+ self._advance(s)
+ return True
+ return False
+
+ def expect(self, s):
+ return self._expect(self.consume(s))
+
+ def consume_re(self, re):
+ m = re.match(self.s)
+ if m:
+ self.s = self.s[len(m.group(0)):]
+ self._last = m
+ self._advance(m.group(0))
+ return m
+ return None
+
+ def expect_re(self, re):
+ return self._expect(self.consume_re(re))
+
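+ # Using the source as a context manager implements backtracking:
+ # __enter__ saves the current position and __exit__ restores it,
+ # swallowing the TomlError raised when a parse alternative fails.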
+ def __enter__(self):
+ self.backtrack_stack.append((self.s, self._pos))
+
+ def __exit__(self, type, value, traceback):
+ if type is None:
+ self.backtrack_stack.pop()
+ else:
+ self.s, self._pos = self.backtrack_stack.pop()
+ return type == TomlError
+
+ def commit(self):
+ self.backtrack_stack[-1] = (self.s, self._pos)
+
+ def _expect(self, r):
+ if not r:
+ raise TomlError('msg', self._pos[0], self._pos[1], self._filename)
+ return r
+
+ def _advance(self, s):
+ suffix_pos = s.rfind('\n')
+ if suffix_pos == -1:
+ self._pos = (self._pos[0], self._pos[1] + len(s))
+ else:
+ self._pos = (self._pos[0] + s.count('\n'), len(s) - suffix_pos)
+
+_ews_re = re.compile(r'(?:[ \t]|#[^\n]*\n|#[^\n]*\Z|\n)*')
+def _p_ews(s):
+ s.expect_re(_ews_re)
+
+_ws_re = re.compile(r'[ \t]*')
+def _p_ws(s):
+ s.expect_re(_ws_re)
+
+_escapes = { 'b': '\b', 'n': '\n', 'r': '\r', 't': '\t', '"': '"',
+ '\\': '\\', 'f': '\f' }
+
+_basicstr_re = re.compile(r'[^"\\\000-\037]*')
+_short_uni_re = re.compile(r'u([0-9a-fA-F]{4})')
+_long_uni_re = re.compile(r'U([0-9a-fA-F]{8})')
+_escapes_re = re.compile(r'[btnfr\"\\]')
+_newline_esc_re = re.compile('\n[ \t\n]*')
+def _p_basicstr_content(s, content=_basicstr_re):
+ res = []
+ while True:
+ res.append(s.expect_re(content).group(0))
+ if not s.consume('\\'):
+ break
+ if s.consume_re(_newline_esc_re):
+ pass
+ elif s.consume_re(_short_uni_re) or s.consume_re(_long_uni_re):
+ v = int(s.last().group(1), 16)
+ if 0xd800 <= v < 0xe000:
+ s.fail()
+ res.append(_chr(v))
+ else:
+ s.expect_re(_escapes_re)
+ res.append(_escapes[s.last().group(0)])
+ return ''.join(res)
+
+_key_re = re.compile(r'[0-9a-zA-Z-_]+')
+def _p_key(s):
+ with s:
+ s.expect('"')
+ r = _p_basicstr_content(s, _basicstr_re)
+ s.expect('"')
+ return r
+ if s.consume('\''):
+ if s.consume('\'\''):
+ s.consume('\n')
+ r = s.expect_re(_litstr_ml_re).group(0)
+ s.expect('\'\'\'')
+ else:
+ r = s.expect_re(_litstr_re).group(0)
+ s.expect('\'')
+ return r
+ return s.expect_re(_key_re).group(0)
+
+_float_re = re.compile(r'[+-]?(?:0|[1-9](?:_?\d)*)(?:\.\d(?:_?\d)*)?(?:[eE][+-]?(?:\d(?:_?\d)*))?')
+
+_basicstr_ml_re = re.compile(r'(?:""?(?!")|[^"\\\000-\011\013-\037])*')
+_litstr_re = re.compile(r"[^'\000\010\012-\037]*")
+_litstr_ml_re = re.compile(r"(?:(?:|'|'')(?:[^'\000-\010\013-\037]))*")
+def _p_value(s, object_pairs_hook):
+ pos = s.pos()
+
+ if s.consume('true'):
+ return 'bool', s.last(), True, pos
+ if s.consume('false'):
+ return 'bool', s.last(), False, pos
+
+ if s.consume('"'):
+ if s.consume('""'):
+ s.consume('\n')
+ r = _p_basicstr_content(s, _basicstr_ml_re)
+ s.expect('"""')
+ else:
+ r = _p_basicstr_content(s, _basicstr_re)
+ s.expect('"')
+ return 'str', r, r, pos
+
+ if s.consume('\''):
+ if s.consume('\'\''):
+ s.consume('\n')
+ r = s.expect_re(_litstr_ml_re).group(0)
+ s.expect('\'\'\'')
+ else:
+ r = s.expect_re(_litstr_re).group(0)
+ s.expect('\'')
+ return 'str', r, r, pos
+
+ if s.consume_re(rfc3339_re):
+ m = s.last()
+ return 'datetime', m.group(0), parse_rfc3339_re(m), pos
+
+ if s.consume_re(_float_re):
+ m = s.last().group(0)
+ r = m.replace('_','')
+ if '.' in m or 'e' in m or 'E' in m:
+ return 'float', m, float(r), pos
+ else:
+ return 'int', m, int(r, 10), pos
+
+ if s.consume('['):
+ items = []
+ with s:
+ while True:
+ _p_ews(s)
+ items.append(_p_value(s, object_pairs_hook=object_pairs_hook))
+ s.commit()
+ _p_ews(s)
+ s.expect(',')
+ s.commit()
+ _p_ews(s)
+ s.expect(']')
+ return 'array', None, items, pos
+
+ if s.consume('{'):
+ _p_ws(s)
+ items = object_pairs_hook()
+ if not s.consume('}'):
+ k = _p_key(s)
+ _p_ws(s)
+ s.expect('=')
+ _p_ws(s)
+ items[k] = _p_value(s, object_pairs_hook=object_pairs_hook)
+ _p_ws(s)
+ while s.consume(','):
+ _p_ws(s)
+ k = _p_key(s)
+ _p_ws(s)
+ s.expect('=')
+ _p_ws(s)
+ items[k] = _p_value(s, object_pairs_hook=object_pairs_hook)
+ _p_ws(s)
+ s.expect('}')
+ return 'table', None, items, pos
+
+ s.fail()
+
+def _p_stmt(s, object_pairs_hook):
+ pos = s.pos()
+ if s.consume('['):
+ is_array = s.consume('[')
+ _p_ws(s)
+ keys = [_p_key(s)]
+ _p_ws(s)
+ while s.consume('.'):
+ _p_ws(s)
+ keys.append(_p_key(s))
+ _p_ws(s)
+ s.expect(']')
+ if is_array:
+ s.expect(']')
+ return 'table_array' if is_array else 'table', keys, pos
+
+ key = _p_key(s)
+ _p_ws(s)
+ s.expect('=')
+ _p_ws(s)
+ value = _p_value(s, object_pairs_hook=object_pairs_hook)
+ return 'kv', (key, value), pos
+
+_stmtsep_re = re.compile(r'(?:[ \t]*(?:#[^\n]*)?\n)+[ \t]*')
+def _p_toml(s, object_pairs_hook):
+ stmts = []
+ _p_ews(s)
+ with s:
+ stmts.append(_p_stmt(s, object_pairs_hook=object_pairs_hook))
+ while True:
+ s.commit()
+ s.expect_re(_stmtsep_re)
+ stmts.append(_p_stmt(s, object_pairs_hook=object_pairs_hook))
+ _p_ews(s)
+ s.expect_eof()
+ return stmts
+
+
+def dumps(obj, sort_keys=False):
+ fout = io.StringIO()
+ dump(obj, fout, sort_keys=sort_keys)
+ return fout.getvalue()
+
+
+_escapes = {'\n': 'n', '\r': 'r', '\\': '\\', '\t': 't', '\b': 'b', '\f': 'f', '"': '"'}
+
+
+def _escape_string(s):
+ res = []
+ start = 0
+
+ def flush():
+ if start != i:
+ res.append(s[start:i])
+ return i + 1
+
+ i = 0
+ while i < len(s):
+ c = s[i]
+ if c in '"\\\n\r\t\b\f':
+ start = flush()
+ res.append('\\' + _escapes[c])
+ elif ord(c) < 0x20:
+ start = flush()
+ res.append('\\u%04x' % ord(c))
+ i += 1
+
+ flush()
+ return '"' + ''.join(res) + '"'
+
+
+_key_chars = string.digits + string.ascii_letters + '-_'
+def _escape_id(s):
+ if any(c not in _key_chars for c in s):
+ return _escape_string(s)
+ return s
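+
+# e.g. _escape_id('plain-key') == 'plain-key'; _escape_id('a b') == '"a b"'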
+
+
+def _format_value(v):
+ if isinstance(v, bool):
+ return 'true' if v else 'false'
+ if isinstance(v, int) or isinstance(v, long):
+ return unicode(v)
+ if isinstance(v, float):
+ if math.isnan(v) or math.isinf(v):
+ raise ValueError("{0} is not a valid TOML value".format(v))
+ else:
+ return repr(v)
+ elif isinstance(v, unicode) or isinstance(v, bytes):
+ return _escape_string(v)
+ elif isinstance(v, datetime.datetime):
+ return format_rfc3339(v)
+ elif isinstance(v, list):
+ return '[{0}]'.format(', '.join(_format_value(obj) for obj in v))
+ elif isinstance(v, dict):
+ return '{{{0}}}'.format(', '.join('{} = {}'.format(_escape_id(k), _format_value(obj)) for k, obj in v.items()))
+ elif isinstance(v, _path_types):
+ return _escape_string(str(v))
+ else:
+ raise RuntimeError(v)
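+
+# e.g. _format_value(True) == 'true'; _format_value([1, 2]) == '[1, 2]'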
+
+
+def dump(obj, fout, sort_keys=False):
+ tables = [((), obj, False)]
+
+ while tables:
+ name, table, is_array = tables.pop()
+ if name:
+ section_name = '.'.join(_escape_id(c) for c in name)
+ if is_array:
+ fout.write('[[{0}]]\n'.format(section_name))
+ else:
+ fout.write('[{0}]\n'.format(section_name))
+
+ table_keys = sorted(table.keys()) if sort_keys else table.keys()
+ new_tables = []
+ has_kv = False
+ for k in table_keys:
+ v = table[k]
+ if isinstance(v, dict):
+ new_tables.append((name + (k,), v, False))
+ elif isinstance(v, list) and v and all(isinstance(o, dict) for o in v):
+ new_tables.extend((name + (k,), d, True) for d in v)
+ elif v is None:
+ # based on mojombo's comment: https://github.com/toml-lang/toml/issues/146#issuecomment-25019344
+ fout.write(
+ '#{} = null # To use: uncomment and replace null with value\n'.format(_escape_id(k)))
+ has_kv = True
+ else:
+ fout.write('{0} = {1}\n'.format(_escape_id(k), _format_value(v)))
+ has_kv = True
+
+ tables.extend(reversed(new_tables))
+
+ if (name or has_kv) and tables:
+ fout.write('\n')
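+
+# e.g. with insertion-ordered dicts, dump({'a': 1, 't': {'b': 2}}, fout)
+# writes:
+#   a = 1
+#
+#   [t]
+#   b = 2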
diff --git a/roles/use-buildset-registry/module_utils/remarshal.py b/roles/use-buildset-registry/module_utils/remarshal.py
new file mode 100644
index 0000000..39f1583
--- /dev/null
+++ b/roles/use-buildset-registry/module_utils/remarshal.py
@@ -0,0 +1,418 @@
+#! /usr/bin/env python3
+# remarshal, a utility to convert between serialization formats.
+# Copyright (c) 2014, 2015, 2016, 2017, 2018, 2019 dbohdan
+# License: MIT
+
+# Originally from:
+# https://github.com/dbohdan/remarshal
+
+from __future__ import print_function
+
+import argparse
+import datetime
+# import dateutil.parser
+import io
+import json
+import os.path
+import re
+import string
+import sys
+from ansible.module_utils.pytoml import loads as pytoml_loads
+from ansible.module_utils.pytoml import dumps as pytoml_dumps
+from ansible.module_utils.pytoml import TomlError
+# import umsgpack
+# import yaml
+
+from collections import OrderedDict
+
+
+__version__ = '0.11.2'
+
+FORMATS = ['json', 'msgpack', 'toml', 'yaml']
+
+
+# === JSON ===
+
+if hasattr(json, 'JSONDecodeError'):
+ JSONDecodeError = json.JSONDecodeError
+else:
+ JSONDecodeError = ValueError
+
+
+def json_default(obj):
+ if isinstance(obj, datetime.datetime):
+ return obj.isoformat()
+ raise TypeError("{0} is not JSON-serializable".format(repr(obj)))
+
+
+# === CLI ===
+
+def argv0_to_format(argv0):
+ possible_format = '(' + '|'.join(FORMATS) + ')'
+ match = re.search('^' + possible_format + '2' + possible_format, argv0)
+ if match:
+ from_, to = match.groups()
+ return True, from_, to
+ else:
+ return False, None, None
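+
+# e.g. argv0_to_format('toml2json') == (True, 'toml', 'json')
+#      argv0_to_format('remarshal') == (False, None, None)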
+
+
+def extension_to_format(path):
+ _, ext = os.path.splitext(path)
+
+ ext = ext[1:]
+
+ if ext == 'yml':
+ ext = 'yaml'
+
+ return ext if ext in FORMATS else None
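+
+# e.g. extension_to_format('settings.yml') == 'yaml'
+#      extension_to_format('settings.ini') is None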
+
+
+def parse_command_line(argv):
+ me = os.path.basename(argv[0])
+ format_from_argv0, argv0_from, argv0_to = argv0_to_format(me)
+
+ parser = argparse.ArgumentParser(
+ description='Convert between TOML, MessagePack, YAML, and JSON.'
+ )
+
+ input_group = parser.add_mutually_exclusive_group()
+ input_group.add_argument(
+ 'input',
+ nargs='?',
+ default='-',
+ help='input file'
+ )
+ input_group.add_argument(
+ '-i', '--input',
+ dest='input_flag',
+ metavar='input',
+ default=None,
+ help='input file'
+ )
+
+ output_group = parser.add_mutually_exclusive_group()
+ output_group.add_argument(
+ 'output',
+ nargs='?',
+ default='-',
+ help='output file'
+ )
+ output_group.add_argument(
+ '-o', '--output',
+ dest='output_flag',
+ metavar='output',
+ default=None,
+ help='output file'
+ )
+
+ if not format_from_argv0:
+ parser.add_argument(
+ '--if', '-if', '--input-format',
+ dest='input_format',
+ help="input format",
+ choices=FORMATS
+ )
+ parser.add_argument(
+ '--of',
+ '-of',
+ '--output-format',
+ dest='output_format',
+ help="output format",
+ choices=FORMATS
+ )
+
+ if not format_from_argv0 or argv0_to == 'json':
+ parser.add_argument(
+ '--indent-json',
+ dest='indent_json',
+ metavar='n',
+ type=int,
+ default=None,
+ help='indent JSON output'
+ )
+
+ if not format_from_argv0 or argv0_to == 'yaml':
+ parser.add_argument(
+ '--yaml-style',
+ dest='yaml_style',
+ default=None,
+ help='YAML formatting style',
+ choices=['', '\'', '"', '|', '>']
+ )
+
+ parser.add_argument(
+ '--wrap',
+ dest='wrap',
+ metavar='key',
+ default=None,
+ help='wrap the data in a map type with the given key'
+ )
+ parser.add_argument(
+ '--unwrap',
+ dest='unwrap',
+ metavar='key',
+ default=None,
+ help='only output the data stored under the given key'
+ )
+ parser.add_argument(
+ '-p', '--preserve-key-order',
+ dest='ordered',
+ action='store_true',
+ help='preserve the order of dictionary/mapping keys'
+ )
+ parser.add_argument(
+ '-v', '--version',
+ action='version',
+ version=__version__
+ )
+
+ args = parser.parse_args(args=argv[1:])
+
+ # Use the positional input and output arguments.
+ if args.input_flag is not None:
+ args.input = args.input_flag
+
+ if args.output_flag is not None:
+ args.output = args.output_flag
+
+ # Determine the implicit input and output format if possible.
+ if format_from_argv0:
+ args.input_format = argv0_from
+ args.output_format = argv0_to
+
+ if argv0_to != 'json':
+ args.__dict__['indent_json'] = None
+ if argv0_to != 'yaml':
+ args.__dict__['yaml_style'] = None
+ else:
+ if args.input_format is None:
+ args.input_format = extension_to_format(args.input)
+ if args.input_format is None:
+ parser.error('Need an explicit input format')
+
+ if args.output_format is None:
+ args.output_format = extension_to_format(args.output)
+ if args.output_format is None:
+ parser.error('Need an explicit output format')
+
+ # Wrap yaml_style.
+ args.__dict__['yaml_options'] = {'default_style': args.yaml_style}
+ del args.__dict__['yaml_style']
+
+ return args
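+
+# e.g. parse_command_line(['remarshal', '-i', 'in.toml', '-o', 'out.json'])
+# returns args with input='in.toml', input_format='toml', output='out.json'
+# and output_format='json'.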
+
+
+# === Parser/serializer wrappers ===
+
+def decode_json(input_data, ordered):
+ try:
+ pairs_hook = OrderedDict if ordered else dict
+ return json.loads(
+ input_data.decode('utf-8'),
+ object_pairs_hook=pairs_hook
+ )
+ except JSONDecodeError as e:
+ raise ValueError('Cannot parse as JSON ({0})'.format(e))
+
+
+def decode_msgpack(input_data, ordered):
+ # umsgpack is commented out in the imports above, so msgpack decoding is
+ # effectively disabled in this vendored copy.
+ try:
+ return umsgpack.unpackb(input_data, use_ordered_dict=ordered)
+ except umsgpack.UnpackException as e:
+ raise ValueError('Cannot parse as MessagePack ({0})'.format(e))
+
+
+def decode_toml(input_data, ordered):
+ try:
+ pairs_hook = OrderedDict if ordered else dict
+ return pytoml_loads(
+ input_data,
+ object_pairs_hook=pairs_hook
+ )
+ except TomlError as e:
+ raise ValueError('Cannot parse as TOML ({0})'.format(e))
+
+
+def decode(input_format, input_data, ordered):
+ decoder = {
+ 'json': decode_json,
+ 'msgpack': decode_msgpack,
+ 'toml': decode_toml,
+ }
+
+ if input_format not in decoder:
+ raise ValueError('Unknown input format: {0}'.format(input_format))
+
+ return decoder[input_format](input_data, ordered)
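+
+# e.g. decode('json', b'{"a": 1}', ordered=False) == {'a': 1}
+# ('yaml' appears in FORMATS but has no decoder here, matching the
+# commented-out yaml import above.)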
+
+
+def encode_json(data, ordered, indent):
+ if indent is True:
+ indent = 2
+
+ if indent:
+ separators = (',', ': ')
+ else:
+ separators = (',', ':')
+
+ try:
+ return json.dumps(
+ data,
+ default=json_default,
+ ensure_ascii=False,
+ indent=indent,
+ separators=separators,
+ sort_keys=not ordered
+ ) + "\n"
+ except TypeError as e:
+ raise ValueError('Cannot convert data to JSON ({0})'.format(e))
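+
+# e.g. encode_json({'a': 1}, ordered=False, indent=None) == '{"a":1}\n'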
+
+
+def traverse(
+ col,
+ dict_callback=lambda x: x,
+ list_callback=lambda x: x,
+ key_callback=lambda x: x,
+ value_callback=lambda x: x
+):
+ if isinstance(col, dict):
+ return dict_callback(col.__class__([
+ (key_callback(k), traverse(
+ v,
+ dict_callback,
+ list_callback,
+ key_callback,
+ value_callback
+ )) for (k, v) in col.items()
+ ]))
+ elif isinstance(col, list):
+ return list_callback([traverse(
+ x,
+ dict_callback,
+ list_callback,
+ key_callback,
+ value_callback
+ ) for x in col])
+ else:
+ return value_callback(col)
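+
+# e.g. traverse({'A': [1, 2]}, key_callback=str.lower) == {'a': [1, 2]}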
+
+
+def encode_msgpack(data):
+ try:
+ return umsgpack.packb(data)
+ except umsgpack.UnsupportedTypeException as e:
+ raise ValueError('Cannot convert data to MessagePack ({0})'.format(e))
+
+
+def encode_toml(data, ordered):
+ try:
+ return pytoml_dumps(data, sort_keys=not ordered)
+ except AttributeError as e:
+ if str(e) == "'list' object has no attribute 'keys'":
+ raise ValueError(
+ 'Cannot convert non-dictionary data to '
+ 'TOML; use "wrap" to wrap it in a '
+ 'dictionary'
+ )
+ else:
+ raise e
+ except TypeError as e:
+ if str(e) == "'in <string>' requires string as left operand, not int":
+ raise ValueError('Cannot convert binary to TOML')
+ else:
+ raise ValueError('Cannot convert data to TOML ({0})'.format(e))
+
+
+# === Main ===
+
+def run(argv):
+ args = parse_command_line(argv)
+ remarshal(
+ args.input,
+ args.output,
+ args.input_format,
+ args.output_format,
+ args.wrap,
+ args.unwrap,
+ args.indent_json,
+ args.yaml_options,
+ args.ordered
+ )
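+
+# Command-line sketch:
+#   python remarshal.py --if toml --of json settings.toml settings.json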
+
+
+def remarshal(
+ input,
+ output,
+ input_format,
+ output_format,
+ wrap=None,
+ unwrap=None,
+ indent_json=None,
+ yaml_options=None,
+ ordered=False,
+ transform=None,
+):
+ try:
+ if input == '-':
+ input_file = getattr(sys.stdin, 'buffer', sys.stdin)
+ else:
+ input_file = open(input, 'rb')
+
+ if output == '-':
+ output_file = getattr(sys.stdout, 'buffer', sys.stdout)
+ else:
+ output_file = open(output, 'wb')
+
+ input_data = input_file.read()
+
+ parsed = decode(input_format, input_data, ordered)
+
+ if unwrap is not None:
+ parsed = parsed[unwrap]
+ if wrap is not None:
+ temp = {}
+ temp[wrap] = parsed
+ parsed = temp
+
+ if transform:
+ parsed = transform(parsed)
+
+ if output_format == 'json':
+ output_data = encode_json(parsed, ordered, indent_json)
+ elif output_format == 'msgpack':
+ output_data = encode_msgpack(parsed)
+ elif output_format == 'toml':
+ output_data = encode_toml(parsed, ordered)
+ else:
+ raise ValueError(
+ 'Unknown output format: {0}'.format(output_format)
+ )
+
+ if output_format == 'msgpack':
+ encoded = output_data
+ else:
+ encoded = output_data.encode('utf-8')
+ output_file.write(encoded)
+
+ output_file.close()
+ finally:
+ if 'input_file' in locals():
+ input_file.close()
+ if 'output_file' in locals():
+ output_file.close()
+
+
+def main():
+ try:
+ run(sys.argv)
+ except KeyboardInterrupt:
+ pass
+ except (IOError, ValueError) as e:
+ print('Error: {0}'.format(e), file=sys.stderr)
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/use-buildset-registry/tasks/main.yaml b/roles/use-buildset-registry/tasks/main.yaml
index a276442..7e592ac 100644
--- a/roles/use-buildset-registry/tasks/main.yaml
+++ b/roles/use-buildset-registry/tasks/main.yaml
@@ -1,3 +1,12 @@
+- name: Include OS-specific variables
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ ansible_distribution }}-{{ ansible_distribution_major_version }}.yaml"
+ - "{{ ansible_distribution }}.{{ ansible_architecture }}.yaml"
+ - "{{ ansible_distribution }}.yaml"
+ - "{{ ansible_os_family }}.yaml"
+ - "default.yaml"
+
# Docker doesn't understand docker push [1234:5678::]:5000/image/path:tag
# so we set up /etc/hosts with a registry alias name to support ipv6 and 4.
- name: Configure /etc/hosts for buildset_registry to workaround docker not understanding ipv6 addresses
@@ -23,26 +32,14 @@
file:
state: directory
path: /etc/docker
-- name: Ensure buildset registry cert directory exists
- become: true
- file:
- path: "/etc/docker/certs.d/{{ buildset_registry_alias }}:{{ buildset_registry.port }}/"
- state: directory
-- name: Ensure proxy registry cert directory exists
- become: true
- file:
- path: "/etc/docker/certs.d/{{ buildset_registry_alias }}:{{ buildset_registry.proxy_port }}/"
- state: directory
- name: Write buildset registry TLS certificate
become: true
copy:
content: "{{ buildset_registry.cert }}"
- dest: "/etc/docker/certs.d/{{ buildset_registry_alias }}:{{ buildset_registry.port }}/ca.crt"
-- name: Write proxy registry TLS certificate
+ dest: "{{ ca_dir }}/buildset-registry.crt"
+- name: Update CA certs
+ command: "{{ ca_command }}"
become: true
- copy:
- content: "{{ buildset_registry.cert }}"
- dest: "/etc/docker/certs.d/{{ buildset_registry_alias }}:{{ buildset_registry.proxy_port }}/ca.crt"
# Update daemon config
- name: Check if docker daemon configuration exists
@@ -66,7 +63,7 @@
- name: Add registry to docker daemon configuration
vars:
new_config:
- registry-mirrors: "['https://{{ buildset_registry_alias }}:{{ buildset_registry.port }}/', 'https://{{ buildset_registry_alias }}:{{ buildset_registry.proxy_port }}/']"
+ registry-mirrors: "['https://{{ buildset_registry_alias }}:{{ buildset_registry.port }}/']"
set_fact:
docker_config: "{{ docker_config | combine(new_config) }}"
- name: Save docker daemon configuration
@@ -83,6 +80,19 @@
register: docker_restart
failed_when: docker_restart is failed and not 'Could not find the requested service' in docker_restart.msg
+- name: Ensure containers directory exists
+ become: yes
+ file:
+ state: directory
+ path: /etc/containers
+- name: Modify registries.conf
+ become: yes
+ modify_registries_conf:
+ path: /etc/containers/registries.conf
+ buildset_registry: "{{ buildset_registry }}"
+ buildset_registry_alias: "{{ buildset_registry_alias }}"
+ namespaces: "{{ buildset_registry_namespaces }}"
+
# We use 'block' here to cause the become to apply to all the tasks
# (which does not automatically happen with include_tasks).
- name: Update docker user config to use buildset registry
@@ -95,3 +105,15 @@
when: buildset_registry_docker_user is not defined
block:
- include_tasks: user-config.yaml
+
+- name: Check if cri-o is installed
+ stat:
+ path: /etc/crio/crio.conf
+ register: crio_path
+# TODO: with cri-o >= 1.16, change this to a SIGHUP of the crio process
+- name: Restart cri-o
+ when: crio_path.stat.exists
+ service:
+ name: crio
+ state: restarted
+ become: true
diff --git a/roles/use-buildset-registry/tasks/user-config.yaml b/roles/use-buildset-registry/tasks/user-config.yaml
index 8b7ebbf..238262f 100644
--- a/roles/use-buildset-registry/tasks/user-config.yaml
+++ b/roles/use-buildset-registry/tasks/user-config.yaml
@@ -27,11 +27,7 @@
new_config:
auths: |
{
- "https://index.docker.io/v1/":
- {"auth": "{{ (buildset_registry.username + ":" + buildset_registry.password) | b64encode }}"},
"{{ buildset_registry_alias }}:{{ buildset_registry.port }}":
- {"auth": "{{ (buildset_registry.username + ":" + buildset_registry.password) | b64encode }}"},
- "{{ buildset_registry_alias }}:{{ buildset_registry.proxy_port }}":
{"auth": "{{ (buildset_registry.username + ":" + buildset_registry.password) | b64encode }}"}
}
set_fact:
@@ -41,6 +37,10 @@
content: "{{ docker_config | to_nice_json }}"
dest: "~/.docker/config.json"
mode: 0600
+- name: Write containers auth configuration
+ copy:
+ content: "{{ docker_config | to_nice_json }}"
+ dest: "/run/user/{{ ansible_user_uid }}/auth.json"
- name: Check if /var/lib/kubelet exists
stat:
path: /var/lib/kubelet
@@ -51,4 +51,4 @@
file:
src: "~{{ buildset_registry_docker_user | default(ansible_user) }}/.docker/config.json"
dest: /var/lib/kubelet/config.json
- state: link \ No newline at end of file
+ state: link
diff --git a/roles/use-buildset-registry/vars/CentOS.yaml b/roles/use-buildset-registry/vars/CentOS.yaml
new file mode 100644
index 0000000..c2b260a
--- /dev/null
+++ b/roles/use-buildset-registry/vars/CentOS.yaml
@@ -0,0 +1,2 @@
+ca_dir: /etc/pki/ca-trust/source/anchors
+ca_command: update-ca-trust
diff --git a/roles/use-buildset-registry/vars/default.yaml b/roles/use-buildset-registry/vars/default.yaml
new file mode 100644
index 0000000..7bea1b2
--- /dev/null
+++ b/roles/use-buildset-registry/vars/default.yaml
@@ -0,0 +1,2 @@
+ca_dir: /usr/local/share/ca-certificates
+ca_command: update-ca-certificates
diff --git a/roles/validate-host/library/zuul_debug_info.py b/roles/validate-host/library/zuul_debug_info.py
index 8fc85d3..ce3be44 100644
--- a/roles/validate-host/library/zuul_debug_info.py
+++ b/roles/validate-host/library/zuul_debug_info.py
@@ -70,7 +70,7 @@ def main():
'traceroute6 -n {host}'.format(host=traceroute_host))
passed = True
except (subprocess.CalledProcessError, OSError) as e:
- ret['traceroute_v6_exception'] = traceback.format_exc(e)
+ ret['traceroute_v6_exception'] = traceback.format_exc()
ret['traceroute_v6_output'] = e.output
ret['traceroute_v6_return'] = e.returncode
pass
@@ -79,7 +79,7 @@ def main():
'traceroute -n {host}'.format(host=traceroute_host))
passed = True
except (subprocess.CalledProcessError, OSError) as e:
- ret['traceroute_v4_exception'] = traceback.format_exc(e)
+ ret['traceroute_v4_exception'] = traceback.format_exc()
ret['traceroute_v4_output'] = e.output
ret['traceroute_v4_return'] = e.returncode
pass
diff --git a/roles/validate-zone-db/tasks/find.yaml b/roles/validate-zone-db/tasks/find.yaml
index e2092cd..10d0438 100644
--- a/roles/validate-zone-db/tasks/find.yaml
+++ b/roles/validate-zone-db/tasks/find.yaml
@@ -8,5 +8,7 @@
- name: Build zone.db file list
set_fact:
- zone_db_files: '{{ zone_db_files + [ [item.path.split("/")[-2], item.path] ] }}'
+ zone_db_files: '{{ zone_db_files + [ [zj_zone_db_found_file.path.split("/")[-2], zj_zone_db_found_file.path] ] }}'
loop: "{{ zone_db_found_files['files'] }}"
+ loop_control:
+ loop_var: zj_zone_db_found_file
diff --git a/roles/validate-zone-db/tasks/main.yaml b/roles/validate-zone-db/tasks/main.yaml
index 8d11b34..4b3c176 100644
--- a/roles/validate-zone-db/tasks/main.yaml
+++ b/roles/validate-zone-db/tasks/main.yaml
@@ -9,5 +9,7 @@
when: not zone_db_files
- name: 'Run checkzone'
- command: '/usr/sbin/named-checkzone {{ item[0] }} {{ item[1] }}'
+ command: '/usr/sbin/named-checkzone {{ zj_zone_db_file[0] }} {{ zj_zone_db_file[1] }}'
loop: "{{ zone_db_files }}"
+ loop_control:
+ loop_var: zj_zone_db_file
diff --git a/zuul.d/general-jobs.yaml b/zuul.d/general-jobs.yaml
index dd2dc6b..85d4ceb 100644
--- a/zuul.d/general-jobs.yaml
+++ b/zuul.d/general-jobs.yaml
@@ -75,7 +75,6 @@
- job:
name: validate-zone-db
- parent: base
description: |
Validate zone.db files in project.
diff --git a/zuul.d/go-jobs.yaml b/zuul.d/go-jobs.yaml
new file mode 100644
index 0000000..d43ca4f
--- /dev/null
+++ b/zuul.d/go-jobs.yaml
@@ -0,0 +1,92 @@
+# Jobs listed in go-jobs.rst.
+
+- job:
+ name: golang-go
+ parent: unittests
+ description: |
+ Base job for go operations.
+
+ Responds to these variables:
+
+ .. zuul:jobvar:: go_command
+ :default: build
+
+ Command to pass to go.
+
+ .. zuul:jobvar:: go_package_dir
+
+ Path to the go package to test.
+
+ .. zuul:jobvar:: go_install_dir
+ :default: /usr/local/
+
+ Path to install go in.
+
+ .. zuul:jobvar:: go_version
+ :default: 1.13
+
+ The version of go to use.
+
+ .. zuul:jobvar:: go_os
+ :default: {{ ansible_system | lower }}
+
+ OS to use when choosing go version.
+
+ .. zuul:jobvar:: go_arch
+ :default: amd64 / 386
+
+ Architecture to use when choosing go version.
+
+ .. zuul:jobvar:: zuul_work_dir
+ :default: {{ zuul.project.src_dir }}
+
+ Path to operate in.
+
+ pre-run: playbooks/go/pre.yaml
+ run: playbooks/go/run.yaml
+ vars:
+ go_command: build
+
+- job:
+ name: golang-go-test
+ parent: golang-go
+ description: |
+ Run go test for a project.
+
+ Responds to these variables:
+
+ .. zuul:jobvar:: go_command
+ :default: build
+
+ Command to pass to go.
+
+ .. zuul:jobvar:: go_package_dir
+
+ Path to the go package to test.
+
+ .. zuul:jobvar:: go_install_dir
+ :default: /usr/local/
+
+ Path to install go in.
+
+ .. zuul:jobvar:: go_version
+ :default: 1.13
+
+ The version of go to use.
+
+ .. zuul:jobvar:: go_os
+ :default: {{ ansible_system | lower }}
+
+ OS to use when choosing go version.
+
+ .. zuul:jobvar:: go_arch
+ :default: amd64 / 386
+
+ Architecture to use when choosing go version.
+
+ .. zuul:jobvar:: zuul_work_dir
+ :default: {{ zuul.project.src_dir }}
+
+ Path to operate in.
+ vars:
+ go_command: test
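+
+# Illustrative project usage (assumed example, not part of this change):
+#
+# - project:
+#     check:
+#       jobs:
+#         - golang-go-test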
diff --git a/zuul.d/helm-jobs.yaml b/zuul.d/helm-jobs.yaml
new file mode 100644
index 0000000..c39e145
--- /dev/null
+++ b/zuul.d/helm-jobs.yaml
@@ -0,0 +1,14 @@
+- job:
+ name: chart-testing-lint
+ description: |
+ Run chart-testing on Helm charts
+ pre-run: playbooks/chart-testing/pre.yaml
+ run: playbooks/chart-testing/run.yaml
+
+- job:
+ name: apply-helm-charts
+ description: |
+ Deploy a Kubernetes cluster and apply charts
+ pre-run: playbooks/helm/pre.yaml
+ run: playbooks/helm/run.yaml
+ post-run: playbooks/helm/post.yaml
diff --git a/zuul.d/js-jobs.yaml b/zuul.d/js-jobs.yaml
index e12baa9..1c20476 100644
--- a/zuul.d/js-jobs.yaml
+++ b/zuul.d/js-jobs.yaml
@@ -18,12 +18,12 @@
The version of Node to use.
- .. zuul:jobvar: zuul_work_dir
+ .. zuul:jobvar:: zuul_work_dir
:default: {{ zuul.project.src_dir }}
Path to operate in.
- .. zuul:jobvar: javascript_content_dir
+ .. zuul:jobvar:: javascript_content_dir
:default: dist
Directory, relative to zuul_work_dir, holding build content.
@@ -46,12 +46,12 @@
The version of Node to use.
- .. zuul:jobvar: zuul_work_dir
+ .. zuul:jobvar:: zuul_work_dir
:default: {{ zuul.project.src_dir }}
Path to operate in.
- .. zuul:jobvar: javascript_content_dir
+ .. zuul:jobvar:: javascript_content_dir
:default: dist
Directory, relative to zuul_work_dir, holding build content.
@@ -76,12 +76,12 @@
The version of Node to use.
- .. zuul:jobvar: zuul_work_dir
+ .. zuul:jobvar:: zuul_work_dir
:default: {{ zuul.project.src_dir }}
Path to operate in.
- .. zuul:jobvar: javascript_content_dir
+ .. zuul:jobvar:: javascript_content_dir
:default: dist
Directory, relative to zuul_work_dir, holding build content.
@@ -105,17 +105,17 @@
The version of Node to use.
- .. zuul:jobvar: zuul_work_dir
+ .. zuul:jobvar:: zuul_work_dir
:default: {{ zuul.project.src_dir }}
Path to operate in.
- .. zuul:jobvar: javascript_content_dir
+ .. zuul:jobvar:: javascript_content_dir
:default: dist
Directory, relative to zuul_work_dir, holding build content.
- .. zuul:jobvar: create_tarball_directory
+ .. zuul:jobvar:: create_tarball_directory
Create a tarball with the contents of
create_tarball_directory (relative to zuul_work_dir).
@@ -135,12 +135,12 @@
The version of Node to use.
- .. zuul:jobvar: zuul_work_dir
+ .. zuul:jobvar:: zuul_work_dir
:default: {{ zuul.project.src_dir }}
Path to operate in.
- .. zuul:jobvar: javascript_content_dir
+ .. zuul:jobvar:: javascript_content_dir
:default: dist
Directory, relative to zuul_work_dir, holding build content.
@@ -161,12 +161,12 @@
The version of Node to use.
- .. zuul:jobvar: zuul_work_dir
+ .. zuul:jobvar:: zuul_work_dir
:default: {{ zuul.project.src_dir }}
Path to operate in.
- .. zuul:jobvar: javascript_content_dir
+ .. zuul:jobvar:: javascript_content_dir
:default: dist
Directory, relative to zuul_work_dir, holding build content.
@@ -186,7 +186,7 @@
The version of Node to use.
- .. zuul:jobvar: zuul_work_dir
+ .. zuul:jobvar:: zuul_work_dir
:default: {{ zuul.project.src_dir }}
Path to operate in.
diff --git a/zuul.d/python-jobs.yaml b/zuul.d/python-jobs.yaml
index 56aec9a..0aff839 100644
--- a/zuul.d/python-jobs.yaml
+++ b/zuul.d/python-jobs.yaml
@@ -22,18 +22,18 @@
String containing extra arguments to append to the tox command line.
- .. zuul:jobvar: tox_constraints_file
+ .. zuul:jobvar:: tox_constraints_file
Path to a pip constraints file. Will be provided to tox in the
TOX_CONSTRAINTS_FILE environment variable if it exists.
- .. zuul:jobvar: tox_install_siblings
+ .. zuul:jobvar:: tox_install_siblings
:default: true
Override tox requirements that have corresponding zuul git repos
on the node by installing the git versions into the tox virtualenv.
- .. zuul:jobvar: tox_install_bindep
+ .. zuul:jobvar:: tox_install_bindep
:default: true
Whether or not to run the binary dependencies detection and
@@ -66,6 +66,7 @@
Uses tox with the ``py34`` environment.
vars:
tox_envlist: py34
+ python_version: 3.4
- job:
name: tox-py35
@@ -76,6 +77,7 @@
Uses tox with the ``py35`` environment.
vars:
tox_envlist: py35
+ python_version: 3.5
- job:
name: tox-py36
@@ -86,6 +88,7 @@
Uses tox with the ``py36`` environment.
vars:
tox_envlist: py36
+ python_version: 3.6
- job:
name: tox-py37
@@ -96,6 +99,18 @@
Uses tox with the ``py37`` environment.
vars:
tox_envlist: py37
+ python_version: 3.7
+
+- job:
+ name: tox-py38
+ parent: tox
+ description: |
+ Run unit tests for a Python project under cPython version 3.8.
+
+ Uses tox with the ``py38`` environment.
+ vars:
+ tox_envlist: py38
+ python_version: 3.8
- job:
name: tox-docs
@@ -126,6 +141,7 @@
Uses tox with the ``linters`` environment.
vars:
tox_envlist: linters
+ test_setup_skip: true
- job:
name: tox-molecule
@@ -193,12 +209,12 @@
String containing extra arguments to append to the tox command line.
- .. zuul:jobvar: tox_constraints_file
+ .. zuul:jobvar:: tox_constraints_file
Path to a pip constraints file. Will be provided to tox in the
TOX_CONSTRAINTS_FILE environment variable if it exists.
- .. zuul:jobvar: tox_install_siblings
+ .. zuul:jobvar:: tox_install_siblings
:default: true
Override tox requirements that have corresponding zuul git repos
@@ -215,12 +231,12 @@
The version of Node to use.
- .. zuul:jobvar: javascript_content_dir
+ .. zuul:jobvar:: javascript_content_dir
:default: dist
Directory, relative to zuul_work_dir, holding build content.
- .. zuul:jobvar: zuul_work_dir
+ .. zuul:jobvar:: zuul_work_dir
:default: {{ zuul.project.src_dir }}
Path to operate in.
@@ -320,3 +336,14 @@
- test-requirements.txt
- tox.ini
+- job:
+ name: markdownlint
+ files: '^.*\.md$'
+ pre-run: playbooks/markdownlint/pre.yaml
+ run: playbooks/markdownlint/run.yaml
+ post-run: playbooks/markdownlint/post.yaml
+ description: |
+ Check markdown files for basic lint problems. Include a file named
+ `.markdownlint.json` that is configured according to
+ https://github.com/DavidAnson/markdownlint#optionsconfig
+ to control rule specifics.
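+
+# An illustrative .markdownlint.json (assumed example; MD013 is the
+# line-length rule in markdownlint):
+#
+#   {
+#     "default": true,
+#     "MD013": { "line_length": 120 }
+#   }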