some fixes to cartodb ci (#16179)

* upload parallel, serial and docker-compose logs to gcs
pull/16169/head
ibrahim menem 4 years ago committed by GitHub
parent 7899e741a3
commit dbaf138efa
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -28,6 +28,7 @@ sudo make install
### Bug fixes / enhancements
- Some CI improvements [16179](https://github.com/CartoDB/cartodb/pull/16179)
- Bump @carto/viewer to v1.0.3 [16170](https://github.com/CartoDB/cartodb/pull/16170)
- Show a new message for create connections after first login [16159](https://github.com/CartoDB/cartodb/pull/16159)
- Remove master api key from do-catalog layers request [16158](https://github.com/CartoDB/cartodb/pull/16158)

@ -1 +1 @@
Subproject commit 5742845d98626d536e1b11c185dde853c16cc7c1
Subproject commit 4227b8245b48062e4e2598860294a40a88dde9b6

@ -1,20 +1,20 @@
steps:
# Cancel previous job on the same branch
- name: gcr.io/cloud-builders/gcloud
- name: gcr.io/cloud-builders/gcloud-slim
entrypoint: /bin/bash
args:
- '-c'
- 'gcloud builds list --ongoing --filter="buildTriggerId=e983e3b9-109f-43a3-82b2-b312e42c878e AND substitutions.BRANCH_NAME=${BRANCH_NAME} AND id!=${BUILD_ID}" --format="get(ID)" > jobs_to_cancel'
- name: gcr.io/cloud-builders/gcloud
- name: gcr.io/cloud-builders/gcloud-slim
entrypoint: /bin/bash
args:
- '-c'
- 'gcloud builds cancel $(cat jobs_to_cancel | xargs) || true'
# Decrypt github key
- name: gcr.io/cloud-builders/gcloud
- name: gcr.io/cloud-builders/gcloud-slim
args:
- kms
- decrypt
@ -82,13 +82,48 @@ steps:
fi
docker build --build-arg COMPILE_ASSETS=false --build-arg BUNDLE_JOBS=16 -t gcr.io/cartodb-on-gcp-main-artifacts/builder:current -t gcr.io/cartodb-on-gcp-main-artifacts/builder:${_BRANCH_TAG} -t gcr.io/cartodb-on-gcp-main-artifacts/builder:${SHORT_SHA} -t gcr.io/cartodb-on-gcp-main-artifacts/builder:${_BRANCH_TAG}--${SHORT_SHA} --cache-from gcr.io/cartodb-on-gcp-main-artifacts/builder:${_BRANCH_TAG} .
# Run tests
# Start necessary services (redis, postgres) in background
- name: 'docker/compose:1.22.0'
args: ['-f', 'docker-compose-pg12.yml', 'up', '--build', '-d']
timeout: 900s
# Run tests, first in parallel, then give a second try in serial if some tests fail
- name: gcr.io/cloud-builders/docker
args: ['exec', '-i', 'builder_1', 'bash', '-c', '/cartodb/runParallelTests.sh 18' ]
timeout: 1800s
# Copy test results and logs from the container
- name: 'docker/compose:1.22.0'
entrypoint: /bin/sh
args:
- -c
- |
mkdir test_logs_${BUILD_ID}
docker-compose -f docker-compose-pg12.yml logs --no-color > test_logs_${BUILD_ID}/docker_compose_logs
docker cp builder_1:/cartodb/parallel_tests_logs test_logs_${BUILD_ID}/parallel_tests_logs
docker cp builder_1:/cartodb/serial_tests_logs test_logs_${BUILD_ID}/serial_tests_logs || true
docker cp builder_1:/cartodb/tests_exit_status tests_exit_status
echo "Logs will be available during 1 day at gs://cartodb-ci-tmp-logs/${BUILD_ID}/"
# Upload logs to gcs
- name: gcr.io/cloud-builders/gsutil
args: [ '-m', 'cp', '-r', 'test_logs_${BUILD_ID}', 'gs://cartodb-ci-tmp-logs/' ]
# Check tests return value and exit accordingly
- name: gcr.io/cloud-builders/docker
entrypoint: /bin/bash
args:
- -c
- |
if [ $(cat tests_exit_status) == "ok" ];then
echo "Tests succeeded, logs are at https://console.cloud.google.com/storage/browser/cartodb-ci-tmp-logs/test_logs_${BUILD_ID}"
exit 0
else
cat tests_exit_status
echo "Tests failed, logs are at https://console.cloud.google.com/storage/browser/cartodb-ci-tmp-logs/test_logs_${BUILD_ID}"
exit 1
fi
substitutions:
_BRANCH_TAG: ${BRANCH_NAME//\//-}

@ -1,32 +0,0 @@
#!/bin/bash
# Jesus Vazquez
# reporter.sh: Exit point for the tests execution. Reads the number of failed
# tests from specfailed.log: if there are none it exits 0 (success); if there
# are 1 or more it exits 1 to warn the developer.
filename="parallel_tests/specfailed.log"
# NOTE: cat|wc kept deliberately — a missing file then yields a count of 0
# (success path), matching the original behavior; quoting added for safety.
lines=$(cat "$filename" | wc -l)
if [ "$lines" -eq "0" ];
then
  echo "Tests were OK";
  # TODO
  # gsu "Backend tests were OK" "Backend" success
  exit 0; #OK
else
  # IFS= preserves leading/trailing whitespace; -r keeps backslashes literal.
  while IFS= read -r line;
  do
    # For each failed spec, extract its per-worker log file name (NNNN.log).
    # shellcheck disable=SC2034 — kept for the commented-out cat below.
    logfile=$(printf '%s\n' "$line" | grep -o '[0-9][0-9][0-9][0-9]\.log')
    # cat "$logfile";
    # Give feedback to github
    # spec=$(echo $line | sed 's/\s.*$//')
    # echo "GSU with spec $spec" TODO
    # gsu "$spec failed" "$spec" failure TODO
  done < "$filename"
  # TODO
  # gsu "Backend tests failed" "Backend" failure
  exit 1; # ERROR
fi

@ -28,12 +28,5 @@ script/ci/wrapper.sh $WORKERS || exit 1
# TESTS
time parallel -j $WORKERS -a parallel_tests/specfull.txt 'script/ci/executor.sh {} {%} {#}' || exit 1
# print logs of first try
echo "PRINT LOGS OF FAILED PARALLEL TESTS (FIRST TRY)"
time cat parallel_tests/*.log
# SECOND TRY
script/ci/secondTry.sh || exit 1
# REPORTER
script/ci/reporter.sh || exit 1
script/ci/secondTry.sh

@ -13,6 +13,7 @@ failedSpecs=$(cat parallel_tests/specfailed.log | wc -l)
if [ "$failedSpecs" -eq "0" ];
then
echo ok > tests_exit_status
exit 0;
else
specs=$(cat parallel_tests/specfailed.log | sed ':a;N;$!ba;s/\n/ /g')
@ -20,32 +21,27 @@ fi
TRASH_MESSAGES="Varnish purge error: \[Errno 111\] Connection refused\|_CDB_LinkGhostTables() called with username=\|terminating connection due to administrator command\|Error trying to connect to Invalidation Service to link Ghost Tables: No module named redis\|pg_restore:\|pg_dump:\|is already a member of\|Skipping Ghost Tables linking"
## uncomment the following if you want to debug failures in parallel execution
## Print parallel logs if some of them failed
#if [ -s parallel_tests/specfailed.log ]; then
# echo "*****************************************************************************************************"
# echo "Logs of tests that ran in parallel"
# echo "*****************************************************************************************************"
# cat parallel_tests/6*.log | grep -v "$TRASH_MESSAGES"
# echo "*****************************************************************************************************"
#fi
# save parallel test logs to be uploaded later
cat parallel_tests/*.log > parallel_tests_logs
if [ "$failedSpecs" -gt "10" ];
then
echo "ERROR: Too many failures for a second try. Giving up."
exit 1;
fi
echo "$failedSpecs failed tests > 10, see parallel_tests_logs and docker-compose logs" > tests_exit_status
else
echo "*****************************************************************************************************"
echo "Giving a second try to the next specs"
echo "*****************************************************************************************************"
cat parallel_tests/specfailed.log
echo "*****************************************************************************************************"
RAILS_ENV=test bundle exec rspec $specs > tmp_file 2>&1
RAILS_ENV=test bundle exec rspec $specs > serial_tests_logs 2>&1
RC=$?
cat tmp_file | grep -v "$TRASH_MESSAGES"
if [ $RC -eq 0 ]; then
truncate -s 0 parallel_tests/specfailed.log # Here is where the hack takes place. If im the second try we dont have errors then we're OK
echo ok > tests_exit_status
else
exit 0; # The reporter script will output the failed specs
echo "some tests failed after a second try, see serial_tests_logs" > tests_exit_status
fi
fi

Loading…
Cancel
Save