docker-cartojw/config/varnish.vcl
Nick Ballenger f0b5b7e79f Updated Varnish to cache SQL API / Windshaft
The previous version of this file was enough to cache requests for the
SQL API, but unfortunately no traffic was ever reaching Varnish to be
cached. Nginx was proxying directly to the SQL API port, and Varnish was
set to listen on 6081, so it wasn't able to intercept those requests. I
updated the Nginx proxy config to point at 6081 for requests to both
the SQL API and Windshaft, so now Varnish is receiving traffic.
However, for Varnish to know which backend to send each request to, I
had to add a custom HTTP header in the Nginx proxy pass. That header is
picked up in the `vcl_recv` Varnish subroutine and used to switch
between backends.
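
Roughly, the Nginx side now looks like this (the location paths are
illustrative and may not match the image exactly; the X-Carto-Service
header name and port 6081 are what this VCL expects):

    location /api/v2/sql {
        # Tag the request so Varnish can pick the right backend.
        proxy_set_header X-Carto-Service sqlapi;
        proxy_pass http://127.0.0.1:6081;
    }

    location /api/v1/map {
        proxy_set_header X-Carto-Service windshaft;
        proxy_pass http://127.0.0.1:6081;
    }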

Additionally, I've added logic controlling which hosts can issue an
HTTP PURGE command--in this case just localhost, since everything runs
in a single image. The purges will typically come from a Postgres
trigger.
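
A purge can also be issued by hand from inside the container to check
the ACL (example path only):

    # Allowed from localhost; any other client gets a 405.
    curl -X PURGE http://127.0.0.1:6081/api/v2/sql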

For an overview of the purge-related changes, see the Varnish docs here:

https://varnish-cache.org/docs/3.0/tutorial/purging.html#http-purges
2019-07-18 16:27:25 -07:00

# Hosts allowed to issue PURGE requests.
acl purge {
    "localhost";
    "127.0.0.1";
}

# SQL API and Windshaft both run on localhost inside the same image.
backend sqlapi {
    .host = "127.0.0.1";
    .port = "8080";
}

backend windshaft {
    .host = "127.0.0.1";
    .port = "8181";
}

sub vcl_recv {
    # Allow PURGE from localhost only; everyone else gets a 405.
    if (req.request == "PURGE") {
        if (!client.ip ~ purge) {
            error 405 "Not allowed.";
        }
        return (lookup);
    }

    # Route the request to a backend based on the X-Carto-Service
    # header set by the Nginx proxy pass.
    if (req.http.X-Carto-Service == "sqlapi") {
        set req.backend = sqlapi;
        remove req.http.X-Carto-Service;
    }

    if (req.http.X-Carto-Service == "windshaft") {
        set req.backend = windshaft;
        remove req.http.X-Carto-Service;
    }
}

# PURGE requests returned from the lookup above land in vcl_hit or
# vcl_miss; purge the object in either case.
sub vcl_hit {
    if (req.request == "PURGE") {
        purge;
        error 200 "Purged.";
    }
}

sub vcl_miss {
    if (req.request == "PURGE") {
        purge;
        error 200 "Purged.";
    }
}