Skip to content

Conversation

benbz
Copy link
Member

@benbz benbz commented Aug 1, 2025

Not used by clients of ESS Community

@benbz benbz requested a review from a team as a code owner August 1, 2025 13:13
Copy link

github-actions bot commented Aug 1, 2025

dyff of changes in rendered templates of CI manifests

Full contents of manifests and dyffs are available in https://github.com/element-hq/ess-helm/actions/runs/16677581781/artifacts/3667926709

example-default-enabled-components-checkov-values.yaml
@@ ConfigMap/ess-ci/release-name-haproxy - data @@
- 429.http: |
-   HTTP/1.0 429 Too Many Requests
-   Cache-Control: no-cache
-   Connection: close
-   Content-Type: application/json
-   access-control-allow-origin: *
-   access-control-allow-methods: GET, POST, PUT, DELETE, OPTIONS
-   access-control-allow-headers: Origin, X-Requested-With, Content-Type, Accept, Authorization
-   
-   {"errcode":"M_UNKNOWN","error":"Server is unavailable"}

@@ ConfigMap/ess-ci/release-name-haproxy - data.haproxy.cfg @@
  global
    maxconn 40000
    log stdout format raw local0 info
  
  
  [51 lines unchanged]
  
  
    compression algo gzip
    compression type text/plain text/html text/xml application/json text/css
  
-   # if we hit the maxconn on a server, and the queue timeout expires, we want
-   # to avoid returning 503, since that will cause cloudflare to mark us down.
-   #
-   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
-   #
-   #   503  when no server was available to handle the request, or in response to
-   #        monitoring requests which match the "monitor fail" condition
-   #
-   errorfile 503 /usr/local/etc/haproxy/429.http
- 
    # Use a consistent hashing scheme so that worker with balancing going down doesn't cause
    # the traffic for all others to be shuffled around.
    hash-type consistent sdbm
  
  
  [33 lines unchanged]
  
    bind *:8008
  
    # same as http log, with %Th (handshake time)
    log-format "%ci:%cp [%tr] %ft %b/%s %Th/%TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"
+ 
+   # if we hit the maxconn on a server, and the queue timeout expires, we want
+   # to avoid returning 503, since that will cause cloudflare to mark us down.
+   #
+   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
+   #
+   #   503  when no server was available to handle the request, or in response to
+   #        monitoring requests which match the "monitor fail" condition
+   #
+   errorfile 503 /synapse/429.http
  
    capture request header Host len 32
    capture request header Referer len 200
    capture request header User-Agent len 200
  
  [83 lines unchanged]
  
  
    acl well-known path /.well-known/matrix/server
    acl well-known path /.well-known/matrix/client
    acl well-known path /.well-known/matrix/support
-   acl well-known path /.well-known/element/element.json
  
  
    http-request redirect  code 301  location https://element.ess.localhost unless well-known
  
    use_backend well-known-static if well-known
+   default_backend well-known-no-match
  
  backend well-known-static
    mode http
  
  
  [nine lines unchanged]
  
  
    http-request return status 200 content-type "application/json" file "/well-known/server" if { path /.well-known/matrix/server }
    http-request return status 200 content-type "application/json" file "/well-known/client" if { path /.well-known/matrix/client }
    http-request return status 200 content-type "application/json" file "/well-known/support" if { path /.well-known/matrix/support }
-   http-request return status 200 content-type "application/json" file "/well-known/element.json" if { path /.well-known/element/element.json }
+ 
+ backend well-known-no-match
+   mode http
+ 
+   http-request deny status 404
  # a fake backend which fonxes every request with a 500. Useful for
  # handling overloads etc.
  backend return_500
    http-request deny deny_status 500



@@ ConfigMap/ess-ci/release-name-synapse-haproxy - data @@
+   429.http:



@@ ConfigMap/ess-ci/release-name-well-known-haproxy - data @@
- element.json: |
-   {}



@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- d531d54f4df3648ffcdefb429ad5dfea15c0ee71
+ e59d853adcb9a3a0defa00984240755f4389ae07

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/wellknowndelegation-haproxy-config-hash @@
- ff05a8a5e3a1f96ac7e04ef4efcfeaa9e035d0ce
+ 9438107f375d3bac2aea0c0de68417772ac1be7a

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- d531d54f4df3648ffcdefb429ad5dfea15c0ee71
+ e59d853adcb9a3a0defa00984240755f4389ae07

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/wellknowndelegation-haproxy-config-hash @@
- ff05a8a5e3a1f96ac7e04ef4efcfeaa9e035d0ce
+ 9438107f375d3bac2aea0c0de68417772ac1be7a

example-default-enabled-components-values.yaml
@@ ConfigMap/ess-ci/release-name-haproxy - data @@
- 429.http: |
-   HTTP/1.0 429 Too Many Requests
-   Cache-Control: no-cache
-   Connection: close
-   Content-Type: application/json
-   access-control-allow-origin: *
-   access-control-allow-methods: GET, POST, PUT, DELETE, OPTIONS
-   access-control-allow-headers: Origin, X-Requested-With, Content-Type, Accept, Authorization
-   
-   {"errcode":"M_UNKNOWN","error":"Server is unavailable"}

@@ ConfigMap/ess-ci/release-name-haproxy - data.haproxy.cfg @@
  global
    maxconn 40000
    log stdout format raw local0 info
  
  
  [51 lines unchanged]
  
  
    compression algo gzip
    compression type text/plain text/html text/xml application/json text/css
  
-   # if we hit the maxconn on a server, and the queue timeout expires, we want
-   # to avoid returning 503, since that will cause cloudflare to mark us down.
-   #
-   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
-   #
-   #   503  when no server was available to handle the request, or in response to
-   #        monitoring requests which match the "monitor fail" condition
-   #
-   errorfile 503 /usr/local/etc/haproxy/429.http
- 
    # Use a consistent hashing scheme so that worker with balancing going down doesn't cause
    # the traffic for all others to be shuffled around.
    hash-type consistent sdbm
  
  
  [33 lines unchanged]
  
    bind *:8008
  
    # same as http log, with %Th (handshake time)
    log-format "%ci:%cp [%tr] %ft %b/%s %Th/%TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"
+ 
+   # if we hit the maxconn on a server, and the queue timeout expires, we want
+   # to avoid returning 503, since that will cause cloudflare to mark us down.
+   #
+   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
+   #
+   #   503  when no server was available to handle the request, or in response to
+   #        monitoring requests which match the "monitor fail" condition
+   #
+   errorfile 503 /synapse/429.http
  
    capture request header Host len 32
    capture request header Referer len 200
    capture request header User-Agent len 200
  
  [83 lines unchanged]
  
  
    acl well-known path /.well-known/matrix/server
    acl well-known path /.well-known/matrix/client
    acl well-known path /.well-known/matrix/support
-   acl well-known path /.well-known/element/element.json
  
  
    http-request redirect  code 301  location https://element.ess.localhost unless well-known
  
    use_backend well-known-static if well-known
+   default_backend well-known-no-match
  
  backend well-known-static
    mode http
  
  
  [nine lines unchanged]
  
  
    http-request return status 200 content-type "application/json" file "/well-known/server" if { path /.well-known/matrix/server }
    http-request return status 200 content-type "application/json" file "/well-known/client" if { path /.well-known/matrix/client }
    http-request return status 200 content-type "application/json" file "/well-known/support" if { path /.well-known/matrix/support }
-   http-request return status 200 content-type "application/json" file "/well-known/element.json" if { path /.well-known/element/element.json }
+ 
+ backend well-known-no-match
+   mode http
+ 
+   http-request deny status 404
  # a fake backend which fonxes every request with a 500. Useful for
  # handling overloads etc.
  backend return_500
    http-request deny deny_status 500



@@ ConfigMap/ess-ci/release-name-synapse-haproxy - data @@
+   429.http:



@@ ConfigMap/ess-ci/release-name-well-known-haproxy - data @@
- element.json: |
-   {}



@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- d531d54f4df3648ffcdefb429ad5dfea15c0ee71
+ e59d853adcb9a3a0defa00984240755f4389ae07

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/wellknowndelegation-haproxy-config-hash @@
- ff05a8a5e3a1f96ac7e04ef4efcfeaa9e035d0ce
+ 9438107f375d3bac2aea0c0de68417772ac1be7a

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- d531d54f4df3648ffcdefb429ad5dfea15c0ee71
+ e59d853adcb9a3a0defa00984240755f4389ae07

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/wellknowndelegation-haproxy-config-hash @@
- ff05a8a5e3a1f96ac7e04ef4efcfeaa9e035d0ce
+ 9438107f375d3bac2aea0c0de68417772ac1be7a

matrix-authentication-service-synapse-syn2mas-dry-run-secrets-externally-values.yaml
@@ ConfigMap/ess-ci/release-name-haproxy - data @@
- 429.http: |
-   HTTP/1.0 429 Too Many Requests
-   Cache-Control: no-cache
-   Connection: close
-   Content-Type: application/json
-   access-control-allow-origin: *
-   access-control-allow-methods: GET, POST, PUT, DELETE, OPTIONS
-   access-control-allow-headers: Origin, X-Requested-With, Content-Type, Accept, Authorization
-   
-   {"errcode":"M_UNKNOWN","error":"Server is unavailable"}

@@ ConfigMap/ess-ci/release-name-haproxy - data.haproxy.cfg @@
  global
    maxconn 40000
    log stdout format raw local0 info
  
  
  [51 lines unchanged]
  
  
    compression algo gzip
    compression type text/plain text/html text/xml application/json text/css
  
-   # if we hit the maxconn on a server, and the queue timeout expires, we want
-   # to avoid returning 503, since that will cause cloudflare to mark us down.
-   #
-   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
-   #
-   #   503  when no server was available to handle the request, or in response to
-   #        monitoring requests which match the "monitor fail" condition
-   #
-   errorfile 503 /usr/local/etc/haproxy/429.http
- 
    # Use a consistent hashing scheme so that worker with balancing going down doesn't cause
    # the traffic for all others to be shuffled around.
    hash-type consistent sdbm
  
  
  [33 lines unchanged]
  
    bind *:8008
  
    # same as http log, with %Th (handshake time)
    log-format "%ci:%cp [%tr] %ft %b/%s %Th/%TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"
+ 
+   # if we hit the maxconn on a server, and the queue timeout expires, we want
+   # to avoid returning 503, since that will cause cloudflare to mark us down.
+   #
+   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
+   #
+   #   503  when no server was available to handle the request, or in response to
+   #        monitoring requests which match the "monitor fail" condition
+   #
+   errorfile 503 /synapse/429.http
  
    capture request header Host len 32
    capture request header Referer len 200
    capture request header User-Agent len 200
  
  [68 lines unchanged]
  
  # a fake backend which fonxes every request with a 500. Useful for
  # handling overloads etc.
  backend return_500
    http-request deny deny_status 500



@@ ConfigMap/ess-ci/release-name-synapse-haproxy - data @@
+   429.http:



@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- fd20d9a19443e197695ba05016322854a7dfce6a
+ 839a1de441a7f5f37a2253fe59c4a10ee6cad639

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- fd20d9a19443e197695ba05016322854a7dfce6a
+ 839a1de441a7f5f37a2253fe59c4a10ee6cad639

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

matrix-authentication-service-synapse-syn2mas-dry-run-secrets-in-helm-values.yaml
@@ ConfigMap/ess-ci/release-name-haproxy - data @@
- 429.http: |
-   HTTP/1.0 429 Too Many Requests
-   Cache-Control: no-cache
-   Connection: close
-   Content-Type: application/json
-   access-control-allow-origin: *
-   access-control-allow-methods: GET, POST, PUT, DELETE, OPTIONS
-   access-control-allow-headers: Origin, X-Requested-With, Content-Type, Accept, Authorization
-   
-   {"errcode":"M_UNKNOWN","error":"Server is unavailable"}

@@ ConfigMap/ess-ci/release-name-haproxy - data.haproxy.cfg @@
  global
    maxconn 40000
    log stdout format raw local0 info
  
  
  [51 lines unchanged]
  
  
    compression algo gzip
    compression type text/plain text/html text/xml application/json text/css
  
-   # if we hit the maxconn on a server, and the queue timeout expires, we want
-   # to avoid returning 503, since that will cause cloudflare to mark us down.
-   #
-   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
-   #
-   #   503  when no server was available to handle the request, or in response to
-   #        monitoring requests which match the "monitor fail" condition
-   #
-   errorfile 503 /usr/local/etc/haproxy/429.http
- 
    # Use a consistent hashing scheme so that worker with balancing going down doesn't cause
    # the traffic for all others to be shuffled around.
    hash-type consistent sdbm
  
  
  [33 lines unchanged]
  
    bind *:8008
  
    # same as http log, with %Th (handshake time)
    log-format "%ci:%cp [%tr] %ft %b/%s %Th/%TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"
+ 
+   # if we hit the maxconn on a server, and the queue timeout expires, we want
+   # to avoid returning 503, since that will cause cloudflare to mark us down.
+   #
+   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
+   #
+   #   503  when no server was available to handle the request, or in response to
+   #        monitoring requests which match the "monitor fail" condition
+   #
+   errorfile 503 /synapse/429.http
  
    capture request header Host len 32
    capture request header Referer len 200
    capture request header User-Agent len 200
  
  [68 lines unchanged]
  
  # a fake backend which fonxes every request with a 500. Useful for
  # handling overloads etc.
  backend return_500
    http-request deny deny_status 500



@@ ConfigMap/ess-ci/release-name-synapse-haproxy - data @@
+   429.http:



@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- fd20d9a19443e197695ba05016322854a7dfce6a
+ 839a1de441a7f5f37a2253fe59c4a10ee6cad639

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- fd20d9a19443e197695ba05016322854a7dfce6a
+ 839a1de441a7f5f37a2253fe59c4a10ee6cad639

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

matrix-authentication-service-synapse-syn2mas-migrate-secrets-externally-values.yaml
@@ ConfigMap/ess-ci/release-name-haproxy - data @@
- 429.http: |
-   HTTP/1.0 429 Too Many Requests
-   Cache-Control: no-cache
-   Connection: close
-   Content-Type: application/json
-   access-control-allow-origin: *
-   access-control-allow-methods: GET, POST, PUT, DELETE, OPTIONS
-   access-control-allow-headers: Origin, X-Requested-With, Content-Type, Accept, Authorization
-   
-   {"errcode":"M_UNKNOWN","error":"Server is unavailable"}

@@ ConfigMap/ess-ci/release-name-haproxy - data.haproxy.cfg @@
  global
    maxconn 40000
    log stdout format raw local0 info
  
  
  [51 lines unchanged]
  
  
    compression algo gzip
    compression type text/plain text/html text/xml application/json text/css
  
-   # if we hit the maxconn on a server, and the queue timeout expires, we want
-   # to avoid returning 503, since that will cause cloudflare to mark us down.
-   #
-   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
-   #
-   #   503  when no server was available to handle the request, or in response to
-   #        monitoring requests which match the "monitor fail" condition
-   #
-   errorfile 503 /usr/local/etc/haproxy/429.http
- 
    # Use a consistent hashing scheme so that worker with balancing going down doesn't cause
    # the traffic for all others to be shuffled around.
    hash-type consistent sdbm
  
  
  [33 lines unchanged]
  
    bind *:8008
  
    # same as http log, with %Th (handshake time)
    log-format "%ci:%cp [%tr] %ft %b/%s %Th/%TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"
+ 
+   # if we hit the maxconn on a server, and the queue timeout expires, we want
+   # to avoid returning 503, since that will cause cloudflare to mark us down.
+   #
+   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
+   #
+   #   503  when no server was available to handle the request, or in response to
+   #        monitoring requests which match the "monitor fail" condition
+   #
+   errorfile 503 /synapse/429.http
  
    capture request header Host len 32
    capture request header Referer len 200
    capture request header User-Agent len 200
  
  [74 lines unchanged]
  
  # a fake backend which fonxes every request with a 500. Useful for
  # handling overloads etc.
  backend return_500
    http-request deny deny_status 500



@@ ConfigMap/ess-ci/release-name-synapse-haproxy - data @@
+   429.http:



@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- f9fa3f91b4f0a4e8d8f246c2bd636b69f35546fe
+ 82f39c3668fbaed8c3236ec05173f01c8736dbd3

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- f9fa3f91b4f0a4e8d8f246c2bd636b69f35546fe
+ 82f39c3668fbaed8c3236ec05173f01c8736dbd3

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

matrix-authentication-service-synapse-syn2mas-migrate-secrets-in-helm-values.yaml
@@ ConfigMap/ess-ci/release-name-haproxy - data @@
- 429.http: |
-   HTTP/1.0 429 Too Many Requests
-   Cache-Control: no-cache
-   Connection: close
-   Content-Type: application/json
-   access-control-allow-origin: *
-   access-control-allow-methods: GET, POST, PUT, DELETE, OPTIONS
-   access-control-allow-headers: Origin, X-Requested-With, Content-Type, Accept, Authorization
-   
-   {"errcode":"M_UNKNOWN","error":"Server is unavailable"}

@@ ConfigMap/ess-ci/release-name-haproxy - data.haproxy.cfg @@
  global
    maxconn 40000
    log stdout format raw local0 info
  
  
  [51 lines unchanged]
  
  
    compression algo gzip
    compression type text/plain text/html text/xml application/json text/css
  
-   # if we hit the maxconn on a server, and the queue timeout expires, we want
-   # to avoid returning 503, since that will cause cloudflare to mark us down.
-   #
-   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
-   #
-   #   503  when no server was available to handle the request, or in response to
-   #        monitoring requests which match the "monitor fail" condition
-   #
-   errorfile 503 /usr/local/etc/haproxy/429.http
- 
    # Use a consistent hashing scheme so that worker with balancing going down doesn't cause
    # the traffic for all others to be shuffled around.
    hash-type consistent sdbm
  
  
  [33 lines unchanged]
  
    bind *:8008
  
    # same as http log, with %Th (handshake time)
    log-format "%ci:%cp [%tr] %ft %b/%s %Th/%TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"
+ 
+   # if we hit the maxconn on a server, and the queue timeout expires, we want
+   # to avoid returning 503, since that will cause cloudflare to mark us down.
+   #
+   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
+   #
+   #   503  when no server was available to handle the request, or in response to
+   #        monitoring requests which match the "monitor fail" condition
+   #
+   errorfile 503 /synapse/429.http
  
    capture request header Host len 32
    capture request header Referer len 200
    capture request header User-Agent len 200
  
  [74 lines unchanged]
  
  # a fake backend which fonxes every request with a 500. Useful for
  # handling overloads etc.
  backend return_500
    http-request deny deny_status 500



@@ ConfigMap/ess-ci/release-name-synapse-haproxy - data @@
+   429.http:



@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- f9fa3f91b4f0a4e8d8f246c2bd636b69f35546fe
+ 82f39c3668fbaed8c3236ec05173f01c8736dbd3

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- f9fa3f91b4f0a4e8d8f246c2bd636b69f35546fe
+ 82f39c3668fbaed8c3236ec05173f01c8736dbd3

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

pytest-matrix-authentication-service-syn2mas-values.yaml
@@ ConfigMap/ess-ci/release-name-haproxy - data @@
- 429.http: |
-   HTTP/1.0 429 Too Many Requests
-   Cache-Control: no-cache
-   Connection: close
-   Content-Type: application/json
-   access-control-allow-origin: *
-   access-control-allow-methods: GET, POST, PUT, DELETE, OPTIONS
-   access-control-allow-headers: Origin, X-Requested-With, Content-Type, Accept, Authorization
-   
-   {"errcode":"M_UNKNOWN","error":"Server is unavailable"}

@@ ConfigMap/ess-ci/release-name-haproxy - data.haproxy.cfg @@
  global
    maxconn 40000
    log stdout format raw local0 info
  
  
  [51 lines unchanged]
  
  
    compression algo gzip
    compression type text/plain text/html text/xml application/json text/css
  
-   # if we hit the maxconn on a server, and the queue timeout expires, we want
-   # to avoid returning 503, since that will cause cloudflare to mark us down.
-   #
-   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
-   #
-   #   503  when no server was available to handle the request, or in response to
-   #        monitoring requests which match the "monitor fail" condition
-   #
-   errorfile 503 /usr/local/etc/haproxy/429.http
- 
    # Use a consistent hashing scheme so that worker with balancing going down doesn't cause
    # the traffic for all others to be shuffled around.
    hash-type consistent sdbm
  
  
  [33 lines unchanged]
  
    bind *:8008
  
    # same as http log, with %Th (handshake time)
    log-format "%ci:%cp [%tr] %ft %b/%s %Th/%TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"
+ 
+   # if we hit the maxconn on a server, and the queue timeout expires, we want
+   # to avoid returning 503, since that will cause cloudflare to mark us down.
+   #
+   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
+   #
+   #   503  when no server was available to handle the request, or in response to
+   #        monitoring requests which match the "monitor fail" condition
+   #
+   errorfile 503 /synapse/429.http
  
    capture request header Host len 32
    capture request header Referer len 200
    capture request header User-Agent len 200
  
  [68 lines unchanged]
  
  # a fake backend which fonxes every request with a 500. Useful for
  # handling overloads etc.
  backend return_500
    http-request deny deny_status 500



@@ ConfigMap/ess-ci/release-name-synapse-haproxy - data @@
+   429.http:



@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- fd20d9a19443e197695ba05016322854a7dfce6a
+ 839a1de441a7f5f37a2253fe59c4a10ee6cad639

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- fd20d9a19443e197695ba05016322854a7dfce6a
+ 839a1de441a7f5f37a2253fe59c4a10ee6cad639

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

pytest-matrix-authentication-service-values.yaml
@@ ConfigMap/ess-ci/release-name-haproxy - data @@
- 429.http: |
-   HTTP/1.0 429 Too Many Requests
-   Cache-Control: no-cache
-   Connection: close
-   Content-Type: application/json
-   access-control-allow-origin: *
-   access-control-allow-methods: GET, POST, PUT, DELETE, OPTIONS
-   access-control-allow-headers: Origin, X-Requested-With, Content-Type, Accept, Authorization
-   
-   {"errcode":"M_UNKNOWN","error":"Server is unavailable"}

@@ ConfigMap/ess-ci/release-name-haproxy - data.haproxy.cfg @@
  global
    maxconn 40000
    log stdout format raw local0 info
  
  
  [51 lines unchanged]
  
  
    compression algo gzip
    compression type text/plain text/html text/xml application/json text/css
  
-   # if we hit the maxconn on a server, and the queue timeout expires, we want
-   # to avoid returning 503, since that will cause cloudflare to mark us down.
-   #
-   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
-   #
-   #   503  when no server was available to handle the request, or in response to
-   #        monitoring requests which match the "monitor fail" condition
-   #
-   errorfile 503 /usr/local/etc/haproxy/429.http
- 
    # Use a consistent hashing scheme so that worker with balancing going down doesn't cause
    # the traffic for all others to be shuffled around.
    hash-type consistent sdbm
  
  
  [33 lines unchanged]
  
    bind *:8008
  
    # same as http log, with %Th (handshake time)
    log-format "%ci:%cp [%tr] %ft %b/%s %Th/%TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"
+ 
+   # if we hit the maxconn on a server, and the queue timeout expires, we want
+   # to avoid returning 503, since that will cause cloudflare to mark us down.
+   #
+   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
+   #
+   #   503  when no server was available to handle the request, or in response to
+   #        monitoring requests which match the "monitor fail" condition
+   #
+   errorfile 503 /synapse/429.http
  
    capture request header Host len 32
    capture request header Referer len 200
    capture request header User-Agent len 200
  
  [74 lines unchanged]
  
  # a fake backend which fonxes every request with a 500. Useful for
  # handling overloads etc.
  backend return_500
    http-request deny deny_status 500



@@ ConfigMap/ess-ci/release-name-synapse-haproxy - data @@
+   429.http:



@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- f9fa3f91b4f0a4e8d8f246c2bd636b69f35546fe
+ 82f39c3668fbaed8c3236ec05173f01c8736dbd3

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- f9fa3f91b4f0a4e8d8f246c2bd636b69f35546fe
+ 82f39c3668fbaed8c3236ec05173f01c8736dbd3

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

pytest-matrix-rtc-synapse-wellknown-values.yaml
@@ ConfigMap/ess-ci/release-name-haproxy - data @@
- 429.http: |
-   HTTP/1.0 429 Too Many Requests
-   Cache-Control: no-cache
-   Connection: close
-   Content-Type: application/json
-   access-control-allow-origin: *
-   access-control-allow-methods: GET, POST, PUT, DELETE, OPTIONS
-   access-control-allow-headers: Origin, X-Requested-With, Content-Type, Accept, Authorization
-   
-   {"errcode":"M_UNKNOWN","error":"Server is unavailable"}

@@ ConfigMap/ess-ci/release-name-haproxy - data.haproxy.cfg @@
  global
    maxconn 40000
    log stdout format raw local0 info
  
  
  [51 lines unchanged]
  
  
    compression algo gzip
    compression type text/plain text/html text/xml application/json text/css
  
-   # if we hit the maxconn on a server, and the queue timeout expires, we want
-   # to avoid returning 503, since that will cause cloudflare to mark us down.
-   #
-   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
-   #
-   #   503  when no server was available to handle the request, or in response to
-   #        monitoring requests which match the "monitor fail" condition
-   #
-   errorfile 503 /usr/local/etc/haproxy/429.http
- 
    # Use a consistent hashing scheme so that worker with balancing going down doesn't cause
    # the traffic for all others to be shuffled around.
    hash-type consistent sdbm
  
  
  [33 lines unchanged]
  
    bind *:8008
  
    # same as http log, with %Th (handshake time)
    log-format "%ci:%cp [%tr] %ft %b/%s %Th/%TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"
+ 
+   # if we hit the maxconn on a server, and the queue timeout expires, we want
+   # to avoid returning 503, since that will cause cloudflare to mark us down.
+   #
+   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
+   #
+   #   503  when no server was available to handle the request, or in response to
+   #        monitoring requests which match the "monitor fail" condition
+   #
+   errorfile 503 /synapse/429.http
  
    capture request header Host len 32
    capture request header Referer len 200
    capture request header User-Agent len 200
  
  [77 lines unchanged]
  
  
    acl well-known path /.well-known/matrix/server
    acl well-known path /.well-known/matrix/client
    acl well-known path /.well-known/matrix/support
-   acl well-known path /.well-known/element/element.json
  
  
    http-request redirect  code 301  location https://redirect.localhost/path unless well-known
  
    use_backend well-known-static if well-known
+   default_backend well-known-no-match
  
  backend well-known-static
    mode http
  
  
  [nine lines unchanged]
  
  
    http-request return status 200 content-type "application/json" file "/well-known/server" if { path /.well-known/matrix/server }
    http-request return status 200 content-type "application/json" file "/well-known/client" if { path /.well-known/matrix/client }
    http-request return status 200 content-type "application/json" file "/well-known/support" if { path /.well-known/matrix/support }
-   http-request return status 200 content-type "application/json" file "/well-known/element.json" if { path /.well-known/element/element.json }
+ 
+ backend well-known-no-match
+   mode http
+ 
+   http-request deny status 404
  # a fake backend which fonxes every request with a 500. Useful for
  # handling overloads etc.
  backend return_500
    http-request deny deny_status 500



@@ ConfigMap/ess-ci/release-name-synapse-haproxy - data @@
+   429.http:



@@ ConfigMap/ess-ci/release-name-well-known-haproxy - data @@
- element.json: |
-   {}



@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- 6ec723d6e0f7b3bd4d7ec3ae10c5082c77ee1c40
+ 404f14cd7c24ac666453589ea27c9f5447bb7d16

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/wellknowndelegation-haproxy-config-hash @@
- 3b083a0a09d10f97a4ab78b649a8a508c2880c91
+ 594d25bb902836f53877e4962751f0e604a85051

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- 6ec723d6e0f7b3bd4d7ec3ae10c5082c77ee1c40
+ 404f14cd7c24ac666453589ea27c9f5447bb7d16

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/wellknowndelegation-haproxy-config-hash @@
- 3b083a0a09d10f97a4ab78b649a8a508c2880c91
+ 594d25bb902836f53877e4962751f0e604a85051

pytest-synapse-values.yaml
@@ ConfigMap/ess-ci/release-name-haproxy - data @@
- 429.http: |
-   HTTP/1.0 429 Too Many Requests
-   Cache-Control: no-cache
-   Connection: close
-   Content-Type: application/json
-   access-control-allow-origin: *
-   access-control-allow-methods: GET, POST, PUT, DELETE, OPTIONS
-   access-control-allow-headers: Origin, X-Requested-With, Content-Type, Accept, Authorization
-   
-   {"errcode":"M_UNKNOWN","error":"Server is unavailable"}

@@ ConfigMap/ess-ci/release-name-haproxy - data.haproxy.cfg @@
  global
    maxconn 40000
    log stdout format raw local0 info
  
  
  [51 lines unchanged]
  
  
    compression algo gzip
    compression type text/plain text/html text/xml application/json text/css
  
-   # if we hit the maxconn on a server, and the queue timeout expires, we want
-   # to avoid returning 503, since that will cause cloudflare to mark us down.
-   #
-   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
-   #
-   #   503  when no server was available to handle the request, or in response to
-   #        monitoring requests which match the "monitor fail" condition
-   #
-   errorfile 503 /usr/local/etc/haproxy/429.http
- 
    # Use a consistent hashing scheme so that worker with balancing going down doesn't cause
    # the traffic for all others to be shuffled around.
    hash-type consistent sdbm
  
  
  [34 lines unchanged]
  
    bind *:8008
  
    # same as http log, with %Th (handshake time)
    log-format "%ci:%cp [%tr] %ft %b/%s %Th/%TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"
+ 
+   # if we hit the maxconn on a server, and the queue timeout expires, we want
+   # to avoid returning 503, since that will cause cloudflare to mark us down.
+   #
+   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
+   #
+   #   503  when no server was available to handle the request, or in response to
+   #        monitoring requests which match the "monitor fail" condition
+   #
+   errorfile 503 /synapse/429.http
  
    capture request header Host len 32
    capture request header Referer len 200
    capture request header User-Agent len 200
  
  [177 lines unchanged]
  
  # a fake backend which fonxes every request with a 500. Useful for
  # handling overloads etc.
  backend return_500
    http-request deny deny_status 500



@@ ConfigMap/ess-ci/release-name-synapse-haproxy - data @@
+   429.http:



@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- 4b174803f851fbaa70a30443029c1c9b00676912
+ 024f8c0ca7e7e357bb0db18916c5aa648ddd0685

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- f4f8765d4c1356f8dd0851a574a297245a6d53bb
+ 4194d55afe100b595ebbfa8a46924af6042de8c7

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- 4b174803f851fbaa70a30443029c1c9b00676912
+ 024f8c0ca7e7e357bb0db18916c5aa648ddd0685

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- f4f8765d4c1356f8dd0851a574a297245a6d53bb
+ 4194d55afe100b595ebbfa8a46924af6042de8c7

pytest-well-known-values.yaml
@@ ConfigMap/ess-ci/release-name-haproxy - data @@
- 429.http: |
-   HTTP/1.0 429 Too Many Requests
-   Cache-Control: no-cache
-   Connection: close
-   Content-Type: application/json
-   access-control-allow-origin: *
-   access-control-allow-methods: GET, POST, PUT, DELETE, OPTIONS
-   access-control-allow-headers: Origin, X-Requested-With, Content-Type, Accept, Authorization
-   
-   {"errcode":"M_UNKNOWN","error":"Server is unavailable"}

@@ ConfigMap/ess-ci/release-name-haproxy - data.haproxy.cfg @@
  global
    maxconn 40000
    log stdout format raw local0 info
  
  
  [51 lines unchanged]
  
  
    compression algo gzip
    compression type text/plain text/html text/xml application/json text/css
  
-   # if we hit the maxconn on a server, and the queue timeout expires, we want
-   # to avoid returning 503, since that will cause cloudflare to mark us down.
-   #
-   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
-   #
-   #   503  when no server was available to handle the request, or in response to
-   #        monitoring requests which match the "monitor fail" condition
-   #
-   errorfile 503 /usr/local/etc/haproxy/429.http
- 
    # Use a consistent hashing scheme so that worker with balancing going down doesn't cause
    # the traffic for all others to be shuffled around.
    hash-type consistent sdbm
  
  
  [35 lines unchanged]
  
  
    acl well-known path /.well-known/matrix/server
    acl well-known path /.well-known/matrix/client
    acl well-known path /.well-known/matrix/support
-   acl well-known path /.well-known/element/element.json
  
  
    http-request redirect  code 301  location https://redirect.localhost/path unless well-known
  
    use_backend well-known-static if well-known
+   default_backend well-known-no-match
  
  backend well-known-static
    mode http
  
  
  [nine lines unchanged]
  
  
    http-request return status 200 content-type "application/json" file "/well-known/server" if { path /.well-known/matrix/server }
    http-request return status 200 content-type "application/json" file "/well-known/client" if { path /.well-known/matrix/client }
    http-request return status 200 content-type "application/json" file "/well-known/support" if { path /.well-known/matrix/support }
-   http-request return status 200 content-type "application/json" file "/well-known/element.json" if { path /.well-known/element/element.json }
+ 
+ backend well-known-no-match
+   mode http
+ 
+   http-request deny status 404
  # a fake backend which fonxes every request with a 500. Useful for
  # handling overloads etc.
  backend return_500
    http-request deny deny_status 500



@@ ConfigMap/ess-ci/release-name-well-known-haproxy - data @@
- element.json: |
-   {}



@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- 2941eefc1b9ea0e2e99dd47a51f07f61110b1a27
+ 99a038a128fbbc977dae12c2b3914c95cb522fd8

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/wellknowndelegation-haproxy-config-hash @@
- 94484b8010df9e679943c8432eecec2c712f72e0
+ adb308ac3718dc4bcd8fbc73cd3adbf21372aaaf

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- 2941eefc1b9ea0e2e99dd47a51f07f61110b1a27
+ 99a038a128fbbc977dae12c2b3914c95cb522fd8

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/wellknowndelegation-haproxy-config-hash @@
- 94484b8010df9e679943c8432eecec2c712f72e0
+ adb308ac3718dc4bcd8fbc73cd3adbf21372aaaf

quick-setup-certificates-pg-external-values.yaml
@@ ConfigMap/ess-ci/release-name-haproxy - data @@
- 429.http: |
-   HTTP/1.0 429 Too Many Requests
-   Cache-Control: no-cache
-   Connection: close
-   Content-Type: application/json
-   access-control-allow-origin: *
-   access-control-allow-methods: GET, POST, PUT, DELETE, OPTIONS
-   access-control-allow-headers: Origin, X-Requested-With, Content-Type, Accept, Authorization
-   
-   {"errcode":"M_UNKNOWN","error":"Server is unavailable"}

@@ ConfigMap/ess-ci/release-name-haproxy - data.haproxy.cfg @@
  global
    maxconn 40000
    log stdout format raw local0 info
  
  
  [51 lines unchanged]
  
  
    compression algo gzip
    compression type text/plain text/html text/xml application/json text/css
  
-   # if we hit the maxconn on a server, and the queue timeout expires, we want
-   # to avoid returning 503, since that will cause cloudflare to mark us down.
-   #
-   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
-   #
-   #   503  when no server was available to handle the request, or in response to
-   #        monitoring requests which match the "monitor fail" condition
-   #
-   errorfile 503 /usr/local/etc/haproxy/429.http
- 
    # Use a consistent hashing scheme so that worker with balancing going down doesn't cause
    # the traffic for all others to be shuffled around.
    hash-type consistent sdbm
  
  
  [33 lines unchanged]
  
    bind *:8008
  
    # same as http log, with %Th (handshake time)
    log-format "%ci:%cp [%tr] %ft %b/%s %Th/%TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"
+ 
+   # if we hit the maxconn on a server, and the queue timeout expires, we want
+   # to avoid returning 503, since that will cause cloudflare to mark us down.
+   #
+   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
+   #
+   #   503  when no server was available to handle the request, or in response to
+   #        monitoring requests which match the "monitor fail" condition
+   #
+   errorfile 503 /synapse/429.http
  
    capture request header Host len 32
    capture request header Referer len 200
    capture request header User-Agent len 200
  
  [83 lines unchanged]
  
  
    acl well-known path /.well-known/matrix/server
    acl well-known path /.well-known/matrix/client
    acl well-known path /.well-known/matrix/support
-   acl well-known path /.well-known/element/element.json
  
  
    http-request redirect  code 301  location https://chat.your.tld unless well-known
  
    use_backend well-known-static if well-known
+   default_backend well-known-no-match
  
  backend well-known-static
    mode http
  
  
  [nine lines unchanged]
  
  
    http-request return status 200 content-type "application/json" file "/well-known/server" if { path /.well-known/matrix/server }
    http-request return status 200 content-type "application/json" file "/well-known/client" if { path /.well-known/matrix/client }
    http-request return status 200 content-type "application/json" file "/well-known/support" if { path /.well-known/matrix/support }
-   http-request return status 200 content-type "application/json" file "/well-known/element.json" if { path /.well-known/element/element.json }
+ 
+ backend well-known-no-match
+   mode http
+ 
+   http-request deny status 404
  # a fake backend which fonxes every request with a 500. Useful for
  # handling overloads etc.
  backend return_500
    http-request deny deny_status 500



@@ ConfigMap/ess-ci/release-name-synapse-haproxy - data @@
+   429.http:



@@ ConfigMap/ess-ci/release-name-well-known-haproxy - data @@
- element.json: |
-   {}



@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- 6551f87f002bc802ad444736b48590d1f235f990
+ b4a57a8cad1a9dd1f7c9973e5cabdbc30408e250

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/wellknowndelegation-haproxy-config-hash @@
- 1fc807cf3a57ceb74c78ebf806d02ae05f116780
+ 1524029fb714b9af8df3ead129d5904c39e0ddc7

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- 6551f87f002bc802ad444736b48590d1f235f990
+ b4a57a8cad1a9dd1f7c9973e5cabdbc30408e250

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/wellknowndelegation-haproxy-config-hash @@
- 1fc807cf3a57ceb74c78ebf806d02ae05f116780
+ 1524029fb714b9af8df3ead129d5904c39e0ddc7

quick-setup-certificates-pg-with-helm-values.yaml
@@ ConfigMap/ess-ci/release-name-haproxy - data @@
- 429.http: |
-   HTTP/1.0 429 Too Many Requests
-   Cache-Control: no-cache
-   Connection: close
-   Content-Type: application/json
-   access-control-allow-origin: *
-   access-control-allow-methods: GET, POST, PUT, DELETE, OPTIONS
-   access-control-allow-headers: Origin, X-Requested-With, Content-Type, Accept, Authorization
-   
-   {"errcode":"M_UNKNOWN","error":"Server is unavailable"}

@@ ConfigMap/ess-ci/release-name-haproxy - data.haproxy.cfg @@
  global
    maxconn 40000
    log stdout format raw local0 info
  
  
  [51 lines unchanged]
  
  
    compression algo gzip
    compression type text/plain text/html text/xml application/json text/css
  
-   # if we hit the maxconn on a server, and the queue timeout expires, we want
-   # to avoid returning 503, since that will cause cloudflare to mark us down.
-   #
-   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
-   #
-   #   503  when no server was available to handle the request, or in response to
-   #        monitoring requests which match the "monitor fail" condition
-   #
-   errorfile 503 /usr/local/etc/haproxy/429.http
- 
    # Use a consistent hashing scheme so that worker with balancing going down doesn't cause
    # the traffic for all others to be shuffled around.
    hash-type consistent sdbm
  
  
  [33 lines unchanged]
  
    bind *:8008
  
    # same as http log, with %Th (handshake time)
    log-format "%ci:%cp [%tr] %ft %b/%s %Th/%TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"
+ 
+   # if we hit the maxconn on a server, and the queue timeout expires, we want
+   # to avoid returning 503, since that will cause cloudflare to mark us down.
+   #
+   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
+   #
+   #   503  when no server was available to handle the request, or in response to
+   #        monitoring requests which match the "monitor fail" condition
+   #
+   errorfile 503 /synapse/429.http
  
    capture request header Host len 32
    capture request header Referer len 200
    capture request header User-Agent len 200
  
  [83 lines unchanged]
  
  
    acl well-known path /.well-known/matrix/server
    acl well-known path /.well-known/matrix/client
    acl well-known path /.well-known/matrix/support
-   acl well-known path /.well-known/element/element.json
  
  
    http-request redirect  code 301  location https://chat.your.tld unless well-known
  
    use_backend well-known-static if well-known
+   default_backend well-known-no-match
  
  backend well-known-static
    mode http
  
  
  [nine lines unchanged]
  
  
    http-request return status 200 content-type "application/json" file "/well-known/server" if { path /.well-known/matrix/server }
    http-request return status 200 content-type "application/json" file "/well-known/client" if { path /.well-known/matrix/client }
    http-request return status 200 content-type "application/json" file "/well-known/support" if { path /.well-known/matrix/support }
-   http-request return status 200 content-type "application/json" file "/well-known/element.json" if { path /.well-known/element/element.json }
+ 
+ backend well-known-no-match
+   mode http
+ 
+   http-request deny status 404
  # a fake backend which fonxes every request with a 500. Useful for
  # handling overloads etc.
  backend return_500
    http-request deny deny_status 500



@@ ConfigMap/ess-ci/release-name-synapse-haproxy - data @@
+   429.http:



@@ ConfigMap/ess-ci/release-name-well-known-haproxy - data @@
- element.json: |
-   {}



@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- 6551f87f002bc802ad444736b48590d1f235f990
+ b4a57a8cad1a9dd1f7c9973e5cabdbc30408e250

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/wellknowndelegation-haproxy-config-hash @@
- 1fc807cf3a57ceb74c78ebf806d02ae05f116780
+ 1524029fb714b9af8df3ead129d5904c39e0ddc7

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- 6551f87f002bc802ad444736b48590d1f235f990
+ b4a57a8cad1a9dd1f7c9973e5cabdbc30408e250

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/wellknowndelegation-haproxy-config-hash @@
- 1fc807cf3a57ceb74c78ebf806d02ae05f116780
+ 1524029fb714b9af8df3ead129d5904c39e0ddc7

quick-setup-external-cert-pg-external-values.yaml
@@ ConfigMap/ess-ci/release-name-haproxy - data @@
- 429.http: |
-   HTTP/1.0 429 Too Many Requests
-   Cache-Control: no-cache
-   Connection: close
-   Content-Type: application/json
-   access-control-allow-origin: *
-   access-control-allow-methods: GET, POST, PUT, DELETE, OPTIONS
-   access-control-allow-headers: Origin, X-Requested-With, Content-Type, Accept, Authorization
-   
-   {"errcode":"M_UNKNOWN","error":"Server is unavailable"}

@@ ConfigMap/ess-ci/release-name-haproxy - data.haproxy.cfg @@
  global
    maxconn 40000
    log stdout format raw local0 info
  
  
  [51 lines unchanged]
  
  
    compression algo gzip
    compression type text/plain text/html text/xml application/json text/css
  
-   # if we hit the maxconn on a server, and the queue timeout expires, we want
-   # to avoid returning 503, since that will cause cloudflare to mark us down.
-   #
-   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
-   #
-   #   503  when no server was available to handle the request, or in response to
-   #        monitoring requests which match the "monitor fail" condition
-   #
-   errorfile 503 /usr/local/etc/haproxy/429.http
- 
    # Use a consistent hashing scheme so that worker with balancing going down doesn't cause
    # the traffic for all others to be shuffled around.
    hash-type consistent sdbm
  
  
  [33 lines unchanged]
  
    bind *:8008
  
    # same as http log, with %Th (handshake time)
    log-format "%ci:%cp [%tr] %ft %b/%s %Th/%TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"
+ 
+   # if we hit the maxconn on a server, and the queue timeout expires, we want
+   # to avoid returning 503, since that will cause cloudflare to mark us down.
+   #
+   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
+   #
+   #   503  when no server was available to handle the request, or in response to
+   #        monitoring requests which match the "monitor fail" condition
+   #
+   errorfile 503 /synapse/429.http
  
    capture request header Host len 32
    capture request header Referer len 200
    capture request header User-Agent len 200
  
  [83 lines unchanged]
  
  
    acl well-known path /.well-known/matrix/server
    acl well-known path /.well-known/matrix/client
    acl well-known path /.well-known/matrix/support
-   acl well-known path /.well-known/element/element.json
  
  
    http-request redirect  code 301  location https://chat.your.tld unless well-known
  
    use_backend well-known-static if well-known
+   default_backend well-known-no-match
  
  backend well-known-static
    mode http
  
  
  [nine lines unchanged]
  
  
    http-request return status 200 content-type "application/json" file "/well-known/server" if { path /.well-known/matrix/server }
    http-request return status 200 content-type "application/json" file "/well-known/client" if { path /.well-known/matrix/client }
    http-request return status 200 content-type "application/json" file "/well-known/support" if { path /.well-known/matrix/support }
-   http-request return status 200 content-type "application/json" file "/well-known/element.json" if { path /.well-known/element/element.json }
+ 
+ backend well-known-no-match
+   mode http
+ 
+   http-request deny status 404
  # a fake backend which fonxes every request with a 500. Useful for
  # handling overloads etc.
  backend return_500
    http-request deny deny_status 500



@@ ConfigMap/ess-ci/release-name-synapse-haproxy - data @@
+   429.http:



@@ ConfigMap/ess-ci/release-name-well-known-haproxy - data @@
- element.json: |
-   {}



@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- 6551f87f002bc802ad444736b48590d1f235f990
+ b4a57a8cad1a9dd1f7c9973e5cabdbc30408e250

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/wellknowndelegation-haproxy-config-hash @@
- 1fc807cf3a57ceb74c78ebf806d02ae05f116780
+ 1524029fb714b9af8df3ead129d5904c39e0ddc7

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- 6551f87f002bc802ad444736b48590d1f235f990
+ b4a57a8cad1a9dd1f7c9973e5cabdbc30408e250

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/wellknowndelegation-haproxy-config-hash @@
- 1fc807cf3a57ceb74c78ebf806d02ae05f116780
+ 1524029fb714b9af8df3ead129d5904c39e0ddc7

quick-setup-external-cert-pg-with-helm-values.yaml
@@ ConfigMap/ess-ci/release-name-haproxy - data @@
- 429.http: |
-   HTTP/1.0 429 Too Many Requests
-   Cache-Control: no-cache
-   Connection: close
-   Content-Type: application/json
-   access-control-allow-origin: *
-   access-control-allow-methods: GET, POST, PUT, DELETE, OPTIONS
-   access-control-allow-headers: Origin, X-Requested-With, Content-Type, Accept, Authorization
-   
-   {"errcode":"M_UNKNOWN","error":"Server is unavailable"}

@@ ConfigMap/ess-ci/release-name-haproxy - data.haproxy.cfg @@
  global
    maxconn 40000
    log stdout format raw local0 info
  
  
  [51 lines unchanged]
  
  
    compression algo gzip
    compression type text/plain text/html text/xml application/json text/css
  
-   # if we hit the maxconn on a server, and the queue timeout expires, we want
-   # to avoid returning 503, since that will cause cloudflare to mark us down.
-   #
-   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
-   #
-   #   503  when no server was available to handle the request, or in response to
-   #        monitoring requests which match the "monitor fail" condition
-   #
-   errorfile 503 /usr/local/etc/haproxy/429.http
- 
    # Use a consistent hashing scheme so that worker with balancing going down doesn't cause
    # the traffic for all others to be shuffled around.
    hash-type consistent sdbm
  
  
  [33 lines unchanged]
  
    bind *:8008
  
    # same as http log, with %Th (handshake time)
    log-format "%ci:%cp [%tr] %ft %b/%s %Th/%TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"
+ 
+   # if we hit the maxconn on a server, and the queue timeout expires, we want
+   # to avoid returning 503, since that will cause cloudflare to mark us down.
+   #
+   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
+   #
+   #   503  when no server was available to handle the request, or in response to
+   #        monitoring requests which match the "monitor fail" condition
+   #
+   errorfile 503 /synapse/429.http
  
    capture request header Host len 32
    capture request header Referer len 200
    capture request header User-Agent len 200
  
  [83 lines unchanged]
  
  
    acl well-known path /.well-known/matrix/server
    acl well-known path /.well-known/matrix/client
    acl well-known path /.well-known/matrix/support
-   acl well-known path /.well-known/element/element.json
  
  
    http-request redirect  code 301  location https://chat.your.tld unless well-known
  
    use_backend well-known-static if well-known
+   default_backend well-known-no-match
  
  backend well-known-static
    mode http
  
  
  [nine lines unchanged]
  
  
    http-request return status 200 content-type "application/json" file "/well-known/server" if { path /.well-known/matrix/server }
    http-request return status 200 content-type "application/json" file "/well-known/client" if { path /.well-known/matrix/client }
    http-request return status 200 content-type "application/json" file "/well-known/support" if { path /.well-known/matrix/support }
-   http-request return status 200 content-type "application/json" file "/well-known/element.json" if { path /.well-known/element/element.json }
+ 
+ backend well-known-no-match
+   mode http
+ 
+   http-request deny status 404
  # a fake backend which fonxes every request with a 500. Useful for
  # handling overloads etc.
  backend return_500
    http-request deny deny_status 500



@@ ConfigMap/ess-ci/release-name-synapse-haproxy - data @@
+   429.http:



@@ ConfigMap/ess-ci/release-name-well-known-haproxy - data @@
- element.json: |
-   {}



@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- 6551f87f002bc802ad444736b48590d1f235f990
+ b4a57a8cad1a9dd1f7c9973e5cabdbc30408e250

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

@@ Deployment/ess-ci/release-name-haproxy - metadata.labels.k8s.element.io/wellknowndelegation-haproxy-config-hash @@
- 1fc807cf3a57ceb74c78ebf806d02ae05f116780
+ 1524029fb714b9af8df3ead129d5904c39e0ddc7

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/shared-haproxy-config-hash @@
- 6551f87f002bc802ad444736b48590d1f235f990
+ b4a57a8cad1a9dd1f7c9973e5cabdbc30408e250

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/synapse-haproxy-config-hash @@
- dd15ae90396e08033c4449b6fe41a8513198f7c3
+ 27f917ba99dd2cdfbd08468f150ede374348b034

@@ Deployment/ess-ci/release-name-haproxy - spec.template.metadata.labels.k8s.element.io/wellknowndelegation-haproxy-config-hash @@
- 1fc807cf3a57ceb74c78ebf806d02ae05f116780
+ 1524029fb714b9af8df3ead129d5904c39e0ddc7

quick-setup-letsencrypt-pg-external-values.yaml
@@ ConfigMap/ess-ci/release-name-haproxy - data @@
- 429.http: |
-   HTTP/1.0 429 Too Many Requests
-   Cache-Control: no-cache
-   Connection: close
-   Content-Type: application/json
-   access-control-allow-origin: *
-   access-control-allow-methods: GET, POST, PUT, DELETE, OPTIONS
-   access-control-allow-headers: Origin, X-Requested-With, Content-Type, Accept, Authorization
-   
-   {"errcode":"M_UNKNOWN","error":"Server is unavailable"}

@@ ConfigMap/ess-ci/release-name-haproxy - data.haproxy.cfg @@
  global
    maxconn 40000
    log stdout format raw local0 info
  
  
  [51 lines unchanged]
  
  
    compression algo gzip
    compression type text/plain text/html text/xml application/json text/css
  
-   # if we hit the maxconn on a server, and the queue timeout expires, we want
-   # to avoid returning 503, since that will cause cloudflare to mark us down.
-   #
-   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
-   #
-   #   503  when no server was available to handle the request, or in response to
-   #        monitoring requests which match the "monitor fail" condition
-   #
-   errorfile 503 /usr/local/etc/haproxy/429.http
- 
    # Use a consistent hashing scheme so that worker with balancing going down doesn't cause
    # the traffic for all others to be shuffled around.
    hash-type consistent sdbm
  
  
  [33 lines unchanged]
  
    bind *:8008
  
    # same as http log, with %Th (handshake time)
    log-format "%ci:%cp [%tr] %ft %b/%s %Th/%TR/%Tw/%Tc/%Tr/%Ta %ST %B %CC %CS %tsc %ac/%fc/%bc/%sc/%rc %sq/%bq %hr %hs %{+Q}r"
+ 
+   # if we hit the maxconn on a server, and the queue timeout expires, we want
+   # to avoid returning 503, since that will cause cloudflare to mark us down.
+   #
+   # https://cbonte.github.io/haproxy-dconv/1.8/configuration.html#1.3.1 says:
+   #
+   #   503  when no server was available to handle the request, or in response to
+   #        monitoring requests which match the "monitor fail" condition
+   #
+   errorfile 503 /synapse/429.http
  
    capture request header Host len 32
    capture request header Referer len 200
    capture request header User-Agent len 200
  
  [83 lines unchanged]
  
  
    acl well-known path /.well-known/matrix/server
    acl well-known path /.well-known/matrix/client
    acl well-known path /.well-known/matrix/support
-   acl well-known path /.well-known/element/element.json
  
  
    http-request redirect  code 301  location https://chat.your.tld unless well-known
  
    use_backend well-known-static if well-known
+   default_backend well-known-no-match
  
  backend well-known-static
    mode http
  
  
  [9 lines unchanged]
  
  
    http-request return status 200 content-type "application/json" file "/well-known/server" if { path /.well-known/matrix/server }
    http-request return status 200 content-type "application/json" file "/well-known/client" if { path /.well-known/matrix/client }
    http-request return status 200 content-type "application/json" file "/well-known/support" if { path /.well-known/matrix...*[Comment body truncated]*

@benbz benbz force-pushed the bbz/remove-well-known-element branch 3 times, most recently from e3658a5 to 9dd0a49 Compare August 1, 2025 13:51
@benbz benbz force-pushed the bbz/remove-well-known-element branch from 9dd0a49 to a08a350 Compare August 1, 2025 14:19
@benbz benbz force-pushed the bbz/remove-well-known-element branch from a08a350 to 19f5e24 Compare August 1, 2025 14:25
@benbz benbz merged commit 0013b48 into main Aug 1, 2025
67 checks passed
@benbz benbz deleted the bbz/remove-well-known-element branch August 1, 2025 14:38
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

Successfully merging this pull request may close these issues.

1 participant