Hi there!
While testing Traffic Splitting, I got the full Traefik config as below:
{
"http": {
"routers": {
"readiness": {
"entryPoints": [
"readiness"
],
"service": "readiness",
"rule": "Path(`/ping`)"
},
"test-server-80": {
"entryPoints": [
"http-5000"
],
"service": "test-server-80",
"rule": "Host(`server.test.traefik.mesh`) || Host(`server.test.maesh`) || Host(`10.96.11.122`)",
"priority": 1002
},
"test-server-server-split-80-traffic-split-direct": {
"entryPoints": [
"http-5000"
],
"service": "test-server-server-split-80-traffic-split",
"rule": "Host(`server.test.traefik.mesh`) || Host(`server.test.maesh`) || Host(`10.96.11.122`)",
"priority": 4002
},
"test-server-v1-80": {
"entryPoints": [
"http-5000"
],
"service": "test-server-v1-80",
"rule": "Host(`server-v1.test.traefik.mesh`) || Host(`server-v1.test.maesh`) || Host(`10.96.156.206`)",
"priority": 1002
},
"test-server-v2-80": {
"entryPoints": [
"http-5000"
],
"service": "test-server-v2-80",
"rule": "Host(`server-v2.test.traefik.mesh`) || Host(`server-v2.test.maesh`) || Host(`10.96.35.77`)",
"priority": 1002
}
},
"services": {
"block-all-service": {
"loadBalancer": {
"passHostHeader": false
}
},
"readiness": {
"loadBalancer": {
"servers": [
{
"url": "http://127.0.0.1:8080"
}
],
"passHostHeader": true
}
},
"test-server-80": {
"loadBalancer": {
"servers": [
{
"url": "http://10.244.1.13:80"
},
{
"url": "http://10.244.1.15:80"
},
{
"url": "http://10.244.1.16:80"
},
{
"url": "http://10.244.2.12:80"
},
{
"url": "http://10.244.2.13:80"
},
{
"url": "http://10.244.2.14:80"
}
],
"passHostHeader": true
}
},
"test-server-server-split-80-server-v1-traffic-split-backend": {
"loadBalancer": {
"servers": [
{
"url": "http://server-v1.test.traefik.mesh:80"
}
],
"passHostHeader": false
}
},
"test-server-server-split-80-server-v2-traffic-split-backend": {
"loadBalancer": {
"servers": [
{
"url": "http://server-v2.test.traefik.mesh:80"
}
],
"passHostHeader": false
}
},
"test-server-server-split-80-traffic-split": {
"weighted": {
"services": [
{
"name": "test-server-server-split-80-server-v1-traffic-split-backend",
"weight": 10
},
{
"name": "test-server-server-split-80-server-v2-traffic-split-backend",
"weight": 90
}
]
}
},
"test-server-v1-80": {
"loadBalancer": {
"servers": [
{
"url": "http://10.244.1.16:80"
},
{
"url": "http://10.244.2.14:80"
}
],
"passHostHeader": true
}
},
"test-server-v2-80": {
"loadBalancer": {
"servers": [
{
"url": "http://10.244.1.15:80"
},
{
"url": "http://10.244.2.13:80"
}
],
"passHostHeader": true
}
}
},
"middlewares": {
"block-all-middleware": {
"ipWhiteList": {
"sourceRange": [
"255.255.255.255"
]
}
}
}
}
}
I'm wondering why the URLs of the following services are specified as hostnames (instead of IPs), which, to my understanding, will themselves be resolved (again) to the IPs of the Traefik Mesh proxy:
...
"test-server-server-split-80-server-v1-traffic-split-backend": {
"loadBalancer": {
"servers": [
{
"url": "http://server-v1.test.traefik.mesh:80"
}
],
"passHostHeader": false
}
},
"test-server-server-split-80-server-v2-traffic-split-backend": {
"loadBalancer": {
"servers": [
{
"url": "http://server-v2.test.traefik.mesh:80"
}
],
"passHostHeader": false
}
},
...
If I'm not mistaken, this will add one more hop when forwarding traffic to the real backend server. Why not use pod IPs directly in this scenario? What's the idea behind this design?
Thanks in advance!