Running Traefik and Nginx Proxy Manager on the same Server

I went down the road you suggested. I converted nginx default.conf into a traefik-conf.yaml:

### --- Perseus Stack

http:
  routers:
    # NOTE(review): the original rules used the invalid form
    # PathPrefix(``/`.Host(...)`) which Traefik cannot parse. Matchers are
    # combined with `&&`. Traefik gives longer rules higher priority
    # automatically, so specific routes (e.g. /user/api/...) win over /.
    # NOTE(review): element names containing `/` may be rejected by the file
    # provider — consider renaming (e.g. router-for-root) if Traefik logs
    # "invalid node name" errors.
    router-for-/:
      rule: "Host(`perseus.acumenus.net`) && PathPrefix(`/`)"
      entryPoints:
      - web
      middlewares:
      - headers-for-/
      service: service-for-/
    router-for-/backend:
      rule: "Host(`perseus.acumenus.net`) && PathPrefix(`/backend`)"
      entryPoints:
      - web
      middlewares:
      - headers-for-/backend
      service: service-for-/backend
    router-for-/user:
      rule: "Host(`perseus.acumenus.net`) && PathPrefix(`/user`)"
      entryPoints:
      - web
      middlewares:
      - headers-for-/user
      service: service-for-/user
    router-for-/user/api/is_token_valid_internal:
      rule: "Host(`perseus.acumenus.net`) && PathPrefix(`/user/api/is_token_valid_internal`)"
      entryPoints:
      - web
      service: service-for-/user/api/is_token_valid_internal
      middlewares:
      - headers-for-/user/api/is_token_valid_internal
    router-for-/white-rabbit:
      rule: "Host(`perseus.acumenus.net`) && PathPrefix(`/white-rabbit`)"
      entryPoints:
      - web
      middlewares:
      - headers-for-/white-rabbit
      service: service-for-/white-rabbit
    router-for-/cdm-builder:
      rule: "Host(`perseus.acumenus.net`) && PathPrefix(`/cdm-builder`)"
      entryPoints:
      - web
      middlewares:
      - headers-for-/cdm-builder
      service: service-for-/cdm-builder
    router-for-/data-quality-dashboard/api:
      rule: "Host(`perseus.acumenus.net`) && PathPrefix(`/data-quality-dashboard/api`)"
      entryPoints:
      - web
      middlewares:
      - headers-for-/data-quality-dashboard/api
      service: service-for-/data-quality-dashboard/api
    router-for-/data-quality-dashboard:
      rule: "Host(`perseus.acumenus.net`) && PathPrefix(`/data-quality-dashboard`)"
      entryPoints:
      - web
      middlewares:
      - headers-for-/data-quality-dashboard
      service: service-for-/data-quality-dashboard
    router-for-/athena:
      rule: "Host(`perseus.acumenus.net`) && PathPrefix(`/athena`)"
      entryPoints:
      - web
      middlewares:
      - headers-for-/athena
      service: service-for-/athena
    router-for-/solr:
      rule: "Host(`perseus.acumenus.net`) && PathPrefix(`/solr`)"
      entryPoints:
      - web
      middlewares:
      - headers-for-/solr
      service: service-for-/solr
    router-for-/usagi:
      rule: "Host(`perseus.acumenus.net`) && PathPrefix(`/usagi`)"
      entryPoints:
      - web
      middlewares:
      - headers-for-/usagi
      service: service-for-/usagi
    router-for-/swagger:
      rule: "Host(`perseus.acumenus.net`) && PathPrefix(`/swagger`)"
      entryPoints:
      - web
      middlewares:
      - headers-for-/swagger
      service: service-for-/swagger
  services:
    # Backends are reached through the docker0 host gateway (172.17.0.1), so
    # every URL must use the port *published* on the host, not the port the
    # process listens on inside its container.
    service-for-/:
      loadBalancer:
        servers:
        - url: http://172.17.0.1:4200
    service-for-/backend:
      loadBalancer:
        servers:
        - url: http://172.17.0.1:5004
    service-for-/user:
      loadBalancer:
        servers:
        - url: http://172.17.0.1:5001
    service-for-/user/api/is_token_valid_internal:
      loadBalancer:
        servers:
        - url: http://172.17.0.1:5001
    service-for-/white-rabbit:
      loadBalancer:
        servers:
        # Fixed: the compose file publishes WhiteRabbit as "8002:8000", so
        # the host-gateway address must use 8002 (8000 is only reachable
        # inside the container network).
        - url: http://172.17.0.1:8002
    service-for-/cdm-builder:
      loadBalancer:
        servers:
        - url: http://172.17.0.1:9000
    service-for-/data-quality-dashboard/api:
      loadBalancer:
        servers:
        - url: http://172.17.0.1:8001
    service-for-/data-quality-dashboard:
      loadBalancer:
        servers:
        - url: http://172.17.0.1:8001
    service-for-/athena:
      loadBalancer:
        servers:
        - url: http://172.17.0.1:5002
    service-for-/solr:
      loadBalancer:
        servers:
        - url: http://172.17.0.1:8983
    service-for-/usagi:
      loadBalancer:
        servers:
        - url: http://172.17.0.1:5003
    service-for-/swagger:
      loadBalancer:
        servers:
        - url: http://172.17.0.1:8080
  middlewares:
    # NOTE(review): values such as $remote_addr / $scheme / $host / $username
    # are nginx variables. Traefik does NOT interpolate them — it would send
    # the literal text "$remote_addr" to the backend. Traefik already sets
    # X-Forwarded-For, X-Forwarded-Proto and X-Real-IP on its own, so most of
    # these entries can likely be removed — confirm what each backend needs
    # (especially the Username header) before dropping them.
    headers-for-/:
      headers:
        customRequestHeaders:
          X-Real-IP: $remote_addr
          X-Forwarded-For: $proxy_add_x_forwarded_for
          X-Forwarded-Proto: $scheme
          Host: $host
    headers-for-/backend:
      headers:
        customRequestHeaders:
          Username: $username
          X-Real-IP: $remote_addr
          X-Forwarded-For: $proxy_add_x_forwarded_for
          X-Forwarded-Proto: $scheme
          Host: $host
    headers-for-/user:
      headers:
        customRequestHeaders:
          X-Real-IP: $remote_addr
          X-Forwarded-For: $proxy_add_x_forwarded_for
          X-Forwarded-Proto: $scheme
          Host: $host
    headers-for-/user/api/is_token_valid_internal:
      headers:
        customRequestHeaders:
          # Fixed: an empty string value tells Traefik to REMOVE the header
          # (the nginx `proxy_set_header Content-Length ""` equivalent);
          # '""' would have sent a literal two-character value.
          Content-Length: ""
          X-Original-URI: $request_uri
    headers-for-/white-rabbit:
      headers:
        customRequestHeaders:
          Username: $username
          X-Real-IP: $remote_addr
          X-Forwarded-For: $proxy_add_x_forwarded_for
          X-Forwarded-Proto: $scheme
          Host: $host
    headers-for-/cdm-builder:
      headers:
        customRequestHeaders:
          Username: $username
          X-Real-IP: $remote_addr
          X-Forwarded-For: $proxy_add_x_forwarded_for
          X-Forwarded-Proto: $scheme
          Host: $host
    headers-for-/data-quality-dashboard/api:
      headers:
        customRequestHeaders:
          Username: $username
          X-Real-IP: $remote_addr
          X-Forwarded-For: $proxy_add_x_forwarded_for
          X-Forwarded-Proto: $scheme
          Host: $host
    headers-for-/data-quality-dashboard:
      headers:
        customRequestHeaders:
          X-Real-IP: $remote_addr
          X-Forwarded-For: $proxy_add_x_forwarded_for
          X-Forwarded-Proto: $scheme
          Host: $host
    headers-for-/athena:
      headers:
        customRequestHeaders:
          Username: $username
          X-Real-IP: $remote_addr
          X-Forwarded-For: $proxy_add_x_forwarded_for
          X-Forwarded-Proto: $scheme
          Host: $host
    headers-for-/solr:
      headers:
        customRequestHeaders:
          X-Real-IP: $remote_addr
          X-Forwarded-For: $proxy_add_x_forwarded_for
          X-Forwarded-Proto: $scheme
          Host: $host
    headers-for-/usagi:
      headers:
        customRequestHeaders:
          Username: $username
          X-Real-IP: $remote_addr
          X-Forwarded-For: $proxy_add_x_forwarded_for
          X-Forwarded-Proto: $scheme
          Host: $host
    headers-for-/swagger:
      headers:
        customRequestHeaders:
          X-Real-IP: $remote_addr
          X-Forwarded-For: $proxy_add_x_forwarded_for
          X-Forwarded-Proto: $scheme
          Host: $host
# NOTE(review): entryPoints belong to Traefik's *static* configuration
# (traefik.yml / CLI flags). In a dynamic (file-provider) config this section
# is ignored — move it into the static config file.
entryPoints:
  web:
    address: :80

And completely rewrote the original stack's docker-compose.yml after removing nginx as the proxy manager:

version: "3.8"

services:
  traefik:
    image: traefik:v2.5
    container_name: traefik
    command:
      # NOTE(review): the file mounted below contains *dynamic* config
      # (http.routers/services); --configFile expects the *static* config.
      # Load the dynamic file via providers.file in the static config instead
      # — confirm which file is which before deploying.
      - "--configFile=/etc/traefik/traefik.yml"
      - "--certificatesResolvers.myresolver.acme.httpChallenge.entryPoint=web"
      - "--certificatesResolvers.myresolver.acme.email=youremail@example.com"
      # acme.json must already exist on the host with permissions 0600, or
      # Traefik refuses to use it.
      - "--certificatesResolvers.myresolver.acme.storage=/acme.json"
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /mnt/data/traefik-conf-modified.yml:/etc/traefik/traefik.yml
      - /path/to/acme.json:/acme.json
    # NOTE(review): these labels only take effect if the Docker provider is
    # enabled (--providers.docker, not shown here). Also, neither `myrouter`
    # nor `http-catchall` references a service; Traefik v2 requires every
    # router to have one — check the Traefik logs for dropped routers.
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.myrouter.rule=Host(`perseus.acumenus.net`)"
      - "traefik.http.routers.myrouter.entrypoints=websecure"
      - "traefik.http.routers.myrouter.tls.certresolver=myresolver"
      - "traefik.http.routers.myrouter.tls.domains[0].main=perseus.acumenus.net"
      - "traefik.http.routers.http-catchall.rule=HostRegexp(`{host:.+}`)"
      - "traefik.http.routers.http-catchall.entrypoints=web"
      - "traefik.http.routers.http-catchall.middlewares=https-redirect"
      - "traefik.http.middlewares.https-redirect.redirectscheme.scheme=https"
  # Shared Postgres instance used by most Perseus services.
  shareddb:
    image: perseushub/shareddb:latest
    build: ./shared-db
    container_name: shareddb
    volumes:
      - shareddb:/data/postgres
    ports:
      - "5432:5432"
  files-manager:
    image: perseushub/files-manager:latest
    build: ./files-manager
    container_name: files-manager
    ports:
      - "10500:10500"
    environment:
      - SPRING_PROFILES_ACTIVE=docker
    depends_on:
      - shareddb
  user:
    image: perseushub/user:latest
    build: ./user
    container_name: user
    environment:
      USER_ENV: Docker
    env_file:
      - user/user-envs.txt
    ports:
      - "5001:5001"
    depends_on:
      - shareddb
  backend:
    image: perseushub/backend:latest
    build: ./perseus-api
    container_name: backend
    environment:
      PERSEUS_ENV: Docker
    ports:
      - "5004:5004"
    depends_on:
      - shareddb
      - files-manager
  frontend:
    image: perseushub/frontend:latest
    build:
      context: ./UI
      args:
        env: prod
    container_name:
      frontend
    ports:
      - "4200:4200"
  white-rabbit:
    image: perseushub/white-rabbit:latest
    build: ../WhiteRabbit
    container_name:
      white-rabbit
    ports:
      # Published on host port 8002 while the container listens on 8000 —
      # any proxy targeting 172.17.0.1 must therefore use port 8002.
      - "8002:8000"
    environment:
      - SPRING_PROFILES_ACTIVE=docker
    depends_on:
      - shareddb
      - files-manager
  # Vocabulary Postgres; gated by a healthcheck because cdm-builder and solr
  # depend on it being ready.
  vocabularydb:
    image: perseushub/vocabularydb:latest
    build: ./vocabulary-db
    container_name: vocabularydb
    healthcheck:
      test: [ "CMD", "pg_isready", "-q", "-d", "vocabulary", "-U", "admin" ]
      timeout: 60s
      interval: 30s
      retries: 10
    volumes:
      - vocabularydb:/data/postgres
    ports:
      # Host port 5431 to avoid clashing with shareddb's 5432.
      - "5431:5432"
  cdm-builder:
    image: perseushub/cdm-builder:latest
    build: ../ETL-CDMBuilder
    container_name:
      cdm-builder
    ports:
      - "9000:9000"
    environment:
      - ASPNETCORE_ENVIRONMENT=Docker
    depends_on:
      - shareddb
      - files-manager
      - vocabularydb
  solr:
    image: perseushub/solr:latest
    build: ./solr
    container_name: solr
    ports:
      - "8983:8983"
    volumes:
      - solr:/var/solr
    depends_on:
      - vocabularydb
  athena:
    image: perseushub/athena:latest
    build: ./athena-api
    container_name: athena
    environment:
      ATHENA_ENV: Docker
    ports:
      - "5002:5002"
    depends_on:
      - solr
  usagi:
    image: perseushub/usagi:latest
    build: ./usagi-api
    command: python /app/main.py
    container_name: usagi
    environment:
      USAGI_ENV: Docker
    ports:
      - "5003:5003"
    depends_on:
      - shareddb
      - solr
  # Rserve backend used by the Data Quality Dashboard.
  r-serve:
    image: perseushub/r-serve:latest
    build:
      context: ../DataQualityDashboard/R
      args:
        prop: docker
    container_name:
      r-serve
    ports:
      - "6311:6311"
    depends_on:
      - shareddb
  data-quality-dashboard:
    image: perseushub/data-quality-dashboard:latest
    build:
      context: ../DataQualityDashboard
    container_name:
      data-quality-dashboard
    ports:
      - "8001:8001"
    environment:
      - SPRING_PROFILES_ACTIVE=docker
    depends_on:
      - shareddb
      - files-manager
      - r-serve
  swagger:
    image: perseushub/swagger:latest
    build: ./swagger-ui
    container_name: swagger
    ports:
      # Quoted to match every other service and to sidestep YAML's
      # colon-separated-scalar (sexagesimal) pitfall on port mappings.
      - "8080:8080"
# Named volumes backing the Postgres instances and Solr index.
volumes:
  shareddb:
  vocabularydb:
  solr:

I have some cleanup to do with volumes but I will let you know soon if it works!

I'd like to share a working solution I discovered for the same issue, although it's a bit delayed.

To resolve this, you'll need to add the following Traefik labels to your nginx container:

- traefik.enable=true
- traefik.docker.network=dokploy-network

- traefik.tcp.routers.catchall.rule=HostSNI(`*`)
- traefik.tcp.routers.catchall.priority=1
- traefik.tcp.services.catchall-service.loadbalancer.server.port=80
- traefik.tcp.routers.catchall.service=catchall-service

- traefik.tcp.routers.catchall-tls.rule=HostSNIRegexp(`^.*$`)
- traefik.tcp.routers.catchall-tls.tls.passthrough=true
- traefik.tcp.routers.catchall-tls.priority=2
- traefik.tcp.services.catchall-tls-service.loadbalancer.server.port=443
- traefik.tcp.routers.catchall-tls.service=catchall-tls-service

Below is my complete Docker Compose configuration for the Nginx Proxy Manager:

version: '3'

networks:
    traefik-network:
        external: true

volumes:
    data:
    letsencrypt:

services:
    nginx-proxy:
        image: 'jc21/nginx-proxy-manager:latest'
        restart: unless-stopped
        labels:
            - traefik.enable=true
            # NOTE(review): the container is only attached to traefik-network
            # below; this label must name a network Traefik shares with the
            # container, or Traefik cannot reach NPM — verify the name.
            - traefik.docker.network=dokploy-network

            # Plain-HTTP catch-all: a TCP router forwarding everything to
            # NPM's port 80.
            - traefik.tcp.routers.catchall.rule=HostSNI(`*`)
            # Fixed: priority was set on a non-existent *http* router, so the
            # tcp router's priority was never applied.
            - traefik.tcp.routers.catchall.priority=1
            - traefik.tcp.services.catchall-service.loadbalancer.server.port=80
            - traefik.tcp.routers.catchall.service=catchall-service

            # TLS catch-all with passthrough so NPM terminates TLS itself.
            - traefik.tcp.routers.catchall-tls.rule=HostSNIRegexp(`^.*$`)
            - traefik.tcp.routers.catchall-tls.tls.passthrough=true
            # Fixed: likewise moved from the http to the tcp namespace.
            - traefik.tcp.routers.catchall-tls.priority=2
            - traefik.tcp.services.catchall-tls-service.loadbalancer.server.port=443
            - traefik.tcp.routers.catchall-tls.service=catchall-tls-service

        volumes:
            - data:/data
            - letsencrypt:/etc/letsencrypt
        networks:
            - traefik-network

Update:

Use the following labels for managing the Nginx proxy container:

      - traefik.enable=true
      - traefik.docker.network=dokploy-network

      - traefik.tcp.routers.catchall.entrypoints=web
      - traefik.tcp.routers.catchall.rule=HostSNI(`*`)
      - traefik.tcp.routers.catchall.priority=2
      - traefik.tcp.services.catchall-service.loadbalancer.server.port=80
      - traefik.tcp.routers.catchall.service=catchall-service

      - traefik.tcp.routers.catchall-tls.entrypoints=websecure
      - traefik.tcp.routers.catchall-tls.rule=HostSNIRegexp(`.*`)
      - traefik.tcp.routers.catchall-tls.tls.passthrough=true
      - traefik.tcp.routers.catchall-tls.priority=1
      - traefik.tcp.services.catchall-tls-service.loadbalancer.server.port=443
      - traefik.tcp.routers.catchall-tls.service=catchall-tls-service

Additionally, set the Traefik ACME challenge to use TLS challenges as shown below:

certificatesResolvers:
  letsencrypt:
    acme:
      # ...
      # TLS-ALPN-01 challenge: Traefik answers it on port 443 itself, so —
      # unlike HTTP-01 — the challenge traffic is never handed to the
      # catch-all router in front of nginx.
      tlsChallenge: {}

Setting the challenge to TLS is necessary because HTTP challenges will go directly to the Nginx server. When you add a new HTTPS domain to Traefik, it will not automatically create a rule for HTTP. As a result, the non-TLS catch-all will intercept it. To prevent this, you need to use TLS challenges in Traefik, which will be filtered out by the new HTTPS rule before reaching the catch-all.