
I just successfully dockerized my Seafile installation and now want to put it behind a reverse proxy because I would like to host multiple services. However, I'm getting "Bad Gateway" from nginx and can't figure out what the problem might be.

This is the working seafile docker-compose.yml (without reverse proxy):

version: '2.0'
services:
  db:
    image: mariadb:10.1
    container_name: seafile-mysql
    environment:
      - MYSQL_ROOT_PASSWORD=password
      - MYSQL_LOG_CONSOLE=true
    volumes:
      - /home/docker-seafile/seafile-mysql/db:/var/lib/mysql
    networks:
      - seafile-net
  memcached:
    image: memcached:1.5.6
    container_name: seafile-memcached
    entrypoint: memcached -m 256
    networks:
      - seafile-net
  seafile:
    image: seafileltd/seafile-mc:latest
    container_name: seafile
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - /home/docker-seafile/seafile-data:/shared
    environment:
      - DB_HOST=db
      - DB_ROOT_PASSWD=password
      - TIME_ZONE=Europe/Zurich
      - SEAFILE_ADMIN_EMAIL=info@domain.com
      - SEAFILE_ADMIN_PASSWORD=password
      - SEAFILE_SERVER_LETSENCRYPT=true
      - SEAFILE_SERVER_HOSTNAME=seafile.domain.com
    depends_on:
      - db
      - memcached
    networks:
      - seafile-net
networks:
  seafile-net:

This is the docker-compose.yml for jwilder/nginx-proxy with the letsencrypt companion:

version: '2'
services:
  nginx-proxy:
    image: jwilder/nginx-proxy
    container_name: nginx-proxy
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - /home/docker-nginx-proxy/conf:/etc/nginx/conf.d
      - /home/docker-nginx-proxy/vhost:/etc/nginx/vhost.d
      - /home/docker-nginx-proxy/html:/usr/share/nginx/html
      - /home/docker-nginx-proxy/dhparam:/etc/nginx/dhparam
      - /home/docker-nginx-proxy/certs:/etc/nginx/certs:ro
      - /var/run/docker.sock:/tmp/docker.sock:ro
    restart: always
  letsencrypt:
    image: jrcs/letsencrypt-nginx-proxy-companion
    container_name: nginx-proxy-le
    depends_on:
      - nginx-proxy
    volumes_from:
      - nginx-proxy
    volumes:
      - /home/docker-nginx-proxy/certs:/etc/nginx/certs
      - /var/run/docker.sock:/var/run/docker.sock:ro
    restart: always
networks:
  default:
    external:
      name: nginx-proxy

This is the modified docker-compose.yml for seafile behind the reverse proxy:

version: '2.0'
services:
  db:
    image: mariadb:10.1
    container_name: seafile-mysql
    environment:
      - MYSQL_ROOT_PASSWORD=password
      - MYSQL_LOG_CONSOLE=true
    volumes:
      - /home/docker-seafile/seafile-mysql/db:/var/lib/mysql
    networks:
      - seafile-net
  memcached:
    image: memcached:1.5.6
    container_name: seafile-memcached
    entrypoint: memcached -m 256
    networks:
      - seafile-net
  seafile:
    image: seafileltd/seafile-mc:latest
    container_name: seafile
    ports:
      - "3000:80"
    volumes:
      - /home/docker-seafile/seafile-data:/shared # Required: path to the persistent Seafile data store.
    environment:
      - DB_HOST=db
      - DB_ROOT_PASSWD=password
      - TIME_ZONE=Europe/Zurich
      - SEAFILE_ADMIN_EMAIL=info@domain.com
      - SEAFILE_ADMIN_PASSWORD=password
      - VIRTUAL_HOST=seafile.domain.com
      - VIRTUAL_NETWORK=nginx-proxy
      - VIRTUAL_PORT=3000
      - LETSENCRYPT_HOST=seafile.domain.com
      - LETSENCRYPT_EMAIL=info@domain.com
    depends_on:
      - db
      - memcached
    networks:
      - seafile-net
      - nginx-proxy
networks:
  seafile-net:
  nginx-proxy:
    external: true

Any ideas what might be wrong?

Edit:

The nginx config generated by jwilder/nginx-proxy looks like this:

root@7a10734e8ba2:/etc/nginx/conf.d# cat default.conf
# If we receive X-Forwarded-Proto, pass it through; otherwise, pass along the
# scheme used to connect to this server
map $http_x_forwarded_proto $proxy_x_forwarded_proto {
  default $http_x_forwarded_proto;
  ''      $scheme;
}
# If we receive X-Forwarded-Port, pass it through; otherwise, pass along the
# server port the client connected to
map $http_x_forwarded_port $proxy_x_forwarded_port {
  default $http_x_forwarded_port;
  ''      $server_port;
}
# If we receive Upgrade, set Connection to "upgrade"; otherwise, delete any
# Connection header that may have been passed to this server
map $http_upgrade $proxy_connection {
  default upgrade;
  '' close;
}
# Apply fix for very long server names
server_names_hash_bucket_size 128;
# Default dhparam
ssl_dhparam /etc/nginx/dhparam/dhparam.pem;
# Set appropriate X-Forwarded-Ssl header
map $scheme $proxy_x_forwarded_ssl {
  default off;
  https on;
}
gzip_types text/plain text/css application/javascript application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
log_format vhost '$host $remote_addr - $remote_user [$time_local] '
                 '"$request" $status $body_bytes_sent '
                 '"$http_referer" "$http_user_agent"';
access_log off;
                ssl_protocols TLSv1.2 TLSv1.3;
                ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
                ssl_prefer_server_ciphers off;
resolver 127.0.0.11;
# HTTP 1.1 support
proxy_http_version 1.1;
proxy_buffering off;
proxy_set_header Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $proxy_connection;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $proxy_x_forwarded_proto;
proxy_set_header X-Forwarded-Ssl $proxy_x_forwarded_ssl;
proxy_set_header X-Forwarded-Port $proxy_x_forwarded_port;
# Mitigate httpoxy attack (see README for details)
proxy_set_header Proxy "";
server {
        server_name _; # This is just an invalid value which will never trigger on a real hostname.
        listen 80;
        access_log /var/log/nginx/access.log vhost;
        return 503;
}
server {
        server_name _; # This is just an invalid value which will never trigger on a real hostname.
        listen 443 ssl http2;
        access_log /var/log/nginx/access.log vhost;
        return 503;
        ssl_session_cache shared:SSL:50m;
        ssl_session_tickets off;
        ssl_certificate /etc/nginx/certs/default.crt;
        ssl_certificate_key /etc/nginx/certs/default.key;
}
# seafile.domain.com
upstream seafile.domain.com {
                                # Cannot connect to network of this container
                                server 127.0.0.1 down;
                                ## Can be connected with "nginx-proxy" network
                        # seafile
                        server 172.19.0.4:80;
}
server {
        server_name seafile.domain.com;
        listen 80 ;
        access_log /var/log/nginx/access.log vhost;
        # Do not HTTPS redirect Let'sEncrypt ACME challenge
        location /.well-known/acme-challenge/ {
                auth_basic off;
                allow all;
                root /usr/share/nginx/html;
                try_files $uri =404;
                break;
        }
        location / {
                return 301 https://$host$request_uri;
        }
}
server {
        server_name seafile.domain.com;
        listen 443 ssl http2 ;
        access_log /var/log/nginx/access.log vhost;
        ssl_session_timeout 5m;
        ssl_session_cache shared:SSL:50m;
        ssl_session_tickets off;
        ssl_certificate /etc/nginx/certs/seafile.domain.com.crt;
        ssl_certificate_key /etc/nginx/certs/seafile.domain.com.key;
        ssl_dhparam /etc/nginx/certs/seafile.domain.com.dhparam.pem;
        ssl_stapling on;
        ssl_stapling_verify on;
        ssl_trusted_certificate /etc/nginx/certs/seafile.domain.com.chain.pem;
        add_header Strict-Transport-Security "max-age=31536000" always;
        include /etc/nginx/vhost.d/default;
        location / {
                proxy_pass http://seafile.domain.com;
        }
}

That proxy_pass to http://seafile.domain.com looks wrong to me, or am I mistaken?

saimonsez

6 Answers


I found a partial solution with the help of https://github.com/haiwen/seafile-docker/issues/91

The docker-compose.yml for Seafile now looks like this:

version: '2.0'
services:
  db:
    image: mariadb:10.1
    container_name: seafile-mysql
    environment:
      - MYSQL_ROOT_PASSWORD= 
      - MYSQL_LOG_CONSOLE=true
    volumes:
      - ./seafile-mysql/db:/var/lib/mysql

  memcached:
    image: memcached:1.5.6
    container_name: seafile-memcached
    entrypoint: memcached -m 256

  seafile:
    image: seafileltd/seafile-mc:latest
    container_name: seafile
    expose:
      - "80"
    volumes:
      - ./seafile-data:/shared
      - ./seafile.nginx.conf.template:/templates/seafile.nginx.conf.template:ro
    environment:
      - DB_HOST=db
      - DB_ROOT_PASSWD=
      - TIME_ZONE=Europe/Zurich
      - SEAFILE_ADMIN_EMAIL=
      - SEAFILE_ADMIN_PASSWORD=
      - VIRTUAL_HOST=seafile.domain.com
      - VIRTUAL_NETWORK=nginx-proxy
      - VIRTUAL_PORT=80
      - LETSENCRYPT_HOST=seafile.domain.com
      - LETSENCRYPT_EMAIL=
    depends_on:
      - db
      - memcached
networks:
  default:
    external:
      name: nginx-proxy

and the corresponding seafile.nginx.conf.template

# -*- mode: nginx -*-
# Auto generated at {{ current_timestr }}
{% if https -%}
server {
    listen 80;
    server_name _ default_server;

    # allow certbot to connect to challenge location via HTTP Port 80
    # otherwise renewal request will fail
    location /.well-known/acme-challenge/ {
        alias /var/www/challenges/;
        try_files $uri =404;
    }

    location / {
        rewrite ^ https://{{ domain }}$request_uri? permanent;
    }
}
{% endif -%}

server {
{% if https -%}
    listen 443;
    ssl on;
    ssl_certificate      /shared/ssl/{{ domain }}.crt;
    ssl_certificate_key  /shared/ssl/{{ domain }}.key;

    ssl_ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS;

    # TODO: More SSL security hardening: ssl_session_tickets & ssl_dhparam
    # ssl_session_tickets on;
    # ssl_session_ticket_key /etc/nginx/sessionticket.key;
    # ssl_session_cache shared:SSL:10m;
    # ssl_session_timeout 10m;
{% else -%}
    listen 80;
{% endif -%}

    server_name {{ domain }};

    client_max_body_size 10m;

    location / {
        proxy_pass http://127.0.0.1:8000/;
        proxy_read_timeout 310s;
        proxy_set_header Host $host;
#       proxy_set_header Forwarded "for=$remote_addr;proto=$scheme";
        proxy_set_header Forwarded "for=$proxy_add_x_forwarded_for;proto=$http_x_forwarded_proto";
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
#       proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
#       proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Real-IP $proxy_add_x_forwarded_for;
        proxy_set_header Connection "";
        proxy_http_version 1.1;

        client_max_body_size 0;
        access_log      /var/log/nginx/seahub.access.log seafileformat;
        error_log       /var/log/nginx/seahub.error.log;
    }

    location /seafhttp {
        rewrite ^/seafhttp(.*)$ $1 break;
        proxy_pass http://127.0.0.1:8082;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        client_max_body_size 0;
        proxy_connect_timeout  36000s;
        proxy_read_timeout  36000s;
        proxy_request_buffering off;
        access_log      /var/log/nginx/seafhttp.access.log seafileformat;
        error_log       /var/log/nginx/seafhttp.error.log;
    }

    location /seafdav {
        client_max_body_size 0;
        fastcgi_pass    127.0.0.1:8080;
        fastcgi_param   SCRIPT_FILENAME     $document_root$fastcgi_script_name;
        fastcgi_param   PATH_INFO           $fastcgi_script_name;

        fastcgi_param   SERVER_PROTOCOL     $server_protocol;
        fastcgi_param   QUERY_STRING        $query_string;
        fastcgi_param   REQUEST_METHOD      $request_method;
        fastcgi_param   CONTENT_TYPE        $content_type;
        fastcgi_param   CONTENT_LENGTH      $content_length;
        fastcgi_param   SERVER_ADDR         $server_addr;
        fastcgi_param   SERVER_PORT         $server_port;
        fastcgi_param   SERVER_NAME         $server_name;

        access_log      /var/log/nginx/seafdav.access.log seafileformat;
        error_log       /var/log/nginx/seafdav.error.log;
    }

    location /media {
        root /opt/seafile/seafile-server-latest/seahub;
    }

    # For letsencrypt
    location /.well-known/acme-challenge/ {
        alias /var/www/challenges/;
        try_files $uri =404;
    }
}

With that, seafile.domain.com is reachable through the proxy. However, clients are still unable to sync, probably because of fastcgi. https://github.com/haiwen/seafile-docker/issues/91#issuecomment-549025051 mentions an open pull request that will hopefully fix this issue.

saimonsez
  • I am starting to wonder if the template should be added to the nginx-proxy container, not the seafile container. – David Gleba Apr 05 '20 at 17:42

I found a solution for the network problem: the issue with jwilder/nginx-proxy is client_max_body_size. There is no setting for it by default, so the maximum upload size is 1M.

I solved it with a new file and a new line in the docker-compose.yml of the proxy container. See here for a complete explanation:

https://github.com/strahli30/HowTo-use-Docker-Seafile-Plex-NGINX

In short:

  • Create a new folder: mkdir nginx.template

  • Create a new file: sudo nano nginx.template/client_max_body_size.conf

  • Put a single line in this file: client_max_body_size 0;

  • Add this line under volumes in the docker-compose.yml of jwilder/nginx-proxy:alpine (see the sketch after this list): ./nginx.template/client_max_body_size.conf:/etc/nginx/conf.d/client_max_body_size.conf:ro
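
Put together, the volumes section of the proxy's docker-compose.yml would look roughly like this. This is only a sketch; the service name and the docker.sock mount are taken from the question's setup, not from the linked guide:

services:
  nginx-proxy:
    image: jwilder/nginx-proxy:alpine
    volumes:
      - /var/run/docker.sock:/tmp/docker.sock:ro
      # new: lift the default 1M upload limit for all proxied vhosts
      - ./nginx.template/client_max_body_size.conf:/etc/nginx/conf.d/client_max_body_size.conf:ro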

After restarting Seafile and nginx, everything works fine! :-)

strahli30

I struggled to set up a fresh, empty seafile-mc 7.0.5 using jwilder/nginx-proxy with the letsencrypt companion. I was getting Bad Gateway errors.

I implemented the answer here: https://stackoverflow.com/a/60474891/2744870 (it's an answer to this question).

It allowed the web interface to work, but not sync.

I added the config I previously used with docker seafile server 6.2.5 and now it syncs.

The config is at: https://github.com/dgleba/proxy457/blob/master/vol/nginx/vhost.d/s.dg.gleba.com

Here it is as well:

#
# for seafile.  ./vol/nginx/vhost.d/s.dg.gleba.com
#
  location /seafhttp {
      rewrite ^/seafhttp(.*)$ $1 break;
      proxy_pass http://seafile3:8082;
      client_max_body_size 0;
      proxy_set_header   X-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_connect_timeout  36000s;
      proxy_read_timeout  36000s;
      proxy_send_timeout  36000s;
      send_timeout  36000s;
  }

The repo below is what I am using to run seafile-mc 7.0.5; see the seafile3 service in https://github.com/dgleba/proxy457/blob/master/docker-compose.yml

I am not sure whether what I did in ./vol/nginx/vhost.d/s.dg.gleba.com is good practice.

Further investigation may be needed, but the web UI, sync client, and uploads appear to be working right now.

Update:

I had a typo in SERVICE_URL and FILE_SERVER_ROOT under System Admin in the web UI, which prevented file uploads in the web UI and camera uploads, but that is working now. I think this is all working.
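
For reference, with the domain from this question those two values would typically end up looking like the following (a sketch only, using the question's hostname; set them under System Admin in the web UI or in the corresponding config files):

SERVICE_URL:      https://seafile.domain.com
FILE_SERVER_ROOT: https://seafile.domain.com/seafhttp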

Update 2:

I am now getting the error "Please check the network" in the Seahub web UI. It happens randomly. I may have something misconfigured here.

David Gleba

I figured out how to add a block to the bottom of the autogenerated nginx config.

The key pieces are below. I can post a complete example if there is interest.

The last two volume entries mount my edited nginx2.tmpl and the directory containing the override stanzas it includes.

# This is part of docker-compose.yml
  jproxy:
  # docker-compose pull nproxy to update...
    image: jwilder/nginx-proxy:alpine
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - /var/run/docker.sock:/tmp/docker.sock:ro
      - ./vol/nginx/certs:/etc/nginx/certs:ro
      - ./vol/nginx/html:/usr/share/nginx/html
      - ./vol/nginx/vhost.d:/etc/nginx/vhost.d:ro
      # - ./vol/nginx/conf.d:/etc/nginx/conf.d/
      - ./vol/nginx/conf.d/my_proxy.conf:/etc/nginx/conf.d/my_proxy.conf:ro
      - ./nginx2.tmpl:/app/nginx.tmpl
      - ./vol/nginx/jwilder-nginx-override:/etc/nginx/jwilder-nginx-override
    labels:
      com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy: "true"
    restart: always

I added a line to the bottom of the standard jwilder/nginx-proxy nginx.tmpl and saved the result as nginx2.tmpl:

# added to bottom of `nginx.tmpl`
include /etc/nginx/jwilder-nginx-override/*.conf;

So any .conf file in /etc/nginx/jwilder-nginx-override/ will get added verbatim to the generated config file.

This way you get complete control over selected hosts: you supply the entire config for each such host.

The following config is added for this Seafile host:


# 82 for seafile4 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

  # sf4.example.com
  # commented out..  upstream sf4.example.com {..}

  upstream sf4.example.com {
            # proxy457d_seafile4_1
           server 172.27.0.30:14191;
            # server 172.27.0.30:8000;
            # server 172.27.0.30:8082;
            # server 172.21.0.20:80;
  }

  server {
    server_name sf4.example.com;
    listen 80 ;

    access_log /var/log/nginx/access.log vhost;
    # Do not HTTPS redirect LetsEncrypt ACME challenge
    location /.well-known/acme-challenge/ {
      auth_basic off;
      allow all;
      root /usr/share/nginx/html;
      try_files $uri =404;
      break;
    }

    #rewrite ^ https://$http_host$request_uri? permanent; # force redirect http to https

    location / {
      return 301 https://$host$request_uri;
    }

    server_tokens off; 
  }
  server {
    server_name sf4.example.com;
    listen 443 ssl;
    # deprecated: ssl on;
    access_log /var/log/nginx/access.log vhost;
    ssl_session_timeout 5m;
    ssl_session_cache shared:SSL:50m;
    ssl_session_tickets off;
    ssl_certificate /etc/nginx/certs/sf4.example.com.crt;
    ssl_certificate_key /etc/nginx/certs/sf4.example.com.key;
    ssl_dhparam /etc/nginx/certs/sf4.example.com.dhparam.pem;
    ssl_stapling on;
    ssl_stapling_verify on;
    ssl_trusted_certificate /etc/nginx/certs/sf4.example.com.chain.pem;

    # secure settings (A+ at SSL Labs ssltest at time of writing)
    # see https://wiki.mozilla.org/Security/Server_Side_TLS#Nginx
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-CAMELLIA256-SHA:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-SEED-SHA:DHE-RSA-CAMELLIA128-SHA:HIGH:!aNULL:!eNULL:!LOW:!3DES:!MD5:!EXP:!PSK:!SRP:!DSS';
    ssl_prefer_server_ciphers on;
    proxy_set_header X-Forwarded-For $remote_addr;
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains";
    server_tokens off;    

    location / {
    proxy_pass http://sf4.example.com;
    proxy_set_header   Host $host;
    proxy_set_header   X-Real-IP $remote_addr;
    proxy_set_header   X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header   X-Forwarded-Host $server_name;
    proxy_set_header   X-Forwarded-Proto https;
    proxy_read_timeout 36001s;
    client_max_body_size 0;
    }
    location /seafhttp {
        rewrite ^/seafhttp(.*)$ $1 break;
        # proxy_pass http://172.27.0.30:8082;
        # proxy_pass http://sf4.example.com:8082;
        proxy_pass http://seafile4:8082;
        client_max_body_size 0;
        proxy_set_header   X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_read_timeout 36001s;
        proxy_connect_timeout  36000s;
        proxy_send_timeout  36000s;
        send_timeout  36000s;
    }
    #location /media {
    #    root /home/user/haiwen/seafile-server-latest/seahub;
    #}       
  }
# 82 for seafile4 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

David Gleba

As of Seafile 10.0.0, the solutions originally posted in https://github.com/haiwen/seafile-docker/issues/91 still work.

The required process is:

  1. Copy the contents of /templates/seafile.nginx.conf.template from a running Seafile container (or you can get /templates/seafile.nginx.conf.template from GitHub)
  2. Paste the contents from step 1 into a new file alongside your docker-compose.yml or compose.yml file, with a few changes I will describe below, and
  3. Mount that file inside a new Seafile container.

The key changes that need to be made in /templates/seafile.nginx.conf.template are:

   location / {
        proxy_pass http://127.0.0.1:8000/;
        proxy_read_timeout 310s;
        proxy_set_header Host $host;
-       proxy_set_header Forwarded "for=$remote_addr;proto=$scheme";
+       proxy_set_header Forwarded "for=$proxy_add_x_forwarded_for;proto=$http_x_forwarded_proto";
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-       proxy_set_header X-Forwarded-Proto $scheme;
+       proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
-       proxy_set_header X-Real-IP $remote_addr;
+       proxy_set_header X-Real-IP $proxy_add_x_forwarded_for;
        proxy_set_header Connection "";
        proxy_http_version 1.1;
        client_max_body_size 0;
        access_log      /var/log/nginx/seahub.access.log seafileformat;
        error_log       /var/log/nginx/seahub.error.log;
    }

Assuming you have a seafile.nginx.conf.template file in the same directory as your compose.yml file, you can mount seafile.nginx.conf.template by adding something like this to your compose.yml file:

    volumes:
      - ./seafile.nginx.conf.template:/templates/seafile.nginx.conf.template:ro

If you'd like to see how I have the complete file set up, you are welcome to see my Compose Seafile repository: https://github.com/tdworz/compose-seafile

Important: Please know that if you are struggling with this problem, you have likely run docker compose up already. Doing so will have generated the actual configuration file Nginx uses (/shared/nginx/conf/seafile.nginx.conf), which is rendered from the template you are editing (/templates/seafile.nginx.conf.template). If the new containers created by docker compose up use existing volumes (the default behavior), a new Nginx configuration file will not be generated. You can delete an existing volume using docker compose rm, or you can delete the contents of a mounted directory.
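
As a concrete sketch of that last step (assuming /shared is mounted from ./seafile-data on the host, as in the compose files above, so the generated file lives at ./seafile-data/nginx/conf/seafile.nginx.conf):

docker compose down
# remove the previously generated config so it gets re-rendered from the edited template
rm ./seafile-data/nginx/conf/seafile.nginx.conf
docker compose up -d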