Nginx Learning (II): The Configuration File

# User and group for the worker processes; you can also run them as the root user
#user  nobody;
# Usually set to match the number of CPU cores on the server
worker_processes  1;

#error_log  logs/error.log;
#error_log  logs/error.log  notice;
#error_log  logs/error.log  info;
# Path where the nginx process ID (PID) is saved
#pid        logs/nginx.pid;


events {
    worker_connections  1024;
}


http {
    include       mime.types;
    default_type  application/octet-stream;

    # The "main" log format records the remote address, time, request, browser (user agent) and other information
    #log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
    #                  '$status $body_bytes_sent "$http_referer" '
    #                  '"$http_user_agent" "$http_x_forwarded_for"';

    # Global access log path; it can also be configured separately inside each server block
    #access_log  logs/access.log  main;

    sendfile        on;
    #tcp_nopush     on;

    #keepalive_timeout  0;
    keepalive_timeout  65;

    #gzip  on;

    server {
        # Listening port
        listen       80;
        # Server name
        server_name  localhost;

        #charset koi8-r;

        #access_log  logs/host.access.log  main;
        # Location matching rules: intercept requests for this path and decide where they go
        location / {
            root   html;
            index  index.html index.htm;
        }

        #error_page  404              /404.html;

        # redirect server error pages to the static page /50x.html
        #
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }

        # proxy the PHP scripts to Apache listening on 127.0.0.1:80
        #
        #location ~ \.php$ {
        #    proxy_pass   http://127.0.0.1;
        #}

        # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
        #
        #location ~ \.php$ {
        #    root           html;
        #    fastcgi_pass   127.0.0.1:9000;
        #    fastcgi_index  index.php;
        #    fastcgi_param  SCRIPT_FILENAME  /scripts$fastcgi_script_name;
        #    include        fastcgi_params;
        #}

        # deny access to .htaccess files, if Apache's document root
        # concurs with nginx's one
        #
        #location ~ /\.ht {
        #    deny  all;
        #}
    }


    # another virtual host using mix of IP-, name-, and port-based configuration
    #
    #server {
    #    listen       8000;
    #    listen       somename:8080;
    #    server_name  somename  alias  another.alias;

    #    location / {
    #        root   html;
    #        index  index.html index.htm;
    #    }
    #}


    # HTTPS server
    #
    #server {
    #    listen       443 ssl;
    #    server_name  localhost;

    #    ssl_certificate      cert.pem;
    #    ssl_certificate_key  cert.key;

    #    ssl_session_cache    shared:SSL:1m;
    #    ssl_session_timeout  5m;

    #    ssl_ciphers  HIGH:!aNULL:!MD5;
    #    ssl_prefer_server_ciphers  on;

    #    location / {
    #        root   html;
    #        index  index.html index.htm;
    #    }
    #}

}

Log Splitting

In a real application we often need to analyze the logs. If everything is written to a single file, analysis is inconvenient, so we need to split the logs.

#user  nobody;
worker_processes  1;

#error_log  logs/error.log;
#error_log  logs/error.log  notice;
#error_log  logs/error.log  info;

#pid        logs/nginx.pid;


events {
    worker_connections  1024;
}


http {
    include       mime.types;
    default_type  application/octet-stream;

    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

    #access_log  logs/access.log  main;

    sendfile        on;

    keepalive_timeout  65;

    server {
        listen       80;
        server_name  localhost;
        # Capture date and time components from $time_iso8601 into variables
        if ($time_iso8601 ~ "^(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})") {
            set $year $1;
            set $month $2;
            set $day $3;
            set $hour $4;
            set $minutes $5;
            set $seconds $6;
        }

        access_log  logs/$year-$month-$day-$hour-$minutes-$seconds-access.log  main;

        location / {
            root   html;
            index  index.html index.htm;
        }

        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }

    }
}

For testing, the configuration above generates one log file per second; you can change it to split per day (or per hour) as needed. Note that the logs folder must be writable by the nginx worker user, otherwise nginx may report a permission error when it tries to create the log files.
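
For example, a sketch of a per-day split that reuses only the $year, $month and $day variables captured above:

# Per-day split: only the date captures are used in the file name
access_log  logs/$year-$month-$day-access.log  main;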

Reverse Proxy

Before looking at reverse proxies, we first need to understand what a forward proxy is.

A forward proxy acts on behalf of the client. The proxy server sits between the client and the origin server: the client sends its request to the proxy server, and the proxy server forwards that request to the origin server.

A reverse proxy acts on behalf of the server and is typically used for distributed deployment: the client sends a request, nginx receives it, and then distributes it to the back-end servers that handle the business logic, according to certain rules.

A reverse proxy is generally configured inside the server {} block.
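
As a minimal sketch (the server name and the back-end address 127.0.0.1:8080 are placeholders for illustration), a basic reverse proxy looks like this:

server {
    listen 80;
    server_name proxy.example.local;   # placeholder server name

    location / {
        # forward every request to the assumed back-end service
        proxy_pass http://127.0.0.1:8080;
    }
}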

location

location = pattern {}  exact match

location /pattern {}   general (prefix) match

location ~ pattern {}  regular-expression match
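
As a minimal sketch (the paths are hypothetical), the three forms could be combined in one server block like this:

server {
    listen 80;
    server_name localhost;

    # exact match: only the URI /ping matches
    location = /ping {
        return 200 "pong";
    }

    # general (prefix) match: /static and anything under it
    location /static {
        root html;
    }

    # regular-expression match: any URI ending in .jpg or .png
    location ~ \.(jpg|png)$ {
        root html;
    }
}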

Example: rewrite

server {
    listen 1234;
    server_name 10.32.16.195;
    location /goods {
        # if the requested address matches the regular expression, rewrite it
        rewrite "goods-(\d{1,5})\.html" /goods-ctrl.html;
        root html;
        index test.html;
    }
}

Example: proxy

location /springboot {
    # pass the client's real IP to the back end in a header
    proxy_set_header X-Real-IP $remote_addr;
    proxy_pass http://10.32.16.179:8089;
}

Example: static resource configuration

location ~ .*\.(js|css|jpg|jpeg|gif|png|ico|pdf|txt)$ {
  proxy_pass http://10.32.16.179:8089;
}  
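
The example above proxies static resources to a back-end server. As an alternative sketch (the /data/static directory and the caching time are assumptions), nginx can also serve static files directly from local disk:

location ~ .*\.(js|css|jpg|jpeg|gif|png|ico|pdf|txt)$ {
    root /data/static;   # assumed local directory holding the static files
    expires 7d;          # let browsers cache static resources for 7 days
}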

Load Balancing

Three strategies: round robin (the default), weight, and ip_hash.

Round robin:

nginx sends requests to the back-end servers in turn.

upstream tomcatserver1 {
    server 192.168.72.49:8080;
    server 192.168.72.49:8081;
}

server {
    listen       80;
    server_name  8080.max.com;
    #charset koi8-r;
    #access_log  logs/host.access.log  main;
    location / {
        proxy_pass   http://tomcatserver1;
        index  index.html index.htm;
    }
}

Weight:

down: this server does not participate in load balancing

weight: the larger the weight, the larger the share of the load

max_fails: maximum number of allowed request failures; once it is exceeded, the error defined by proxy_next_upstream is returned

fail_timeout: how long the server is paused after max_fails has been exceeded

backup: requests are sent to this machine only when all non-backup machines are busy or down, so it is under the least pressure

upstream myServer {
    server 192.168.72.49:9090 down;
    server 192.168.72.49:8080 weight=2 max_fails=2 fail_timeout=30;
    server 192.168.72.49:7070 backup;
}
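
For completeness, a sketch of the server block that would route traffic through this weighted upstream (the server name is a placeholder):

server {
    listen 80;
    server_name weight.max.com;   # placeholder name for illustration

    location / {
        proxy_pass http://myServer;   # requests are balanced by the upstream above
    }
}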

ip_hash

Each request is distributed according to the hash of the client IP, so every visitor is always sent to the same back-end service. This solves the session-sharing problem, but strictly speaking it is not real load balancing, and if one Tomcat goes down, all users pinned to that server will have to sign in again.

Most projects now use JWT to avoid the session-sharing problem, so the weight mode is usually the better choice.

upstream tomcatserver1 {
    ip_hash;
    server 192.168.72.49:8080;
    server 192.168.72.49:8081;
}

 


Source: www.cnblogs.com/Unlimited-Blade-Works/p/12599694.html