Nginx simple request distribution with load balancing ---- distributing requests to multiple machines

 

Requirement:

Distribute requests to http://10.3.10.99:8000 across:
http://10.3.10.22:8089/time_discern
http://10.3.10.99:8089/time_discern

 

 

• Nginx here is effectively the front end of a small cluster, composed of: virtual host + reverse proxy + upstream distribution module
  virtual host: accepts and responds to user requests
  reverse proxy: fetches data from a data server on the user's behalf
  upstream: tells Nginx which data server to fetch the data from
• Data flow:
  1) The virtual host accepts the user's request
  2) The virtual host hands the request to the reverse proxy
  3) The reverse proxy consults upstream
  4) upstream returns the IP of one data server
  5) Nginx initiates a request to that data server on the user's behalf
  6) The data server accepts and processes the request
  7) The data server returns the response data to Nginx
  8) Nginx delivers the response to the user
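
Put together, these three pieces map onto a short configuration. Below is a minimal sketch for the requirement above, assuming the time_discern service is already listening on port 8089 of both machines (the upstream name env is the one used in the full configuration later):

events {}

http {
    upstream env {                        # upstream: the pool of data servers
        server 10.3.10.22:8089;
        server 10.3.10.99:8089;
    }

    server {                              # virtual host: accepts user requests
        listen      8000;
        server_name 10.3.10.99;

        location / {                      # reverse proxy: fetches data for the user
            proxy_pass http://env/time_discern;
        }
    }
}

With this in place, requests to http://10.3.10.99:8000 are forwarded to the two back ends in turn.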

 

 

 

Part 2: the configuration file
It contains three main parts:
(1) Global block: directives that configure the server as a whole.
    For example, worker_processes 1; sets the number of worker processes handling requests concurrently.
(2) events block: affects the network connections between the Nginx server and users.
    For example, worker_connections 1024; sets the maximum number of supported connections to 1024.
(3) http block
           It in turn contains two parts:
                the http global block
                server blocks
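
As a sketch, the skeleton below shows where each of the three parts sits, using the example directives mentioned above:

# (1) global block: configures the server as a whole
worker_processes  1;

# (2) events block: network connections between the server and users
events {
    worker_connections  1024;
}

# (3) http block: an http global block plus one or more server blocks
http {
    # http global block (include, default_type, logging, ...)
    server {
        # server block (listen, server_name, location, ...)
    }
}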

 

nginx.conf

#user  nobody;
worker_processes  1;

#error_log  logs/error.log;
#error_log  logs/error.log  notice;
#error_log  logs/error.log  info;

#pid        logs/nginx.pid;


events {
    worker_connections  1024;
}


http {
    include       mime.types;
    default_type  application/octet-stream;

    #log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
    #                  '$status $body_bytes_sent "$http_referer" '
    #                  '"$http_user_agent" "$http_x_forwarded_for"';

    #access_log  logs/access.log  main;

    sendfile        on;
    #tcp_nopush     on;

    #keepalive_timeout  0;
    keepalive_timeout  65;
    proxy_redirect off;

    #gzip  on;

    upstream env {
        # ip_hash;
        # Weighted round robin: weight is proportional to each server's share of
        # requests; useful when back-end server performance is uneven.
        server 10.3.10.22:8089 weight=10;
        server 10.3.10.99:8089 weight=10;
    }

    server {
        listen       8000;
        server_name  10.3.10.99;

        #charset koi8-r;

        #access_log  logs/host.access.log  main;

        #location / {
        #    proxy_pass http://env/time_discern;
        #    root   html;
        #    index  index.html index.htm;
        #}

        location / {
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_pass http://env/time_discern;
        }

        location /time_discern/ {
            proxy_redirect off;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_pass http://env;
        }

        #error_page  404              /404.html;

        # redirect server error pages to the static page /50x.html
        #
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }

        # proxy the PHP scripts to Apache listening on 127.0.0.1:80
        #
        #location ~ \.php$ {
        #    proxy_pass   http://127.0.0.1;
        #}

        # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
        #
        #location ~ \.php$ {
        #    root           html;
        #    fastcgi_pass   127.0.0.1:9000;
        #    fastcgi_index  index.php;
        #    fastcgi_param  SCRIPT_FILENAME  /scripts$fastcgi_script_name;
        #    include        fastcgi_params;
        #}

        # deny access to .htaccess files, if Apache's document root
        # concurs with nginx's one
        #
        #location ~ /\.ht {
        #    deny  all;
        #}
    }

    # another virtual host using mix of IP-, name-, and port-based configuration
    #
    #server {
    #    listen       8000;
    #    listen       somename:8080;
    #    server_name  somename  alias  another.alias;
    #    location / {
    #        root   html;
    #        index  index.html index.htm;
    #    }
    #}

    # HTTPS server
    #
    #server {
    #    listen       443 ssl;
    #    server_name  localhost;
    #
    #    ssl_certificate      cert.pem;
    #    ssl_certificate_key  cert.key;
    #
    #    ssl_session_cache    shared:SSL:1m;
    #    ssl_session_timeout  5m;
    #
    #    ssl_ciphers  HIGH:!aNULL:!MD5;
    #    ssl_prefer_server_ciphers  on;
    #
    #    location / {
    #        root   html;
    #        index  index.html index.htm;
    #    }
    #}
}
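
After editing nginx.conf, check the syntax with nginx -t and apply it with nginx -s reload; the reload picks up the new configuration without dropping established connections.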

 

 

##### Detailed annotated Nginx configuration file nginx.conf #####

# Define the user and group that run the Nginx worker processes
user www www; 

# Number of nginx worker processes; recommended to equal the total number of CPU cores.
worker_processes 8;
 
# Global error log and level: [debug | info | notice | warn | error | crit]
error_log /usr/local/nginx/logs/error.log info; 

# Process PID file
pid /usr/local/nginx/logs/nginx.pid;

# Maximum number of file descriptors an nginx process may open.
# Theoretical value: the maximum number of open files (ulimit -n) divided by the
# number of nginx processes. However, nginx does not distribute requests that
# evenly, so it is best to keep the value in line with ulimit -n.
# On the Linux 2.6 kernel the open-file limit is 65535, so worker_rlimit_nofile
# should likewise be set to 65535.
# If you set it to 10240 instead, then once total concurrency reaches
# 30,000-40,000 a process may need more than 10240 descriptors, and nginx starts
# returning 502 errors.
worker_rlimit_nofile 65535;


events
{
    # Event model. Options: [kqueue | rtsig | epoll | /dev/poll | select | poll]
    # epoll is the high-performance network I/O model of Linux kernels 2.6 and
    # later; use epoll on Linux and kqueue when running on FreeBSD.
    # Additional notes:
    # Like Apache, nginx offers different event models for different operating
    # systems.
    # A) Standard event models
    #    select and poll are the standard models; nginx picks select or poll when
    #    the current system offers nothing more efficient.
    # B) Efficient event models
    #    kqueue: FreeBSD 4.1+, OpenBSD 2.9+, NetBSD 2.0 and MacOS X. On
    #    dual-processor MacOS X systems, kqueue may cause a kernel panic.
    #    epoll: Linux kernel version 2.6 and later.
    #    /dev/poll: Solaris 7 11/99+, HP/UX 11.22+ (eventport), IRIX 6.5.15+ and
    #    Tru64 UNIX 5.1A+.
    #    eventport: Solaris 10. Security patches are needed to prevent kernel
    #    panics.
    use epoll;

    # Maximum number of connections per worker process
    # (maximum total connections = connections per process * number of processes).
    # Tune it to the hardware together with the worker process count: as large as
    # possible, but without driving the CPU to 100%. In theory it is the maximum
    # number of connections each nginx process allows per server.
    worker_connections 65535;

    # Keepalive timeout, in seconds.
    # (Note: keepalive_timeout and the buffer/cache directives that follow are
    # http-context directives; nginx only accepts them in the http block, not in
    # events.)
    keepalive_timeout 60;

    # Buffer size for client request headers. It can be set according to the
    # system page size: a request header normally does not exceed 1k, but since
    # the system page size is generally larger than 1k, the page size is used
    # here.
    # The page size can be obtained with the command getconf PAGESIZE.
    # [root@web001 ~]# getconf PAGESIZE
    # 4096
    # client_header_buffer_size may exceed 4k, but the value must be an integral
    # multiple of the system page size.
    client_header_buffer_size 4k;

    # Enables the open-file cache; it is off by default. max sets the number of
    # cache entries (recommended to match the maximum number of open files);
    # inactive is how long a file may go unrequested before its entry is removed.
    open_file_cache max=65535 inactive=60s;

    # How often to check the validity of cached entries.
    # Syntax: open_file_cache_valid time  Default: open_file_cache_valid 60s
    # Context: http, server, location
    open_file_cache_valid 80s;

    # Minimum number of uses of a file within the inactive window of the
    # open_file_cache directive; if a file is used fewer times than this during
    # that period, its descriptor is removed from the cache. In the example
    # above, a file not used even once within the inactive period is removed.
    # With a larger value, file descriptors in the cache stay open.
    # Syntax: open_file_cache_min_uses number  Default: open_file_cache_min_uses 1
    # Context: http, server, location
    open_file_cache_min_uses 1;
    
    # Whether to cache errors encountered while looking up a file.
    # Syntax: open_file_cache_errors on | off  Default: open_file_cache_errors off
    # Context: http, server, location
    open_file_cache_errors on;
} 
 
 
 
# HTTP server settings; the reverse proxy feature provides load-balancing support
http 
{ 
    # file extension and file type map 
    include mime.types;

    # Default file type 
    default_type application/octet-stream;

    # default encoding  
    #charset utf-8;

    # Server-name hash table size.
    # The hash tables that hold server names are controlled by the directives
    # server_names_hash_max_size and server_names_hash_bucket_size. The bucket
    # size is always aligned to a multiple of the processor cache line size,
    # which makes key lookups faster by reducing the number of memory accesses.
    # If the bucket size equals one cache line, the worst-case lookup touches
    # memory twice: once to find the bucket address, once to find the key inside
    # it. Therefore, when nginx asks you to increase hash max size or hash bucket
    # size, increase the former first.
    server_names_hash_bucket_size 128;

    # Buffer size for client request headers. It can be set according to the
    # system page size: a request header normally does not exceed 1k, but since
    # the system page size is generally larger than 1k, the page size is used.
    # The page size can be obtained with getconf PAGESIZE.
    client_header_buffer_size 32k;

    # Buffers for large client request headers. By default nginx reads header
    # values into the client_header_buffer_size buffer; if a header is too large,
    # it falls back to large_client_header_buffers.
    large_client_header_buffers 4 64k;

    # Maximum upload (request body) size accepted by nginx
    client_max_body_size 8m;

    # Enables efficient file transfer. The sendfile directive controls whether
    # nginx calls the sendfile function (zero copy) to output files; set it to on
    # for typical applications. For heavy disk-I/O download workloads it may be
    # set to off, to balance disk and network I/O and reduce system load.
    # Note: if images do not display correctly, set this to off.
    sendfile on;

    # Enables directory listings; appropriate for a download server. Off by default.
    autoindex on;

    # Enables or disables the TCP_CORK socket option; only used together with sendfile
    tcp_nopush on;
     
    tcp_nodelay on;

    # Keepalive connection timeout, in seconds
    keepalive_timeout 120; 

    # FastCGI parameters, meant to improve site performance: reduce resource
    # consumption and increase response speed. The parameter names below can be
    # understood literally.
    fastcgi_connect_timeout 300;
    fastcgi_send_timeout 300;
    fastcgi_read_timeout 300;
    fastcgi_buffer_size 64k;
    fastcgi_buffers 4 64k;
    fastcgi_busy_buffers_size 128k;
    fastcgi_temp_file_write_size 128k;

    # gzip module settings
    gzip on;                # enable gzip-compressed output
    gzip_min_length 1k;     # minimum file size to compress
    gzip_buffers 4 16k;     # compression buffers
    gzip_http_version 1.0;  # protocol version (default 1.1; use 1.0 behind squid 2.5)
    gzip_comp_level 2;      # compression level
    # MIME types to compress; text/html is always included by default, so it need
    # not be listed (listing it produces a warning).
    gzip_types text/plain application/x-javascript text/css application/xml;
    gzip_vary on;

    # Limit on the number of connections per IP; enable when needed
    #limit_zone crawler $binary_remote_addr 10m;



    # Load balancing configuration
    upstream piao.jd.com {

        server 192.168.80.121:80 weight=3;
        server 192.168.80.122:80 weight=2;
        server 192.168.80.123:80 weight=3;

        # nginx's upstream module currently supports the following distribution
        # methods:
        # 1. Round robin (the default)
        #    Each request is assigned to a different back-end server in turn; if
        #    a back-end server goes down, it is excluded automatically.
        # 2. weight
        #    Round robin with probabilities: weight is proportional to the share
        #    of requests; used when back-end server performance is uneven.
        #    For example:
        #    upstream bakend {
        #        server 192.168.0.14 weight=10;
        #        server 192.168.0.15 weight=10;
        #    }
        # 3. ip_hash
        #    Each request is assigned by a hash of the client IP, so every
        #    visitor reaches a fixed back-end server; this solves the session
        #    problem.
        #    For example:
        #    upstream bakend {
        #        ip_hash;
        #        server 192.168.0.14:88;
        #        server 192.168.0.15:80;
        #    }
        # 4. fair (third party)
        #    Requests are assigned by the back-end server's response time;
        #    shorter response times are served first.
        #    upstream backend {
        #        server server1;
        #        server server2;
        #        fair;
        #    }
        # 5. url_hash (third party)
        #    Requests are assigned by a hash of the requested URL, so each URL is
        #    directed to the same back-end server; this is more effective when
        #    the back ends are caches.
        #    Example: add a hash statement to the upstream block; the server
        #    lines must not carry other parameters such as weight. hash_method
        #    selects the hash algorithm.
        #    upstream backend {
        #        server squid1:3128;
        #        server squid2:3128;
        #        hash $request_uri;
        #        hash_method crc32;
        #    }

        # Tips:
        # Define the load-balanced devices' IPs and states:
        # upstream bakend {
        #     ip_hash;
        #     server 127.0.0.1:9090 down;
        #     server 127.0.0.1:8080 weight=2;
        #     server 127.0.0.1:6060;
        #     server 127.0.0.1:7070 backup;
        # }
        # In the server block that needs load balancing, add:
        #     proxy_pass http://bakend/;

        # The state of each device can be set to:
        # 1. down: this server temporarily takes no part in the load.
        # 2. weight: the larger the weight, the larger the share of the load.
        # 3. max_fails: the number of failed requests allowed, 1 by default. When
        #    it is exceeded, the error defined by the proxy_next_upstream module
        #    is returned.
        # 4. fail_timeout: how long to pause the server after max_fails failures.
        # 5. backup: requests go to a backup machine only when all other
        #    non-backup machines are down or busy, so it carries the lightest
        #    load.

        # nginx supports configuring several upstream groups at the same time,
        # one per server that needs them.
        # client_body_in_file_only: set to on to write the client POST body to a
        #    file, for debugging.
        # client_body_temp_path: the directory for those files; up to three
        #    levels of subdirectories can be configured.
        # location: matches URLs; a match can redirect or hand off to a new
        #    proxy load-balancing group.
    }
     
     
     
    # Virtual host configuration
    server
    {
        # Listening port
        listen 80;

        # There can be multiple domain names, separated by spaces
        server_name www.jd.com jd.com;
        index index.php index.html index.htm;
        root /data/www/jd;

        # Load balancing for ******
        location ~ .*\.(php|php5)?$
        {
            fastcgi_pass 127.0.0.1:9000;
            fastcgi_index index.php;
            include fastcgi.conf;
        } 
         
        # Image cache duration
        location ~ .*\.(gif|jpg|jpeg|png|bmp|swf)$
        {
            expires 10d;
        } 
         
        # JS and CSS cache duration
        location ~ .*\.(js|css)?$
        {
            expires 1h;
        } 
         
        # Log formatting
        # $remote_addr and $http_x_forwarded_for record the client's IP address;
        # $remote_user records the client's user name;
        # $time_local records the access time and time zone;
        # $request records the requested URL and HTTP protocol;
        # $status records the request status (200 on success);
        # $body_bytes_sent records the size of the response body sent to the client;
        # $http_referer records the page the request was linked from;
        # $http_user_agent records information about the client's browser.
        # Define this virtual host's access log
        access_log /usr/local/nginx/logs/host.access.log main;
        # A web server behind a reverse proxy cannot see the client's IP address
        # directly: $remote_addr yields the IP of the reverse proxy server. The
        # reverse proxy adds an x_forwarded_for header to the forwarded request
        # to record the original client's IP and the addresses of the proxies it
        # has passed through.
        log_format access '$remote_addr - $remote_user [$time_local] "$request" '
                          '$status $body_bytes_sent "$http_referer" '
                          '"$http_user_agent" $http_x_forwarded_for';

        access_log /usr/local/nginx/logs/host.access.404.log log404;
         
        # to "/" enable reverse proxy 
        LOCATION / { 
            proxy_pass http://127.0.0.1:88; 
            proxy_redirect OFF; 
            proxy_set_header the X-Real-IP-$ REMOTE_ADDR; 
             
            # back-end Web server through X-Forwarded -For get the user's real IP 
            proxy_set_header the X-Forwarded-the For-$ proxy_add_x_forwarded_for; 
             
            # Here are some reverse proxy configuration, optional. 
            proxy_set_header Host $ host;

            client_max_body_size 10m; 

            # Maximum number of bytes of the client request body buffered by the
            # proxy.
            # If it is set to a reasonably large value, e.g. 256k, then submitting
            # any image smaller than 256k works normally in both IE and Firefox.
            # If the directive is commented out and the default
            # client_body_buffer_size is used (twice the operating system page
            # size, i.e. 8k or 16k), problems appear:
            # with either Firefox 4.0 or IE 8.0, submitting a relatively large
            # image of around 200k returns a 500 Internal Server Error.
            client_body_buffer_size 128k;

            # Makes nginx intercept responses with an HTTP status code of 400 or
            # above.
            proxy_intercept_errors on; 

            # Timeout for connecting to the back-end server: initiating the
            # handshake and waiting for a response
            # (nginx's connection timeout with the back end, i.e. the proxy
            # connection timeout)
            proxy_connect_timeout 90;

            # Back-end server data return time (send timeout): the back-end
            # server must finish sending all data within this time
            proxy_send_timeout 90;

            # Back-end server response time after a successful connection (proxy
            # receive timeout): the request has entered the back end's queue and
            # is waiting to be processed
            proxy_read_timeout 90;

            # Buffer size used by the proxy server (nginx) to hold the user's
            # header information.
            # Sets the buffer size for the first part of the response read from
            # the proxied server; it normally contains a small response header.
            # By default the size equals one buffer of the proxy_buffers
            # directive, but it can be set smaller.
            proxy_buffer_size 4k;

            # proxy_buffers: number and size of buffers for reading the response
            # from the proxied server, set below for an average page size under
            # 32k. The default is one page size, 4k or 8k depending on the
            # operating system.
            proxy_buffers 4 32k;

            # Buffer size under high load (proxy_buffers * 2)
            proxy_busy_buffers_size 64k;

            # Size of each data write to proxy_temp_path; prevents a worker
            # process from blocking too long on a single file transfer.
            # Responses larger than this cache size are streamed from the
            # upstream server instead of being buffered.
            proxy_temp_file_write_size 64k;
        } 
         
         
        # Address for checking Nginx status
        location /NginxStatus {
            stub_status on;
            access_log on;
            auth_basic "NginxStatus";
            auth_basic_user_file confpasswd;
            # The contents of the htpasswd file can be generated with Apache's
            # htpasswd tool.
        }
         
        # Local reverse proxy configuration for dynamic/static separation
        # All .jsp pages are handed to tomcat or resin
        location ~ .(jsp|jspx|do)?$ {
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_pass http://127.0.0.1:8080;
        } 
         
        # All static files are read by nginx directly, without going through
        # tomcat or resin
        location ~ .*\.(htm|html|gif|jpg|jpeg|png|bmp|swf|ioc|rar|zip|txt|flv|mid|doc|ppt|pdf|xls|mp3|wma)$
        {
            expires 15d;
        }
         
        location ~ .*\.(js|css)?$
        {
            expires 1h;
        } 
    } 
} 
##### End of detailed annotated Nginx configuration file nginx.conf #####

 
