Configuring nginx for dynamic/static content separation and load balancing

First, the installation of nginx

 

     Local environment: centos-x86 6.5 nginx-1.10.2

1. Install the pcre library
yum install -y make zlib-devel openssl-devel pcre-devel

ubuntu system:
apt-get update
apt-get install libpcre3 libpcre3-dev

apt-get install zlib1g-dev

apt-get install openssl

2. Install GCC and GCC-C++
yum install gcc
yum install -y gcc gcc-c++

ubuntu system:
apt-get install build-essential
apt-get install libtool

3. Install ngx_cache_purge-2.3
Official website: http://labs.frickle.com/nginx_ngx_cache_purge/
File name: ngx_cache_purge-2.3.tar.gz. Extract it with: tar zxvf ngx_cache_purge-2.3.tar.gz
Put the decompressed folder in the same directory as the nginx decompressed file

3. Install nginx (--prefix specifies the nginx installation path; --with-* enables optional built-in
modules and --add-module adds third-party ones. The ngx_cache_purge module clears the cache for a specified URL)

tar zxvf nginx-1.10.2.tar.gz
cd nginx-1.10.2
./configure --prefix=/usr/local/nginx --with-http_ssl_module --with-http_gzip_static_module --with-http_stub_status_module --add-module=../ngx_cache_purge-2.3
make && make install


4. Write a startup script
[root@localhost ~]# vi /etc/init.d/nginx
#!/bin/bash
# chkconfig: 345 99 20
# description: Nginx service control script
#
# SysV init script for the locally-built nginx in /usr/local/nginx.
# Usage: service nginx {start|stop|restart|reload}
PROG="/usr/local/nginx/sbin/nginx"
PIDF="/usr/local/nginx/logs/nginx.pid"

case "$1" in
start)
    "$PROG"
    echo "Nginx service start success."
    ;;
stop)
    # Guard against a missing pid file so kill never receives an empty argument.
    if [ -f "$PIDF" ]; then
        kill -s QUIT "$(cat "$PIDF")"   # QUIT = graceful shutdown
        echo "Nginx service stop success."
    else
        echo "Nginx is not running (no pid file at $PIDF)." >&2
        exit 1
    fi
    ;;
restart)
    "$0" stop
    "$0" start
    ;;
reload)
    if [ -f "$PIDF" ]; then
        kill -s HUP "$(cat "$PIDF")"    # HUP = reload configuration
        echo "Reload Nginx config success."
    else
        echo "Nginx is not running (no pid file at $PIDF)." >&2
        exit 1
    fi
    ;;
*)
    # FIX: original printed "Usage: $0{start..." (missing space); also the
    # reload branch had `echo"..."` with no space, which is a command-not-found bug.
    echo "Usage: $0 {start|stop|restart|reload}" >&2
    exit 1
    ;;
esac
exit 0


5. Start
[root@localhost ~]# chmod +x /etc/init.d/nginx
[root@localhost ~]# service nginx restart
[root@localhost ~]# chkconfig --add nginx
[root@localhost ~]# chkconfig nginx on

 

 

Second, modify the configuration file

 

   

# Number of worker processes; typically set to the number of CPU cores.
worker_processes 4;

#error_log logs/error.log info;

events {
    # Use the epoll event model (efficient on Linux).
    use epoll;

    # Max connections per worker (total capacity = worker_connections * worker_processes).
    # NOTE(review): 65535 likely exceeds the default open-file limit; confirm
    # "ulimit -n" / worker_rlimit_nofile on the host before relying on this.
    worker_connections 65535;
}


http {

    # Map of file extensions to MIME types.
    include mime.types;

    # Fallback MIME type for extensions not listed in mime.types.
    default_type application/octet-stream;

    # Use the kernel sendfile() call for efficient static file transfer.
    # For heavy disk-I/O workloads (e.g. large downloads) it can be set to off.
    # Note: if images do not display properly, try setting this to off.
    sendfile on;

    # Keep-alive timeout for client connections, in seconds.
    keepalive_timeout 65;

    # gzip: compress responses before sending to reduce transfer size.
    gzip on;
    gzip_min_length 1k;     # do not compress responses smaller than 1K
    gzip_buffers 16 64K;
    gzip_http_version 1.1;
    gzip_comp_level 6;
    # FIX: the original type list was garbled ("application/x-", bare "application/"
    # are not valid MIME types); reconstructed with the usual text/script/markup types.
    gzip_types text/plain text/css text/xml application/xml application/json application/javascript application/x-javascript;
    gzip_vary on;

    # Forward the original host and the client's real IP to the backend servers.
    proxy_set_header Host $host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Real-IP $remote_addr;

    # Proxy buffering and timeout tuning.
    client_body_buffer_size 512k;
    proxy_connect_timeout 5;
    proxy_read_timeout 60;
    proxy_send_timeout 5;
    proxy_buffer_size 16k;
    proxy_buffers 4 64k;
    proxy_busy_buffers_size 128k;
    proxy_temp_file_write_size 128k;


    ###############################################
    # Temporary directory for cache writes. If you point this at the in-memory
    # directory /dev/shm, adjust its permissions first (it is root-owned by default).
    proxy_temp_path /mydata/proxy_temp_path;

    # Cache zone "cache_one": 256m of in-memory key storage, entries not
    # accessed for 7 days are evicted, and the on-disk cache is capped at 80 GB.
    # (FIX: the original comment said 40GB, contradicting max_size=80g.)
    proxy_cache_path /mydata/proxy_cache_path levels=1:2 keys_zone=cache_one:256m inactive=7d max_size=80g;

    # Ignore backend cache-control response headers so nginx's own
    # proxy_cache_valid rules decide what gets cached and for how long.
    proxy_ignore_headers X-Accel-Expires Expires Cache-Control Set-Cookie;


    ###############################################

    #Dynamic (application) resource cluster
    upstream dynamic {
        ip_hash;# session stickiness: the same client IP always hits the same backend (NOT a cross-domain fix, as the original comment claimed)
        server 127.0.0.1:8080 weight=1;
    }


    #Static resource cluster (generally served by nginx, which handles static
    #resources well; with limited servers it can live on the proxy machine itself)
    upstream static {
        ip_hash;# session stickiness: the same client IP always hits the same backend
        server 127.0.0.1:8081 weight=1;
    }
	
	#Back-office (admin) resource cluster
	upstream admin {
		ip_hash;# session stickiness: the same client IP always hits the same backend
        server 127.0.0.1:9070 weight=1;
	}


    ################################################ Nginx web proxy ################################################
    server {

        listen 80;
        # Intercept backend error responses so error_page/location handling applies.
        proxy_intercept_errors on;
        server_name web;

        # NOTE(review): the regex location "~ .*$" below matches every URI and
        # regex locations take precedence, so this prefix location is
        # effectively shadowed for normal requests. Kept for safety.
        location / {
            proxy_pass http://dynamic;
        }

        # Cache purge endpoint: for a cached URL demo/test.gif, requesting
        # /purge/demo/test.gif clears its cache entry.
        location ~ /purge(/.*) {
            # Only these addresses may purge cache entries.
            allow 127.0.0.1;
            allow 183.233.212.166;
            deny all;
            proxy_cache_purge cache_one $host$1$is_args$args;
        }

        # Static assets are routed to the static upstream.
        # FIX: anchored the extension match with "$" so URIs such as
        # /app.js/extra or /x.png.php are no longer treated as static files.
        location ~ .*\.(js|css|ico|jpg|jpeg|png|JPG|JPEG|PNG|eot|svg|ttf|woff)$ {
            proxy_pass http://static;
        }

        # Everything else goes to the dynamic (application) upstream.
        location ~ .*$ {
            proxy_pass http://dynamic;
        }

        location = /error.html {
            # Directory containing the error page.
            root  /mydata/program/tomcat-7.0/webapps/ROOT/error;
        }
    }
	
	################################################ Nginx admin proxy (serves back-office listing images) ################################################
	server {

		listen 9080;
		server_name admin;

		# Cache purge endpoint: for a cached URL demo/test.gif, requesting
		# /purge/demo/test.gif clears its cache entry.
		location ~ /purge(/.*) {
			# Only these addresses may purge cache entries.
			allow 127.0.0.1;
			allow 183.233.212.166;
			deny all;
			proxy_cache_purge cache_one $host$1$is_args$args;
		}

		# Image location. FIX: the original /upload/... and /ueditor/... blocks
		# were byte-identical; merged into one equivalent regex (the capture
		# groups are not referenced by any directive). Also anchored the
		# extension match with "$".
		location ~ /(upload|ueditor)(/.*)\.(ico|jpg|jpeg|png|JPG|JPEG|PNG)$ {
			add_header 'Access-Control-Allow-Origin' '*';
			add_header 'Access-Control-Allow-Credentials' 'true';
			add_header 'Access-Control-Allow-Methods' '*';
			# NOTE(review): there is no proxy_pass in this location, so files
			# are served straight from the root below and these proxy_cache*
			# directives have no effect ($upstream_cache_status will be empty).
			# Kept as in the original — confirm whether a proxy_pass was intended.
			proxy_cache cache_one;
			proxy_cache_key $host$uri$is_args$args;
			proxy_cache_valid 200 304 12h;
			proxy_ignore_headers X-Accel-Expires Expires Cache-Control Set-Cookie;
			proxy_hide_header Cache-Control;
			proxy_hide_header Set-Cookie;
			proxy_set_header Host  $host;
			proxy_set_header X-Forwarded-For  $remote_addr;
			# Expose cache status for debugging (comment out in production).
			add_header X-Cache '$upstream_cache_status from $host';
			root /mydata/mz_images/;
			expires 15d;    # browser cache: 15 days
		}

		# Everything else is proxied to the admin application cluster.
		location ~ .*$ {
			add_header 'Access-Control-Allow-Origin' '*';
			add_header 'Access-Control-Allow-Credentials' 'true';
			add_header 'Access-Control-Allow-Methods' '*';
			proxy_pass http://admin;
			client_max_body_size   10m;
		}
	}


    # Static resource server listening on local port 8081. The static server and
    # the proxy run on the same machine, hence this extra server block; a
    # dedicated machine could instead be listed directly in the upstream.
    # (FIX: corrected "upstram" typo in the original comment.)
    server{
        listen 8081;
        server_name static;

        # FIX: anchored the extension match with "$" so only URIs that end
        # with one of these extensions are handled here.
        location ~ .*\.(js|css|ico|jpg|jpeg|png|JPG|JPEG|PNG|eot|svg|ttf|woff)$ {
            proxy_cache cache_one;                  # cache zone for static assets
            proxy_cache_key $host$uri$is_args$args; # key = host + URI + query string
            proxy_cache_valid 200 304 12h;          # cache 200/304 responses for 12h
            proxy_ignore_headers X-Accel-Expires Expires Cache-Control Set-Cookie;
            proxy_hide_header Cache-Control;
            proxy_hide_header Set-Cookie;
            proxy_set_header Host  $host;
            proxy_set_header X-Forwarded-For  $remote_addr;
            # Expose cache status for debugging (comment out in production).
            add_header X-Cache '$upstream_cache_status from $host';

            # Static files could be read straight from disk instead:
            #root /app/tomcat7/webapps/ROOT;
            # As configured, static content is fetched from the backend
            # application upstream and cached here on first access.
            proxy_pass http://dynamic;
            expires 15d;    # browser cache: 15 days
        }

    }
}

 

 

Guess you like

Origin http://10.200.1.11:23101/article/api/json?id=326943195&siteId=291194637