Detailed nginx.conf configuration file

 

 

    # Run worker processes as this user and group
    user www www;
    # Number of worker processes, usually set equal to the number of CPU cores
    worker_processes  4;
    worker_cpu_affinity 0001 0010 0100 1000;

    # Alternative for an 8-core machine (keep only one worker_processes / worker_cpu_affinity pair active):
    #worker_processes  8;
    #worker_cpu_affinity 00000001 00000010 00000100 00001000 00010000 00100000 01000000 10000000;

    # Global error log and PID file; log levels: debug | info | notice | warn | error | crit
    error_log  /var/log/nginx/error.log error;
    pid        /var/run/nginx.pid;

    # Maximum number of open file descriptors per worker process.
    # If not set, the operating system limit (ulimit -n) applies.
    worker_rlimit_nofile 51200;

    #Working mode and connection number configuration
    events {
        use epoll; # epoll is an I/O multiplexing method available only on Linux kernels 2.6 and later; it can greatly improve nginx performance
        worker_connections 1024; # Maximum number of concurrent connections per worker process
        multi_accept off; # When off (the default), a worker accepts one new connection at a time; when on, a worker accepts all pending connections at once.
                          # Enabling it can reduce load somewhat when the server handles few connections, but under very high throughput it is usually better left off.

    }
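    # Rough sizing sketch (a rule of thumb, not an exact formula): with the values above,
    # max concurrent clients ~= worker_processes * worker_connections = 4 * 1024 = 4096.
    # When nginx is used as a reverse proxy, each client occupies two connections
    # (client side plus upstream side), so the effective figure is roughly half of that.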

    #Set up the http server and use its reverse proxy function to provide load balancing support
    http {
        include mime.types;      # MIME type mappings are defined in the mime.types file
        default_type  text/html; # Default MIME type for files whose extension is not mapped
        client_max_body_size 8m; # Maximum allowed size of a client request body (e.g. uploads)
		
        lua_max_pending_timers 10240; # (lua-nginx-module / OpenResty) maximum number of pending Lua timers allowed

        sendfile on;       # Whether nginx uses the sendfile() system call (zero copy) to send files. For ordinary web serving set it to on;
                           # for heavy disk-I/O workloads such as large downloads it can be set to off to balance disk and network I/O and reduce system load.
        tcp_nopush on;     # Accumulate packets and send them together (with sendfile), which can improve transmission efficiency
        server_tokens off; # Hide the nginx version number in error pages and the Server response header
        
        if_modified_since exact; # How the If-Modified-Since request header is compared with the file's modification time; "exact" requires an exact match, so an unchanged page is served from the browser's local cache
		
        tcp_nodelay on;   # Send small packets immediately instead of waiting to batch them (disables Nagle's algorithm)
        access_log off;   # Disable the access log at http level (it is re-enabled per virtual host below)
        error_log /var/log/nginx/error.log crit; # Tell nginx to log only critical errors here
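        # The limit_conn directive below references a shared-memory zone named "addr", which is not
        # defined elsewhere in this file. A minimal sketch of the zone it assumes, keyed by client IP
        # (the 10m size is an assumption):
        limit_conn_zone $binary_remote_addr zone=addr:10m;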
		
        limit_conn addr 100; # Limit the number of simultaneous connections per key in the "addr" zone; with the zone keyed by client IP, each IP address may hold at most 100 connections at the same time
		
        # Connection timeouts
        keepalive_timeout  10;        # keepalive_disable none;
        client_header_timeout 10;     # Timeouts for reading the request header and the request body, respectively; these can be set even lower
        client_body_timeout 10;
        reset_timedout_connection on; # Reset (close) connections to clients that have stopped responding
        send_timeout 10;              # If the client reads no data within this time, nginx closes the connection
       
        client_header_buffer_size 128k;     # Buffer size used for reading the client request header
        large_client_header_buffers 4 128k; # Buffers for large client request headers. nginx first reads the header into client_header_buffer_size; if it does not fit, large_client_header_buffers is used
		
		#Enable gzip compression
        gzip  on;
        gzip_disable "MSIE [1-6]\.(?!.*SV1)"; # Disable gzip for old browsers whose User-Agent matches this regex (Microsoft Internet Explorer 1-6 without SV1)
        gzip_proxied any;  # When nginx acts as a reverse proxy, decide whether to gzip the response to a proxied request based on the request and response headers:
							#expired - enable compression, if the header contains the "Expires" header
							#no-cache - enable compression, if the header contains the "Cache-Control:no-cache" header
							#no-store - enable compression, if the header contains the "Cache-Control:no-store" header
							#private - enable compression, if the header contains the "Cache-Control:private" header
							#no_last_modified - enable compression, if the "Last-Modified" header is not included in the header
							#no_etag - enable compression, if the "ETag" header is not included in the header
							#auth - enable compression, if the "Authorization" header is included in the header
							#any - enable compression unconditionally
        gzip_min_length 1000; # Minimum response size to compress; responses smaller than 1000 bytes are better left uncompressed
        gzip_comp_level 4;    # Compression level, 1-9; 9 is the slowest but compresses the most. 4 is a reasonable compromise
		gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
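        # Optional, not part of the original configuration: when compressed responses pass through
        # proxies or caches, gzip_vary adds a "Vary: Accept-Encoding" header so that compressed and
        # uncompressed variants are cached separately.
        #gzip_vary on;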
		
        #php fastcgi caching
        fastcgi_cache_path /usr/local/nginx/fastcgi_cache levels=1:2 keys_zone=TEST:10m inactive=5m; # Cache directory with 2-level hashing, a 10m key zone named TEST, entries dropped after 5m of inactivity
        fastcgi_cache_key "$scheme$request_method$host$request_uri"; # Cache key
        fastcgi_connect_timeout 300; # Timeouts (in seconds) for connecting to, sending to and reading from the FastCGI backend
        fastcgi_send_timeout 300;
        fastcgi_read_timeout 300;
        fastcgi_buffer_size 16k;     # Buffers used for reading the FastCGI response
        fastcgi_buffers 16 16k;
        fastcgi_busy_buffers_size 16k;
        fastcgi_temp_file_write_size 16k;
        fastcgi_cache TEST;          # Enable FastCGI caching using the TEST zone
        fastcgi_cache_valid 200 302 1h; # How long responses with these status codes remain valid in the cache
        fastcgi_cache_valid 301 1d;
        fastcgi_cache_valid any 1m;
        fastcgi_cache_min_uses 1;    # Cache a response after it has been requested at least this many times
        fastcgi_cache_use_stale error timeout invalid_header http_500; # Serve stale cached content when the backend fails in these ways
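        # Sketch of a PHP location that would use the TEST cache zone above. It belongs inside a
        # server block; the 127.0.0.1:9000 backend address is an assumption, not part of this file:
        #   location ~ \.php$ {
        #       fastcgi_pass 127.0.0.1:9000;
        #       fastcgi_index index.php;
        #       fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
        #       include fastcgi_params;
        #   }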
		
	   
        

        open_file_cache max=204800 inactive=20s; # Cache of open file descriptors: at most 204800 entries; an entry is removed if not accessed within 20s; when the cache is full, least recently used (LRU) entries are evicted
        open_file_cache_valid 30s;    # How often to re-validate the information cached in open_file_cache
        open_file_cache_min_uses 2;   # Minimum number of accesses within the inactive period for an entry to stay in the cache
        open_file_cache_errors on;    # Also cache file lookup errors (e.g. "not found")
		
        # proxy cache (the cache_one zone is used by proxy_cache in the server blocks below)
        proxy_cache_path /cache/proxy_cache levels=1:2 keys_zone=cache_one:100m inactive=1d max_size=30g; # The 100m key zone and 30g size limit can be increased as the service requires
        proxy_temp_path /cache/proxy_temp; # Temporary files for proxied responses; should be on the same file system as proxy_cache_path

        include /etc/nginx/conf.d/*.conf;
        include /etc/nginx/sites-enabled/*;

        #Set the server list for load balancing to realize the internal jump of nginx
        upstream mysvr {
			# The weight parameter sets a server's weight: the higher the weight, the more requests it receives.
			# With weights 5, 1 and 6, roughly 5/12, 1/12 and 6/12 of the requests go to the three servers respectively.
			server 192.168.8.1:80  weight=5;
			server 192.168.8.2:80  weight=1;
			server 192.168.8.3:80  weight=6;
        }
		
		
		server {
			listen 2001;
			server_name  gmslog.jd.com;
			location / {
				autoindex on;   # Enable directory listing
				root   "/export/Logs/gms-log";
				index  index.html index.log;
			}
		}


       server {
        #Listen on port 80
        listen 80;
        #Define access using www.xx.com
        server_name  www.xx.com;
        index index.html index.htm;
        # Set the access log format of this virtual host. The $u_* fields are custom variables that must be defined elsewhere
        # in the configuration (e.g. with set or Lua); note that log_format is only allowed in the http context, so in a working configuration this line belongs in the http block.
        log_format tick "$msec|||$u_t|||$http_x_forwarded_for|||$u_domain|||$u_url|||$u_title|||$u_referrer|||$u_sh|||$u_sw|||$u_cd|||$u_lang|||$http_user_agent|||$u_utrace|||$u_account|||$u_time";
        access_log  logs/www.xx.com.access.log  tick buffer=32k;
        open_log_file_cache max=1000 inactive=10s min_size=2 valid=1m; # Cache of open log file descriptors used for access logging
		
		location / {
			proxy_cache cache_one;
			proxy_cache_min_uses 3;
			proxy_cache_revalidate on;
			proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
			proxy_cache_key "$host$request_method$uri$is_args$args";
			proxy_cache_valid  200 304 12h;
			proxy_cache_valid  301 302 1m;
			proxy_cache_valid  any 1m;
			proxy_pass http://mysvr;
			proxy_redirect off;
			proxy_next_upstream http_503 http_500 http_502 error timeout invalid_header;
			proxy_set_header Host  $host;
			proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
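			# $upstream_cache_status in the header below is one of: MISS, HIT, EXPIRED, STALE, UPDATING, REVALIDATED or BYPASS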
			add_header  N-Cache "$upstream_cache_status From $host";
			log_not_found off;
			expires 1d;
		}
		
		location = /access {
			access_by_lua_block {
				--ngx.say(ngx.var.http_accept);
				-- ngx.exec performs an internal redirect to the /test location below
				ngx.exec("/test");
			}
		}
		 
		 
		location = /test {
			default_type 'text/plain';
			resolver 192.168.177.255;          # DNS server used for runtime name resolution
			set $cache_key "1234567890";
			content_by_lua_file conf/test.lua; # Handle the request with the Lua script conf/test.lua
		}
		 
		## Read a field from the POST body, a parameter from the query string, and a request header
		location /form {
			set_form_input $name;   # set_form_input (from the form-input-nginx-module) parses the POST form body and puts the "name" field into $name
			content_by_lua '
				local say = ngx.say;
				-- read the Cache-Control request header
				local cache_control = ngx.req.get_headers()["Cache-Control"];
				if not cache_control then
					say("error");
				end
				say(cache_control);
				-- $name was filled from the POST body by set_form_input above
				local name = ngx.var.name;
				say(name);
				-- "age" query-string parameter
				local age = ngx.var.arg_age;
				say(age);
			';
		}
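		# Usage sketch (hostname and values are assumptions, not part of this file):
		#   curl -H 'Cache-Control: no-cache' -d 'name=tom' 'http://example.com/form?age=18'
		# would print the Cache-Control header value, the POSTed "name" field, and the "age" query parameter.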
		
		
		# rewrite
		location /face {
			# With the "redirect" flag (the commented-out variant) nginx returns a temporary redirect to the new URL.
			#rewrite ^(.*)$ http://211.151.188.190:8080/face.jpg redirect;
			# Because the replacement string starts with "http://", nginx stops processing and returns a redirect
			# to the client even with the "break" flag, so the proxy_pass below is never reached.
			rewrite ^(.*)$ http://211.151.188.190:8080/face.jpg break;
			proxy_pass http://192.168.149.90/api/rec/list?name=123;
		}
		

        # Define the error pages
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   /root;
        }

        #Static files are served by nginx itself
        location ~ ^/(images|static)/ {
            root /var/www/virtual/htdocs;
            # Expires after 30 days: static files change rarely, so the expiry can be long; set it lower if they are updated frequently.
            expires 30d;
        }
		
		location ~* \.(gif|jpg|jpeg)$ {
			root /var/www/virtual/htdocs;
            # Expires after 30 days, same reasoning as for the static files above.
            expires 30d;
		}
		
        #Set the address for viewing Nginx status
        location /NginxStatus {
            stub_status            on;
            access_log              off;   # No separate access log for the status page
            auth_basic              "NginxStatus";
            auth_basic_user_file  conf/htpasswd;
        }
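		# The status page returns plain text of the following form (the numbers are illustrative):
		#   Active connections: 291
		#   server accepts handled requests
		#    16630948 16630948 31070465
		#   Reading: 6 Writing: 179 Waiting: 106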
		
		# Access-control example (note: this duplicates the "location /" above; in a real configuration these rules would be merged into that block)
		location / {
			deny  192.168.1.1;
			allow 192.168.1.0/24;
			allow 10.1.1.0/16;
			allow 2001:0db8::/32;
			deny  all;
		}
		
		# Redirect variant of the /face rewrite (note: this duplicates the "location /face" above; only one of the two can exist in a working configuration)
		location /face {
			rewrite ^(.*)$ http://211.151.188.190:8080/face.jpg redirect;

		}
		
        #Disable access to .htxxx files
        location ~ /\.ht {
            deny all;
        }
        
        }
    }

 
