server-configs-nginx/nginx/nginx.conf


# Run as a less privileged user for security reasons.
user www www;
# How many worker processes to run; "auto" sets it to the number
# of CPU cores available in the system, and offers the best
# performance. Don't set it higher than the number of CPU cores
# if changing this parameter.
#
# The maximum number of connections for Nginx is calculated by:
# max_clients = worker_processes * worker_connections
worker_processes auto;
# Maximum open file descriptors per process;
# should be > worker_connections.
worker_rlimit_nofile 8192;
events {
# When you need > 8000 * cpu_cores connections, you start optimizing
# your OS, and this is probably the point where you hire people
# who are smarter than you, as this is *a lot* of requests.
worker_connections 8000;
}
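# As a rough, illustrative worked example of the formula above (numbers are
# hypothetical): on a 4-core machine, worker_processes auto yields 4 workers,
# so 4 * 8000 worker_connections allows roughly 32000 simultaneous clients.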
# Default error log file (this is only used when you don't override error_log at the server{} level)
error_log logs/error.log warn;
pid /var/run/nginx.pid;
http {
# Hide nginx version information.
server_tokens off;
# Define the mime types for files.
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Format for our log files
log_format main '$remote_addr - $remote_user [$time_local] $status '
'"$request" $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
# Default access log file (this is only used when you don't override access_log at the server{} level)
access_log logs/access.log main;
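# As an illustration only (the server name and log paths below are
# placeholders), an individual virtual host in sites-enabled/ could override
# these defaults like so:
#
# server {
#   server_name example.com;
#   access_log logs/example.com.access.log main;
#   error_log logs/example.com.error.log warn;
# }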
# How long to allow each connection to stay idle; longer values are better
# for each individual client, particularly for SSL, but mean that worker
# connections are tied up longer. (Default: 65)
keepalive_timeout 20;
# Speed up file transfers by using sendfile() to copy directly
# between descriptors rather than using read()/write().
sendfile on;
# Tell Nginx not to send out partial frames; this increases throughput
# since TCP frames are filled up before being sent out. (adds TCP_CORK)
tcp_nopush on;
# Tell Nginx to enable the Nagle buffering algorithm for TCP packets, which
# collates several smaller packets together into one larger packet, thus saving
# bandwidth at the cost of a nearly imperceptible increase in latency. (removes TCP_NODELAY)
tcp_nodelay off;
# Enable Gzip compressed responses from the server to massively speed up
# resource transfer times, especially for clients on slow connections.
# All browsers since ~1998 support Gzip compression.
gzip on;
gzip_http_version 1.0; # enable compression both for HTTP/1.0 and HTTP/1.1, required for CloudFront
gzip_disable "msie6"; # disable gzipping for IE 5.5 and IE 6
gzip_comp_level 5; # level is from 1-9; 5 is a good compromise between size and CPU usage, offering about a 75% reduction for most ASCII files (almost identical to level 9)
gzip_min_length 256; # don't compress anything that's already tiny and unlikely to shrink much if at all (the default is 20 bytes, which is bad as that usually leads to larger files after gzipping)
gzip_proxied any; # compress data even for clients that are connecting to us via proxies (identified by the "Via" header), required for CloudFront
gzip_vary on; # adds a "Vary: Accept-Encoding" response header, which tells proxies to cache both the gzipped and regular version of a resource; avoids the issue where a non-gzip-capable client (extremely rare today) would display gibberish if its proxy served it the gzipped version
gzip_types
# text/html is always compressed by HttpGzipModule
text/css
text/plain
text/x-component
application/javascript
application/json
application/xml
application/xhtml+xml
application/x-font-ttf
application/x-font-opentype
application/vnd.ms-fontobject
image/svg+xml
image/x-icon;
# This should be turned on if you are going to have pre-compressed copies (.gz) of
# static files available. If not, it should be left off as it will cause extra I/O
# for the check. It is best if you enable this in a location{} block for
# a specific directory, or at an individual server{} level.
# gzip_static on;
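# As a sketch of the above (the path is illustrative, and this requires nginx
# built with --with-http_gzip_static_module), serve pre-compressed assets from
# a single directory:
#
# location /assets/ {
#   gzip_static on;
# }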
# Protect against the BEAST attack by preferring RC4-SHA when using SSLv3 and TLS protocols.
# Note that TLSv1.1 and TLSv1.2 are immune to the BEAST attack but only work with OpenSSL v1.0.1 and higher and have limited client support.
ssl_protocols SSLv3 TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers RC4:HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
# Optimize SSL by caching session parameters for 10 minutes. This cuts down on the number of expensive SSL handshakes.
# The handshake is the most CPU-intensive operation, and by default it is re-negotiated on every new/parallel connection.
# By enabling a cache (of type "shared between all Nginx workers"), we tell the client to re-use the already negotiated state.
# Further optimization can be achieved by raising keepalive_timeout, but that shouldn't be done unless you serve primarily HTTPS.
ssl_session_cache shared:SSL:10m; # a 1 MB cache can hold about 4000 sessions, so this 10 MB cache holds about 40000 sessions
ssl_session_timeout 10m;
# This default SSL certificate will be served whenever the client lacks support for SNI (Server Name Indication).
# Make it a symlink to the most important certificate you have, so that users of IE 8 and below on WinXP can see your main site without SSL errors.
#ssl_certificate /etc/nginx/default_ssl.crt;
#ssl_certificate_key /etc/nginx/default_ssl.key;
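# A per-site HTTPS server{} block in sites-enabled/ would then provide its own
# certificate, e.g. (illustrative names and paths only):
#
# server {
#   listen 443 ssl;
#   server_name example.com;
#   ssl_certificate /etc/nginx/example.com.crt;
#   ssl_certificate_key /etc/nginx/example.com.key;
# }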
include sites-enabled/*;
}