# nginx Configuration File
# http://wiki.nginx.org/Configuration

# Run as a less privileged user for security reasons.
user www www;

# How many worker processes to run;
# "auto" sets it to the number of CPU cores available in the system, and
# offers the best performance. Don't set it higher than the number of CPU
# cores if changing this parameter.
# The maximum number of connections for Nginx is calculated by:
# max_clients = worker_processes * worker_connections
worker_processes 2;

# Maximum open file descriptors per process;
# should be > worker_connections.
worker_rlimit_nofile 8192;

events {
  # When you need > 8000 * cpu_cores connections, you start optimizing your OS,
  # and this is probably the point at which you hire people who are smarter than
  # you, as this is *a lot* of requests.
  worker_connections 8000;
}

# Default error log file
# (this is only used when you don't override error_log on a server{} level)
error_log logs/error.log warn;
pid /var/run/nginx.pid;

http {

  # Hide nginx version information.
  server_tokens off;

  # Define the MIME types for files.
  include       /etc/nginx/mime.types;
  default_type  application/octet-stream;

  # Format to use in log files
  log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                  '$status $body_bytes_sent "$http_referer" '
                  '"$http_user_agent" "$http_x_forwarded_for"';

  # Default log file
  # (this is only used when you don't override access_log on a server{} level)
  access_log logs/access.log main;

  # How long to allow each connection to stay idle; longer values are better
  # for each individual client, particularly for SSL, but mean that worker
  # connections are tied up longer. (Default: 65)
  keepalive_timeout 20;

  # Speed up file transfers by using sendfile() to copy directly
  # between descriptors rather than using read()/write().
  sendfile on;

  # Tell Nginx not to send out partial frames; this increases throughput
  # since TCP frames are filled up before being sent out. (adds TCP_CORK)
  tcp_nopush on;

  # Tell Nginx to enable the Nagle buffering algorithm for TCP packets, which
  # collates several smaller packets together into one larger packet, thus saving
  # bandwidth at the cost of a nearly imperceptible increase in latency. (removes TCP_NODELAY)
  tcp_nodelay off;


  # Compression

  # Enable gzip compression.
  gzip on;

  # Enable compression both for HTTP/1.0 and HTTP/1.1 (required for CloudFront).
  gzip_http_version  1.0;

  # Compression level (1-9).
  # 5 is a perfect compromise between size and CPU usage, offering about
  # 75% reduction for most ASCII files (almost identical to level 9).
  gzip_comp_level    5;

  # Don't compress anything that's already small and unlikely to shrink much
  # if at all (the default is 20 bytes, which is bad as that usually leads to
  # larger files after gzipping).
  gzip_min_length    256;

  # Compress data even for clients that are connecting to us via proxies,
  # identified by the "Via" header (required for CloudFront).
  gzip_proxied       any;

  # Tell proxies to cache both the gzipped and regular version of a resource
  # whenever the client's Accept-Encoding capabilities header varies;
  # Avoids the issue where a non-gzip capable client (which is extremely rare
  # today) would display gibberish if their proxy gave them the gzipped version.
  gzip_vary          on;

  # Compress all output labeled with one of the following MIME-types.
  gzip_types
    application/atom+xml
    application/javascript
    application/json
    application/rss+xml
    application/vnd.ms-fontobject
    application/x-font-ttf
    application/x-web-app-manifest+json
    application/xhtml+xml
    application/xml
    font/opentype
    image/svg+xml
    image/x-icon
    text/css
    text/plain
    text/x-component;
  # text/html is always compressed by HttpGzipModule

  # This should be turned on if you are going to have pre-compressed copies (.gz) of
  # static files available. If not, it should be left off, as it will cause extra I/O
  # for the check. It is best if you enable this in a location{} block for
  # a specific directory, or on an individual server{} level (see the sketch below).
  # gzip_static on;
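
  # A minimal, commented-out sketch of enabling gzip_static for a single directory,
  # as suggested above. It would go inside one of your server{} blocks; the
  # "/assets/" path is only an illustrative assumption.
  #
  #   location /assets/ {
  #     gzip_static on;
  #   }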

  # Protect against the BEAST attack by preferring RC4-SHA when using SSLv3 and TLS protocols.
  # Note that TLSv1.1 and TLSv1.2 are immune to the BEAST attack but only work with
  # OpenSSL v1.0.1 and higher, and have limited client support.
  # Ciphers are set to best allow protection from BEAST while providing forward secrecy,
  # as defined by Mozilla - https://wiki.mozilla.org/Security/Server_Side_TLS#Nginx
  ssl_protocols SSLv3 TLSv1 TLSv1.1 TLSv1.2;
  ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-ECDSA-RC4-SHA:AES128:AES256:RC4-SHA:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!3DES:!MD5:!PSK;
  ssl_prefer_server_ciphers on;

  # Optimize SSL by caching session parameters for 10 minutes. This cuts down on the
  # number of expensive SSL handshakes.
  # The handshake is the most CPU-intensive operation, and by default it is re-negotiated
  # on every new/parallel connection.
  # By enabling a cache (of type "shared between all Nginx workers"), we tell the client
  # to re-use the already negotiated state.
  # Further optimization can be achieved by raising keepalive_timeout, but that shouldn't
  # be done unless you serve primarily HTTPS.
  ssl_session_cache    shared:SSL:10m; # a 1MB cache can hold about 4000 sessions, so we can hold 40000 sessions
  ssl_session_timeout  10m;

  # This default SSL certificate will be served whenever the client lacks support for
  # SNI (Server Name Indication).
  # Make it a symlink to the most important certificate you have, so that users of
  # IE 8 and below on WinXP can see your main site without SSL errors.
  #ssl_certificate      /etc/nginx/default_ssl.crt;
  #ssl_certificate_key  /etc/nginx/default_ssl.key;

  include sites-enabled/*;
}
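
# A minimal, commented-out sketch of what a per-site file pulled in by the
# "include sites-enabled/*;" directive above might contain. The server_name, root
# path, and the use of try_files are illustrative assumptions only; adapt them
# to your own sites.
#
#   server {
#     listen      80;
#     server_name example.com;
#     root        /var/www/example.com/public;
#
#     location / {
#       try_files $uri $uri/ =404;
#     }
#   }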