OLD | NEW |
(Empty) | |
| 1 """A high-speed, production ready, thread pooled, generic HTTP server. |
| 2 |
| 3 Simplest example on how to use this module directly |
| 4 (without using CherryPy's application machinery):: |
| 5 |
| 6 from cherrypy import wsgiserver |
| 7 |
| 8 def my_crazy_app(environ, start_response): |
| 9 status = '200 OK' |
| 10 response_headers = [('Content-type','text/plain')] |
| 11 start_response(status, response_headers) |
| 12 return ['Hello world!'] |
| 13 |
| 14 server = wsgiserver.CherryPyWSGIServer( |
| 15 ('0.0.0.0', 8070), my_crazy_app, |
| 16 server_name='www.cherrypy.example') |
| 17 server.start() |
| 18 |
| 19 The CherryPy WSGI server can serve as many WSGI applications |
| 20 as you want in one instance by using a WSGIPathInfoDispatcher:: |
| 21 |
| 22 d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app}) |
| 23 server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d) |
| 24 |
| 25 Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance. |
| 26 |
| 27 This won't call the CherryPy engine (application side) at all, only the |
| 28 HTTP server, which is independent from the rest of CherryPy. Don't |
| 29 let the name "CherryPyWSGIServer" throw you; the name merely reflects |
| 30 its origin, not its coupling. |
| 31 |
| 32 For those of you wanting to understand internals of this module, here's the |
| 33 basic call flow. The server's listening thread runs a very tight loop, |
| 34 sticking incoming connections onto a Queue:: |
| 35 |
| 36 server = CherryPyWSGIServer(...) |
| 37 server.start() |
| 38 while True: |
| 39 tick() |
| 40 # This blocks until a request comes in: |
| 41 child = socket.accept() |
| 42 conn = HTTPConnection(child, ...) |
| 43 server.requests.put(conn) |
| 44 |
| 45 Worker threads are kept in a pool and poll the Queue, popping off and then |
| 46 handling each connection in turn. Each connection can consist of an arbitrary |
| 47 number of requests and their responses, so we run a nested loop:: |
| 48 |
| 49 while True: |
| 50 conn = server.requests.get() |
| 51 conn.communicate() |
| 52 -> while True: |
| 53 req = HTTPRequest(...) |
| 54 req.parse_request() |
| 55 -> # Read the Request-Line, e.g. "GET /page HTTP/1.1" |
| 56 req.rfile.readline() |
| 57 read_headers(req.rfile, req.inheaders) |
| 58 req.respond() |
| 59 -> response = app(...) |
| 60 try: |
| 61 for chunk in response: |
| 62 if chunk: |
| 63 req.write(chunk) |
| 64 finally: |
| 65 if hasattr(response, "close"): |
| 66 response.close() |
| 67 if req.close_connection: |
| 68 return |
| 69 """ |
| 70 |
# Public API of this module (also controls "from ... import *"). Several of
# these names (e.g. HTTPServer, WorkerThread, the WSGI gateways) are defined
# elsewhere in this module.
__all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer',
           'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile',
           'CP_fileobject',
           'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert',
           'WorkerThread', 'ThreadPool', 'SSLAdapter',
           'CherryPyWSGIServer',
           'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0',
           'WSGIPathInfoDispatcher', 'get_ssl_adapter_class']
| 79 |
| 80 import os |
| 81 try: |
| 82 import queue |
| 83 except: |
| 84 import Queue as queue |
| 85 import re |
| 86 import rfc822 |
| 87 import socket |
| 88 import sys |
| 89 if 'win' in sys.platform and not hasattr(socket, 'IPPROTO_IPV6'): |
| 90 socket.IPPROTO_IPV6 = 41 |
| 91 try: |
| 92 import cStringIO as StringIO |
| 93 except ImportError: |
| 94 import StringIO |
| 95 DEFAULT_BUFFER_SIZE = -1 |
| 96 |
| 97 _fileobject_uses_str_type = isinstance(socket._fileobject(None)._rbuf, basestrin
g) |
| 98 |
| 99 import threading |
| 100 import time |
| 101 import traceback |
def format_exc(limit=None):
    """Like print_exc() but return a string. Backport for Python 2.3."""
    exc_type, exc_value, exc_tb = sys.exc_info()
    try:
        lines = traceback.format_exception(exc_type, exc_value, exc_tb, limit)
        return ''.join(lines)
    finally:
        # Drop our references to the traceback to avoid a reference cycle.
        exc_type = exc_value = exc_tb = None
| 109 |
| 110 |
| 111 from urllib import unquote |
| 112 from urlparse import urlparse |
| 113 import warnings |
| 114 |
if sys.version_info >= (3, 0):
    # Python 3: native strings are unicode text.
    bytestr = bytes
    unicodestr = str
    basestring = (bytes, str)
    def ntob(n, encoding='ISO-8859-1'):
        """Return the given native string as a byte string in the given encoding."""
        # In Python 3, the native string type is unicode
        return n.encode(encoding)
else:
    # Python 2: native strings are already byte strings.
    bytestr = str
    unicodestr = unicode
    basestring = basestring
    def ntob(n, encoding='ISO-8859-1'):
        """Return the given native string as a byte string in the given encoding."""
        # In Python 2, the native string type is bytes. Assume it's already
        # in the given encoding, which for ISO-8859-1 is almost always what
        # was intended.
        return n
| 133 |
# Byte-string tokens used throughout request parsing, built with ntob() so
# they are byte strings under both Python 2 and Python 3.
LF = ntob('\n')
CRLF = ntob('\r\n')
TAB = ntob('\t')
SPACE = ntob(' ')
COLON = ntob(':')
SEMICOLON = ntob(';')
EMPTY = ntob('')
NUMBER_SIGN = ntob('#')
QUESTION_MARK = ntob('?')
ASTERISK = ntob('*')
FORWARD_SLASH = ntob('/')
# Case-insensitive match for a percent-encoded slash ("%2F") in a URI path.
quoted_slash = re.compile(ntob("(?i)%2F"))
| 146 |
| 147 import errno |
| 148 |
def plat_specific_errors(*errnames):
    """Return error numbers for all errors in errnames on this platform.

    The 'errno' module contains different global constants depending on
    the specific platform (OS). This function will return the list of
    numeric values for a given list of potential names.
    """
    available = dir(errno)
    numbers = [getattr(errno, name) for name in errnames if name in available]
    # De-duplicate while returning a plain list.
    return list(dict.fromkeys(numbers))
| 160 |
socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")

# Errno values (plus, below, two exception-message strings) indicating the
# client went away or the socket is otherwise unusable; code elsewhere in
# this module checks e.args[0] against this list and drops the connection
# instead of raising.
socket_errors_to_ignore = plat_specific_errors(
    "EPIPE",
    "EBADF", "WSAEBADF",
    "ENOTSOCK", "WSAENOTSOCK",
    "ETIMEDOUT", "WSAETIMEDOUT",
    "ECONNREFUSED", "WSAECONNREFUSED",
    "ECONNRESET", "WSAECONNRESET",
    "ECONNABORTED", "WSAECONNABORTED",
    "ENETRESET", "WSAENETRESET",
    "EHOSTDOWN", "EHOSTUNREACH",
    )
# These two are message strings, not errno values: timeout errors can carry
# a string in e.args[0], which is what this list is compared against.
socket_errors_to_ignore.append("timed out")
socket_errors_to_ignore.append("The read operation timed out")

socket_errors_nonblocking = plat_specific_errors(
    'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')

# Headers whose repeated occurrences are folded into a single
# comma-separated value by read_headers() below.
comma_separated_headers = [ntob(h) for h in
    ['Accept', 'Accept-Charset', 'Accept-Encoding',
     'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control',
     'Connection', 'Content-Encoding', 'Content-Language', 'Expect',
     'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE',
     'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning',
     'WWW-Authenticate']]
| 187 |
| 188 |
| 189 import logging |
| 190 if not hasattr(logging, 'statistics'): logging.statistics = {} |
| 191 |
| 192 |
def read_headers(rfile, hdict=None):
    """Read headers from the given stream into the given header dict.

    If hdict is None, a new header dict is created. Returns the populated
    header dict.

    Headers which are repeated are folded together using a comma if their
    specification so dictates.

    This function raises ValueError when the read bytes violate the HTTP spec.
    You should probably return "400 Bad Request" if this happens.
    """
    if hdict is None:
        hdict = {}

    # Track the most recently parsed header name so that continuation
    # lines (RFC 2616 LWS folding) know which header they extend.
    hname = None
    k = None

    while True:
        line = rfile.readline()
        if not line:
            # No more data--illegal end of headers
            raise ValueError("Illegal end of headers.")

        if line == CRLF:
            # Normal end of headers
            break
        if not line.endswith(CRLF):
            raise ValueError("HTTP requires CRLF terminators")

        if line[0] in (SPACE, TAB):
            # It's a continuation line.
            if hname is None:
                # BUG FIX: a continuation line arriving before any header
                # used to raise NameError (hname/k unbound) -- an unhandled
                # 500 -- instead of the ValueError this function's contract
                # promises (which callers map to "400 Bad Request").
                raise ValueError("Illegal header continuation line.")
            v = line.strip()
        else:
            try:
                k, v = line.split(COLON, 1)
            except ValueError:
                raise ValueError("Illegal header line.")
            # TODO: what about TE and WWW-Authenticate?
            k = k.strip().title()
            v = v.strip()
            hname = k

        if k in comma_separated_headers:
            existing = hdict.get(hname)
            if existing:
                v = ", ".join((existing, v))
        hdict[hname] = v

    return hdict
| 240 |
| 241 |
class MaxSizeExceeded(Exception):
    """Raised when a request component exceeds its configured size limit."""
| 244 |
class SizeCheckWrapper(object):
    """Wraps a file-like object, raising MaxSizeExceeded if too large.

    Every byte read through this wrapper is counted in ``bytes_read``;
    once that count exceeds ``maxlen`` (when maxlen is truthy), further
    reads raise MaxSizeExceeded.
    """

    def __init__(self, rfile, maxlen):
        self.rfile = rfile
        self.maxlen = maxlen
        self.bytes_read = 0

    def _check_length(self):
        # A falsy maxlen disables the limit entirely.
        if self.maxlen and self.bytes_read > self.maxlen:
            raise MaxSizeExceeded()

    def read(self, size=None):
        chunk = self.rfile.read(size)
        self.bytes_read += len(chunk)
        self._check_length()
        return chunk

    def readline(self, size=None):
        if size is not None:
            chunk = self.rfile.readline(size)
            self.bytes_read += len(chunk)
            self._check_length()
            return chunk

        # No size given: read the line in small pieces so a pathologically
        # long line trips the limit instead of exhausting memory.
        # See http://www.cherrypy.org/ticket/421
        pieces = []
        while True:
            chunk = self.rfile.readline(256)
            self.bytes_read += len(chunk)
            self._check_length()
            pieces.append(chunk)
            if len(chunk) < 256 or chunk[-1:] == "\n":
                return EMPTY.join(pieces)

    def readlines(self, sizehint=0):
        # Shamelessly stolen from StringIO
        lines = []
        total = 0
        while True:
            line = self.readline()
            if not line:
                break
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
        return lines

    def close(self):
        self.rfile.close()

    def __iter__(self):
        return self

    def __next__(self):
        chunk = next(self.rfile)
        self.bytes_read += len(chunk)
        self._check_length()
        return chunk

    def next(self):
        # Python 2 iterator protocol.
        chunk = self.rfile.next()
        self.bytes_read += len(chunk)
        self._check_length()
        return chunk
| 312 |
| 313 |
class KnownLengthRFile(object):
    """Wraps a file-like object, returning an empty string when exhausted.

    Reads are capped at ``content_length`` total bytes; after that, read
    and readline return the empty string without touching the socket.
    """

    def __init__(self, rfile, content_length):
        self.rfile = rfile
        self.remaining = content_length

    def read(self, size=None):
        if self.remaining == 0:
            return ''
        # Never read past the declared Content-Length.
        size = self.remaining if size is None else min(size, self.remaining)
        chunk = self.rfile.read(size)
        self.remaining -= len(chunk)
        return chunk

    def readline(self, size=None):
        if self.remaining == 0:
            return ''
        size = self.remaining if size is None else min(size, self.remaining)
        chunk = self.rfile.readline(size)
        self.remaining -= len(chunk)
        return chunk

    def readlines(self, sizehint=0):
        # Shamelessly stolen from StringIO
        lines = []
        total = 0
        while True:
            line = self.readline(sizehint)
            if not line:
                break
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
        return lines

    def close(self):
        self.rfile.close()

    def __iter__(self):
        return self

    def __next__(self):
        chunk = next(self.rfile)
        self.remaining -= len(chunk)
        return chunk
| 368 |
| 369 |
class ChunkedRFile(object):
    """Wraps a file-like object, returning an empty string when exhausted.

    This class is intended to provide a conforming wsgi.input value for
    request entities that have been encoded with the 'chunked' transfer
    encoding.
    """

    def __init__(self, rfile, maxlen, bufsize=8192):
        self.rfile = rfile
        self.maxlen = maxlen
        self.bytes_read = 0
        self.buffer = EMPTY
        self.bufsize = bufsize
        self.closed = False

    def _fetch(self):
        """Read the next chunk-size line and its data into self.buffer.

        Sets self.closed when the terminating zero-size chunk is seen.
        """
        if self.closed:
            return

        line = self.rfile.readline()
        self.bytes_read += len(line)

        if self.maxlen and self.bytes_read > self.maxlen:
            raise MaxSizeExceeded("Request Entity Too Large", self.maxlen)

        line = line.strip().split(SEMICOLON, 1)

        try:
            chunk_size = line.pop(0)
            chunk_size = int(chunk_size, 16)
        except ValueError:
            raise ValueError("Bad chunked transfer size: " + repr(chunk_size))

        if chunk_size <= 0:
            # The zero-size chunk marks the end of the request body.
            self.closed = True
            return

##            if line: chunk_extension = line[0]

        if self.maxlen and self.bytes_read + chunk_size > self.maxlen:
            raise IOError("Request Entity Too Large")

        chunk = self.rfile.read(chunk_size)
        self.bytes_read += len(chunk)
        self.buffer += chunk

        crlf = self.rfile.read(2)
        if crlf != CRLF:
            raise ValueError(
                "Bad chunked transfer coding (expected '\\r\\n', "
                "got " + repr(crlf) + ")")

    def read(self, size=None):
        data = EMPTY
        while True:
            if size and len(data) >= size:
                return data

            if not self.buffer:
                self._fetch()
                if not self.buffer:
                    # EOF
                    return data

            if size:
                remaining = size - len(data)
                data += self.buffer[:remaining]
                self.buffer = self.buffer[remaining:]
            else:
                data += self.buffer
                # BUG FIX: the buffer must be drained here; the previous
                # code left it intact, so an unsized read() re-appended the
                # same bytes forever (infinite loop, unbounded memory).
                self.buffer = EMPTY

    def readline(self, size=None):
        data = EMPTY
        while True:
            if size and len(data) >= size:
                return data

            if not self.buffer:
                self._fetch()
                if not self.buffer:
                    # EOF
                    return data

            newline_pos = self.buffer.find(LF)
            if size:
                if newline_pos == -1:
                    remaining = size - len(data)
                    data += self.buffer[:remaining]
                    self.buffer = self.buffer[remaining:]
                else:
                    # BUG FIX: consume through the LF (newline_pos + 1) and
                    # return the completed line. The previous code stopped
                    # short of the LF and had no return here, so once a
                    # newline sat at the head of the buffer it made zero
                    # progress each pass and never returned.
                    remaining = min(size - len(data), newline_pos + 1)
                    data += self.buffer[:remaining]
                    self.buffer = self.buffer[remaining:]
                    if data.endswith(LF):
                        return data
            else:
                if newline_pos == -1:
                    data += self.buffer
                    # BUG FIX: drain the buffer (see read() above).
                    self.buffer = EMPTY
                else:
                    # BUG FIX: include the LF and return, matching normal
                    # file.readline() semantics; previously this branch
                    # looped forever without returning.
                    data += self.buffer[:newline_pos + 1]
                    self.buffer = self.buffer[newline_pos + 1:]
                    return data

    def readlines(self, sizehint=0):
        # Shamelessly stolen from StringIO. As in the sibling classes,
        # sizehint doubles as the per-readline size limit.
        total = 0
        lines = []
        line = self.readline(sizehint)
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline(sizehint)
        return lines

    def read_trailer_lines(self):
        """Yield raw trailer lines; only valid after the body is exhausted."""
        if not self.closed:
            raise ValueError(
                "Cannot read trailers until the request body has been read.")

        while True:
            line = self.rfile.readline()
            if not line:
                # No more data--illegal end of headers
                raise ValueError("Illegal end of headers.")

            self.bytes_read += len(line)
            if self.maxlen and self.bytes_read > self.maxlen:
                raise IOError("Request Entity Too Large")

            if line == CRLF:
                # Normal end of headers
                break
            if not line.endswith(CRLF):
                raise ValueError("HTTP requires CRLF terminators")

            yield line

    def close(self):
        self.rfile.close()

    def __iter__(self):
        # BUG FIX: this generator previously referenced an undefined name
        # 'sizehint', raising NameError on first iteration. Iterate lines
        # until EOF instead.
        line = self.readline()
        while line:
            yield line
            line = self.readline()
| 520 |
| 521 |
class HTTPRequest(object):
    """An HTTP Request (and response).

    A single HTTP connection may consist of multiple request/response pairs.
    """

    server = None
    """The HTTPServer object which is receiving this request."""

    conn = None
    """The HTTPConnection object on which this request connected."""

    inheaders = {}
    """A dict of request headers."""

    outheaders = []
    """A list of header tuples to write in the response."""

    ready = False
    """When True, the request has been parsed and is ready to begin generating
    the response. When False, signals the calling Connection that the response
    should not be generated and the connection should close."""

    close_connection = False
    """Signals the calling Connection that the request should close. This does
    not imply an error! The client and/or server may each request that the
    connection be closed."""

    chunked_write = False
    """If True, output will be encoded with the "chunked" transfer-coding.

    This value is set automatically inside send_headers."""

    def __init__(self, server, conn):
        self.server= server
        self.conn = conn

        self.ready = False
        self.started_request = False
        self.scheme = ntob("http")
        if self.server.ssl_adapter is not None:
            self.scheme = ntob("https")
        # Use the lowest-common protocol in case read_request_line errors.
        self.response_protocol = 'HTTP/1.0'
        self.inheaders = {}

        self.status = ""
        self.outheaders = []
        self.sent_headers = False
        # Reset per-request state from the class-level defaults.
        self.close_connection = self.__class__.close_connection
        self.chunked_read = False
        self.chunked_write = self.__class__.chunked_write

    def parse_request(self):
        """Parse the next HTTP request start-line and message-headers."""
        # Wrap the connection's rfile so oversized start-lines/headers
        # raise MaxSizeExceeded rather than being read without bound.
        self.rfile = SizeCheckWrapper(self.conn.rfile,
                                      self.server.max_request_header_size)
        try:
            success = self.read_request_line()
        except MaxSizeExceeded:
            self.simple_response("414 Request-URI Too Long",
                "The Request-URI sent with the request exceeds the maximum "
                "allowed bytes.")
            return
        else:
            if not success:
                return

        try:
            success = self.read_request_headers()
        except MaxSizeExceeded:
            self.simple_response("413 Request Entity Too Large",
                "The headers sent with the request exceed the maximum "
                "allowed bytes.")
            return
        else:
            if not success:
                return

        self.ready = True

    def read_request_line(self):
        """Read and parse the Request-Line. Return success as a bool.

        On failure, an error response has already been written (or the
        client disconnected) and the caller should abandon the request.
        """
        # HTTP/1.1 connections are persistent by default. If a client
        # requests a page, then idles (leaves the connection open),
        # then rfile.readline() will raise socket.error("timed out").
        # Note that it does this based on the value given to settimeout(),
        # and doesn't need the client to request or acknowledge the close
        # (although your TCP stack might suffer for it: cf Apache's history
        # with FIN_WAIT_2).
        request_line = self.rfile.readline()

        # Set started_request to True so communicate() knows to send 408
        # from here on out.
        self.started_request = True
        if not request_line:
            return False

        if request_line == CRLF:
            # RFC 2616 sec 4.1: "...if the server is reading the protocol
            # stream at the beginning of a message and receives a CRLF
            # first, it should ignore the CRLF."
            # But only ignore one leading line! else we enable a DoS.
            request_line = self.rfile.readline()
            if not request_line:
                return False

        if not request_line.endswith(CRLF):
            self.simple_response("400 Bad Request", "HTTP requires CRLF terminators")
            return False

        try:
            method, uri, req_protocol = request_line.strip().split(SPACE, 2)
            # "HTTP/x.y": the major/minor digits sit at indexes 5 and 7.
            rp = int(req_protocol[5]), int(req_protocol[7])
        except (ValueError, IndexError):
            self.simple_response("400 Bad Request", "Malformed Request-Line")
            return False

        self.uri = uri
        self.method = method

        # uri may be an abs_path (including "http://host.domain.tld");
        scheme, authority, path = self.parse_request_uri(uri)
        if NUMBER_SIGN in path:
            self.simple_response("400 Bad Request",
                                 "Illegal #fragment in Request-URI.")
            return False

        if scheme:
            self.scheme = scheme

        qs = EMPTY
        if QUESTION_MARK in path:
            path, qs = path.split(QUESTION_MARK, 1)

        # Unquote the path+params (e.g. "/this%20path" -> "/this path").
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
        #
        # But note that "...a URI must be separated into its components
        # before the escaped characters within those components can be
        # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
        # Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path".
        try:
            atoms = [unquote(x) for x in quoted_slash.split(path)]
        except ValueError:
            ex = sys.exc_info()[1]
            self.simple_response("400 Bad Request", ex.args[0])
            return False
        path = "%2F".join(atoms)
        self.path = path

        # Note that, like wsgiref and most other HTTP servers,
        # we "% HEX HEX"-unquote the path but not the query string.
        self.qs = qs

        # Compare request and server HTTP protocol versions, in case our
        # server does not support the requested protocol. Limit our output
        # to min(req, server). We want the following output:
        #     request    server     actual written   supported response
        #     protocol   protocol  response protocol    feature set
        # a     1.0        1.0           1.0                1.0
        # b     1.0        1.1           1.1                1.0
        # c     1.1        1.0           1.0                1.0
        # d     1.1        1.1           1.1                1.1
        # Notice that, in (b), the response will be "HTTP/1.1" even though
        # the client only understands 1.0. RFC 2616 10.5.6 says we should
        # only return 505 if the _major_ version is different.
        sp = int(self.server.protocol[5]), int(self.server.protocol[7])

        if sp[0] != rp[0]:
            self.simple_response("505 HTTP Version Not Supported")
            return False

        self.request_protocol = req_protocol
        self.response_protocol = "HTTP/%s.%s" % min(rp, sp)

        return True

    def read_request_headers(self):
        """Read self.rfile into self.inheaders. Return success."""

        # then all the http headers
        try:
            read_headers(self.rfile, self.inheaders)
        except ValueError:
            ex = sys.exc_info()[1]
            self.simple_response("400 Bad Request", ex.args[0])
            return False

        mrbs = self.server.max_request_body_size
        if mrbs and int(self.inheaders.get("Content-Length", 0)) > mrbs:
            self.simple_response("413 Request Entity Too Large",
                "The entity sent with the request exceeds the maximum "
                "allowed bytes.")
            return False

        # Persistent connection support
        if self.response_protocol == "HTTP/1.1":
            # Both server and client are HTTP/1.1
            if self.inheaders.get("Connection", "") == "close":
                self.close_connection = True
        else:
            # Either the server or client (or both) are HTTP/1.0
            if self.inheaders.get("Connection", "") != "Keep-Alive":
                self.close_connection = True

        # Transfer-Encoding support
        te = None
        if self.response_protocol == "HTTP/1.1":
            te = self.inheaders.get("Transfer-Encoding")
            if te:
                te = [x.strip().lower() for x in te.split(",") if x.strip()]

        self.chunked_read = False

        if te:
            for enc in te:
                if enc == "chunked":
                    self.chunked_read = True
                else:
                    # Note that, even if we see "chunked", we must reject
                    # if there is an extension we don't recognize.
                    self.simple_response("501 Unimplemented")
                    self.close_connection = True
                    return False

        # From PEP 333:
        # "Servers and gateways that implement HTTP 1.1 must provide
        # transparent support for HTTP 1.1's "expect/continue" mechanism.
        # This may be done in any of several ways:
        #   1. Respond to requests containing an Expect: 100-continue request
        #      with an immediate "100 Continue" response, and proceed normally.
        #   2. Proceed with the request normally, but provide the application
        #      with a wsgi.input stream that will send the "100 Continue"
        #      response if/when the application first attempts to read from
        #      the input stream. The read request must then remain blocked
        #      until the client responds.
        #   3. Wait until the client decides that the server does not support
        #      expect/continue, and sends the request body on its own.
        #      (This is suboptimal, and is not recommended.)
        #
        # We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
        # but it seems like it would be a big slowdown for such a rare case.
        if self.inheaders.get("Expect", "") == "100-continue":
            # Don't use simple_response here, because it emits headers
            # we don't want. See http://www.cherrypy.org/ticket/951
            msg = self.server.protocol + " 100 Continue\r\n\r\n"
            try:
                self.conn.wfile.sendall(msg)
            except socket.error:
                x = sys.exc_info()[1]
                if x.args[0] not in socket_errors_to_ignore:
                    raise
        return True

    def parse_request_uri(self, uri):
        """Parse a Request-URI into (scheme, authority, path).

        Note that Request-URI's must be one of::

            Request-URI = "*" | absoluteURI | abs_path | authority

        Therefore, a Request-URI which starts with a double forward-slash
        cannot be a "net_path"::

            net_path = "//" authority [ abs_path ]

        Instead, it must be interpreted as an "abs_path" with an empty first
        path segment::

            abs_path = "/" path_segments
            path_segments = segment *( "/" segment )
            segment = *pchar *( ";" param )
            param = *pchar
        """
        if uri == ASTERISK:
            return None, None, uri

        i = uri.find('://')
        if i > 0 and QUESTION_MARK not in uri[:i]:
            # An absoluteURI.
            # If there's a scheme (and it must be http or https), then:
            # http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query ]]
            scheme, remainder = uri[:i].lower(), uri[i + 3:]
            authority, path = remainder.split(FORWARD_SLASH, 1)
            path = FORWARD_SLASH + path
            return scheme, authority, path

        if uri.startswith(FORWARD_SLASH):
            # An abs_path.
            return None, None, uri
        else:
            # An authority.
            return None, uri, None

    def respond(self):
        """Call the gateway and write its iterable output."""
        mrbs = self.server.max_request_body_size
        if self.chunked_read:
            self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
        else:
            cl = int(self.inheaders.get("Content-Length", 0))
            if mrbs and mrbs < cl:
                if not self.sent_headers:
                    self.simple_response("413 Request Entity Too Large",
                        "The entity sent with the request exceeds the maximum "
                        "allowed bytes.")
                return
            self.rfile = KnownLengthRFile(self.conn.rfile, cl)

        self.server.gateway(self).respond()

        if (self.ready and not self.sent_headers):
            self.sent_headers = True
            self.send_headers()
        if self.chunked_write:
            # Terminate the chunked body with the last-chunk marker.
            self.conn.wfile.sendall("0\r\n\r\n")

    def simple_response(self, status, msg=""):
        """Write a simple response back to the client."""
        status = str(status)
        buf = [self.server.protocol + SPACE +
               status + CRLF,
               "Content-Length: %s\r\n" % len(msg),
               "Content-Type: text/plain\r\n"]

        if status[:3] in ("413", "414"):
            # Request Entity Too Large / Request-URI Too Long
            self.close_connection = True
            if self.response_protocol == 'HTTP/1.1':
                # This will not be true for 414, since read_request_line
                # usually raises 414 before reading the whole line, and we
                # therefore cannot know the proper response_protocol.
                buf.append("Connection: close\r\n")
            else:
                # HTTP/1.0 had no 413/414 status nor Connection header.
                # Emit 400 instead and trust the message body is enough.
                # NOTE(review): buf already contains the original status
                # line at this point, so this reassignment never reaches
                # the client -- confirm intent.
                status = "400 Bad Request"

        buf.append(CRLF)
        if msg:
            if isinstance(msg, unicodestr):
                msg = msg.encode("ISO-8859-1")
            buf.append(msg)

        try:
            self.conn.wfile.sendall("".join(buf))
        except socket.error:
            x = sys.exc_info()[1]
            if x.args[0] not in socket_errors_to_ignore:
                raise

    def write(self, chunk):
        """Write unbuffered data to the client."""
        if self.chunked_write and chunk:
            # chunk-size in hex, CRLF, chunk-data, CRLF (RFC 2616 3.6.1).
            buf = [hex(len(chunk))[2:], CRLF, chunk, CRLF]
            self.conn.wfile.sendall(EMPTY.join(buf))
        else:
            self.conn.wfile.sendall(chunk)

    def send_headers(self):
        """Assert, process, and send the HTTP response message-headers.

        You must set self.status, and self.outheaders before calling this.
        """
        hkeys = [key.lower() for key, value in self.outheaders]
        status = int(self.status[:3])

        if status == 413:
            # Request Entity Too Large. Close conn to avoid garbage.
            self.close_connection = True
        elif "content-length" not in hkeys:
            # "All 1xx (informational), 204 (no content),
            # and 304 (not modified) responses MUST NOT
            # include a message-body." So no point chunking.
            if status < 200 or status in (204, 205, 304):
                pass
            else:
                if (self.response_protocol == 'HTTP/1.1'
                    and self.method != 'HEAD'):
                    # Use the chunked transfer-coding
                    self.chunked_write = True
                    self.outheaders.append(("Transfer-Encoding", "chunked"))
                else:
                    # Closing the conn is the only way to determine len.
                    self.close_connection = True

        if "connection" not in hkeys:
            if self.response_protocol == 'HTTP/1.1':
                # Both server and client are HTTP/1.1 or better
                if self.close_connection:
                    self.outheaders.append(("Connection", "close"))
            else:
                # Server and/or client are HTTP/1.0
                if not self.close_connection:
                    self.outheaders.append(("Connection", "Keep-Alive"))

        if (not self.close_connection) and (not self.chunked_read):
            # Read any remaining request body data on the socket.
            # "If an origin server receives a request that does not include an
            # Expect request-header field with the "100-continue" expectation,
            # the request includes a request body, and the server responds
            # with a final status code before reading the entire request body
            # from the transport connection, then the server SHOULD NOT close
            # the transport connection until it has read the entire request,
            # or until the client closes the connection. Otherwise, the client
            # might not reliably receive the response message. However, this
            # requirement is not to be construed as preventing a server from
            # defending itself against denial-of-service attacks, or from
            # badly broken client implementations."
            remaining = getattr(self.rfile, 'remaining', 0)
            if remaining > 0:
                self.rfile.read(remaining)

        if "date" not in hkeys:
            self.outheaders.append(("Date", rfc822.formatdate()))

        if "server" not in hkeys:
            self.outheaders.append(("Server", self.server.server_name))

        buf = [self.server.protocol + SPACE + self.status + CRLF]
        for k, v in self.outheaders:
            buf.append(k + COLON + SPACE + v + CRLF)
        buf.append(CRLF)
        self.conn.wfile.sendall(EMPTY.join(buf))
| 946 |
| 947 |
class NoSSLError(Exception):
    """Raised when a client speaks plain HTTP to an HTTPS socket."""
| 951 |
| 952 |
class FatalSSLAlert(Exception):
    """Raised when the SSL implementation signals a fatal alert."""
| 956 |
| 957 |
class CP_fileobject(socket._fileobject):
    """Faux file object attached to a socket object.

    Extends the stdlib's socket._fileobject with:
      * bytes_read / bytes_written counters (read by the server's stats),
      * retry-on-EAGAIN/EINTR send/recv wrappers for non-blocking sockets.
    """

    def __init__(self, *args, **kwargs):
        # Running totals, consumed by WorkerThread's statistics machinery.
        self.bytes_read = 0
        self.bytes_written = 0
        socket._fileobject.__init__(self, *args, **kwargs)

    def sendall(self, data):
        """Sendall for non-blocking sockets."""
        while data:
            try:
                bytes_sent = self.send(data)
                data = data[bytes_sent:]
            except socket.error, e:
                # Retry on EAGAIN/EWOULDBLOCK; re-raise anything else.
                if e.args[0] not in socket_errors_nonblocking:
                    raise

    def send(self, data):
        # Single send() on the underlying socket, with byte accounting.
        bytes_sent = self._sock.send(data)
        self.bytes_written += bytes_sent
        return bytes_sent

    def flush(self):
        # Drain the write buffer with one sendall() call.
        if self._wbuf:
            buffer = "".join(self._wbuf)
            self._wbuf = []
            self.sendall(buffer)

    def recv(self, size):
        while True:
            try:
                data = self._sock.recv(size)
                self.bytes_read += len(data)
                return data
            except socket.error, e:
                # Retry on EAGAIN/EWOULDBLOCK and EINTR; re-raise otherwise.
                if (e.args[0] not in socket_errors_nonblocking
                    and e.args[0] not in socket_error_eintr):
                    raise

    # Two read/readline implementations are chosen at class-creation time
    # based on the module-level _fileobject_uses_str_type flag: the first
    # branch keeps self._rbuf as a StringIO, the second as a plain str.
    if not _fileobject_uses_str_type:
        def read(self, size=-1):
            # Use max, disallow tiny reads in a loop as they are very inefficient.
            # We never leave read() with any leftover data from a new recv() call
            # in our internal buffer.
            rbufsize = max(self._rbufsize, self.default_bufsize)
            # Our use of StringIO rather than lists of string objects returned by
            # recv() minimizes memory usage and fragmentation that occurs when
            # rbufsize is large compared to the typical return value of recv().
            buf = self._rbuf
            buf.seek(0, 2)  # seek end
            if size < 0:
                # Read until EOF
                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    data = self.recv(rbufsize)
                    if not data:
                        break
                    buf.write(data)
                return buf.getvalue()
            else:
                # Read until size bytes or EOF seen, whichever comes first
                buf_len = buf.tell()
                if buf_len >= size:
                    # Already have size bytes in our buffer?  Extract and return.
                    buf.seek(0)
                    rv = buf.read(size)
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return rv

                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    left = size - buf_len
                    # recv() will malloc the amount of memory given as its
                    # parameter even though it often returns much less data
                    # than that.  The returned data string is short lived
                    # as we copy it into a StringIO and free it.  This avoids
                    # fragmentation issues on many platforms.
                    data = self.recv(left)
                    if not data:
                        break
                    n = len(data)
                    if n == size and not buf_len:
                        # Shortcut.  Avoid buffer data copies when:
                        # - We have no data in our buffer.
                        # AND
                        # - Our call to recv returned exactly the
                        #   number of bytes we were asked to read.
                        return data
                    if n == left:
                        buf.write(data)
                        del data  # explicit free
                        break
                    assert n <= left, "recv(%d) returned %d bytes" % (left, n)
                    buf.write(data)
                    buf_len += n
                    del data  # explicit free
                #assert buf_len == buf.tell()
                return buf.getvalue()

        def readline(self, size=-1):
            buf = self._rbuf
            buf.seek(0, 2)  # seek end
            if buf.tell() > 0:
                # check if we already have it in our buffer
                buf.seek(0)
                bline = buf.readline(size)
                if bline.endswith('\n') or len(bline) == size:
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return bline
                del bline
            if size < 0:
                # Read until \n or EOF, whichever comes first
                if self._rbufsize <= 1:
                    # Speed up unbuffered case
                    buf.seek(0)
                    buffers = [buf.read()]
                    self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                    data = None
                    recv = self.recv
                    while data != "\n":
                        data = recv(1)
                        if not data:
                            break
                        buffers.append(data)
                    return "".join(buffers)

                buf.seek(0, 2)  # seek end
                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    nl = data.find('\n')
                    if nl >= 0:
                        nl += 1
                        buf.write(data[:nl])
                        self._rbuf.write(data[nl:])
                        del data
                        break
                    buf.write(data)
                return buf.getvalue()
            else:
                # Read until size bytes or \n or EOF seen, whichever comes first
                buf.seek(0, 2)  # seek end
                buf_len = buf.tell()
                if buf_len >= size:
                    buf.seek(0)
                    rv = buf.read(size)
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return rv
                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    left = size - buf_len
                    # did we just receive a newline?
                    nl = data.find('\n', 0, left)
                    if nl >= 0:
                        nl += 1
                        # save the excess data to _rbuf
                        self._rbuf.write(data[nl:])
                        if buf_len:
                            buf.write(data[:nl])
                            break
                        else:
                            # Shortcut.  Avoid data copy through buf when returning
                            # a substring of our first recv().
                            return data[:nl]
                    n = len(data)
                    if n == size and not buf_len:
                        # Shortcut.  Avoid data copy through buf when
                        # returning exactly all of our first recv().
                        return data
                    if n >= left:
                        buf.write(data[:left])
                        self._rbuf.write(data[left:])
                        break
                    buf.write(data)
                    buf_len += n
                #assert buf_len == buf.tell()
                return buf.getvalue()
    else:
        def read(self, size=-1):
            if size < 0:
                # Read until EOF
                buffers = [self._rbuf]
                self._rbuf = ""
                if self._rbufsize <= 1:
                    recv_size = self.default_bufsize
                else:
                    recv_size = self._rbufsize

                while True:
                    data = self.recv(recv_size)
                    if not data:
                        break
                    buffers.append(data)
                return "".join(buffers)
            else:
                # Read until size bytes or EOF seen, whichever comes first
                data = self._rbuf
                buf_len = len(data)
                if buf_len >= size:
                    self._rbuf = data[size:]
                    return data[:size]
                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    left = size - buf_len
                    recv_size = max(self._rbufsize, left)
                    data = self.recv(recv_size)
                    if not data:
                        break
                    buffers.append(data)
                    n = len(data)
                    if n >= left:
                        # Keep only what was asked for; stash the rest.
                        self._rbuf = data[left:]
                        buffers[-1] = data[:left]
                        break
                    buf_len += n
                return "".join(buffers)

        def readline(self, size=-1):
            data = self._rbuf
            if size < 0:
                # Read until \n or EOF, whichever comes first
                if self._rbufsize <= 1:
                    # Speed up unbuffered case
                    assert data == ""
                    buffers = []
                    while data != "\n":
                        data = self.recv(1)
                        if not data:
                            break
                        buffers.append(data)
                    return "".join(buffers)
                nl = data.find('\n')
                if nl >= 0:
                    nl += 1
                    self._rbuf = data[nl:]
                    return data[:nl]
                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    buffers.append(data)
                    nl = data.find('\n')
                    if nl >= 0:
                        nl += 1
                        self._rbuf = data[nl:]
                        buffers[-1] = data[:nl]
                        break
                return "".join(buffers)
            else:
                # Read until size bytes or \n or EOF seen, whichever comes first
                nl = data.find('\n', 0, size)
                if nl >= 0:
                    nl += 1
                    self._rbuf = data[nl:]
                    return data[:nl]
                buf_len = len(data)
                if buf_len >= size:
                    self._rbuf = data[size:]
                    return data[:size]
                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    buffers.append(data)
                    left = size - buf_len
                    nl = data.find('\n', 0, left)
                    if nl >= 0:
                        nl += 1
                        self._rbuf = data[nl:]
                        buffers[-1] = data[:nl]
                        break
                    n = len(data)
                    if n >= left:
                        self._rbuf = data[left:]
                        buffers[-1] = data[:left]
                        break
                    buf_len += n
                return "".join(buffers)
| 1257 |
class HTTPConnection(object):
    """An HTTP connection (active socket).

    server: the Server object which received this connection.
    socket: the raw socket object (usually TCP) for this connection.
    makefile: a fileobject class for reading from the socket.
    """

    remote_addr = None    # client address, if known
    remote_port = None    # client port, if known
    ssl_env = None        # WSGI environ entries supplied by an SSL adapter
    rbufsize = DEFAULT_BUFFER_SIZE   # read-buffer size for self.rfile
    wbufsize = DEFAULT_BUFFER_SIZE   # write-buffer size for self.wfile
    RequestHandlerClass = HTTPRequest

    def __init__(self, server, sock, makefile=CP_fileobject):
        self.server = server
        self.socket = sock
        self.rfile = makefile(sock, "rb", self.rbufsize)
        self.wfile = makefile(sock, "wb", self.wbufsize)
        self.requests_seen = 0

    def communicate(self):
        """Read each request and respond appropriately.

        Loops over pipelined requests on this connection until the request
        handler signals close, parsing fails, or an error occurs. Socket
        timeouts yield a 408; other socket errors are logged; NoSSLError
        yields a 400 over the raw socket; any other exception yields a 500.
        """
        request_seen = False
        try:
            while True:
                # (re)set req to None so that if something goes wrong in
                # the RequestHandlerClass constructor, the error doesn't
                # get written to the previous request.
                req = None
                req = self.RequestHandlerClass(self.server, self)

                # This order of operations should guarantee correct pipelining.
                req.parse_request()
                if self.server.stats['Enabled']:
                    self.requests_seen += 1
                if not req.ready:
                    # Something went wrong in the parsing (and the server has
                    # probably already made a simple_response). Return and
                    # let the conn close.
                    return

                request_seen = True
                req.respond()
                if req.close_connection:
                    return
        except socket.error:
            e = sys.exc_info()[1]
            errnum = e.args[0]
            # sadly SSL sockets return a different (longer) time out string
            if errnum == 'timed out' or errnum == 'The read operation timed out':
                # Don't error if we're between requests; only error
                # if 1) no request has been started at all, or 2) we're
                # in the middle of a request.
                # See http://www.cherrypy.org/ticket/853
                if (not request_seen) or (req and req.started_request):
                    # Don't bother writing the 408 if the response
                    # has already started being written.
                    if req and not req.sent_headers:
                        try:
                            req.simple_response("408 Request Timeout")
                        except FatalSSLAlert:
                            # Close the connection.
                            return
            elif errnum not in socket_errors_to_ignore:
                self.server.error_log("socket.error %s" % repr(errnum),
                                      level=logging.WARNING, traceback=True)
                if req and not req.sent_headers:
                    try:
                        req.simple_response("500 Internal Server Error")
                    except FatalSSLAlert:
                        # Close the connection.
                        return
            return
        except (KeyboardInterrupt, SystemExit):
            raise
        except FatalSSLAlert:
            # Close the connection.
            return
        except NoSSLError:
            if req and not req.sent_headers:
                # Unwrap our wfile so the 400 goes out over the raw socket.
                self.wfile = CP_fileobject(self.socket._sock, "wb", self.wbufsize)
                req.simple_response("400 Bad Request",
                    "The client sent a plain HTTP request, but "
                    "this server only speaks HTTPS on this port.")
                self.linger = True
        except Exception:
            e = sys.exc_info()[1]
            self.server.error_log(repr(e), level=logging.ERROR, traceback=True)
            if req and not req.sent_headers:
                try:
                    req.simple_response("500 Internal Server Error")
                except FatalSSLAlert:
                    # Close the connection.
                    return

    # Class-level default; set True in communicate() after answering a
    # plain-HTTP-on-HTTPS request, so close() leaves the FIN to the GC.
    linger = False

    def close(self):
        """Close the socket underlying this connection."""
        self.rfile.close()

        if not self.linger:
            # Python's socket module does NOT call close on the kernel socket
            # when you call socket.close(). We do so manually here because we
            # want this server to send a FIN TCP segment immediately. Note this
            # must be called *before* calling socket.close(), because the latter
            # drops its reference to the kernel socket.
            if hasattr(self.socket, '_sock'):
                self.socket._sock.close()
            self.socket.close()
        else:
            # On the other hand, sometimes we want to hang around for a bit
            # to make sure the client has a chance to read our entire
            # response. Skipping the close() calls here delays the FIN
            # packet until the socket object is garbage-collected later.
            # Someday, perhaps, we'll do the full lingering_close that
            # Apache does, but not today.
            pass
| 1379 |
| 1380 |
class TrueyZero(object):
    """Behaves like the integer 0 under addition, yet is truthy.

    Used by the worker-thread stats lambdas, where ``cond and trueyzero
    or value`` must yield an additive identity without short-circuiting
    to the ``or`` branch the way a real 0 would.
    """

    def __add__(self, other):
        return other

    def __radd__(self, other):
        return other

trueyzero = TrueyZero()
| 1388 |
| 1389 |
# Sentinel placed on the request queue to tell a WorkerThread to exit.
_SHUTDOWNREQUEST = None
| 1391 |
class WorkerThread(threading.Thread):
    """Thread which continuously polls a Queue for Connection objects.

    Due to the timing issues of polling a Queue, a WorkerThread does not
    check its own 'ready' flag after it has started. To stop the thread,
    it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
    (one for each running WorkerThread).
    """

    conn = None
    """The current connection pulled off the Queue, or None."""

    server = None
    """The HTTP Server which spawned this thread, and which owns the
    Queue and is placing active connections into it."""

    ready = False
    """A simple flag for the calling server to know when this thread
    has begun polling the Queue."""


    def __init__(self, server):
        self.ready = False
        self.server = server

        # Totals accumulated from finished connections; the lambdas below
        # add in the live connection's numbers while one is in flight.
        self.requests_seen = 0
        self.bytes_read = 0
        self.bytes_written = 0
        self.start_time = None
        self.work_time = 0
        # Each stat is a callable taking the stats dict itself. While idle
        # (start_time is None), the 'and trueyzero or ...' trick yields an
        # additive identity instead of dereferencing self.conn (which is
        # None between requests) -- trueyzero is truthy, so the 'or' branch
        # is skipped, yet it adds like 0.
        self.stats = {
            'Requests': lambda s: self.requests_seen + ((self.start_time is None) and trueyzero or self.conn.requests_seen),
            'Bytes Read': lambda s: self.bytes_read + ((self.start_time is None) and trueyzero or self.conn.rfile.bytes_read),
            'Bytes Written': lambda s: self.bytes_written + ((self.start_time is None) and trueyzero or self.conn.wfile.bytes_written),
            'Work Time': lambda s: self.work_time + ((self.start_time is None) and trueyzero or time.time() - self.start_time),
            'Read Throughput': lambda s: s['Bytes Read'](s) / (s['Work Time'](s) or 1e-6),
            'Write Throughput': lambda s: s['Bytes Written'](s) / (s['Work Time'](s) or 1e-6),
        }
        threading.Thread.__init__(self)

    def run(self):
        # Register this worker's stats with the server, then serve
        # connections until a _SHUTDOWNREQUEST sentinel is dequeued.
        self.server.stats['Worker Threads'][self.getName()] = self.stats
        try:
            self.ready = True
            while True:
                conn = self.server.requests.get()
                if conn is _SHUTDOWNREQUEST:
                    return

                self.conn = conn
                if self.server.stats['Enabled']:
                    self.start_time = time.time()
                try:
                    conn.communicate()
                finally:
                    # Always close the connection and, if stats are on,
                    # roll its counters into this worker's totals.
                    conn.close()
                    if self.server.stats['Enabled']:
                        self.requests_seen += self.conn.requests_seen
                        self.bytes_read += self.conn.rfile.bytes_read
                        self.bytes_written += self.conn.wfile.bytes_written
                        self.work_time += time.time() - self.start_time
                        self.start_time = None
                    self.conn = None
        except (KeyboardInterrupt, SystemExit):
            # Hand the interrupt to the server's main thread.
            exc = sys.exc_info()[1]
            self.server.interrupt = exc
| 1459 |
class ThreadPool(object):
    """A Request Queue for an HTTPServer which pools threads.

    ThreadPool objects must provide min, get(), put(obj), start()
    and stop(timeout) attributes.
    """

    def __init__(self, server, min=10, max=-1):
        """Set up the pool (no threads are started until start()).

        server: the HTTPServer this pool serves.
        min: number of worker threads created by start().
        max: upper bound on pool size for grow(); -1 means unbounded.
        """
        self.server = server
        self.min = min
        self.max = max
        self._threads = []
        self._queue = queue.Queue()
        self.get = self._queue.get

    def start(self):
        """Start the pool of threads."""
        for i in range(self.min):
            self._threads.append(WorkerThread(self.server))
        for worker in self._threads:
            worker.setName("CP Server " + worker.getName())
            worker.start()
        # Block until every worker has begun polling the queue.
        for worker in self._threads:
            while not worker.ready:
                time.sleep(.1)

    def _get_idle(self):
        """Number of worker threads which are idle. Read-only."""
        return len([t for t in self._threads if t.conn is None])
    idle = property(_get_idle, doc=_get_idle.__doc__)

    def put(self, obj):
        """Place a connection (or the shutdown sentinel) on the queue."""
        self._queue.put(obj)
        if obj is _SHUTDOWNREQUEST:
            return

    def grow(self, amount):
        """Spawn new worker threads (not above self.max)."""
        for i in range(amount):
            if self.max > 0 and len(self._threads) >= self.max:
                break
            worker = WorkerThread(self.server)
            worker.setName("CP Server " + worker.getName())
            self._threads.append(worker)
            worker.start()

    def shrink(self, amount):
        """Kill off worker threads (not below self.min)."""
        # Remove any dead threads from our list.
        # Iterate over a copy: removing from the list being iterated
        # skips the element following each removal (the original code
        # did exactly that, leaving dead threads behind).
        for t in self._threads[:]:
            if not t.isAlive():
                self._threads.remove(t)
                amount -= 1

        if amount > 0:
            for i in range(min(amount, len(self._threads) - self.min)):
                # Put a number of shutdown requests on the queue equal
                # to 'amount'. Once each of those is processed by a worker,
                # that worker will terminate and be culled from our list
                # in self.put.
                self._queue.put(_SHUTDOWNREQUEST)

    def stop(self, timeout=5):
        """Shut down all workers; wait up to `timeout` seconds each.

        timeout: None or a negative value means wait forever; 0 means
        don't wait at all (forcibly shut sockets immediately).
        """
        # Must shut down threads here so the code that calls
        # this method can know when all threads are stopped.
        for worker in self._threads:
            self._queue.put(_SHUTDOWNREQUEST)

        # Don't join currentThread (when stop is called inside a request).
        current = threading.currentThread()
        # Use an explicit None test: the original `if timeout and ...`
        # treated timeout == 0 as falsy, leaving `endtime` undefined and
        # raising NameError in the loop below. 0 is a valid "don't wait"
        # timeout.
        if timeout is not None and timeout >= 0:
            endtime = time.time() + timeout
        while self._threads:
            worker = self._threads.pop()
            if worker is not current and worker.isAlive():
                try:
                    if timeout is None or timeout < 0:
                        worker.join()
                    else:
                        remaining_time = endtime - time.time()
                        if remaining_time > 0:
                            worker.join(remaining_time)
                        if worker.isAlive():
                            # We exhausted the timeout.
                            # Forcibly shut down the socket.
                            c = worker.conn
                            if c and not c.rfile.closed:
                                try:
                                    c.socket.shutdown(socket.SHUT_RD)
                                except TypeError:
                                    # pyOpenSSL sockets don't take an arg
                                    c.socket.shutdown()
                            worker.join()
                except (AssertionError,
                        # Ignore repeated Ctrl-C.
                        # See http://www.cherrypy.org/ticket/691.
                        KeyboardInterrupt):
                    pass

    def _get_qsize(self):
        return self._queue.qsize()
    qsize = property(_get_qsize)
| 1563 |
| 1564 |
| 1565 |
# Choose a platform-appropriate prevent_socket_inheritance() at import
# time: POSIX (fcntl) if available, else Windows (ctypes/kernel32), else
# a no-op.
try:
    import fcntl
except ImportError:
    try:
        from ctypes import windll, WinError
    except ImportError:
        def prevent_socket_inheritance(sock):
            """Dummy function, since neither fcntl nor ctypes are available."""
            pass
    else:
        def prevent_socket_inheritance(sock):
            """Mark the given socket fd as non-inheritable (Windows)."""
            if not windll.kernel32.SetHandleInformation(sock.fileno(), 1, 0):
                raise WinError()
else:
    def prevent_socket_inheritance(sock):
        """Mark the given socket fd as non-inheritable (POSIX)."""
        # Set FD_CLOEXEC so child processes don't inherit the listener.
        fd = sock.fileno()
        old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
| 1586 |
| 1587 |
class SSLAdapter(object):
    """Base class for SSL driver library adapters.

    Required methods:

        * ``wrap(sock) -> (wrapped socket, ssl environ dict)``
        * ``makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE) -> socket file object``
    """

    def __init__(self, certificate, private_key, certificate_chain=None):
        # certificate: filename of the server certificate.
        # private_key: filename of the server's private key.
        # certificate_chain: optional filename of the CA chain.
        self.certificate = certificate
        self.private_key = private_key
        self.certificate_chain = certificate_chain

    def wrap(self, sock):
        """Wrap and return the given socket, plus WSGI environ entries."""
        # Fixed: the original ``raise NotImplemented`` raises a TypeError,
        # because NotImplemented is a constant, not an exception class.
        raise NotImplementedError

    def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
        """Return a socket file object for the given (wrapped) socket."""
        raise NotImplementedError
| 1607 |
| 1608 |
| 1609 class HTTPServer(object): |
| 1610 """An HTTP server.""" |
| 1611 |
| 1612 _bind_addr = "127.0.0.1" |
| 1613 _interrupt = None |
| 1614 |
| 1615 gateway = None |
| 1616 """A Gateway instance.""" |
| 1617 |
| 1618 minthreads = None |
| 1619 """The minimum number of worker threads to create (default 10).""" |
| 1620 |
| 1621 maxthreads = None |
| 1622 """The maximum number of worker threads to create (default -1 = no limit).""
" |
| 1623 |
| 1624 server_name = None |
| 1625 """The name of the server; defaults to socket.gethostname().""" |
| 1626 |
| 1627 protocol = "HTTP/1.1" |
| 1628 """The version string to write in the Status-Line of all HTTP responses. |
| 1629 |
| 1630 For example, "HTTP/1.1" is the default. This also limits the supported |
| 1631 features used in the response.""" |
| 1632 |
| 1633 request_queue_size = 5 |
| 1634 """The 'backlog' arg to socket.listen(); max queued connections (default 5).
""" |
| 1635 |
| 1636 shutdown_timeout = 5 |
| 1637 """The total time, in seconds, to wait for worker threads to cleanly exit.""
" |
| 1638 |
| 1639 timeout = 10 |
| 1640 """The timeout in seconds for accepted connections (default 10).""" |
| 1641 |
| 1642 version = "CherryPy/3.2.2" |
| 1643 """A version string for the HTTPServer.""" |
| 1644 |
| 1645 software = None |
| 1646 """The value to set for the SERVER_SOFTWARE entry in the WSGI environ. |
| 1647 |
| 1648 If None, this defaults to ``'%s Server' % self.version``.""" |
| 1649 |
| 1650 ready = False |
| 1651 """An internal flag which marks whether the socket is accepting connections.
""" |
| 1652 |
| 1653 max_request_header_size = 0 |
| 1654 """The maximum size, in bytes, for request headers, or 0 for no limit.""" |
| 1655 |
| 1656 max_request_body_size = 0 |
| 1657 """The maximum size, in bytes, for request bodies, or 0 for no limit.""" |
| 1658 |
| 1659 nodelay = True |
| 1660 """If True (the default since 3.1), sets the TCP_NODELAY socket option.""" |
| 1661 |
| 1662 ConnectionClass = HTTPConnection |
| 1663 """The class to use for handling HTTP connections.""" |
| 1664 |
| 1665 ssl_adapter = None |
| 1666 """An instance of SSLAdapter (or a subclass). |
| 1667 |
| 1668 You must have the corresponding SSL driver library installed.""" |
| 1669 |
| 1670 def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1, |
| 1671 server_name=None): |
| 1672 self.bind_addr = bind_addr |
| 1673 self.gateway = gateway |
| 1674 |
| 1675 self.requests = ThreadPool(self, min=minthreads or 1, max=maxthreads) |
| 1676 |
| 1677 if not server_name: |
| 1678 server_name = socket.gethostname() |
| 1679 self.server_name = server_name |
| 1680 self.clear_stats() |
| 1681 |
    def clear_stats(self):
        """Reset this server's statistics and re-register them globally.

        Each stat value is either a plain counter or a callable taking
        the stats dict itself; aggregate stats fold over the per-worker
        dicts stored under 'Worker Threads'. Most callables return -1
        while stats collection is disabled.
        """
        self._start_time = None
        self._run_time = 0
        self.stats = {
            'Enabled': False,
            'Bind Address': lambda s: repr(self.bind_addr),
            'Run time': lambda s: (not s['Enabled']) and -1 or self.runtime(),
            'Accepts': 0,
            'Accepts/sec': lambda s: s['Accepts'] / self.runtime(),
            'Queue': lambda s: getattr(self.requests, "qsize", None),
            'Threads': lambda s: len(getattr(self.requests, "_threads", [])),
            'Threads Idle': lambda s: getattr(self.requests, "idle", None),
            'Socket Errors': 0,
            'Requests': lambda s: (not s['Enabled']) and -1 or sum([w['Requests'](w) for w
                                       in s['Worker Threads'].values()], 0),
            'Bytes Read': lambda s: (not s['Enabled']) and -1 or sum([w['Bytes Read'](w) for w
                                         in s['Worker Threads'].values()], 0),
            'Bytes Written': lambda s: (not s['Enabled']) and -1 or sum([w['Bytes Written'](w) for w
                                            in s['Worker Threads'].values()], 0),
            'Work Time': lambda s: (not s['Enabled']) and -1 or sum([w['Work Time'](w) for w
                                        in s['Worker Threads'].values()], 0),
            'Read Throughput': lambda s: (not s['Enabled']) and -1 or sum(
                [w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6)
                 for w in s['Worker Threads'].values()], 0),
            'Write Throughput': lambda s: (not s['Enabled']) and -1 or sum(
                [w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6)
                 for w in s['Worker Threads'].values()], 0),
            'Worker Threads': {},
        }
        # Publish under a per-instance key so multiple servers don't collide.
        logging.statistics["CherryPy HTTPServer %d" % id(self)] = self.stats
| 1712 |
| 1713 def runtime(self): |
| 1714 if self._start_time is None: |
| 1715 return self._run_time |
| 1716 else: |
| 1717 return self._run_time + (time.time() - self._start_time) |
| 1718 |
| 1719 def __str__(self): |
| 1720 return "%s.%s(%r)" % (self.__module__, self.__class__.__name__, |
| 1721 self.bind_addr) |
| 1722 |
| 1723 def _get_bind_addr(self): |
| 1724 return self._bind_addr |
| 1725 def _set_bind_addr(self, value): |
| 1726 if isinstance(value, tuple) and value[0] in ('', None): |
| 1727 # Despite the socket module docs, using '' does not |
| 1728 # allow AI_PASSIVE to work. Passing None instead |
| 1729 # returns '0.0.0.0' like we want. In other words: |
| 1730 # host AI_PASSIVE result |
| 1731 # '' Y 192.168.x.y |
| 1732 # '' N 192.168.x.y |
| 1733 # None Y 0.0.0.0 |
| 1734 # None N 127.0.0.1 |
| 1735 # But since you can get the same effect with an explicit |
| 1736 # '0.0.0.0', we deny both the empty string and None as values. |
| 1737 raise ValueError("Host values of '' or None are not allowed. " |
| 1738 "Use '0.0.0.0' (IPv4) or '::' (IPv6) instead " |
| 1739 "to listen on all active interfaces.") |
| 1740 self._bind_addr = value |
| 1741 bind_addr = property(_get_bind_addr, _set_bind_addr, |
| 1742 doc="""The interface on which to listen for connections. |
| 1743 |
| 1744 For TCP sockets, a (host, port) tuple. Host values may be any IPv4 |
| 1745 or IPv6 address, or any valid hostname. The string 'localhost' is a |
| 1746 synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6). |
| 1747 The string '0.0.0.0' is a special IPv4 entry meaning "any active |
| 1748 interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for |
| 1749 IPv6. The empty string or None are not allowed. |
| 1750 |
| 1751 For UNIX sockets, supply the filename as a string.""") |
| 1752 |
    def start(self):
        """Run the server forever.

        Resolves and binds the listening socket (trying each address
        returned by getaddrinfo until one binds), starts the worker
        thread pool, then loops on self.tick() until self.ready goes
        False, re-raising any interrupt recorded by a worker.
        """
        # We don't have to trap KeyboardInterrupt or SystemExit here,
        # because cherrpy.server already does so, calling self.stop() for us.
        # If you're using this server with another framework, you should
        # trap those exceptions in whatever code block calls start().
        self._interrupt = None

        if self.software is None:
            self.software = "%s Server" % self.version

        # SSL backward compatibility
        if (self.ssl_adapter is None and
            getattr(self, 'ssl_certificate', None) and
            getattr(self, 'ssl_private_key', None)):
            warnings.warn(
                "SSL attributes are deprecated in CherryPy 3.2, and will "
                "be removed in CherryPy 3.3. Use an ssl_adapter attribute "
                "instead.",
                DeprecationWarning
            )
            try:
                from cherrypy.wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter
            except ImportError:
                pass
            else:
                self.ssl_adapter = pyOpenSSLAdapter(
                    self.ssl_certificate, self.ssl_private_key,
                    getattr(self, 'ssl_certificate_chain', None))

        # Select the appropriate socket
        if isinstance(self.bind_addr, basestring):
            # AF_UNIX socket

            # So we can reuse the socket...
            try: os.unlink(self.bind_addr)
            except: pass

            # So everyone can access the socket...
            try: os.chmod(self.bind_addr, 511) # 0777
            except: pass

            info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
        else:
            # AF_INET or AF_INET6 socket
            # Get the correct address family for our host (allows IPv6 addresses)
            host, port = self.bind_addr
            try:
                info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                          socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
            except socket.gaierror:
                # Resolution failed; guess the family from the host string.
                if ':' in self.bind_addr[0]:
                    info = [(socket.AF_INET6, socket.SOCK_STREAM,
                             0, "", self.bind_addr + (0, 0))]
                else:
                    info = [(socket.AF_INET, socket.SOCK_STREAM,
                             0, "", self.bind_addr)]

        # Try each candidate address until one binds successfully.
        self.socket = None
        msg = "No socket could be created"
        for res in info:
            af, socktype, proto, canonname, sa = res
            try:
                self.bind(af, socktype, proto)
            except socket.error:
                if self.socket:
                    self.socket.close()
                self.socket = None
                continue
            break
        if not self.socket:
            raise socket.error(msg)

        # Timeout so KeyboardInterrupt can be caught on Win32
        self.socket.settimeout(1)
        self.socket.listen(self.request_queue_size)

        # Create worker threads
        self.requests.start()

        self.ready = True
        self._start_time = time.time()
        while self.ready:
            try:
                self.tick()
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                self.error_log("Error in HTTPServer.tick", level=logging.ERROR,
                               traceback=True)

            if self.interrupt:
                while self.interrupt is True:
                    # Wait for self.stop() to complete. See _set_interrupt.
                    time.sleep(0.1)
                if self.interrupt:
                    raise self.interrupt
| 1850 |
| 1851 def error_log(self, msg="", level=20, traceback=False): |
| 1852 # Override this in subclasses as desired |
| 1853 sys.stderr.write(msg + '\n') |
| 1854 sys.stderr.flush() |
| 1855 if traceback: |
| 1856 tblines = format_exc() |
| 1857 sys.stderr.write(tblines) |
| 1858 sys.stderr.flush() |
| 1859 |
| 1860 def bind(self, family, type, proto=0): |
| 1861 """Create (or recreate) the actual socket object.""" |
| 1862 self.socket = socket.socket(family, type, proto) |
| 1863 prevent_socket_inheritance(self.socket) |
| 1864 self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) |
| 1865 if self.nodelay and not isinstance(self.bind_addr, str): |
| 1866 self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) |
| 1867 |
| 1868 if self.ssl_adapter is not None: |
| 1869 self.socket = self.ssl_adapter.bind(self.socket) |
| 1870 |
| 1871 # If listening on the IPV6 any address ('::' = IN6ADDR_ANY), |
| 1872 # activate dual-stack. See http://www.cherrypy.org/ticket/871. |
| 1873 if (hasattr(socket, 'AF_INET6') and family == socket.AF_INET6 |
| 1874 and self.bind_addr[0] in ('::', '::0', '::0.0.0.0')): |
| 1875 try: |
| 1876 self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY,
0) |
| 1877 except (AttributeError, socket.error): |
| 1878 # Apparently, the socket option is not available in |
| 1879 # this machine's TCP stack |
| 1880 pass |
| 1881 |
| 1882 self.socket.bind(self.bind_addr) |
| 1883 |
    def tick(self):
        """Accept a new connection and put it on the Queue.

        Called repeatedly by start(). Expected socket-level errors
        (timeout, EINTR, non-blocking retry, closed-socket codes) are
        swallowed so the accept loop keeps running; anything else is
        re-raised.
        """
        try:
            s, addr = self.socket.accept()
            if self.stats['Enabled']:
                self.stats['Accepts'] += 1
            # Server is shutting down; drop the connection on the floor.
            if not self.ready:
                return

            prevent_socket_inheritance(s)
            if hasattr(s, 'settimeout'):
                s.settimeout(self.timeout)

            makefile = CP_fileobject
            ssl_env = {}
            # if ssl cert and key are set, we try to be a secure HTTP server
            if self.ssl_adapter is not None:
                try:
                    s, ssl_env = self.ssl_adapter.wrap(s)
                except NoSSLError:
                    # Plain HTTP arrived on an HTTPS port: answer with a
                    # hand-rolled 400 and close.
                    msg = ("The client sent a plain HTTP request, but "
                           "this server only speaks HTTPS on this port.")
                    buf = ["%s 400 Bad Request\r\n" % self.protocol,
                           "Content-Length: %s\r\n" % len(msg),
                           "Content-Type: text/plain\r\n\r\n",
                           msg]

                    wfile = makefile(s, "wb", DEFAULT_BUFFER_SIZE)
                    try:
                        wfile.sendall("".join(buf))
                    except socket.error:
                        x = sys.exc_info()[1]
                        if x.args[0] not in socket_errors_to_ignore:
                            raise
                    return
                if not s:
                    return
                makefile = self.ssl_adapter.makefile
                # Re-apply our timeout since we may have a new socket object
                if hasattr(s, 'settimeout'):
                    s.settimeout(self.timeout)

            conn = self.ConnectionClass(self, s, makefile)

            if not isinstance(self.bind_addr, basestring):
                # optional values
                # Until we do DNS lookups, omit REMOTE_HOST
                if addr is None: # sometimes this can happen
                    # figure out if AF_INET or AF_INET6.
                    if len(s.getsockname()) == 2:
                        # AF_INET
                        addr = ('0.0.0.0', 0)
                    else:
                        # AF_INET6
                        addr = ('::', 0)
                conn.remote_addr = addr[0]
                conn.remote_port = addr[1]

            conn.ssl_env = ssl_env

            # Hand the connection to the worker thread pool.
            self.requests.put(conn)
        except socket.timeout:
            # The only reason for the timeout in start() is so we can
            # notice keyboard interrupts on Win32, which don't interrupt
            # accept() by default
            return
        except socket.error:
            x = sys.exc_info()[1]
            if self.stats['Enabled']:
                self.stats['Socket Errors'] += 1
            if x.args[0] in socket_error_eintr:
                # I *think* this is right. EINTR should occur when a signal
                # is received during the accept() call; all docs say retry
                # the call, and I *think* I'm reading it right that Python
                # will then go ahead and poll for and handle the signal
                # elsewhere. See http://www.cherrypy.org/ticket/707.
                return
            if x.args[0] in socket_errors_nonblocking:
                # Just try again. See http://www.cherrypy.org/ticket/479.
                return
            if x.args[0] in socket_errors_to_ignore:
                # Our socket was closed.
                # See http://www.cherrypy.org/ticket/686.
                return
            raise
| 1969 |
    def _get_interrupt(self):
        return self._interrupt
    def _set_interrupt(self, interrupt):
        # Use True as a transient sentinel while stop() runs: the loop in
        # start() spins on "while self.interrupt is True" and only
        # re-raises the real exception after it is assigned below, which
        # guarantees stop() has completed first.
        self._interrupt = True
        self.stop()
        self._interrupt = interrupt
    interrupt = property(_get_interrupt, _set_interrupt,
                         doc="Set this to an Exception instance to "
                             "interrupt the server.")
| 1979 |
    def stop(self):
        """Gracefully shutdown a server that is serving forever.

        Clears self.ready (so start()'s loop exits), wakes up the
        blocking accept() by connecting to our own listening address,
        closes the listening socket, and stops the worker pool.
        """
        self.ready = False
        # Fold the elapsed serving time into the stats bookkeeping.
        if self._start_time is not None:
            self._run_time += (time.time() - self._start_time)
        self._start_time = None

        sock = getattr(self, "socket", None)
        if sock:
            if not isinstance(self.bind_addr, basestring):
                # Touch our own socket to make accept() return immediately.
                try:
                    host, port = sock.getsockname()[:2]
                except socket.error:
                    x = sys.exc_info()[1]
                    if x.args[0] not in socket_errors_to_ignore:
                        # Changed to use error code and not message
                        # See http://www.cherrypy.org/ticket/860.
                        raise
                else:
                    # Note that we're explicitly NOT using AI_PASSIVE,
                    # here, because we want an actual IP to touch.
                    # localhost won't work if we've bound to a public IP,
                    # but it will if we bound to '0.0.0.0' (INADDR_ANY).
                    for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                                  socket.SOCK_STREAM):
                        af, socktype, proto, canonname, sa = res
                        s = None
                        try:
                            s = socket.socket(af, socktype, proto)
                            # See http://groups.google.com/group/cherrypy-users/
                            # browse_frm/thread/bbfe5eb39c904fe0
                            s.settimeout(1.0)
                            s.connect((host, port))
                            s.close()
                        except socket.error:
                            # Best-effort: this address simply wasn't
                            # reachable; try the next resolved address.
                            if s:
                                s.close()
            if hasattr(sock, "close"):
                sock.close()
            self.socket = None

        self.requests.stop(self.shutdown_timeout)
| 2023 |
| 2024 |
class Gateway(object):
    """A base class to interface HTTPServer with other systems, such as WSGI."""

    def __init__(self, req):
        """Store the current request object to be processed."""
        self.req = req

    def respond(self):
        """Process the current request. Must be overridden in a subclass."""
        # Fixed: this used to be ``raise NotImplemented``. NotImplemented is
        # not an exception class (raising it is itself a TypeError);
        # NotImplementedError is the correct way to mark an abstract method.
        raise NotImplementedError
| 2034 |
| 2035 |
# These may either be wsgiserver.SSLAdapter subclasses or the string names
# of such classes (in which case they will be lazily loaded).
# String entries are dotted "module.ClassName" paths, resolved on demand
# by get_ssl_adapter_class().
ssl_adapters = {
    'builtin': 'cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter',
    'pyopenssl': 'cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter',
    }
| 2042 |
def get_ssl_adapter_class(name='pyopenssl'):
    """Return an SSL adapter class for the given name.

    If the registered entry is a dotted-path string, import the module
    (if necessary) and resolve the attribute; otherwise return the
    registered object as-is.
    """
    adapter = ssl_adapters[name.lower()]
    if isinstance(adapter, basestring):
        last_dot = adapter.rfind(".")
        attr_name = adapter[last_dot + 1:]
        mod_path = adapter[:last_dot]

        # A sys.modules entry of None is treated the same as "not yet
        # imported". The last [''] is important.
        mod = sys.modules.get(mod_path)
        if mod is None:
            mod = __import__(mod_path, globals(), locals(), [''])

        # Let an AttributeError propagate outward.
        try:
            adapter = getattr(mod, attr_name)
        except AttributeError:
            raise AttributeError("'%s' object has no attribute '%s'"
                                 % (mod_path, attr_name))

    return adapter
| 2067 |
| 2068 # -------------------------------- WSGI Stuff -------------------------------- # |
| 2069 |
| 2070 |
class CherryPyWSGIServer(HTTPServer):
    """A subclass of HTTPServer which calls a WSGI application."""

    wsgi_version = (1, 0)
    """The version of WSGI to produce."""

    def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
                 max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5):
        """Set up the worker pool, WSGI gateway and socket parameters."""
        self.requests = ThreadPool(self, min=numthreads or 1, max=max)
        self.wsgi_app = wsgi_app
        self.gateway = wsgi_gateways[self.wsgi_version]

        self.bind_addr = bind_addr
        # Default the advertised server name to this machine's hostname.
        self.server_name = server_name or socket.gethostname()
        self.request_queue_size = request_queue_size

        self.timeout = timeout
        self.shutdown_timeout = shutdown_timeout
        self.clear_stats()

    # numthreads aliases the pool's minimum thread count.
    def _get_numthreads(self):
        return self.requests.min
    def _set_numthreads(self, value):
        self.requests.min = value
    numthreads = property(_get_numthreads, _set_numthreads)
| 2098 |
| 2099 |
| 2100 class WSGIGateway(Gateway): |
| 2101 """A base class to interface HTTPServer with WSGI.""" |
| 2102 |
| 2103 def __init__(self, req): |
| 2104 self.req = req |
| 2105 self.started_response = False |
| 2106 self.env = self.get_environ() |
| 2107 self.remaining_bytes_out = None |
| 2108 |
| 2109 def get_environ(self): |
| 2110 """Return a new environ dict targeting the given wsgi.version""" |
| 2111 raise NotImplemented |
| 2112 |
| 2113 def respond(self): |
| 2114 """Process the current request.""" |
| 2115 response = self.req.server.wsgi_app(self.env, self.start_response) |
| 2116 try: |
| 2117 for chunk in response: |
| 2118 # "The start_response callable must not actually transmit |
| 2119 # the response headers. Instead, it must store them for the |
| 2120 # server or gateway to transmit only after the first |
| 2121 # iteration of the application return value that yields |
| 2122 # a NON-EMPTY string, or upon the application's first |
| 2123 # invocation of the write() callable." (PEP 333) |
| 2124 if chunk: |
| 2125 if isinstance(chunk, unicodestr): |
| 2126 chunk = chunk.encode('ISO-8859-1') |
| 2127 self.write(chunk) |
| 2128 finally: |
| 2129 if hasattr(response, "close"): |
| 2130 response.close() |
| 2131 |
| 2132 def start_response(self, status, headers, exc_info = None): |
| 2133 """WSGI callable to begin the HTTP response.""" |
| 2134 # "The application may call start_response more than once, |
| 2135 # if and only if the exc_info argument is provided." |
| 2136 if self.started_response and not exc_info: |
| 2137 raise AssertionError("WSGI start_response called a second " |
| 2138 "time with no exc_info.") |
| 2139 self.started_response = True |
| 2140 |
| 2141 # "if exc_info is provided, and the HTTP headers have already been |
| 2142 # sent, start_response must raise an error, and should raise the |
| 2143 # exc_info tuple." |
| 2144 if self.req.sent_headers: |
| 2145 try: |
| 2146 raise exc_info[0], exc_info[1], exc_info[2] |
| 2147 finally: |
| 2148 exc_info = None |
| 2149 |
| 2150 self.req.status = status |
| 2151 for k, v in headers: |
| 2152 if not isinstance(k, str): |
| 2153 raise TypeError("WSGI response header key %r is not of type str.
" % k) |
| 2154 if not isinstance(v, str): |
| 2155 raise TypeError("WSGI response header value %r is not of type st
r." % v) |
| 2156 if k.lower() == 'content-length': |
| 2157 self.remaining_bytes_out = int(v) |
| 2158 self.req.outheaders.extend(headers) |
| 2159 |
| 2160 return self.write |
| 2161 |
| 2162 def write(self, chunk): |
| 2163 """WSGI callable to write unbuffered data to the client. |
| 2164 |
| 2165 This method is also used internally by start_response (to write |
| 2166 data from the iterable returned by the WSGI application). |
| 2167 """ |
| 2168 if not self.started_response: |
| 2169 raise AssertionError("WSGI write called before start_response.") |
| 2170 |
| 2171 chunklen = len(chunk) |
| 2172 rbo = self.remaining_bytes_out |
| 2173 if rbo is not None and chunklen > rbo: |
| 2174 if not self.req.sent_headers: |
| 2175 # Whew. We can send a 500 to the client. |
| 2176 self.req.simple_response("500 Internal Server Error", |
| 2177 "The requested resource returned more bytes than the " |
| 2178 "declared Content-Length.") |
| 2179 else: |
| 2180 # Dang. We have probably already sent data. Truncate the chunk |
| 2181 # to fit (so the client doesn't hang) and raise an error later. |
| 2182 chunk = chunk[:rbo] |
| 2183 |
| 2184 if not self.req.sent_headers: |
| 2185 self.req.sent_headers = True |
| 2186 self.req.send_headers() |
| 2187 |
| 2188 self.req.write(chunk) |
| 2189 |
| 2190 if rbo is not None: |
| 2191 rbo -= chunklen |
| 2192 if rbo < 0: |
| 2193 raise ValueError( |
| 2194 "Response body exceeds the declared Content-Length.") |
| 2195 |
| 2196 |
class WSGIGateway_10(WSGIGateway):
    """A Gateway class to interface HTTPServer with WSGI 1.0.x."""

    def get_environ(self):
        """Return a new environ dict targeting the given wsgi.version"""
        req = self.req
        conn = req.conn
        server = req.server

        env = {
            # set a non-standard environ entry so the WSGI app can know what
            # the *real* server protocol is (and what features to support).
            # See http://www.faqs.org/rfcs/rfc2145.html.
            'ACTUAL_SERVER_PROTOCOL': server.protocol,
            'PATH_INFO': req.path,
            'QUERY_STRING': req.qs,
            'REMOTE_ADDR': conn.remote_addr or '',
            'REMOTE_PORT': str(conn.remote_port or ''),
            'REQUEST_METHOD': req.method,
            'REQUEST_URI': req.uri,
            'SCRIPT_NAME': '',
            'SERVER_NAME': server.server_name,
            # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
            'SERVER_PROTOCOL': req.request_protocol,
            'SERVER_SOFTWARE': server.software,
            'wsgi.errors': sys.stderr,
            'wsgi.input': req.rfile,
            'wsgi.multiprocess': False,
            'wsgi.multithread': True,
            'wsgi.run_once': False,
            'wsgi.url_scheme': req.scheme,
            'wsgi.version': (1, 0),
            }

        if isinstance(server.bind_addr, basestring):
            # AF_UNIX. This isn't really allowed by WSGI, which doesn't
            # address unix domain sockets. But it's better than nothing.
            env["SERVER_PORT"] = ""
        else:
            env["SERVER_PORT"] = str(server.bind_addr[1])

        # Request headers
        for name, value in req.inheaders.iteritems():
            env["HTTP_" + name.upper().replace("-", "_")] = value

        # CONTENT_TYPE/CONTENT_LENGTH arrive as HTTP_* but the CGI spec
        # wants them un-prefixed in the environ.
        for cgi_name in ("CONTENT_TYPE", "CONTENT_LENGTH"):
            header_val = env.pop("HTTP_" + cgi_name, None)
            if header_val is not None:
                env[cgi_name] = header_val

        if conn.ssl_env:
            env.update(conn.ssl_env)

        return env
| 2251 |
| 2252 |
class WSGIGateway_u0(WSGIGateway_10):
    """A Gateway class to interface HTTPServer with WSGI u.0.

    WSGI u.0 is an experimental protocol, which uses unicode for keys and values
    in both Python 2 and Python 3.
    """

    def get_environ(self):
        """Return a new environ dict targeting the given wsgi.version"""
        req = self.req
        # Start from the WSGI 1.0 environ and re-key it as unicode.
        env_10 = WSGIGateway_10.get_environ(self)
        env = dict([(k.decode('ISO-8859-1'), v) for k, v in env_10.iteritems()])
        env[u'wsgi.version'] = ('u', 0)

        # Request-URI
        env.setdefault(u'wsgi.url_encoding', u'utf-8')
        try:
            for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
                env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
        except UnicodeDecodeError:
            # Fall back to latin 1 so apps can transcode if needed.
            env[u'wsgi.url_encoding'] = u'ISO-8859-1'
            for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
                env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])

        # Decode any remaining byte-string values to unicode, leaving
        # REQUEST_URI (raw bytes) and wsgi.input (a file object) alone.
        for k, v in sorted(env.items()):
            if isinstance(v, str) and k not in ('REQUEST_URI', 'wsgi.input'):
                env[k] = v.decode('ISO-8859-1')

        return env
| 2283 |
# Map a wsgi.version tuple to the Gateway class that produces that environ
# flavor (selected in CherryPyWSGIServer.__init__ via self.wsgi_version).
wsgi_gateways = {
    (1, 0): WSGIGateway_10,
    ('u', 0): WSGIGateway_u0,
}
| 2288 |
class WSGIPathInfoDispatcher(object):
    """A WSGI dispatcher for dispatch based on the PATH_INFO.

    apps: a dict or list of (path_prefix, app) pairs.
    """

    def __init__(self, apps):
        try:
            apps = list(apps.items())
        except AttributeError:
            # Already a list of (path_prefix, app) pairs.
            pass

        # Sort the apps by len(path_prefix), descending, so the longest
        # (most specific) prefix is tried first in __call__. The previous
        # ``apps.sort(cmp=...)`` + ``reverse()`` relied on the Python-2-only
        # ``cmp`` argument; ``key``/``reverse`` is equivalent and also
        # avoids the second pass. (Tie order is irrelevant: two distinct
        # prefixes of equal length cannot both match one path.)
        apps.sort(key=lambda pair: len(pair[0]), reverse=True)

        # The path_prefix strings must start, but not end, with a slash.
        # Use "" instead of "/".
        self.apps = [(p.rstrip("/"), a) for p, a in apps]

    def __call__(self, environ, start_response):
        """Dispatch to the app with the longest matching path prefix."""
        path = environ["PATH_INFO"] or "/"
        for p, app in self.apps:
            # The apps list should be sorted by length, descending.
            if path.startswith(p + "/") or path == p:
                environ = environ.copy()
                # Shift the matched prefix from PATH_INFO to SCRIPT_NAME.
                environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
                environ["PATH_INFO"] = path[len(p):]
                return app(environ, start_response)

        start_response('404 Not Found', [('Content-Type', 'text/plain'),
                                         ('Content-Length', '0')])
        return ['']
| 2322 |
OLD | NEW |