Package googleapiclient :: Module http
[hide private]
[frames] | [no frames]

Source Code for Module googleapiclient.http

   1  # Copyright 2014 Google Inc. All Rights Reserved. 
   2  # 
   3  # Licensed under the Apache License, Version 2.0 (the "License"); 
   4  # you may not use this file except in compliance with the License. 
   5  # You may obtain a copy of the License at 
   6  # 
   7  #      http://www.apache.org/licenses/LICENSE-2.0 
   8  # 
   9  # Unless required by applicable law or agreed to in writing, software 
  10  # distributed under the License is distributed on an "AS IS" BASIS, 
  11  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
  12  # See the License for the specific language governing permissions and 
  13  # limitations under the License. 
  14   
  15  """Classes to encapsulate a single HTTP request. 
  16   
  17  The classes implement a command pattern, with every 
  18  object supporting an execute() method that does the 
  19  actual HTTP request. 
  20  """ 
  21  from __future__ import absolute_import 
  22  import six 
  23  from six.moves import http_client 
  24  from six.moves import range 
  25   
  26  __author__ = "jcgregorio@google.com (Joe Gregorio)" 
  27   
  28  from six import BytesIO, StringIO 
  29  from six.moves.urllib.parse import urlparse, urlunparse, quote, unquote 
  30   
import base64
import copy
import errno
import gzip
import json
import logging
import mimetypes
import os
import random
import socket
import sys
import time
import uuid

import httplib2
  44   
  45  # TODO(issue 221): Remove this conditional import jibbajabba. 
  46  try: 
  47      import ssl 
  48  except ImportError: 
  49      _ssl_SSLError = object() 
  50  else: 
  51      _ssl_SSLError = ssl.SSLError 
  52   
  53  from email.generator import Generator 
  54  from email.mime.multipart import MIMEMultipart 
  55  from email.mime.nonmultipart import MIMENonMultipart 
  56  from email.parser import FeedParser 
  57   
  58  from googleapiclient import _helpers as util 
  59   
  60  from googleapiclient import _auth 
  61  from googleapiclient.errors import BatchError 
  62  from googleapiclient.errors import HttpError 
  63  from googleapiclient.errors import InvalidChunkSizeError 
  64  from googleapiclient.errors import ResumableUploadError 
  65  from googleapiclient.errors import UnexpectedBodyError 
  66  from googleapiclient.errors import UnexpectedMethodError 
  67  from googleapiclient.model import JsonModel 
  68   
  69   
  70  LOGGER = logging.getLogger(__name__) 
  71   
  72  DEFAULT_CHUNK_SIZE = 100 * 1024 * 1024 
  73   
  74  MAX_URI_LENGTH = 2048 
  75   
  76  MAX_BATCH_LIMIT = 1000 
  77   
  78  _TOO_MANY_REQUESTS = 429 
  79   
  80  DEFAULT_HTTP_TIMEOUT_SEC = 60 
  81   
  82  _LEGACY_BATCH_URI = "https://www.googleapis.com/batch" 
  83   
  84  if six.PY2: 
  85      # That's a builtin python3 exception, nonexistent in python2. 
  86      # Defined to None to avoid NameError while trying to catch it 
  87      ConnectionError = None 
def _should_retry_response(resp_status, content):
    """Determine whether a failed HTTP response is worth retrying.

    Args:
      resp_status: int, the HTTP status code of the response.
      content: The raw response body, inspected for 403 failure reasons.

    Returns:
      True if the request that produced this response should be retried.
    """
    # All 5xx server errors are considered transient.
    if resp_status >= 500:
        return True

    # 429 means we are being rate limited; back off and retry.
    if resp_status == _TOO_MANY_REQUESTS:
        return True

    # A 403 is retriable only when its JSON payload says it was caused by
    # rate limiting, so parse the body to find the failure reason.
    if resp_status == six.moves.http_client.FORBIDDEN:
        # Without details about the 403 type, don't retry.
        if not content:
            return False

        # Content is in JSON format.
        try:
            payload = json.loads(content.decode("utf-8"))
            if isinstance(payload, dict):
                reason = payload["error"]["errors"][0]["reason"]
            else:
                reason = payload[0]["error"]["errors"]["reason"]
        except (UnicodeDecodeError, ValueError, KeyError):
            LOGGER.warning("Invalid JSON content from response: %s", content)
            return False

        LOGGER.warning('Encountered 403 Forbidden with reason "%s"', reason)

        # Only retry on rate limit related failures.
        return reason in ("userRateLimitExceeded", "rateLimitExceeded")

    # Everything else is a success or non-retriable failure.
    return False
134
def _retry_request(
    http, num_retries, req_type, sleep, rand, uri, method, *args, **kwargs
):
    """Retries an HTTP request multiple times while handling errors.

    If after all retries the request still fails, last error is either returned as
    return value (for HTTP 5xx errors) or thrown (for ssl.SSLError).

    Args:
      http: Http object to be used to execute request.
      num_retries: Maximum number of retries.
      req_type: Type of the request (used for logging retries).
      sleep, rand: Functions to sleep for random time between retries.
      uri: URI to be requested.
      method: HTTP method to be used.
      args, kwargs: Additional arguments passed to http.request.

    Returns:
      resp, content - Response from the http request (may be HTTP 5xx).
    """
    resp = None
    content = None
    exception = None
    for retry_num in range(num_retries + 1):
        if retry_num > 0:
            # Randomized exponential backoff before each retry.
            sleep_time = rand() * 2 ** retry_num
            LOGGER.warning(
                "Sleeping %.2f seconds before retry %d of %d for %s: %s %s, after %s",
                sleep_time,
                retry_num,
                num_retries,
                req_type,
                method,
                uri,
                resp.status if resp else exception,
            )
            sleep(sleep_time)

        try:
            exception = None
            resp, content = http.request(uri, method, *args, **kwargs)
        # Retry on SSL errors and socket timeout errors.
        except _ssl_SSLError as ssl_error:
            exception = ssl_error
        except socket.timeout as socket_timeout:
            # It's important that this be before socket.error as it's a subclass
            # socket.timeout has no errorcode
            exception = socket_timeout
        except ConnectionError as connection_error:
            # Needs to be before socket.error as it's a subclass of
            # OSError (socket.error)
            exception = connection_error
        except socket.error as socket_error:
            # errno's contents differ by platform, so we have to match by name.
            # Use the errno module directly rather than relying on the socket
            # module's undocumented internal re-export (socket.errno).
            if errno.errorcode.get(socket_error.errno) not in {
                "WSAETIMEDOUT",
                "ETIMEDOUT",
                "EPIPE",
                "ECONNABORTED",
            }:
                raise
            exception = socket_error
        except httplib2.ServerNotFoundError as server_not_found_error:
            exception = server_not_found_error

        if exception:
            if retry_num == num_retries:
                # Out of retries: surface the last transport-level error.
                raise exception
            else:
                continue

        if not _should_retry_response(resp.status, content):
            break

    return resp, content
212
class MediaUploadProgress(object):
    """Status of a resumable upload."""

    def __init__(self, resumable_progress, total_size):
        """Constructor.

        Args:
          resumable_progress: int, bytes sent so far.
          total_size: int, total bytes in complete upload, or None if the total
            upload size isn't known ahead of time.
        """
        self.resumable_progress = resumable_progress
        self.total_size = total_size

    def progress(self):
        """Percent of upload completed, as a float.

        Returns:
          the percentage complete as a float, returning 0.0 if the total size of
          the upload is unknown.
        """
        # Guard clause: unknown (None) or zero total size means no ratio.
        if not self.total_size:
            return 0.0
        return float(self.resumable_progress) / float(self.total_size)
239
class MediaDownloadProgress(object):
    """Status of a resumable download."""

    def __init__(self, resumable_progress, total_size):
        """Constructor.

        Args:
          resumable_progress: int, bytes received so far.
          total_size: int, total bytes in complete download.
        """
        self.resumable_progress = resumable_progress
        self.total_size = total_size

    def progress(self):
        """Percent of download completed, as a float.

        Returns:
          the percentage complete as a float, returning 0.0 if the total size of
          the download is unknown.
        """
        # Guard clause: an unknown or zero total size yields 0.0.
        if self.total_size is None or self.total_size == 0:
            return 0.0
        return float(self.resumable_progress) / float(self.total_size)
265
class MediaUpload(object):
    """Describes a media object to upload.

    Base class that defines the interface of MediaUpload subclasses.

    Note that subclasses of MediaUpload may allow you to control the chunksize
    when uploading a media object. It is important to keep the size of the
    chunk as large as possible to keep the upload efficient. Other factors may
    influence the size of the chunk you use, particularly if you are working
    in an environment where individual HTTP requests may have a hardcoded time
    limit, such as under certain classes of requests under Google App Engine.

    Streams are io.Base compatible objects that support seek(). Some
    MediaUpload subclasses support using streams directly to upload data.
    Support for streaming is indicated by has_stream() returning True, in
    which case stream() returns an io.Base object supporting seek(). On
    platforms where the underlying httplib module supports streaming, for
    example Python 2.6 and later, the stream will be passed into the http
    library, which results in less memory being used and possibly faster
    uploads.

    If you need to upload media that can't be uploaded using any of the
    existing MediaUpload sub-classes then you can sub-class MediaUpload for
    your particular needs.
    """

    def chunksize(self):
        """Chunk size for resumable uploads.

        Returns:
          Chunk size in bytes.
        """
        raise NotImplementedError()

    def mimetype(self):
        """Mime type of the body.

        Returns:
          Mime type.
        """
        return "application/octet-stream"

    def size(self):
        """Size of upload.

        Returns:
          Size of the body, or None of the size is unknown.
        """
        return None

    def resumable(self):
        """Whether this upload is resumable.

        Returns:
          True if resumable upload or False.
        """
        return False

    def getbytes(self, begin, end):
        """Get bytes from the media.

        Args:
          begin: int, offset from beginning of file.
          end: int, number of bytes to read, starting at begin.

        Returns:
          A string of bytes read. May be shorter than requested if EOF was
          reached first.
        """
        raise NotImplementedError()

    def has_stream(self):
        """Does the underlying upload support a streaming interface.

        Streaming means it is an io.IOBase subclass that supports seek, i.e.
        seekable() returns True.

        Returns:
          True if the call to stream() will return an instance of a seekable
          io.Base subclass.
        """
        return False

    def stream(self):
        """A stream interface to the data being uploaded.

        Returns:
          The returned value is an io.IOBase subclass that supports seek, i.e.
          seekable() returns True.
        """
        raise NotImplementedError()

    @util.positional(1)
    def _to_json(self, strip=None):
        """Utility function for creating a JSON representation of a MediaUpload.

        Args:
          strip: array, An array of names of members to not include in the JSON.

        Returns:
          string, a JSON representation of this instance, suitable to pass to
          from_json().
        """
        klass = type(self)
        state = copy.copy(self.__dict__)
        for member in strip or ():
            del state[member]
        # Record the concrete class so new_from_json() can find it again.
        state["_class"] = klass.__name__
        state["_module"] = klass.__module__
        return json.dumps(state)

    def to_json(self):
        """Create a JSON representation of an instance of MediaUpload.

        Returns:
          string, a JSON representation of this instance, suitable to pass to
          from_json().
        """
        return self._to_json()

    @classmethod
    def new_from_json(cls, s):
        """Utility class method to instantiate a MediaUpload subclass from a JSON
        representation produced by to_json().

        Args:
          s: string, JSON from to_json().

        Returns:
          An instance of the subclass of MediaUpload that was serialized with
          to_json().
        """
        data = json.loads(s)
        # Import the module the serialized class lives in, then delegate to
        # that class's own from_json() to rebuild the instance.
        module = data["_module"]
        m = __import__(module, fromlist=module.split(".")[:-1])
        kls = getattr(m, data["_class"])
        return kls.from_json(s)
408
class MediaIoBaseUpload(MediaUpload):
    """A MediaUpload for a io.Base objects.

    Note that the Python file object is compatible with io.Base and can be used
    with this class also.

      fh = BytesIO('...Some data to upload...')
      media = MediaIoBaseUpload(fh, mimetype='image/png',
        chunksize=1024*1024, resumable=True)
      farm.animals().insert(
          id='cow',
          name='cow.png',
          media_body=media).execute()

    Depending on the platform you are working on, you may pass -1 as the
    chunksize, which indicates that the entire file should be uploaded in a
    single request. If the underlying platform supports streams, such as
    Python 2.6 or later, then this can be very efficient as it avoids multiple
    connections, and also avoids loading the entire file into memory before
    sending it. Note that Google App Engine has a 5MB limit on request size,
    so you should never set your chunksize larger than 5MB, or to -1.
    """

    @util.positional(3)
    def __init__(self, fd, mimetype, chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
        """Constructor.

        Args:
          fd: io.Base or file object, The source of the bytes to upload. MUST be
            opened in blocking mode, do not use streams opened in non-blocking mode.
            The given stream must be seekable, that is, it must be able to call
            seek() on fd.
          mimetype: string, Mime-type of the file.
          chunksize: int, File will be uploaded in chunks of this many bytes. Only
            used if resumable=True. Pass in a value of -1 if the file is to be
            uploaded as a single chunk. Note that Google App Engine has a 5MB limit
            on request size, so you should never set your chunksize larger than 5MB,
            or to -1.
          resumable: bool, True if this is a resumable upload. False means upload
            in a single request.
        """
        super(MediaIoBaseUpload, self).__init__()
        self._fd = fd
        self._mimetype = mimetype
        # Only -1 (single chunk) or a positive chunk size is accepted.
        if chunksize != -1 and chunksize <= 0:
            raise InvalidChunkSizeError()
        self._chunksize = chunksize
        self._resumable = resumable

        # Determine the total size by seeking to the end of the stream.
        self._fd.seek(0, os.SEEK_END)
        self._size = self._fd.tell()

    def chunksize(self):
        """Chunk size for resumable uploads.

        Returns:
          Chunk size in bytes.
        """
        return self._chunksize

    def mimetype(self):
        """Mime type of the body.

        Returns:
          Mime type.
        """
        return self._mimetype

    def size(self):
        """Size of upload.

        Returns:
          Size of the body, or None of the size is unknown.
        """
        return self._size

    def resumable(self):
        """Whether this upload is resumable.

        Returns:
          True if resumable upload or False.
        """
        return self._resumable

    def getbytes(self, begin, length):
        """Get bytes from the media.

        Args:
          begin: int, offset from beginning of file.
          length: int, number of bytes to read, starting at begin.

        Returns:
          A string of bytes read. May be shorter than length if EOF was reached
          first.
        """
        self._fd.seek(begin)
        return self._fd.read(length)

    def has_stream(self):
        """Does the underlying upload support a streaming interface.

        Streaming means it is an io.IOBase subclass that supports seek, i.e.
        seekable() returns True.

        Returns:
          True if the call to stream() will return an instance of a seekable
          io.Base subclass.
        """
        return True

    def stream(self):
        """A stream interface to the data being uploaded.

        Returns:
          The returned value is an io.IOBase subclass that supports seek, i.e.
          seekable() returns True.
        """
        return self._fd

    def to_json(self):
        """This upload type is not serializable."""
        raise NotImplementedError("MediaIoBaseUpload is not serializable.")
532
class MediaFileUpload(MediaIoBaseUpload):
    """A MediaUpload for a file.

    Construct a MediaFileUpload and pass as the media_body parameter of the
    method. For example, if we had a service that allowed uploading images:

      media = MediaFileUpload('cow.png', mimetype='image/png',
        chunksize=1024*1024, resumable=True)
      farm.animals().insert(
          id='cow',
          name='cow.png',
          media_body=media).execute()

    Depending on the platform you are working on, you may pass -1 as the
    chunksize, which indicates that the entire file should be uploaded in a single
    request. If the underlying platform supports streams, such as Python 2.6 or
    later, then this can be very efficient as it avoids multiple connections, and
    also avoids loading the entire file into memory before sending it. Note that
    Google App Engine has a 5MB limit on request size, so you should never set
    your chunksize larger than 5MB, or to -1.
    """

    @util.positional(2)
    def __init__(
        self, filename, mimetype=None, chunksize=DEFAULT_CHUNK_SIZE, resumable=False
    ):
        """Constructor.

        Args:
          filename: string, Name of the file.
          mimetype: string, Mime-type of the file. If None then a mime-type will be
            guessed from the file extension.
          chunksize: int, File will be uploaded in chunks of this many bytes. Only
            used if resumable=True. Pass in a value of -1 if the file is to be
            uploaded in a single chunk. Note that Google App Engine has a 5MB limit
            on request size, so you should never set your chunksize larger than 5MB,
            or to -1.
          resumable: bool, True if this is a resumable upload. False means upload
            in a single request.
        """
        # Set _fd first so __del__ is safe even if open() below raises and the
        # instance is garbage-collected half-constructed.
        self._fd = None
        self._filename = filename
        fd = open(self._filename, "rb")
        if mimetype is None:
            # No mimetype provided, make a guess.
            mimetype, _ = mimetypes.guess_type(filename)
            if mimetype is None:
                # Guess failed, use octet-stream.
                mimetype = "application/octet-stream"
        super(MediaFileUpload, self).__init__(
            fd, mimetype, chunksize=chunksize, resumable=resumable
        )

    def __del__(self):
        # Guard against __init__ having failed before the file was opened;
        # in that case _fd may be None or missing entirely, and the original
        # unconditional self._fd.close() raised AttributeError at GC time.
        fd = getattr(self, "_fd", None)
        if fd is not None:
            fd.close()

    def to_json(self):
        """Creating a JSON representation of an instance of MediaFileUpload.

        Returns:
          string, a JSON representation of this instance, suitable to pass to
          from_json().
        """
        return self._to_json(strip=["_fd"])

    @staticmethod
    def from_json(s):
        """Restore a MediaFileUpload serialized with to_json().

        Args:
          s: string, JSON produced by to_json().

        Returns:
          MediaFileUpload, rebuilt from the serialized state (the file is
          re-opened from the stored filename).
        """
        d = json.loads(s)
        return MediaFileUpload(
            d["_filename"],
            mimetype=d["_mimetype"],
            chunksize=d["_chunksize"],
            resumable=d["_resumable"],
        )
607
class MediaInMemoryUpload(MediaIoBaseUpload):
    """MediaUpload for a chunk of bytes.

    DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
    the stream.
    """

    @util.positional(2)
    def __init__(
        self,
        body,
        mimetype="application/octet-stream",
        chunksize=DEFAULT_CHUNK_SIZE,
        resumable=False,
    ):
        """Create a new MediaInMemoryUpload.

        DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
        the stream.

        Args:
          body: string, Bytes of body content.
          mimetype: string, Mime-type of the file or default of
            'application/octet-stream'.
          chunksize: int, File will be uploaded in chunks of this many bytes. Only
            used if resumable=True.
          resumable: bool, True if this is a resumable upload. False means upload
            in a single request.
        """
        # Wrap the in-memory bytes in a stream and defer to the base class.
        super(MediaInMemoryUpload, self).__init__(
            BytesIO(body), mimetype, chunksize=chunksize, resumable=resumable
        )
642
class MediaIoBaseDownload(object):
    """Download media resources.

    Note that the Python file object is compatible with io.Base and can be used
    with this class also.

    Example:
      request = farms.animals().get_media(id='cow')
      fh = io.FileIO('cow.png', mode='wb')
      downloader = MediaIoBaseDownload(fh, request, chunksize=1024*1024)

      done = False
      while done is False:
        status, done = downloader.next_chunk()
        if status:
          print "Download %d%%." % int(status.progress() * 100)
      print "Download Complete!"
    """

    @util.positional(3)
    def __init__(self, fd, request, chunksize=DEFAULT_CHUNK_SIZE):
        """Constructor.

        Args:
          fd: io.Base or file object, The stream in which to write the downloaded
            bytes.
          request: googleapiclient.http.HttpRequest, the media request to perform in
            chunks.
          chunksize: int, File will be downloaded in chunks of this many bytes.
        """
        self._fd = fd
        self._request = request
        self._uri = request.uri
        self._chunksize = chunksize
        # Bytes written to fd so far.
        self._progress = 0
        # Total media size; unknown (None) until a response reveals it.
        self._total_size = None
        self._done = False

        # Stubs for testing.
        self._sleep = time.sleep
        self._rand = random.random

        self._headers = {}
        for k, v in six.iteritems(request.headers):
            # allow users to supply custom headers by setting them on the request
            # but strip out the ones that are set by default on requests generated by
            # API methods like Drive's files().get(fileId=...)
            if not k.lower() in ("accept", "accept-encoding", "user-agent"):
                self._headers[k] = v

    @util.positional(1)
    def next_chunk(self, num_retries=0):
        """Get the next chunk of the download.

        Args:
          num_retries: Integer, number of times to retry with randomized
            exponential backoff. If all retries fail, the raised HttpError
            represents the last request. If zero (default), we attempt the
            request only once.

        Returns:
          (status, done): (MediaDownloadProgress, boolean)
             The value of 'done' will be True when the media has been fully
             downloaded or the total size of the media is unknown.

        Raises:
          googleapiclient.errors.HttpError if the response was not a 2xx.
          httplib2.HttpLib2Error if a transport error has occurred.
        """
        # Request only the next chunk's byte window via an HTTP Range header.
        headers = self._headers.copy()
        headers["range"] = "bytes=%d-%d" % (
            self._progress,
            self._progress + self._chunksize,
        )
        http = self._request.http

        resp, content = _retry_request(
            http,
            num_retries,
            "media download",
            self._sleep,
            self._rand,
            self._uri,
            "GET",
            headers=headers,
        )

        if resp.status in [200, 206]:
            # Follow a server-provided relocation of the media URI, if any.
            if "content-location" in resp and resp["content-location"] != self._uri:
                self._uri = resp["content-location"]
            self._progress += len(content)
            self._fd.write(content)

            # Learn the total size from 'content-range' ("bytes a-b/total") or,
            # failing that, from 'content-length'.
            if "content-range" in resp:
                content_range = resp["content-range"]
                length = content_range.rsplit("/", 1)[1]
                self._total_size = int(length)
            elif "content-length" in resp:
                self._total_size = int(resp["content-length"])

            if self._total_size is None or self._progress == self._total_size:
                self._done = True
            return MediaDownloadProgress(self._progress, self._total_size), self._done
        else:
            raise HttpError(resp, content, uri=self._uri)
750
751 752 -class _StreamSlice(object):
753 """Truncated stream. 754 755 Takes a stream and presents a stream that is a slice of the original stream. 756 This is used when uploading media in chunks. In later versions of Python a 757 stream can be passed to httplib in place of the string of data to send. The 758 problem is that httplib just blindly reads to the end of the stream. This 759 wrapper presents a virtual stream that only reads to the end of the chunk. 760 """ 761
762 - def __init__(self, stream, begin, chunksize):
763 """Constructor. 764 765 Args: 766 stream: (io.Base, file object), the stream to wrap. 767 begin: int, the seek position the chunk begins at. 768 chunksize: int, the size of the chunk. 769 """ 770 self._stream = stream 771 self._begin = begin 772 self._chunksize = chunksize 773 self._stream.seek(begin)
774
775 - def read(self, n=-1):
776 """Read n bytes. 777 778 Args: 779 n, int, the number of bytes to read. 780 781 Returns: 782 A string of length 'n', or less if EOF is reached. 783 """ 784 # The data left available to read sits in [cur, end) 785 cur = self._stream.tell() 786 end = self._begin + self._chunksize 787 if n == -1 or cur + n > end: 788 n = end - cur 789 return self._stream.read(n)
790
791 792 -class HttpRequest(object):
793 """Encapsulates a single HTTP request.""" 794 795 @util.positional(4)
    def __init__(
        self,
        http,
        postproc,
        uri,
        method="GET",
        body=None,
        headers=None,
        methodId=None,
        resumable=None,
    ):
        """Constructor for an HttpRequest.

        Args:
          http: httplib2.Http, the transport object to use to make a request
          postproc: callable, called on the HTTP response and content to transform
                    it into a data object before returning, or raising an exception
                    on an error.
          uri: string, the absolute URI to send the request to
          method: string, the HTTP method to use
          body: string, the request body of the HTTP request,
          headers: dict, the HTTP request headers
          methodId: string, a unique identifier for the API method being called.
          resumable: MediaUpload, None if this is not a resumable request.
        """
        self.uri = uri
        self.method = method
        self.body = body
        self.headers = headers or {}
        self.methodId = methodId
        self.http = http
        self.postproc = postproc
        self.resumable = resumable
        # Callbacks invoked with the httplib2.Response after each execute().
        self.response_callbacks = []
        # True after a chunk upload attempt raised, so next_chunk() first asks
        # the server how many bytes it actually received before resuming.
        self._in_error_state = False

        # The size of the non-media part of the request.
        self.body_size = len(self.body or "")

        # The resumable URI to send chunks to.
        self.resumable_uri = None

        # The bytes that have been uploaded.
        self.resumable_progress = 0

        # Stubs for testing.
        self._rand = random.random
        self._sleep = time.sleep
844 845 @util.positional(1)
    def execute(self, http=None, num_retries=0):
        """Execute the request.

        Args:
          http: httplib2.Http, an http object to be used in place of the
                one the HttpRequest request object was constructed with.
          num_retries: Integer, number of times to retry with randomized
                exponential backoff. If all retries fail, the raised HttpError
                represents the last request. If zero (default), we attempt the
                request only once.

        Returns:
          A deserialized object model of the response body as determined
          by the postproc.

        Raises:
          googleapiclient.errors.HttpError if the response was not a 2xx.
          httplib2.HttpLib2Error if a transport error has occurred.
        """
        if http is None:
            http = self.http

        if self.resumable:
            body = None
            while body is None:
                # next_chunk() returns a non-None body only once the resumable
                # media has been fully uploaded.
                _, body = self.next_chunk(http=http, num_retries=num_retries)
            return body

        # Non-resumable case.

        if "content-length" not in self.headers:
            self.headers["content-length"] = str(self.body_size)
        # If the request URI is too long then turn it into a POST request.
        # Assume that a GET request never contains a request body.
        if len(self.uri) > MAX_URI_LENGTH and self.method == "GET":
            self.method = "POST"
            self.headers["x-http-method-override"] = "GET"
            self.headers["content-type"] = "application/x-www-form-urlencoded"
            # Move the query string out of the URI and into the POST body.
            parsed = urlparse(self.uri)
            self.uri = urlunparse(
                (parsed.scheme, parsed.netloc, parsed.path, parsed.params, None, None)
            )
            self.body = parsed.query
            self.headers["content-length"] = str(len(self.body))

        # Handle retries for server-side errors.
        resp, content = _retry_request(
            http,
            num_retries,
            "request",
            self._sleep,
            self._rand,
            str(self.uri),
            method=str(self.method),
            body=self.body,
            headers=self.headers,
        )

        for callback in self.response_callbacks:
            callback(resp)
        if resp.status >= 300:
            raise HttpError(resp, content, uri=self.uri)
        return self.postproc(resp, content)
909 910 @util.positional(2)
911 - def add_response_callback(self, cb):
912 """add_response_headers_callback 913 914 Args: 915 cb: Callback to be called on receiving the response headers, of signature: 916 917 def cb(resp): 918 # Where resp is an instance of httplib2.Response 919 """ 920 self.response_callbacks.append(cb)
921 922 @util.positional(1)
    def next_chunk(self, http=None, num_retries=0):
        """Execute the next step of a resumable upload.

        Can only be used if the method being executed supports media uploads and
        the MediaUpload object passed in was flagged as using resumable upload.

        Example:

          media = MediaFileUpload('cow.png', mimetype='image/png',
                                  chunksize=1000, resumable=True)
          request = farm.animals().insert(
              id='cow',
              name='cow.png',
              media_body=media)

          response = None
          while response is None:
            status, response = request.next_chunk()
            if status:
              print "Upload %d%% complete." % int(status.progress() * 100)


        Args:
          http: httplib2.Http, an http object to be used in place of the
                one the HttpRequest request object was constructed with.
          num_retries: Integer, number of times to retry with randomized
                exponential backoff. If all retries fail, the raised HttpError
                represents the last request. If zero (default), we attempt the
                request only once.

        Returns:
          (status, body): (ResumableMediaStatus, object)
             The body will be None until the resumable media is fully uploaded.

        Raises:
          googleapiclient.errors.HttpError if the response was not a 2xx.
          httplib2.HttpLib2Error if a transport error has occurred.
        """
        if http is None:
            http = self.http

        # '*' tells the server the total upload size is not yet known.
        if self.resumable.size() is None:
            size = "*"
        else:
            size = str(self.resumable.size())

        if self.resumable_uri is None:
            # First call: ask the server for a resumable session URI.
            start_headers = copy.copy(self.headers)
            start_headers["X-Upload-Content-Type"] = self.resumable.mimetype()
            if size != "*":
                start_headers["X-Upload-Content-Length"] = size
            start_headers["content-length"] = str(self.body_size)

            resp, content = _retry_request(
                http,
                num_retries,
                "resumable URI request",
                self._sleep,
                self._rand,
                self.uri,
                method=self.method,
                body=self.body,
                headers=start_headers,
            )

            if resp.status == 200 and "location" in resp:
                self.resumable_uri = resp["location"]
            else:
                raise ResumableUploadError(resp, content)
        elif self._in_error_state:
            # If we are in an error state then query the server for current state of
            # the upload by sending an empty PUT and reading the 'range' header in
            # the response.
            headers = {"Content-Range": "bytes */%s" % size, "content-length": "0"}
            resp, content = http.request(self.resumable_uri, "PUT", headers=headers)
            status, body = self._process_response(resp, content)
            if body:
                # The upload was complete.
                return (status, body)

        if self.resumable.has_stream():
            data = self.resumable.stream()
            if self.resumable.chunksize() == -1:
                # Single-chunk mode: send everything remaining in one request.
                data.seek(self.resumable_progress)
                chunk_end = self.resumable.size() - self.resumable_progress - 1
            else:
                # Doing chunking with a stream, so wrap a slice of the stream.
                data = _StreamSlice(
                    data, self.resumable_progress, self.resumable.chunksize()
                )
                chunk_end = min(
                    self.resumable_progress + self.resumable.chunksize() - 1,
                    self.resumable.size() - 1,
                )
        else:
            data = self.resumable.getbytes(
                self.resumable_progress, self.resumable.chunksize()
            )

            # A short read implies that we are at EOF, so finish the upload.
            if len(data) < self.resumable.chunksize():
                size = str(self.resumable_progress + len(data))

            chunk_end = self.resumable_progress + len(data) - 1

        headers = {
            "Content-Range": "bytes %d-%d/%s"
            % (self.resumable_progress, chunk_end, size),
            # Must set the content-length header here because httplib can't
            # calculate the size when working with _StreamSlice.
            "Content-Length": str(chunk_end - self.resumable_progress + 1),
        }

        for retry_num in range(num_retries + 1):
            if retry_num > 0:
                self._sleep(self._rand() * 2 ** retry_num)
                LOGGER.warning(
                    "Retry #%d for media upload: %s %s, following status: %d"
                    % (retry_num, self.method, self.uri, resp.status)
                )

            try:
                resp, content = http.request(
                    self.resumable_uri, method="PUT", body=data, headers=headers
                )
            except:
                # NOTE(review): bare except preserved deliberately — any failure
                # marks the error state so the next call re-syncs with the
                # server before re-raising.
                self._in_error_state = True
                raise
            if not _should_retry_response(resp.status, content):
                break

        return self._process_response(resp, content)
1055
1056 - def _process_response(self, resp, content):
1057 """Process the response from a single chunk upload. 1058 1059 Args: 1060 resp: httplib2.Response, the response object. 1061 content: string, the content of the response. 1062 1063 Returns: 1064 (status, body): (ResumableMediaStatus, object) 1065 The body will be None until the resumable media is fully uploaded. 1066 1067 Raises: 1068 googleapiclient.errors.HttpError if the response was not a 2xx or a 308. 1069 """ 1070 if resp.status in [200, 201]: 1071 self._in_error_state = False 1072 return None, self.postproc(resp, content) 1073 elif resp.status == 308: 1074 self._in_error_state = False 1075 # A "308 Resume Incomplete" indicates we are not done. 1076 try: 1077 self.resumable_progress = int(resp["range"].split("-")[1]) + 1 1078 except KeyError: 1079 # If resp doesn't contain range header, resumable progress is 0 1080 self.resumable_progress = 0 1081 if "location" in resp: 1082 self.resumable_uri = resp["location"] 1083 else: 1084 self._in_error_state = True 1085 raise HttpError(resp, content, uri=self.uri) 1086 1087 return ( 1088 MediaUploadProgress(self.resumable_progress, self.resumable.size()), 1089 None, 1090 )
1091
1092 - def to_json(self):
1093 """Returns a JSON representation of the HttpRequest.""" 1094 d = copy.copy(self.__dict__) 1095 if d["resumable"] is not None: 1096 d["resumable"] = self.resumable.to_json() 1097 del d["http"] 1098 del d["postproc"] 1099 del d["_sleep"] 1100 del d["_rand"] 1101 1102 return json.dumps(d)
1103 1104 @staticmethod
1105 - def from_json(s, http, postproc):
1106 """Returns an HttpRequest populated with info from a JSON object.""" 1107 d = json.loads(s) 1108 if d["resumable"] is not None: 1109 d["resumable"] = MediaUpload.new_from_json(d["resumable"]) 1110 return HttpRequest( 1111 http, 1112 postproc, 1113 uri=d["uri"], 1114 method=d["method"], 1115 body=d["body"], 1116 headers=d["headers"], 1117 methodId=d["methodId"], 1118 resumable=d["resumable"], 1119 )
1120 1121 @staticmethod
1122 - def null_postproc(resp, contents):
1123 return resp, contents
1124
class BatchHttpRequest(object):
    """Batches multiple HttpRequest objects into a single HTTP request.

    Example:
      from googleapiclient.http import BatchHttpRequest

      def list_animals(request_id, response, exception):
        \"\"\"Do something with the animals list response.\"\"\"
        if exception is not None:
          # Do something with the exception.
          pass
        else:
          # Do something with the response.
          pass

      def list_farmers(request_id, response, exception):
        \"\"\"Do something with the farmers list response.\"\"\"
        if exception is not None:
          # Do something with the exception.
          pass
        else:
          # Do something with the response.
          pass

      service = build('farm', 'v2')

      batch = BatchHttpRequest()

      batch.add(service.animals().list(), list_animals)
      batch.add(service.farmers().list(), list_farmers)
      batch.execute(http=http)
    """

    @util.positional(1)
    def __init__(self, callback=None, batch_uri=None):
        """Constructor for a BatchHttpRequest.

        Args:
          callback: callable, A callback to be called for each response, of the
            form callback(id, response, exception). The first parameter is the
            request id, and the second is the deserialized response object. The
            third is an googleapiclient.errors.HttpError exception object if an
            HTTP error occurred while processing the request, or None if no
            error occurred.
          batch_uri: string, URI to send batch requests to.
        """
        if batch_uri is None:
            batch_uri = _LEGACY_BATCH_URI

        if batch_uri == _LEGACY_BATCH_URI:
            LOGGER.warning(
                "You have constructed a BatchHttpRequest using the legacy batch "
                "endpoint %s. This endpoint will be turned down on August 12, 2020. "
                "Please provide the API-specific endpoint or use "
                "service.new_batch_http_request(). For more details see "
                "https://developers.googleblog.com/2018/03/discontinuing-support-for-json-rpc-and.html"
                "and https://developers.google.com/api-client-library/python/guide/batch.",
                _LEGACY_BATCH_URI,
            )
        self._batch_uri = batch_uri

        # Global callback to be called for each individual response in the batch.
        self._callback = callback

        # A map from id to request.
        self._requests = {}

        # A map from id to callback.
        self._callbacks = {}

        # List of request ids, in the order in which they were added.
        self._order = []

        # The last auto generated id.
        self._last_auto_id = 0

        # Unique ID on which to base the Content-ID headers.
        self._base_id = None

        # A map from request id to (httplib2.Response, content) response pairs
        self._responses = {}

        # A map of id(Credentials) that have been refreshed.
        self._refreshed_credentials = {}

    def _refresh_and_apply_credentials(self, request, http):
        """Refresh the credentials and apply to the request.

        Args:
          request: HttpRequest, the request.
          http: httplib2.Http, the global http object for the batch.
        """
        # For the credentials to refresh, but only once per refresh_token
        # If there is no http per the request then refresh the http passed in
        # via execute()
        creds = None
        request_credentials = False

        if request.http is not None:
            creds = _auth.get_credentials_from_http(request.http)
            request_credentials = True

        if creds is None and http is not None:
            creds = _auth.get_credentials_from_http(http)

        if creds is not None:
            # id(creds) keys the refreshed-set so each credentials object is
            # refreshed at most once per batch execution.
            if id(creds) not in self._refreshed_credentials:
                _auth.refresh_credentials(creds)
                self._refreshed_credentials[id(creds)] = 1

        # Only apply the credentials if we are using the http object passed in,
        # otherwise apply() will get called during _serialize_request().
        if request.http is None or not request_credentials:
            _auth.apply_credentials(creds, request.headers)

    def _id_to_header(self, id_):
        """Convert an id to a Content-ID header value.

        Args:
          id_: string, identifier of individual request.

        Returns:
          A Content-ID header with the id_ encoded into it. A UUID is prepended
          to the value because Content-ID headers are supposed to be universally
          unique.
        """
        if self._base_id is None:
            self._base_id = uuid.uuid4()

        # NB: we intentionally leave whitespace between base/id and '+', so RFC2822
        # line folding works properly on Python 3; see
        # https://github.com/googleapis/google-api-python-client/issues/164
        return "<%s + %s>" % (self._base_id, quote(id_))

    def _header_to_id(self, header):
        """Convert a Content-ID header value to an id.

        Presumes the Content-ID header conforms to the format that
        _id_to_header() returns.

        Args:
          header: string, Content-ID header value.

        Returns:
          The extracted id value.

        Raises:
          BatchError if the header is not in the expected format.
        """
        if header[0] != "<" or header[-1] != ">":
            raise BatchError("Invalid value for Content-ID: %s" % header)
        if "+" not in header:
            raise BatchError("Invalid value for Content-ID: %s" % header)
        # The base (UUID) half is intentionally discarded; only the
        # per-request id is meaningful to this batch.
        base, id_ = header[1:-1].split(" + ", 1)

        return unquote(id_)

    def _serialize_request(self, request):
        """Convert an HttpRequest object into a string.

        Args:
          request: HttpRequest, the request to serialize.

        Returns:
          The request as a string in application/http format.
        """
        # Construct status line
        parsed = urlparse(request.uri)
        request_line = urlunparse(
            ("", "", parsed.path, parsed.params, parsed.query, "")
        )
        status_line = request.method + " " + request_line + " HTTP/1.1\n"
        major, minor = request.headers.get("content-type", "application/json").split(
            "/"
        )
        msg = MIMENonMultipart(major, minor)
        headers = request.headers.copy()

        if request.http is not None:
            credentials = _auth.get_credentials_from_http(request.http)
            if credentials is not None:
                _auth.apply_credentials(credentials, headers)

        # MIMENonMultipart adds its own Content-Type header.
        if "content-type" in headers:
            del headers["content-type"]

        for key, value in six.iteritems(headers):
            msg[key] = value
        msg["Host"] = parsed.netloc
        msg.set_unixfrom(None)

        if request.body is not None:
            msg.set_payload(request.body)
            msg["content-length"] = str(len(request.body))

        # Serialize the mime message.
        fp = StringIO()
        # maxheaderlen=0 means don't line wrap headers.
        g = Generator(fp, maxheaderlen=0)
        g.flatten(msg, unixfrom=False)
        body = fp.getvalue()

        return status_line + body

    def _deserialize_response(self, payload):
        """Convert string into httplib2 response and content.

        Args:
          payload: string, headers and body as a string.

        Returns:
          A pair (resp, content), such as would be returned from httplib2.request.
        """
        # Strip off the status line
        status_line, payload = payload.split("\n", 1)
        protocol, status, reason = status_line.split(" ", 2)

        # Parse the rest of the response
        parser = FeedParser()
        parser.feed(payload)
        msg = parser.close()
        msg["status"] = status

        # Create httplib2.Response from the parsed headers.
        resp = httplib2.Response(msg)
        resp.reason = reason
        # e.g. "HTTP/1.1" becomes version 11.
        resp.version = int(protocol.split("/", 1)[1].replace(".", ""))

        content = payload.split("\r\n\r\n", 1)[1]

        return resp, content

    def _new_id(self):
        """Create a new id.

        Auto incrementing number that avoids conflicts with ids already used.

        Returns:
          string, a new unique id.
        """
        self._last_auto_id += 1
        # Skip over any ids the caller already supplied explicitly.
        while str(self._last_auto_id) in self._requests:
            self._last_auto_id += 1
        return str(self._last_auto_id)

    @util.positional(2)
    def add(self, request, callback=None, request_id=None):
        """Add a new request.

        Every callback added will be paired with a unique id, the request_id.
        That unique id will be passed back to the callback when the response
        comes back from the server. The default behavior is to have the library
        generate its own unique id. If the caller passes in a request_id then
        they must ensure uniqueness for each request_id, and if they are not an
        exception is raised. Callers should either supply all request_ids or
        never supply a request id, to avoid such an error.

        Args:
          request: HttpRequest, Request to add to the batch.
          callback: callable, A callback to be called for this response, of the
            form callback(id, response, exception). The first parameter is the
            request id, and the second is the deserialized response object. The
            third is an googleapiclient.errors.HttpError exception object if an
            HTTP error occurred while processing the request, or None if no
            errors occurred.
          request_id: string, A unique id for the request. The id will be passed
            to the callback with the response.

        Returns:
          None

        Raises:
          BatchError if a media request is added to a batch.
          KeyError if the request_id is not unique.
        """

        if len(self._order) >= MAX_BATCH_LIMIT:
            raise BatchError(
                "Exceeded the maximum calls(%d) in a single batch request."
                % MAX_BATCH_LIMIT
            )
        if request_id is None:
            request_id = self._new_id()
        if request.resumable is not None:
            raise BatchError("Media requests cannot be used in a batch request.")
        if request_id in self._requests:
            raise KeyError("A request with this ID already exists: %s" % request_id)
        self._requests[request_id] = request
        self._callbacks[request_id] = callback
        self._order.append(request_id)

    def _execute(self, http, order, requests):
        """Serialize batch request, send to server, process response.

        Args:
          http: httplib2.Http, an http object to be used to make the request with.
          order: list, list of request ids in the order they were added to the
            batch.
          requests: list, list of request objects to send.

        Raises:
          httplib2.HttpLib2Error if a transport error has occurred.
          googleapiclient.errors.BatchError if the response is the wrong format.
        """
        message = MIMEMultipart("mixed")
        # Message should not write out its own headers.
        setattr(message, "_write_headers", lambda self: None)

        # Add all the individual requests.
        for request_id in order:
            request = requests[request_id]

            msg = MIMENonMultipart("application", "http")
            msg["Content-Transfer-Encoding"] = "binary"
            msg["Content-ID"] = self._id_to_header(request_id)

            body = self._serialize_request(request)
            msg.set_payload(body)
            message.attach(msg)

        # encode the body: note that we can't use `as_string`, because
        # it plays games with `From ` lines.
        fp = StringIO()
        g = Generator(fp, mangle_from_=False)
        g.flatten(message, unixfrom=False)
        body = fp.getvalue()

        headers = {}
        headers["content-type"] = (
            "multipart/mixed; " 'boundary="%s"'
        ) % message.get_boundary()

        resp, content = http.request(
            self._batch_uri, method="POST", body=body, headers=headers
        )

        if resp.status >= 300:
            raise HttpError(resp, content, uri=self._batch_uri)

        # Prepend with a content-type header so FeedParser can handle it.
        header = "content-type: %s\r\n\r\n" % resp["content-type"]
        # PY3's FeedParser only accepts unicode. So we should decode content
        # here, and encode each payload again.
        if six.PY3:
            content = content.decode("utf-8")
        for_parser = header + content

        parser = FeedParser()
        parser.feed(for_parser)
        mime_response = parser.close()

        if not mime_response.is_multipart():
            raise BatchError(
                "Response not in multipart/mixed format.", resp=resp, content=content
            )

        for part in mime_response.get_payload():
            request_id = self._header_to_id(part["Content-ID"])
            response, content = self._deserialize_response(part.get_payload())
            # We encode content here to emulate normal http response.
            if isinstance(content, six.text_type):
                content = content.encode("utf-8")
            self._responses[request_id] = (response, content)

    @util.positional(1)
    def execute(self, http=None):
        """Execute all the requests as a single batched HTTP request.

        Args:
          http: httplib2.Http, an http object to be used in place of the one the
            HttpRequest request object was constructed with. If one isn't
            supplied then use a http object from the requests in this batch.

        Returns:
          None

        Raises:
          httplib2.HttpLib2Error if a transport error has occurred.
          googleapiclient.errors.BatchError if the response is the wrong format.
        """
        # If we have no requests return
        if len(self._order) == 0:
            return None

        # If http is not supplied use the first valid one given in the requests.
        if http is None:
            for request_id in self._order:
                request = self._requests[request_id]
                if request is not None:
                    http = request.http
                    break

        if http is None:
            raise ValueError("Missing a valid http object.")

        # Special case for OAuth2Credentials-style objects which have not yet been
        # refreshed with an initial access_token.
        creds = _auth.get_credentials_from_http(http)
        if creds is not None:
            if not _auth.is_valid(creds):
                LOGGER.info("Attempting refresh to obtain initial access_token")
                _auth.refresh_credentials(creds)

        self._execute(http, self._order, self._requests)

        # Loop over all the requests and check for 401s. For each 401 request the
        # credentials should be refreshed and then sent again in a separate batch.
        redo_requests = {}
        redo_order = []

        for request_id in self._order:
            resp, content = self._responses[request_id]
            if resp["status"] == "401":
                redo_order.append(request_id)
                request = self._requests[request_id]
                self._refresh_and_apply_credentials(request, http)
                redo_requests[request_id] = request

        if redo_requests:
            self._execute(http, redo_order, redo_requests)

        # Now process all callbacks that are erroring, and raise an exception for
        # ones that return a non-2xx response? Or add extra parameter to callback
        # that contains an HttpError?

        for request_id in self._order:
            resp, content = self._responses[request_id]

            request = self._requests[request_id]
            callback = self._callbacks[request_id]

            response = None
            exception = None
            try:
                if resp.status >= 300:
                    raise HttpError(resp, content, uri=request.uri)
                response = request.postproc(resp, content)
            except HttpError as e:
                exception = e

            # Per-request callback first, then the batch-wide callback.
            if callback is not None:
                callback(request_id, response, exception)
            if self._callback is not None:
                self._callback(request_id, response, exception)
1569
class HttpRequestMock(object):
    """Stand-in for HttpRequest that replays a canned response.

    Do not construct directly, instead use RequestMockBuilder.
    """

    def __init__(self, resp, content, postproc):
        """Store the canned response that execute() will later replay.

        Args:
          resp: httplib2.Response, the response to emulate coming from the request
          content: string, the response body
          postproc: callable, the post processing function usually supplied by
            the model class. See model.JsonModel.response() as an example.
        """
        # Substitute a plain 200 OK when no response was supplied.
        if resp is None:
            resp = httplib2.Response({"status": 200, "reason": "OK"})
        self.resp = resp
        self.content = content
        self.postproc = postproc
        if "reason" in self.resp:
            self.resp.reason = self.resp["reason"]

    def execute(self, http=None):
        """Run the mocked request.

        Same behavior as HttpRequest.execute(), but the response is replayed
        rather than produced by a real HTTP exchange.
        """
        resp, content = self.resp, self.content
        return self.postproc(resp, content)
1601
class RequestMockBuilder(object):
    """A simple mock of HttpRequest

    Pass in a dictionary to the constructor that maps request methodIds to
    tuples of (httplib2.Response, content, opt_expected_body) that should be
    returned when that method is called. None may also be passed in for the
    httplib2.Response, in which case a 200 OK response will be generated.
    If an opt_expected_body (str or dict) is provided, it will be compared to
    the body and UnexpectedBodyError will be raised on inequality.

    Example:
      response = '{"data": {"id": "tag:google.c...'
      requestBuilder = RequestMockBuilder(
        {
          'plus.activities.get': (None, response),
        }
      )
      googleapiclient.discovery.build("plus", "v1", requestBuilder=requestBuilder)

    Methods that you do not supply a response for will return a
    200 OK with an empty string as the response content or raise an exception
    if check_unexpected is set to True. The methodId is taken from the rpcName
    in the discovery document.

    For more details see the project wiki.
    """

    def __init__(self, responses, check_unexpected=False):
        """Constructor for RequestMockBuilder

        The constructed object should be a callable object
        that can replace the class HttpResponse.

        responses - A dictionary that maps methodIds into tuples
                    of (httplib2.Response, content). The methodId
                    comes from the 'rpcName' field in the discovery
                    document.
        check_unexpected - A boolean setting whether or not UnexpectedMethodError
                           should be raised on unsupplied method.
        """
        self.responses = responses
        self.check_unexpected = check_unexpected

    def __call__(
        self,
        http,
        postproc,
        uri,
        method="GET",
        body=None,
        headers=None,
        methodId=None,
        resumable=None,
    ):
        """Implements the callable interface that discovery.build() expects
        of requestBuilder, which is to build an object compatible with
        HttpRequest.execute(). See that method for the description of the
        parameters and the expected response.
        """
        if methodId not in self.responses:
            # No canned response registered for this method.
            if self.check_unexpected:
                raise UnexpectedMethodError(methodId=methodId)
            model = JsonModel(False)
            return HttpRequestMock(None, "{}", model.response)

        canned = self.responses[methodId]
        resp, content = canned[0], canned[1]
        if len(canned) > 2:
            # Validate the supplied body against the registered expectation.
            expected_body = canned[2]
            if bool(expected_body) != bool(body):
                # Not expecting a body and provided one
                # or expecting a body and not provided one.
                raise UnexpectedBodyError(expected_body, body)
            if isinstance(expected_body, str):
                expected_body = json.loads(expected_body)
            body = json.loads(body)
            if body != expected_body:
                raise UnexpectedBodyError(expected_body, body)
        return HttpRequestMock(resp, content, postproc)
1683
class HttpMock(object):
    """Mock of httplib2.Http"""

    def __init__(self, filename=None, headers=None):
        """
        Args:
          filename: string, absolute filename to read response from
          headers: dict, header to return with response
        """
        if headers is None:
            headers = {"status": "200"}
        self.data = None
        if filename:
            with open(filename, "rb") as f:
                self.data = f.read()
        self.response_headers = headers
        # Last-seen request attributes, recorded by request().
        self.uri = None
        self.method = None
        self.body = None
        self.headers = None

    def request(
        self,
        uri,
        method="GET",
        body=None,
        headers=None,
        redirections=1,
        connection_type=None,
    ):
        """Record the request and return the canned response."""
        self.uri = uri
        self.method = method
        self.body = body
        self.headers = headers
        return httplib2.Response(self.response_headers), self.data

    def close(self):
        """No-op; mirrors httplib2.Http.close()."""
        return None
1725
class HttpMockSequence(object):
    """Mock of httplib2.Http

    Mocks a sequence of calls to request returning different responses for each
    call. Create an instance initialized with the desired response headers
    and content and then use as if an httplib2.Http instance.

      http = HttpMockSequence([
        ({'status': '401'}, ''),
        ({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
        ({'status': '200'}, 'echo_request_headers'),
      ])
      resp, content = http.request("http://examples.com")

    There are special values you can pass in for content to trigger
    behaviours that are helpful in testing.

    'echo_request_headers' means return the request headers in the response body
    'echo_request_headers_as_json' means return the request headers in
      the response body
    'echo_request_body' means return the request body in the response body
    'echo_request_uri' means return the request uri in the response body
    """

    def __init__(self, iterable):
        """
        Args:
          iterable: iterable, a sequence of pairs of (headers, body)
        """
        self._iterable = iterable
        self.follow_redirects = True
        self.request_sequence = []

    def request(
        self,
        uri,
        method="GET",
        body=None,
        headers=None,
        redirections=1,
        connection_type=None,
    ):
        # Remember the request so after the fact this mock can be examined
        self.request_sequence.append((uri, method, body, headers))
        resp, content = self._iterable.pop(0)
        content = six.ensure_binary(content)

        # Translate the special echo markers into the corresponding value.
        if content == b"echo_request_headers":
            content = headers
        elif content == b"echo_request_headers_as_json":
            content = json.dumps(headers)
        elif content == b"echo_request_body":
            content = body.read() if hasattr(body, "read") else body
        elif content == b"echo_request_uri":
            content = uri
        if isinstance(content, six.text_type):
            content = content.encode("utf-8")
        return httplib2.Response(resp), content
1787
def set_user_agent(http, user_agent):
    """Wrap an http object so every request carries the given user-agent.

    Args:
      http - An instance of httplib2.Http
          or something that acts like it.
      user_agent: string, the value for the user-agent header.

    Returns:
      A modified instance of http that was passed in.

    Example:

      h = httplib2.Http()
      h = set_user_agent(h, "my-app-name/6.0")

    Most of the time the user-agent will be set doing auth, this is for the
    rare cases where you are accessing an unauthenticated endpoint.
    """
    request_orig = http.request

    def new_request(
        uri,
        method="GET",
        body=None,
        headers=None,
        redirections=httplib2.DEFAULT_MAX_REDIRECTS,
        connection_type=None,
    ):
        """Inject the user-agent header, then delegate to the wrapped request."""
        headers = {} if headers is None else headers
        if "user-agent" in headers:
            # Prepend ours, preserving any agent the caller already set.
            headers["user-agent"] = user_agent + " " + headers["user-agent"]
        else:
            headers["user-agent"] = user_agent
        return request_orig(
            uri,
            method=method,
            body=body,
            headers=headers,
            redirections=redirections,
            connection_type=connection_type,
        )

    http.request = new_request
    return http
def tunnel_patch(http):
    """Tunnel PATCH requests over POST.

    Args:
      http - An instance of httplib2.Http
          or something that acts like it.

    Returns:
      A modified instance of http that was passed in.

    Example:

      h = httplib2.Http()
      h = tunnel_patch(h)

    Useful if you are running on a platform that doesn't support PATCH.
    Apply this last if you are using OAuth 1.0, as changing the method
    will result in a different signature.
    """
    request_orig = http.request

    def new_request(
        uri,
        method="GET",
        body=None,
        headers=None,
        redirections=httplib2.DEFAULT_MAX_REDIRECTS,
        connection_type=None,
    ):
        """Rewrite PATCH into POST plus an x-http-method-override header."""
        headers = {} if headers is None else headers
        if method == "PATCH":
            if "oauth_token" in headers.get("authorization", ""):
                # An OAuth 1.0 signature was computed over PATCH; the tunnel
                # changes the method, which invalidates that signature.
                LOGGER.warning(
                    "OAuth 1.0 request made with Credentials after tunnel_patch."
                )
            headers["x-http-method-override"] = "PATCH"
            method = "POST"
        return request_orig(
            uri,
            method=method,
            body=body,
            headers=headers,
            redirections=redirections,
            connection_type=connection_type,
        )

    http.request = new_request
    return http
def build_http():
    """Builds httplib2.Http object

    Returns:
      A httplib2.Http object, which is used to make http requests, and which
      has timeout set by default. To override default timeout call

        socket.setdefaulttimeout(timeout_in_sec)

      before interacting with this method.
    """
    http_timeout = socket.getdefaulttimeout()
    if http_timeout is None:
        http_timeout = DEFAULT_HTTP_TIMEOUT_SEC
    http = httplib2.Http(timeout=http_timeout)
    # 308's are used by several Google APIs (Drive, YouTube)
    # for Resumable Uploads rather than Permanent Redirects.
    # This asks httplib2 to exclude 308s from the status codes
    # it treats as redirects
    try:
        http.redirect_codes = http.redirect_codes - {308}
    except AttributeError:
        # Apache Beam tests depend on this library and cannot
        # currently upgrade their httplib2 version
        # http.redirect_codes does not exist in previous versions
        # of httplib2, so pass
        pass

    return http
1923