Package googleapiclient :: Module http
[hide private]
[frames] | [no frames]

Source Code for Module googleapiclient.http

   1  # Copyright 2014 Google Inc. All Rights Reserved. 
   2  # 
   3  # Licensed under the Apache License, Version 2.0 (the "License"); 
   4  # you may not use this file except in compliance with the License. 
   5  # You may obtain a copy of the License at 
   6  # 
   7  #      http://www.apache.org/licenses/LICENSE-2.0 
   8  # 
   9  # Unless required by applicable law or agreed to in writing, software 
  10  # distributed under the License is distributed on an "AS IS" BASIS, 
  11  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
  12  # See the License for the specific language governing permissions and 
  13  # limitations under the License. 
  14   
  15  """Classes to encapsulate a single HTTP request. 
  16   
  17  The classes implement a command pattern, with every 
  18  object supporting an execute() method that does the 
   19  actual HTTP request. 
  20  """ 
  21  from __future__ import absolute_import 
  22  import six 
  23  from six.moves import http_client 
  24  from six.moves import range 
  25   
  26  __author__ = 'jcgregorio@google.com (Joe Gregorio)' 
  27   
  28  from six import BytesIO, StringIO 
  29  from six.moves.urllib.parse import urlparse, urlunparse, quote, unquote 
  30   
  31  import base64 
  32  import copy 
  33  import gzip 
  34  import httplib2 
  35  import json 
  36  import logging 
  37  import mimetypes 
  38  import os 
  39  import random 
  40  import socket 
  41  import sys 
  42  import time 
  43  import uuid 
  44   
  45  # TODO(issue 221): Remove this conditional import jibbajabba. 
  46  try: 
  47    import ssl 
  48  except ImportError: 
  49    _ssl_SSLError = object() 
  50  else: 
  51    _ssl_SSLError = ssl.SSLError 
  52   
  53  from email.generator import Generator 
  54  from email.mime.multipart import MIMEMultipart 
  55  from email.mime.nonmultipart import MIMENonMultipart 
  56  from email.parser import FeedParser 
  57   
  58  from googleapiclient import mimeparse 
  59  from googleapiclient.errors import BatchError 
  60  from googleapiclient.errors import HttpError 
  61  from googleapiclient.errors import InvalidChunkSizeError 
  62  from googleapiclient.errors import ResumableUploadError 
  63  from googleapiclient.errors import UnexpectedBodyError 
  64  from googleapiclient.errors import UnexpectedMethodError 
  65  from googleapiclient.model import JsonModel 
  66  from oauth2client import util 
  67   
  68   
  69  LOGGER = logging.getLogger(__name__) 
  70   
  71  DEFAULT_CHUNK_SIZE = 512*1024 
  72   
  73  MAX_URI_LENGTH = 2048 
  74   
  75  _TOO_MANY_REQUESTS = 429 
def _should_retry_response(resp_status, content):
  """Decide whether a response warrants retrying the request.

  Args:
    resp_status: The response status received.
    content: The response content body.

  Returns:
    True if the response should be retried, otherwise False.
  """
  # Any 5xx server error is treated as transient.
  if resp_status >= 500:
    return True

  # 429 means we were throttled; back off and retry.
  if resp_status == _TOO_MANY_REQUESTS:
    return True

  # Anything other than a 403 at this point is a success or a
  # non-retriable failure.
  if resp_status != six.moves.http_client.FORBIDDEN:
    return False

  # A 403 is retriable only when the response body names a rate-limit
  # reason; with no details about the 403 type, don't retry.
  if not content:
    return False

  # The details arrive as a JSON error document.
  try:
    payload = json.loads(content.decode('utf-8'))
    failure_reason = payload['error']['errors'][0]['reason']
  except (UnicodeDecodeError, ValueError, KeyError):
    LOGGER.warning('Invalid JSON content from response: %s', content)
    return False

  LOGGER.warning('Encountered 403 Forbidden with reason "%s"', failure_reason)

  # Only rate-limit related 403 failures are retried.
  return failure_reason in ('userRateLimitExceeded', 'rateLimitExceeded', )
119
def _retry_request(http, num_retries, req_type, sleep, rand, uri, method, *args,
                   **kwargs):
  """Retries an HTTP request multiple times while handling errors.

  If after all retries the request still fails, last error is either returned as
  return value (for HTTP 5xx errors) or thrown (for ssl.SSLError).

  Args:
    http: Http object to be used to execute request.
    num_retries: Maximum number of retries.
    req_type: Type of the request (used for logging retries).
    sleep, rand: Functions to sleep for random time between retries.
    uri: URI to be requested.
    method: HTTP method to be used.
    args, kwargs: Additional arguments passed to http.request.

  Returns:
    resp, content - Response from the http request (may be HTTP 5xx).
  """
  resp = None
  content = None
  for retry_num in range(num_retries + 1):
    if retry_num > 0:
      # Sleep before retrying: randomized exponential backoff, scaling
      # 2**retry_num by rand().
      sleep_time = rand() * 2 ** retry_num
      # `resp`/`exception` were set by the previous iteration; exactly one of
      # them is meaningful here (this branch is unreachable on the first pass).
      LOGGER.warning(
          'Sleeping %.2f seconds before retry %d of %d for %s: %s %s, after %s',
          sleep_time, retry_num, num_retries, req_type, method, uri,
          resp.status if resp else exception)
      sleep(sleep_time)

    try:
      # Reset before each attempt so a success clears any prior error.
      exception = None
      resp, content = http.request(uri, method, *args, **kwargs)
    # Retry on SSL errors and socket timeout errors.
    except _ssl_SSLError as ssl_error:
      exception = ssl_error
    except socket.error as socket_error:
      # errno's contents differ by platform, so we have to match by name.
      # NOTE(review): this relies on the socket module re-exporting `errno`
      # (socket.errno) -- confirm this holds on all supported Python versions.
      if socket.errno.errorcode.get(socket_error.errno) not in (
        'WSAETIMEDOUT', 'ETIMEDOUT', 'EPIPE', 'ECONNABORTED', ):
        raise
      exception = socket_error

    if exception:
      if retry_num == num_retries:
        # Out of retries: surface the last transport error to the caller.
        raise exception
      else:
        continue

    # Stop on success or a non-retriable status; retriable responses
    # (5xx, 429, rate-limited 403) loop around for another attempt.
    if not _should_retry_response(resp.status, content):
      break

  return resp, content
175
class MediaUploadProgress(object):
  """Progress report for a resumable upload."""

  def __init__(self, resumable_progress, total_size):
    """Constructor.

    Args:
      resumable_progress: int, number of bytes sent so far.
      total_size: int, total bytes of the complete upload, or None when the
        final size is not known ahead of time.
    """
    self.resumable_progress = resumable_progress
    self.total_size = total_size

  def progress(self):
    """Fraction of the upload completed, as a float.

    Returns:
      The completion ratio as a float; 0.0 when the total upload size is
      unknown.
    """
    if self.total_size is None:
      # Without a total we cannot compute a meaningful percentage.
      return 0.0
    return float(self.resumable_progress) / float(self.total_size)
202
class MediaDownloadProgress(object):
  """Progress report for a resumable download."""

  def __init__(self, resumable_progress, total_size):
    """Constructor.

    Args:
      resumable_progress: int, number of bytes received so far.
      total_size: int, total bytes of the complete download.
    """
    self.resumable_progress = resumable_progress
    self.total_size = total_size

  def progress(self):
    """Fraction of the download completed, as a float.

    Returns:
      The completion ratio as a float; 0.0 when the total download size is
      unknown.
    """
    if self.total_size is None:
      # Without a total we cannot compute a meaningful percentage.
      return 0.0
    return float(self.resumable_progress) / float(self.total_size)
228
class MediaUpload(object):
  """Describes a media object to upload.

  Base class that defines the interface of MediaUpload subclasses.

  Note that subclasses of MediaUpload may allow you to control the chunksize
  when uploading a media object. It is important to keep the size of the chunk
  as large as possible to keep the upload efficient. Other factors may influence
  the size of the chunk you use, particularly if you are working in an
  environment where individual HTTP requests may have a hardcoded time limit,
  such as under certain classes of requests under Google App Engine.

  Streams are io.Base compatible objects that support seek(). Some MediaUpload
  subclasses support using streams directly to upload data. Support for
  streaming may be indicated by a MediaUpload sub-class and if appropriate for a
  platform that stream will be used for uploading the media object. The support
  for streaming is indicated by has_stream() returning True. The stream() method
  should return an io.Base object that supports seek(). On platforms where the
  underlying httplib module supports streaming, for example Python 2.6 and
  later, the stream will be passed into the http library which will result in
  less memory being used and possibly faster uploads.

  If you need to upload media that can't be uploaded using any of the existing
  MediaUpload sub-class then you can sub-class MediaUpload for your particular
  needs.
  """

  def chunksize(self):
    """Chunk size for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    raise NotImplementedError()

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    # Default for subclasses that don't override: generic binary content.
    return 'application/octet-stream'

  def size(self):
    """Size of upload.

    Returns:
      Size of the body, or None if the size is unknown.
    """
    return None

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if resumable upload or False.
    """
    return False

  def getbytes(self, begin, end):
    """Get bytes from the media.

    Args:
      begin: int, offset from beginning of file.
      length: int, number of bytes to read, starting at begin.

    Returns:
      A string of bytes read. May be shorter than length if EOF was reached
      first.

    NOTE(review): the second parameter is named `end` here but is documented
    (and implemented by subclasses such as MediaIoBaseUpload) as a byte count
    -- confirm and reconcile the naming.
    """
    raise NotImplementedError()

  def has_stream(self):
    """Does the underlying upload support a streaming interface.

    Streaming means it is an io.IOBase subclass that supports seek, i.e.
    seekable() returns True.

    Returns:
      True if the call to stream() will return an instance of a seekable io.Base
      subclass.
    """
    return False

  def stream(self):
    """A stream interface to the data being uploaded.

    Returns:
      The returned value is an io.IOBase subclass that supports seek, i.e.
      seekable() returns True.
    """
    raise NotImplementedError()

  @util.positional(1)
  def _to_json(self, strip=None):
    """Utility function for creating a JSON representation of a MediaUpload.

    Args:
      strip: array, An array of names of members to not include in the JSON.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    t = type(self)
    # Copy so that removing stripped members doesn't mutate the instance.
    d = copy.copy(self.__dict__)
    if strip is not None:
      for member in strip:
        del d[member]
    # Record the concrete class so new_from_json() can find it again.
    d['_class'] = t.__name__
    d['_module'] = t.__module__
    return json.dumps(d)

  def to_json(self):
    """Create a JSON representation of an instance of MediaUpload.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    return self._to_json()

  @classmethod
  def new_from_json(cls, s):
    """Utility class method to instantiate a MediaUpload subclass from a JSON
    representation produced by to_json().

    Args:
      s: string, JSON from to_json().

    Returns:
      An instance of the subclass of MediaUpload that was serialized with
      to_json().
    """
    data = json.loads(s)
    # Find and call the right classmethod from_json() to restore the object.
    module = data['_module']
    m = __import__(module, fromlist=module.split('.')[:-1])
    kls = getattr(m, data['_class'])
    from_json = getattr(kls, 'from_json')
    return from_json(s)
371
class MediaIoBaseUpload(MediaUpload):
  """MediaUpload implementation backed by a seekable io.Base object.

  Plain Python file objects are io.Base compatible, so they can be used with
  this class as well.

    fh = BytesIO('...Some data to upload...')
    media = MediaIoBaseUpload(fh, mimetype='image/png',
      chunksize=1024*1024, resumable=True)
    farm.animals().insert(
        id='cow',
        name='cow.png',
        media_body=media).execute()

  A chunksize of -1 requests that the whole object be sent in a single
  request. On platforms with stream support, such as Python 2.6 and later,
  this can be very efficient: it avoids multiple connections and does not
  load the entire file into memory before sending. Note that Google App
  Engine caps request sizes at 5MB, so never use a chunksize larger than
  5MB, or -1, there.
  """

  @util.positional(3)
  def __init__(self, fd, mimetype, chunksize=DEFAULT_CHUNK_SIZE,
               resumable=False):
    """Constructor.

    Args:
      fd: io.Base or file object, the source of the bytes to upload. MUST be
        opened in blocking mode (streams opened in non-blocking mode are not
        supported) and MUST be seekable, i.e. seek() on fd must work.
      mimetype: string, Mime-type of the file.
      chunksize: int, bytes uploaded per chunk; only used if resumable=True.
        Pass -1 to upload the file as a single chunk. Mind the 5MB App Engine
        request-size limit described on the class.
      resumable: bool, True for a resumable upload, False to upload in a
        single request.
    """
    super(MediaIoBaseUpload, self).__init__()
    self._fd = fd
    self._mimetype = mimetype
    # -1 is the only legal non-positive chunksize; it means "single chunk".
    if chunksize != -1 and chunksize <= 0:
      raise InvalidChunkSizeError()
    self._chunksize = chunksize
    self._resumable = resumable

    # Determine the total body size by seeking to the end of the stream.
    self._fd.seek(0, os.SEEK_END)
    self._size = self._fd.tell()

  def chunksize(self):
    """Chunk size used for resumable uploads.

    Returns:
      Chunk size in bytes.
    """
    return self._chunksize

  def mimetype(self):
    """Mime type of the body.

    Returns:
      Mime type.
    """
    return self._mimetype

  def size(self):
    """Total size of the upload.

    Returns:
      Size of the body in bytes, or None if the size is unknown.
    """
    return self._size

  def resumable(self):
    """Whether this upload is resumable.

    Returns:
      True if this is a resumable upload, False otherwise.
    """
    return self._resumable

  def getbytes(self, begin, length):
    """Read bytes from the media.

    Args:
      begin: int, offset from the beginning of the file.
      length: int, number of bytes to read, starting at begin.

    Returns:
      A string of bytes read; may be shorter than length if EOF was reached
      first.
    """
    self._fd.seek(begin)
    return self._fd.read(length)

  def has_stream(self):
    """Whether the underlying upload supports a streaming interface.

    Returns:
      True: stream() returns a seekable io.Base subclass.
    """
    return True

  def stream(self):
    """A stream interface to the data being uploaded.

    Returns:
      The underlying io.IOBase subclass; it supports seek, i.e. seekable()
      returns True.
    """
    return self._fd

  def to_json(self):
    """This upload type is not serializable."""
    raise NotImplementedError('MediaIoBaseUpload is not serializable.')
496
class MediaFileUpload(MediaIoBaseUpload):
  """A MediaUpload backed by a named file on disk.

  Construct a MediaFileUpload and pass it as the media_body parameter of the
  method. For example, for a service that accepts image uploads:


    media = MediaFileUpload('cow.png', mimetype='image/png',
      chunksize=1024*1024, resumable=True)
    farm.animals().insert(
        id='cow',
        name='cow.png',
        media_body=media).execute()

  A chunksize of -1 uploads the entire file in a single request. On platforms
  with stream support, such as Python 2.6 and later, this can be very
  efficient: it avoids multiple connections and does not load the whole file
  into memory before sending. Note that Google App Engine caps request sizes
  at 5MB, so never use a chunksize larger than 5MB, or -1, there.
  """

  @util.positional(2)
  def __init__(self, filename, mimetype=None, chunksize=DEFAULT_CHUNK_SIZE,
               resumable=False):
    """Constructor.

    Args:
      filename: string, path of the file to upload.
      mimetype: string, Mime-type of the file; when None, a mime-type is
        guessed from the file extension.
      chunksize: int, bytes uploaded per chunk; only used if resumable=True.
        Pass -1 to upload the file as a single chunk. Mind the 5MB App Engine
        request-size limit described on the class.
      resumable: bool, True for a resumable upload, False to upload in a
        single request.
    """
    self._filename = filename
    handle = open(self._filename, 'rb')
    if mimetype is None:
      # No explicit mime type: infer one from the file extension, falling
      # back to the generic binary type when the guess fails.
      guessed, _ = mimetypes.guess_type(filename)
      mimetype = guessed if guessed is not None else 'application/octet-stream'
    super(MediaFileUpload, self).__init__(handle, mimetype, chunksize=chunksize,
                                          resumable=resumable)

  def to_json(self):
    """Create a JSON representation of this MediaFileUpload.

    Returns:
      string, a JSON representation of this instance, suitable to pass to
      from_json().
    """
    # The open file handle cannot be serialized; strip it out.
    return self._to_json(strip=['_fd'])

  @staticmethod
  def from_json(s):
    """Rebuild a MediaFileUpload from a JSON string produced by to_json()."""
    parsed = json.loads(s)
    return MediaFileUpload(parsed['_filename'], mimetype=parsed['_mimetype'],
                           chunksize=parsed['_chunksize'],
                           resumable=parsed['_resumable'])
563
class MediaInMemoryUpload(MediaIoBaseUpload):
  """MediaUpload for a chunk of bytes.

  DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
  the stream.
  """

  @util.positional(2)
  def __init__(self, body, mimetype='application/octet-stream',
               chunksize=DEFAULT_CHUNK_SIZE, resumable=False):
    """Create a new MediaInMemoryUpload.

    DEPRECATED: Use MediaIoBaseUpload with either io.TextIOBase or StringIO for
    the stream.

    Args:
      body: string, the bytes forming the body content.
      mimetype: string, Mime-type of the file, defaulting to
        'application/octet-stream'.
      chunksize: int, bytes uploaded per chunk; only used if resumable=True.
      resumable: bool, True for a resumable upload, False to upload in a
        single request.
    """
    # Wrap the raw bytes in an in-memory stream and defer to the base class.
    super(MediaInMemoryUpload, self).__init__(
        BytesIO(body), mimetype, chunksize=chunksize, resumable=resumable)
592
class MediaIoBaseDownload(object):
  """Download media resources.

  Note that the Python file object is compatible with io.Base and can be used
  with this class also.


  Example:
    request = farms.animals().get_media(id='cow')
    fh = io.FileIO('cow.png', mode='wb')
    downloader = MediaIoBaseDownload(fh, request, chunksize=1024*1024)

    done = False
    while done is False:
      status, done = downloader.next_chunk()
      if status:
        print "Download %d%%." % int(status.progress() * 100)
    print "Download Complete!"
  """

  @util.positional(3)
  def __init__(self, fd, request, chunksize=DEFAULT_CHUNK_SIZE):
    """Constructor.

    Args:
      fd: io.Base or file object, The stream in which to write the downloaded
        bytes.
      request: googleapiclient.http.HttpRequest, the media request to perform in
        chunks.
      chunksize: int, File will be downloaded in chunks of this many bytes.
    """
    self._fd = fd
    self._request = request
    self._uri = request.uri
    self._chunksize = chunksize
    # Bytes written to fd so far; doubles as the next Range start offset.
    self._progress = 0
    # Learned from Content-Range/Content-Length of the first response.
    self._total_size = None
    self._done = False

    # Stubs for testing.
    self._sleep = time.sleep
    self._rand = random.random

  @util.positional(1)
  def next_chunk(self, num_retries=0):
    """Get the next chunk of the download.

    Args:
      num_retries: Integer, number of times to retry 500's with randomized
        exponential backoff. If all retries fail, the raised HttpError
        represents the last request. If zero (default), we attempt the
        request only once.

    Returns:
      (status, done): (MediaDownloadStatus, boolean)
         The value of 'done' will be True when the media has been fully
         downloaded.

    Raises:
      googleapiclient.errors.HttpError if the response was not a 2xx.
      httplib2.HttpLib2Error if a transport error has occured.
    """
    # NOTE(review): the Range end offset is inclusive, so this actually asks
    # for chunksize + 1 bytes per request -- confirm whether that is intended.
    headers = {
        'range': 'bytes=%d-%d' % (
            self._progress, self._progress + self._chunksize)
        }
    http = self._request.http

    resp, content = _retry_request(
        http, num_retries, 'media download', self._sleep, self._rand, self._uri,
        'GET', headers=headers)

    if resp.status in [200, 206]:
      # Follow a server-provided relocation of the media for later chunks.
      if 'content-location' in resp and resp['content-location'] != self._uri:
        self._uri = resp['content-location']
      self._progress += len(content)
      self._fd.write(content)

      # Learn the total size from 'Content-Range: bytes a-b/total' when
      # present, otherwise fall back to Content-Length.
      if 'content-range' in resp:
        content_range = resp['content-range']
        length = content_range.rsplit('/', 1)[1]
        self._total_size = int(length)
      elif 'content-length' in resp:
        self._total_size = int(resp['content-length'])

      if self._progress == self._total_size:
        self._done = True
      return MediaDownloadProgress(self._progress, self._total_size), self._done
    else:
      raise HttpError(resp, content, uri=self._uri)
684
685 686 -class _StreamSlice(object):
687 """Truncated stream. 688 689 Takes a stream and presents a stream that is a slice of the original stream. 690 This is used when uploading media in chunks. In later versions of Python a 691 stream can be passed to httplib in place of the string of data to send. The 692 problem is that httplib just blindly reads to the end of the stream. This 693 wrapper presents a virtual stream that only reads to the end of the chunk. 694 """ 695
696 - def __init__(self, stream, begin, chunksize):
697 """Constructor. 698 699 Args: 700 stream: (io.Base, file object), the stream to wrap. 701 begin: int, the seek position the chunk begins at. 702 chunksize: int, the size of the chunk. 703 """ 704 self._stream = stream 705 self._begin = begin 706 self._chunksize = chunksize 707 self._stream.seek(begin)
708
709 - def read(self, n=-1):
710 """Read n bytes. 711 712 Args: 713 n, int, the number of bytes to read. 714 715 Returns: 716 A string of length 'n', or less if EOF is reached. 717 """ 718 # The data left available to read sits in [cur, end) 719 cur = self._stream.tell() 720 end = self._begin + self._chunksize 721 if n == -1 or cur + n > end: 722 n = end - cur 723 return self._stream.read(n)
724
class HttpRequest(object):
  """Encapsulates a single HTTP request."""

  @util.positional(4)
  def __init__(self, http, postproc, uri,
               method='GET',
               body=None,
               headers=None,
               methodId=None,
               resumable=None):
    """Constructor for an HttpRequest.

    Args:
      http: httplib2.Http, the transport object to use to make a request
      postproc: callable, called on the HTTP response and content to transform
                it into a data object before returning, or raising an exception
                on an error.
      uri: string, the absolute URI to send the request to
      method: string, the HTTP method to use
      body: string, the request body of the HTTP request,
      headers: dict, the HTTP request headers
      methodId: string, a unique identifier for the API method being called.
      resumable: MediaUpload, None if this is not a resumbale request.
    """
    self.uri = uri
    self.method = method
    self.body = body
    self.headers = headers or {}
    self.methodId = methodId
    self.http = http
    self.postproc = postproc
    self.resumable = resumable
    self.response_callbacks = []
    # Set True after a failed resumable PUT so next_chunk() first queries the
    # server for how much it actually received.
    self._in_error_state = False

    # Pull the multipart boundary out of the content-type header.
    # NOTE(review): the parsed values are unused in this class -- presumably
    # kept for the validation side effect; confirm before removing.
    major, minor, params = mimeparse.parse_mime_type(
        self.headers.get('content-type', 'application/json'))

    # The size of the non-media part of the request.
    self.body_size = len(self.body or '')

    # The resumable URI to send chunks to.
    self.resumable_uri = None

    # The bytes that have been uploaded.
    self.resumable_progress = 0

    # Stubs for testing.
    self._rand = random.random
    self._sleep = time.sleep

  @util.positional(1)
  def execute(self, http=None, num_retries=0):
    """Execute the request.

    Args:
      http: httplib2.Http, an http object to be used in place of the
            one the HttpRequest request object was constructed with.
      num_retries: Integer, number of times to retry 500's with randomized
            exponential backoff. If all retries fail, the raised HttpError
            represents the last request. If zero (default), we attempt the
            request only once.

    Returns:
      A deserialized object model of the response body as determined
      by the postproc.

    Raises:
      googleapiclient.errors.HttpError if the response was not a 2xx.
      httplib2.HttpLib2Error if a transport error has occured.
    """
    if http is None:
      http = self.http

    # Resumable uploads are driven to completion chunk by chunk.
    if self.resumable:
      body = None
      while body is None:
        _, body = self.next_chunk(http=http, num_retries=num_retries)
      return body

    # Non-resumable case.

    if 'content-length' not in self.headers:
      self.headers['content-length'] = str(self.body_size)
    # If the request URI is too long then turn it into a POST request.
    # The query string moves into the body and x-http-method-override tells
    # the server to treat it as a GET.
    if len(self.uri) > MAX_URI_LENGTH and self.method == 'GET':
      self.method = 'POST'
      self.headers['x-http-method-override'] = 'GET'
      self.headers['content-type'] = 'application/x-www-form-urlencoded'
      parsed = urlparse(self.uri)
      self.uri = urlunparse(
          (parsed.scheme, parsed.netloc, parsed.path, parsed.params, None,
           None)
          )
      self.body = parsed.query
      self.headers['content-length'] = str(len(self.body))

    # Handle retries for server-side errors.
    resp, content = _retry_request(
          http, num_retries, 'request', self._sleep, self._rand, str(self.uri),
          method=str(self.method), body=self.body, headers=self.headers)

    for callback in self.response_callbacks:
      callback(resp)
    if resp.status >= 300:
      raise HttpError(resp, content, uri=self.uri)
    return self.postproc(resp, content)

  @util.positional(2)
  def add_response_callback(self, cb):
    """add_response_headers_callback

    Args:
      cb: Callback to be called on receiving the response headers, of signature:

          def cb(resp):
            # Where resp is an instance of httplib2.Response
    """
    self.response_callbacks.append(cb)

  @util.positional(1)
  def next_chunk(self, http=None, num_retries=0):
    """Execute the next step of a resumable upload.

    Can only be used if the method being executed supports media uploads and
    the MediaUpload object passed in was flagged as using resumable upload.

    Example:

      media = MediaFileUpload('cow.png', mimetype='image/png',
                              chunksize=1000, resumable=True)
      request = farm.animals().insert(
          id='cow',
          name='cow.png',
          media_body=media)

      response = None
      while response is None:
        status, response = request.next_chunk()
        if status:
          print "Upload %d%% complete." % int(status.progress() * 100)


    Args:
      http: httplib2.Http, an http object to be used in place of the
            one the HttpRequest request object was constructed with.
      num_retries: Integer, number of times to retry 500's with randomized
            exponential backoff. If all retries fail, the raised HttpError
            represents the last request. If zero (default), we attempt the
            request only once.

    Returns:
      (status, body): (ResumableMediaStatus, object)
         The body will be None until the resumable media is fully uploaded.

    Raises:
      googleapiclient.errors.HttpError if the response was not a 2xx.
      httplib2.HttpLib2Error if a transport error has occured.
    """
    if http is None:
      http = self.http

    # '*' signals an upload of unknown total size.
    if self.resumable.size() is None:
      size = '*'
    else:
      size = str(self.resumable.size())

    if self.resumable_uri is None:
      # First call: initiate the upload session to obtain the resumable URI.
      start_headers = copy.copy(self.headers)
      start_headers['X-Upload-Content-Type'] = self.resumable.mimetype()
      if size != '*':
        start_headers['X-Upload-Content-Length'] = size
      start_headers['content-length'] = str(self.body_size)

      resp, content = _retry_request(
          http, num_retries, 'resumable URI request', self._sleep, self._rand,
          self.uri, method=self.method, body=self.body, headers=start_headers)

      if resp.status == 200 and 'location' in resp:
        self.resumable_uri = resp['location']
      else:
        raise ResumableUploadError(resp, content)
    elif self._in_error_state:
      # If we are in an error state then query the server for current state of
      # the upload by sending an empty PUT and reading the 'range' header in
      # the response.
      headers = {
          'Content-Range': 'bytes */%s' % size,
          'content-length': '0'
          }
      resp, content = http.request(self.resumable_uri, 'PUT',
                                   headers=headers)
      status, body = self._process_response(resp, content)
      if body:
        # The upload was complete.
        return (status, body)

    if self.resumable.has_stream():
      data = self.resumable.stream()
      if self.resumable.chunksize() == -1:
        # Single-chunk upload: send everything from the current offset.
        data.seek(self.resumable_progress)
        chunk_end = self.resumable.size() - self.resumable_progress - 1
      else:
        # Doing chunking with a stream, so wrap a slice of the stream.
        data = _StreamSlice(data, self.resumable_progress,
                            self.resumable.chunksize())
        chunk_end = min(
            self.resumable_progress + self.resumable.chunksize() - 1,
            self.resumable.size() - 1)
    else:
      data = self.resumable.getbytes(
          self.resumable_progress, self.resumable.chunksize())

      # A short read implies that we are at EOF, so finish the upload.
      if len(data) < self.resumable.chunksize():
        size = str(self.resumable_progress + len(data))

      chunk_end = self.resumable_progress + len(data) - 1

    headers = {
        'Content-Range': 'bytes %d-%d/%s' % (
            self.resumable_progress, chunk_end, size),
        # Must set the content-length header here because httplib can't
        # calculate the size when working with _StreamSlice.
        'Content-Length': str(chunk_end - self.resumable_progress + 1)
        }

    for retry_num in range(num_retries + 1):
      if retry_num > 0:
        self._sleep(self._rand() * 2**retry_num)
        LOGGER.warning(
            'Retry #%d for media upload: %s %s, following status: %d'
            % (retry_num, self.method, self.uri, resp.status))

      try:
        resp, content = http.request(self.resumable_uri, method='PUT',
                                     body=data,
                                     headers=headers)
      # Bare except is deliberate: any failure must flip the error state
      # before the exception propagates, so the next call re-syncs first.
      except:
        self._in_error_state = True
        raise
      if resp.status < 500:
        break

    return self._process_response(resp, content)

  def _process_response(self, resp, content):
    """Process the response from a single chunk upload.

    Args:
      resp: httplib2.Response, the response object.
      content: string, the content of the response.

    Returns:
      (status, body): (ResumableMediaStatus, object)
         The body will be None until the resumable media is fully uploaded.

    Raises:
      googleapiclient.errors.HttpError if the response was not a 2xx or a 308.
    """
    if resp.status in [200, 201]:
      self._in_error_state = False
      return None, self.postproc(resp, content)
    elif resp.status == 308:
      self._in_error_state = False
      # A "308 Resume Incomplete" indicates we are not done.
      # 'range' reports the last byte the server has, e.g. 'bytes=0-999'.
      self.resumable_progress = int(resp['range'].split('-')[1]) + 1
      if 'location' in resp:
        self.resumable_uri = resp['location']
    else:
      self._in_error_state = True
      raise HttpError(resp, content, uri=self.uri)

    return (MediaUploadProgress(self.resumable_progress, self.resumable.size()),
            None)

  def to_json(self):
    """Returns a JSON representation of the HttpRequest."""
    d = copy.copy(self.__dict__)
    if d['resumable'] is not None:
      d['resumable'] = self.resumable.to_json()
    # Drop members that can't be serialized (transport, callables, stubs).
    del d['http']
    del d['postproc']
    del d['_sleep']
    del d['_rand']

    return json.dumps(d)

  @staticmethod
  def from_json(s, http, postproc):
    """Returns an HttpRequest populated with info from a JSON object."""
    d = json.loads(s)
    if d['resumable'] is not None:
      d['resumable'] = MediaUpload.new_from_json(d['resumable'])
    return HttpRequest(
        http,
        postproc,
        uri=d['uri'],
        method=d['method'],
        body=d['body'],
        headers=d['headers'],
        methodId=d['methodId'],
        resumable=d['resumable'])
1030
1031 1032 -class BatchHttpRequest(object):
1033 """Batches multiple HttpRequest objects into a single HTTP request. 1034 1035 Example: 1036 from googleapiclient.http import BatchHttpRequest 1037 1038 def list_animals(request_id, response, exception): 1039 \"\"\"Do something with the animals list response.\"\"\" 1040 if exception is not None: 1041 # Do something with the exception. 1042 pass 1043 else: 1044 # Do something with the response. 1045 pass 1046 1047 def list_farmers(request_id, response, exception): 1048 \"\"\"Do something with the farmers list response.\"\"\" 1049 if exception is not None: 1050 # Do something with the exception. 1051 pass 1052 else: 1053 # Do something with the response. 1054 pass 1055 1056 service = build('farm', 'v2') 1057 1058 batch = BatchHttpRequest() 1059 1060 batch.add(service.animals().list(), list_animals) 1061 batch.add(service.farmers().list(), list_farmers) 1062 batch.execute(http=http) 1063 """ 1064 1065 @util.positional(1)
1066 - def __init__(self, callback=None, batch_uri=None):
1067 """Constructor for a BatchHttpRequest. 1068 1069 Args: 1070 callback: callable, A callback to be called for each response, of the 1071 form callback(id, response, exception). The first parameter is the 1072 request id, and the second is the deserialized response object. The 1073 third is an googleapiclient.errors.HttpError exception object if an HTTP error 1074 occurred while processing the request, or None if no error occurred. 1075 batch_uri: string, URI to send batch requests to. 1076 """ 1077 if batch_uri is None: 1078 batch_uri = 'https://www.googleapis.com/batch' 1079 self._batch_uri = batch_uri 1080 1081 # Global callback to be called for each individual response in the batch. 1082 self._callback = callback 1083 1084 # A map from id to request. 1085 self._requests = {} 1086 1087 # A map from id to callback. 1088 self._callbacks = {} 1089 1090 # List of request ids, in the order in which they were added. 1091 self._order = [] 1092 1093 # The last auto generated id. 1094 self._last_auto_id = 0 1095 1096 # Unique ID on which to base the Content-ID headers. 1097 self._base_id = None 1098 1099 # A map from request id to (httplib2.Response, content) response pairs 1100 self._responses = {} 1101 1102 # A map of id(Credentials) that have been refreshed. 1103 self._refreshed_credentials = {}
1104
1105 - def _refresh_and_apply_credentials(self, request, http):
1106 """Refresh the credentials and apply to the request. 1107 1108 Args: 1109 request: HttpRequest, the request. 1110 http: httplib2.Http, the global http object for the batch. 1111 """ 1112 # For the credentials to refresh, but only once per refresh_token 1113 # If there is no http per the request then refresh the http passed in 1114 # via execute() 1115 creds = None 1116 if request.http is not None and hasattr(request.http.request, 1117 'credentials'): 1118 creds = request.http.request.credentials 1119 elif http is not None and hasattr(http.request, 'credentials'): 1120 creds = http.request.credentials 1121 if creds is not None: 1122 if id(creds) not in self._refreshed_credentials: 1123 creds.refresh(http) 1124 self._refreshed_credentials[id(creds)] = 1 1125 1126 # Only apply the credentials if we are using the http object passed in, 1127 # otherwise apply() will get called during _serialize_request(). 1128 if request.http is None or not hasattr(request.http.request, 1129 'credentials'): 1130 creds.apply(request.headers)
1131
1132 - def _id_to_header(self, id_):
1133 """Convert an id to a Content-ID header value. 1134 1135 Args: 1136 id_: string, identifier of individual request. 1137 1138 Returns: 1139 A Content-ID header with the id_ encoded into it. A UUID is prepended to 1140 the value because Content-ID headers are supposed to be universally 1141 unique. 1142 """ 1143 if self._base_id is None: 1144 self._base_id = uuid.uuid4() 1145 1146 return '<%s+%s>' % (self._base_id, quote(id_))
1147
1148 - def _header_to_id(self, header):
1149 """Convert a Content-ID header value to an id. 1150 1151 Presumes the Content-ID header conforms to the format that _id_to_header() 1152 returns. 1153 1154 Args: 1155 header: string, Content-ID header value. 1156 1157 Returns: 1158 The extracted id value. 1159 1160 Raises: 1161 BatchError if the header is not in the expected format. 1162 """ 1163 if header[0] != '<' or header[-1] != '>': 1164 raise BatchError("Invalid value for Content-ID: %s" % header) 1165 if '+' not in header: 1166 raise BatchError("Invalid value for Content-ID: %s" % header) 1167 base, id_ = header[1:-1].rsplit('+', 1) 1168 1169 return unquote(id_)
1170
1171 - def _serialize_request(self, request):
1172 """Convert an HttpRequest object into a string. 1173 1174 Args: 1175 request: HttpRequest, the request to serialize. 1176 1177 Returns: 1178 The request as a string in application/http format. 1179 """ 1180 # Construct status line 1181 parsed = urlparse(request.uri) 1182 request_line = urlunparse( 1183 ('', '', parsed.path, parsed.params, parsed.query, '') 1184 ) 1185 status_line = request.method + ' ' + request_line + ' HTTP/1.1\n' 1186 major, minor = request.headers.get('content-type', 'application/json').split('/') 1187 msg = MIMENonMultipart(major, minor) 1188 headers = request.headers.copy() 1189 1190 if request.http is not None and hasattr(request.http.request, 1191 'credentials'): 1192 request.http.request.credentials.apply(headers) 1193 1194 # MIMENonMultipart adds its own Content-Type header. 1195 if 'content-type' in headers: 1196 del headers['content-type'] 1197 1198 for key, value in six.iteritems(headers): 1199 msg[key] = value 1200 msg['Host'] = parsed.netloc 1201 msg.set_unixfrom(None) 1202 1203 if request.body is not None: 1204 msg.set_payload(request.body) 1205 msg['content-length'] = str(len(request.body)) 1206 1207 # Serialize the mime message. 1208 fp = StringIO() 1209 # maxheaderlen=0 means don't line wrap headers. 1210 g = Generator(fp, maxheaderlen=0) 1211 g.flatten(msg, unixfrom=False) 1212 body = fp.getvalue() 1213 1214 return status_line + body
1215
1216 - def _deserialize_response(self, payload):
1217 """Convert string into httplib2 response and content. 1218 1219 Args: 1220 payload: string, headers and body as a string. 1221 1222 Returns: 1223 A pair (resp, content), such as would be returned from httplib2.request. 1224 """ 1225 # Strip off the status line 1226 status_line, payload = payload.split('\n', 1) 1227 protocol, status, reason = status_line.split(' ', 2) 1228 1229 # Parse the rest of the response 1230 parser = FeedParser() 1231 parser.feed(payload) 1232 msg = parser.close() 1233 msg['status'] = status 1234 1235 # Create httplib2.Response from the parsed headers. 1236 resp = httplib2.Response(msg) 1237 resp.reason = reason 1238 resp.version = int(protocol.split('/', 1)[1].replace('.', '')) 1239 1240 content = payload.split('\r\n\r\n', 1)[1] 1241 1242 return resp, content
1243
1244 - def _new_id(self):
1245 """Create a new id. 1246 1247 Auto incrementing number that avoids conflicts with ids already used. 1248 1249 Returns: 1250 string, a new unique id. 1251 """ 1252 self._last_auto_id += 1 1253 while str(self._last_auto_id) in self._requests: 1254 self._last_auto_id += 1 1255 return str(self._last_auto_id)
1256 1257 @util.positional(2)
1258 - def add(self, request, callback=None, request_id=None):
1259 """Add a new request. 1260 1261 Every callback added will be paired with a unique id, the request_id. That 1262 unique id will be passed back to the callback when the response comes back 1263 from the server. The default behavior is to have the library generate it's 1264 own unique id. If the caller passes in a request_id then they must ensure 1265 uniqueness for each request_id, and if they are not an exception is 1266 raised. Callers should either supply all request_ids or nevery supply a 1267 request id, to avoid such an error. 1268 1269 Args: 1270 request: HttpRequest, Request to add to the batch. 1271 callback: callable, A callback to be called for this response, of the 1272 form callback(id, response, exception). The first parameter is the 1273 request id, and the second is the deserialized response object. The 1274 third is an googleapiclient.errors.HttpError exception object if an HTTP error 1275 occurred while processing the request, or None if no errors occurred. 1276 request_id: string, A unique id for the request. The id will be passed to 1277 the callback with the response. 1278 1279 Returns: 1280 None 1281 1282 Raises: 1283 BatchError if a media request is added to a batch. 1284 KeyError is the request_id is not unique. 1285 """ 1286 if request_id is None: 1287 request_id = self._new_id() 1288 if request.resumable is not None: 1289 raise BatchError("Media requests cannot be used in a batch request.") 1290 if request_id in self._requests: 1291 raise KeyError("A request with this ID already exists: %s" % request_id) 1292 self._requests[request_id] = request 1293 self._callbacks[request_id] = callback 1294 self._order.append(request_id)
1295
  def _execute(self, http, order, requests):
    """Serialize batch request, send to server, process response.

    Args:
      http: httplib2.Http, an http object to be used to make the request with.
      order: list, list of request ids in the order they were added to the
        batch.
      requests: dict, mapping of request id to the request object to send.

    Raises:
      httplib2.HttpLib2Error if a transport error has occured.
      googleapiclient.errors.BatchError if the response is the wrong format.
    """
    message = MIMEMultipart('mixed')
    # Message should not write out its own headers; the outer HTTP request
    # carries them instead.
    setattr(message, '_write_headers', lambda self: None)

    # Add all the individual requests, each as an application/http MIME part
    # tagged with a Content-ID so responses can be routed back.
    for request_id in order:
      request = requests[request_id]

      msg = MIMENonMultipart('application', 'http')
      msg['Content-Transfer-Encoding'] = 'binary'
      msg['Content-ID'] = self._id_to_header(request_id)

      body = self._serialize_request(request)
      msg.set_payload(body)
      message.attach(msg)

    # encode the body: note that we can't use `as_string`, because
    # it plays games with `From ` lines.
    fp = StringIO()
    g = Generator(fp, mangle_from_=False)
    g.flatten(message, unixfrom=False)
    body = fp.getvalue()

    headers = {}
    headers['content-type'] = ('multipart/mixed; '
                               'boundary="%s"') % message.get_boundary()

    resp, content = http.request(self._batch_uri, method='POST', body=body,
                                 headers=headers)

    if resp.status >= 300:
      raise HttpError(resp, content, uri=self._batch_uri)

    # Prepend with a content-type header so FeedParser can handle it.
    header = 'content-type: %s\r\n\r\n' % resp['content-type']
    # PY3's FeedParser only accepts unicode. So we should decode content
    # here, and encode each payload again.
    if six.PY3:
      content = content.decode('utf-8')
    for_parser = header + content

    parser = FeedParser()
    parser.feed(for_parser)
    mime_response = parser.close()

    if not mime_response.is_multipart():
      raise BatchError("Response not in multipart/mixed format.", resp=resp,
                       content=content)

    # Route each response part back to its request via the Content-ID header.
    for part in mime_response.get_payload():
      request_id = self._header_to_id(part['Content-ID'])
      response, content = self._deserialize_response(part.get_payload())
      # We encode content here to emulate normal http response.
      if isinstance(content, six.text_type):
        content = content.encode('utf-8')
      self._responses[request_id] = (response, content)
1365 1366 @util.positional(1)
1367 - def execute(self, http=None):
1368 """Execute all the requests as a single batched HTTP request. 1369 1370 Args: 1371 http: httplib2.Http, an http object to be used in place of the one the 1372 HttpRequest request object was constructed with. If one isn't supplied 1373 then use a http object from the requests in this batch. 1374 1375 Returns: 1376 None 1377 1378 Raises: 1379 httplib2.HttpLib2Error if a transport error has occured. 1380 googleapiclient.errors.BatchError if the response is the wrong format. 1381 """ 1382 # If we have no requests return 1383 if len(self._order) == 0: 1384 return None 1385 1386 # If http is not supplied use the first valid one given in the requests. 1387 if http is None: 1388 for request_id in self._order: 1389 request = self._requests[request_id] 1390 if request is not None: 1391 http = request.http 1392 break 1393 1394 if http is None: 1395 raise ValueError("Missing a valid http object.") 1396 1397 self._execute(http, self._order, self._requests) 1398 1399 # Loop over all the requests and check for 401s. For each 401 request the 1400 # credentials should be refreshed and then sent again in a separate batch. 1401 redo_requests = {} 1402 redo_order = [] 1403 1404 for request_id in self._order: 1405 resp, content = self._responses[request_id] 1406 if resp['status'] == '401': 1407 redo_order.append(request_id) 1408 request = self._requests[request_id] 1409 self._refresh_and_apply_credentials(request, http) 1410 redo_requests[request_id] = request 1411 1412 if redo_requests: 1413 self._execute(http, redo_order, redo_requests) 1414 1415 # Now process all callbacks that are erroring, and raise an exception for 1416 # ones that return a non-2xx response? Or add extra parameter to callback 1417 # that contains an HttpError? 
1418 1419 for request_id in self._order: 1420 resp, content = self._responses[request_id] 1421 1422 request = self._requests[request_id] 1423 callback = self._callbacks[request_id] 1424 1425 response = None 1426 exception = None 1427 try: 1428 if resp.status >= 300: 1429 raise HttpError(resp, content, uri=request.uri) 1430 response = request.postproc(resp, content) 1431 except HttpError as e: 1432 exception = e 1433 1434 if callback is not None: 1435 callback(request_id, response, exception) 1436 if self._callback is not None: 1437 self._callback(request_id, response, exception)
1438
class HttpRequestMock(object):
  """Mock of HttpRequest.

  Do not construct directly, instead use RequestMockBuilder.
  """

  def __init__(self, resp, content, postproc):
    """Constructor for HttpRequestMock

    Args:
      resp: httplib2.Response, the response to emulate coming from the request
      content: string, the response body
      postproc: callable, the post processing function usually supplied by
        the model class. See model.JsonModel.response() as an example.
    """
    self.content = content
    self.postproc = postproc
    # Default to a plain 200 OK when no response was supplied.
    if resp is None:
      resp = httplib2.Response({'status': 200, 'reason': 'OK'})
    self.resp = resp
    # Mirror the 'reason' header onto the attribute httplib2 exposes.
    if 'reason' in self.resp:
      self.resp.reason = self.resp['reason']

  def execute(self, http=None):
    """Execute the request.

    Same behavior as HttpRequest.execute(), but the response is
    mocked and not really from an HTTP request/response.
    """
    return self.postproc(self.resp, self.content)
1470
class RequestMockBuilder(object):
  """A simple mock of HttpRequest

  Pass in a dictionary to the constructor that maps request methodIds to
  tuples of (httplib2.Response, content, opt_expected_body) that should be
  returned when that method is called. None may also be passed in for the
  httplib2.Response, in which case a 200 OK response will be generated.
  If an opt_expected_body (str or dict) is provided, it will be compared to
  the body and UnexpectedBodyError will be raised on inequality.

  Example:
    response = '{"data": {"id": "tag:google.c...'
    requestBuilder = RequestMockBuilder(
      {
        'plus.activities.get': (None, response),
      }
    )
    googleapiclient.discovery.build("plus", "v1", requestBuilder=requestBuilder)

  Methods that you do not supply a response for will return a
  200 OK with an empty string as the response content or raise an exception
  if check_unexpected is set to True. The methodId is taken from the rpcName
  in the discovery document.

  For more details see the project wiki.
  """

  def __init__(self, responses, check_unexpected=False):
    """Constructor for RequestMockBuilder

    The constructed object should be a callable object
    that can replace the class HttpResponse.

    responses - A dictionary that maps methodIds into tuples
                of (httplib2.Response, content). The methodId
                comes from the 'rpcName' field in the discovery
                document.
    check_unexpected - A boolean setting whether or not UnexpectedMethodError
                       should be raised on unsupplied method.
    """
    self.responses = responses
    self.check_unexpected = check_unexpected

  def __call__(self, http, postproc, uri, method='GET', body=None,
               headers=None, methodId=None, resumable=None):
    """Implements the callable interface that discovery.build() expects
    of requestBuilder, which is to build an object compatible with
    HttpRequest.execute(). See that method for the description of the
    parameters and the expected response.
    """
    if methodId not in self.responses:
      if self.check_unexpected:
        raise UnexpectedMethodError(methodId=methodId)
      # Unknown methods get a vanilla 200 OK with an empty JSON body.
      model = JsonModel(False)
      return HttpRequestMock(None, '{}', model.response)

    canned = self.responses[methodId]
    resp, content = canned[:2]
    if len(canned) > 2:
      # Test the body against the supplied expected_body.
      expected_body = canned[2]
      if bool(expected_body) != bool(body):
        # Not expecting a body and provided one
        # or expecting a body and not provided one.
        raise UnexpectedBodyError(expected_body, body)
      if isinstance(expected_body, str):
        expected_body = json.loads(expected_body)
      body = json.loads(body)
      if body != expected_body:
        raise UnexpectedBodyError(expected_body, body)
    return HttpRequestMock(resp, content, postproc)
1543
class HttpMock(object):
  """Mock of httplib2.Http"""

  def __init__(self, filename=None, headers=None):
    """
    Args:
      filename: string, absolute filename to read response from
      headers: dict, header to return with response
    """
    if headers is None:
      headers = {'status': '200'}
    if filename:
      # Use a context manager so the file handle is closed even if the
      # read raises.
      with open(filename, 'rb') as f:
        self.data = f.read()
    else:
      self.data = None
    self.response_headers = headers
    # Attributes that record the last request made, for test assertions.
    # (The original assigned self.headers twice; once is enough.)
    self.headers = None
    self.uri = None
    self.method = None
    self.body = None

  def request(self, uri,
              method='GET',
              body=None,
              headers=None,
              redirections=1,
              connection_type=None):
    """Capture the request arguments and return the canned response.

    Returns:
      (httplib2.Response, data) built from the constructor arguments.
    """
    self.uri = uri
    self.method = method
    self.body = body
    self.headers = headers
    return httplib2.Response(self.response_headers), self.data
1581
class HttpMockSequence(object):
  """Mock of httplib2.Http

  Mocks a sequence of calls to request returning different responses for each
  call. Create an instance initialized with the desired response headers
  and content and then use as if an httplib2.Http instance.

    http = HttpMockSequence([
      ({'status': '401'}, ''),
      ({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
      ({'status': '200'}, 'echo_request_headers'),
    ])
    resp, content = http.request("http://examples.com")

  There are special values you can pass in for content to trigger
  behaviours that are helpful in testing.

  'echo_request_headers' means return the request headers in the response body
  'echo_request_headers_as_json' means return the request headers in
    the response body
  'echo_request_body' means return the request body in the response body
  'echo_request_uri' means return the request uri in the response body
  """

  def __init__(self, iterable):
    """
    Args:
      iterable: iterable, a sequence of pairs of (headers, body)
    """
    self._iterable = iterable
    self.follow_redirects = True

  def request(self, uri,
              method='GET',
              body=None,
              headers=None,
              redirections=1,
              connection_type=None):
    # Consume the next canned response in order.
    resp, content = self._iterable.pop(0)
    # Special content values let tests echo parts of the request back.
    if content == 'echo_request_headers':
      content = headers
    elif content == 'echo_request_headers_as_json':
      content = json.dumps(headers)
    elif content == 'echo_request_body':
      # The body may be a stream (e.g. a media upload); read it if so.
      content = body.read() if hasattr(body, 'read') else body
    elif content == 'echo_request_uri':
      content = uri
    if isinstance(content, six.text_type):
      content = content.encode('utf-8')
    return httplib2.Response(resp), content
1636
def set_user_agent(http, user_agent):
  """Set the user-agent on every request.

  Args:
    http - An instance of httplib2.Http
         or something that acts like it.
    user_agent: string, the value for the user-agent header.

  Returns:
    A modified instance of http that was passed in.

  Example:

    h = httplib2.Http()
    h = set_user_agent(h, "my-app-name/6.0")

  Most of the time the user-agent will be set doing auth, this is for the rare
  cases where you are accessing an unauthenticated endpoint.
  """
  request_orig = http.request

  # The closure that will replace 'httplib2.Http.request'.
  def request_with_user_agent(uri, method='GET', body=None, headers=None,
                              redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                              connection_type=None):
    """Modify the request headers to add the user-agent."""
    if headers is None:
      headers = {}
    # Prepend our agent to any user-agent the caller already set.
    if 'user-agent' in headers:
      headers['user-agent'] = user_agent + ' ' + headers['user-agent']
    else:
      headers['user-agent'] = user_agent
    return request_orig(uri, method, body, headers,
                        redirections, connection_type)

  http.request = request_with_user_agent
  return http
def tunnel_patch(http):
  """Tunnel PATCH requests over POST.

  Args:
    http - An instance of httplib2.Http
         or something that acts like it.

  Returns:
    A modified instance of http that was passed in.

  Example:

    h = httplib2.Http()
    h = tunnel_patch(h)

  Useful if you are running on a platform that doesn't support PATCH.
  Apply this last if you are using OAuth 1.0, as changing the method
  will result in a different signature.
  """
  request_orig = http.request

  # The closure that will replace 'httplib2.Http.request'.
  def patched_request(uri, method='GET', body=None, headers=None,
                      redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                      connection_type=None):
    """Rewrite PATCH into POST with an x-http-method-override header."""
    if headers is None:
      headers = {}
    if method == 'PATCH':
      if 'oauth_token' in headers.get('authorization', ''):
        # An OAuth 1.0 signature was computed over the original method, so
        # changing the method here invalidates it.
        LOGGER.warning(
            'OAuth 1.0 request made with Credentials after tunnel_patch.')
      headers['x-http-method-override'] = "PATCH"
      method = 'POST'
    return request_orig(uri, method, body, headers,
                        redirections, connection_type)

  http.request = patched_request
  return http