diff --git a/README.txt b/README.txt new file mode 100644 index 0000000..2fbb59c --- /dev/null +++ b/README.txt @@ -0,0 +1,15 @@ +# 部署方法 + +1. pip install -r requirements.txt 安装依赖的包。 +2. 配置数据库(步骤略) +3. python manage.py syncdb +4. python manage.py migrate +5. python manage.py collectstatic +6. 配置 nginx + gunicorn (步骤略) +7. 安装douban-python 与 gdata + +## 补充说明 + +1. 本项目的 css 文件全部由 sass 源文件编译生成,如需修改,建议修改 sass 文件后重新编译。 +2. Guardfile 文件主要用于 guard-livereload(开发期间使用),部署时不需要用到这个文件。 +3. 请保证当前目录以及 uploads/ 目录可写,因为文件上传时会上传到 uploads/。 diff --git a/build/pip-delete-this-directory.txt b/build/pip-delete-this-directory.txt new file mode 100644 index 0000000..c8883ea --- /dev/null +++ b/build/pip-delete-this-directory.txt @@ -0,0 +1,5 @@ +This file is placed here by pip to indicate the source was put +here by pip. + +Once this package is successfully installed this source code will be +deleted (unless you remove this file). diff --git a/contacts/models.py b/contacts/models.py index f350d0b..38568ba 100644 --- a/contacts/models.py +++ b/contacts/models.py @@ -29,6 +29,7 @@ class Contact(models.Model): phone = models.CharField(max_length=15, default='', verbose_name=u'电话号码') qq = models.CharField(max_length=15, default='', verbose_name=u'QQ 号码') + douban_id = models.CharField(max_length=15, default='', verbose_name=u'豆瓣ID') objects = ContactManager() @@ -39,4 +40,4 @@ class ContactUploadForm(forms.Form): class ContactEditForm(forms.ModelForm): class Meta: model = Contact - fields = ('fullname', 'email', 'phone', 'qq') + fields = ('fullname', 'email', 'phone', 'qq', 'douban_id') diff --git a/contacts/templates/contacts/.index.html.swp b/contacts/templates/contacts/.index.html.swp new file mode 100644 index 0000000..3f9a89c Binary files /dev/null and b/contacts/templates/contacts/.index.html.swp differ diff --git a/contacts/views.py b/contacts/views.py index 64a1b51..2b91de6 100644 --- a/contacts/views.py +++ b/contacts/views.py @@ -60,6 +60,7 @@ def edit(request): contact.email = 
contact_info.get('email') contact.phone = contact_info.get('phone') contact.qq = contact_info.get('qq') + contact.douban_id = contact_info.get('douban_id') form = ContactEditForm(instance=contact) elif request.method == 'POST': diff --git a/douban/__init__.py b/douban/__init__.py new file mode 100644 index 0000000..baa8a1b --- /dev/null +++ b/douban/__init__.py @@ -0,0 +1,488 @@ +import atom +import gdata + +DOUBAN_NAMESPACE = 'http://www.douban.com/xmlns/' + +def _t(v): + if v is not None: + return str(v) + +def _decode(v): + if v is not None: + if isinstance(v, unicode) == False: + return v.decode('utf-8') + return v + +class Location(atom.AtomBase): + _tag = 'location' + _namespace = DOUBAN_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, loc=None, **kwargs): + atom.AtomBase.__init__(self, text=loc, **kwargs) + + +class Uid(atom.AtomBase): + _tag = 'uid' + _namespace = DOUBAN_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, loc=None, **kwargs): + atom.AtomBase.__init__(self, text=loc, **kwargs) + +class Rating(atom.AtomBase): + """As gdata.py has not defined this element, we do this here. + + Should be removed when gdata.py includes the definition. 
+ + """ + _tag = 'rating' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['average'] = 'average' + _attributes['min'] = 'min' + _attributes['max'] = 'max' + _attributes['numRaters'] = 'numRaters' + _attributes['value'] = 'value' + + def __init__(self, value=None, average=None, + min=1, max=5, numRaters=1, **kwargs): + atom.AtomBase.__init__(self, **kwargs) + self.value = _t(value) + self.average = _t(average) + self.min = _t(min) + self.max = _t(max) + self.numRaters = _t(numRaters) + + +class Attribute(atom.AtomBase): + _tag = 'attribute' + _namespace = DOUBAN_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + _attributes['index'] = 'index' + _attributes['lang'] = 'lang' + + def __init__(self, name=None, value=None, index=None, lang=None, **kwargs): + atom.AtomBase.__init__(self, text=value, **kwargs) + self.name = name + self.index = _t(index) + self.lang = lang + +class Entity(atom.AtomBase): + _tag = 'entity' + _namespace = DOUBAN_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + + def __init__(self, name=None, value=None, **kwargs): + atom.AtomBase.__init__(self, text=value, **kwargs) + self.name = name + +class Tag(atom.AtomBase): + _tag = 'tag' + _namespace = DOUBAN_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['count'] = 'count' + _attributes['name'] = 'name' + + def __init__(self, name=None, count=None, **kwargs): + atom.AtomBase.__init__(self, **kwargs) + self.name = name + self.count = _t(count) + + +class Status(atom.AtomBase): + _tag = 'status' + _namespace = DOUBAN_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, status=None, **kwargs): + 
atom.AtomBase.__init__(self, text=status, **kwargs) + + +class Count(atom.AtomBase): + _tag = 'count' + _namespace = DOUBAN_NAMESPACE + + def __init__(self, count=None, **kwargs): + atom.AtomBase.__init__(self, text=count, **kwargs) + +def CreateClassFromXMLString(target_class, xml_string): + return atom.CreateClassFromXMLString(target_class, + xml_string.decode('utf8', 'ignore'), 'utf8') + +class PeopleEntry(gdata.GDataEntry): + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}location' % (DOUBAN_NAMESPACE)] = ('location', Location) + _children['{%s}uid' % (DOUBAN_NAMESPACE)] = ('uid', Uid) + + def __init__(self, location=None, **kwargs): + gdata.GDataEntry.__init__(self, **kwargs) + self.location = location + +def PeopleEntryFromString(xml_string): + return CreateClassFromXMLString(PeopleEntry, xml_string) + +class PeopleFeed(gdata.GDataFeed): + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % (atom.ATOM_NAMESPACE)] = ('entry', [PeopleEntry]) + +def PeopleFeedFromString(xml_string): + return CreateClassFromXMLString(PeopleFeed, xml_string) + + +class SubjectEntry(gdata.GDataEntry): + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}rating' % (gdata.GDATA_NAMESPACE)] = ('rating', Rating) + _children['{%s}attribute' % (DOUBAN_NAMESPACE)] = ('attribute', [Attribute]) + _children['{%s}tag' % (DOUBAN_NAMESPACE)] = ('tag', [Tag]) + + def __init__(self, rating=None, attribute=None, tag=None, **kwargs): + gdata.GDataEntry.__init__(self, **kwargs) + self.rating = rating + self.attribute = attribute or [] + self.tag = tag or [] + + def GetImageLink(self): 
+ for a_link in self.link: + if a_link.rel == 'image': + return a_link + + def GetCollectionLink(self): + for a_link in self.link: + if a_link.rel == 'collection': + return a_link + +class Subject(SubjectEntry): + """In some places we use to represent a subject entry.""" + _tag = 'subject' + _namespace = DOUBAN_NAMESPACE + +class BookEntry(SubjectEntry): + pass + +def BookEntryFromString(xml_string): + return CreateClassFromXMLString(BookEntry, xml_string) + + +class BookFeed(gdata.GDataFeed): + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % (atom.ATOM_NAMESPACE)] = ('entry', [BookEntry]) + +def BookFeedFromString(xml_string): + return CreateClassFromXMLString(BookFeed, xml_string) + + +class MovieEntry(SubjectEntry): + pass + +def MovieEntryFromString(xml_string): + return CreateClassFromXMLString(MovieEntry, xml_string) + + +class MovieFeed(gdata.GDataFeed): + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % (atom.ATOM_NAMESPACE)] = ('entry', [MovieEntry]) + +def MovieFeedFromString(xml_string): + return CreateClassFromXMLString(MovieFeed, xml_string) + + +class MusicEntry(SubjectEntry): + pass + +def MusicEntryFromString(xml_string): + return CreateClassFromXMLString(MusicEntry, xml_string) + + +class MusicFeed(gdata.GDataFeed): + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % (atom.ATOM_NAMESPACE)] = ('entry', [MusicEntry]) + +def MusicFeedFromString(xml_string): + return CreateClassFromXMLString(MusicFeed, xml_string) + +class BroadcastingEntry(gdata.GDataEntry): + _tag = gdata.GDataEntry._tag + _namespace = 
gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}attribute' % (DOUBAN_NAMESPACE)] = ('attribute', [Attribute]) + + def __init__(self, attribute=None, **kwargs): + gdata.GDataEntry.__init__(self, **kwargs) + self.attribute = attribute or [] + +def BroadcastingEntryFromString(xml_string): + return CreateClassFromXMLString(BroadcastingEntry, xml_string) + +class BroadcastingFeed(gdata.GDataFeed): + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % (atom.ATOM_NAMESPACE)] = ('entry', [BroadcastingEntry]) + +def BroadcastingFeedFromString(xml_string): + return CreateClassFromXMLString(BroadcastingFeed, xml_string) + +class NoteEntry(gdata.GDataEntry): + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}attribute' % (DOUBAN_NAMESPACE)] = ('attribute', [Attribute]) + + def __init__(self, attribute=None, **kwargs): + gdata.GDataEntry.__init__(self, **kwargs) + self.attribute = attribute or [] + + +def NoteEntryFromString(xml_string): + return CreateClassFromXMLString(NoteEntry, xml_string) + + +class NoteFeed(gdata.GDataFeed): + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % (atom.ATOM_NAMESPACE)] = ('entry', [NoteEntry]) + +def NoteFeedFromString(xml_string): + return CreateClassFromXMLString(NoteFeed, xml_string) + +class ReviewEntry(gdata.GDataEntry): + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}subject' % 
(DOUBAN_NAMESPACE)] = ('subject', Subject) + _children['{%s}rating' % (gdata.GDATA_NAMESPACE)] = ('rating', Rating) + + def __init__(self, subject=None, rating=None, **kwargs): + gdata.GDataEntry.__init__(self, **kwargs) + self.subject = subject + self.rating = rating + +def ReviewEntryFromString(xml_string): + return CreateClassFromXMLString(ReviewEntry, xml_string) + + +class ReviewFeed(gdata.GDataFeed): + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % (atom.ATOM_NAMESPACE)] = ('entry', [ReviewEntry]) + +def ReviewFeedFromString(xml_string): + return CreateClassFromXMLString(ReviewFeed, xml_string) + + +class CollectionEntry(gdata.GDataEntry): + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}status' % (DOUBAN_NAMESPACE)] = ('status', Status) + _children['{%s}subject' % (DOUBAN_NAMESPACE)] = ('subject', Subject) + _children['{%s}tag' % (DOUBAN_NAMESPACE)] = ('tags', [Tag]) + _children['{%s}rating' % (gdata.GDATA_NAMESPACE)] = ('rating', Rating) + _children['{%s}attribute' % (gdata.GDATA_NAMESPACE)] = ('attribute', [Attribute]) + + def __init__(self, status=None, subject=None, tag=None, rating=None, attribute=None, + **kwargs): + gdata.GDataEntry.__init__(self, **kwargs) + self.status = status + self.subject = subject + self.tags = tag or [] + self.rating = rating + self.attribute = attribute or [] + +def CollectionEntryFromString(xml_string): + return CreateClassFromXMLString(CollectionEntry, xml_string) + +class CollectionFeed(gdata.GDataFeed): + _children = gdata.GDataFeed._children.copy() + _children['{%s}entry' % (atom.ATOM_NAMESPACE)] = ('entry', [CollectionEntry]) + +def CollectionFeedFromString(xml_string): + return CreateClassFromXMLString(CollectionFeed, xml_string) + + 
class TagEntry(gdata.GDataEntry):
    """An atom entry that additionally carries a douban:count child."""
    _children = gdata.GDataEntry._children.copy()
    _children['{%s}count' % (DOUBAN_NAMESPACE)] = ('count', Count)

    def __init__(self, count=None, **kwargs):
        gdata.GDataEntry.__init__(self, **kwargs)
        self.count = count


class TagFeed(gdata.GDataFeed):
    """A feed whose entries parse as TagEntry objects."""
    _children = gdata.GDataFeed._children.copy()
    _children['{%s}entry' % (atom.ATOM_NAMESPACE)] = ('entry', [TagEntry])


def TagFeedFromString(xml_string):
    """Parse *xml_string* into a TagFeed."""
    return CreateClassFromXMLString(TagFeed, xml_string)


class When(atom.AtomBase):
    """gd:when element exposing startTime/endTime attributes."""
    _tag = 'when'
    _namespace = gdata.GDATA_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['startTime'] = 'start_time'
    _attributes['endTime'] = 'end_time'

    def __init__(self, start_time=None, end_time=None, extension_elements=None,
                 extension_attributes=None, text=None):
        # NOTE(review): atom.AtomBase.__init__ is intentionally not called,
        # mirroring the hand-rolled element classes elsewhere in this module.
        self.start_time = start_time
        self.end_time = end_time
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
        self.text = text


class Where(atom.AtomBase):
    """gd:where element exposing a valueString attribute."""
    _tag = 'where'
    _namespace = gdata.GDATA_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['valueString'] = 'value_string'

    def __init__(self, value_string=None, extension_elements=None,
                 extension_attributes=None, text=None):
        self.value_string = value_string
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
        self.text = text


class EventEntry(gdata.GDataEntry):
    """Event entry with douban:attribute children plus gd:where / gd:when.

    NOTE(review): the __init__ of this class continues on the next mangled
    source line; only the declarative head lives in this span.
    """
    _tag = gdata.GDataEntry._tag
    _namespace = gdata.GDataEntry._namespace
    _children = gdata.GDataEntry._children.copy()
    _attributes = gdata.GDataEntry._attributes.copy()
    _children['{%s}attribute' % (DOUBAN_NAMESPACE)] = ('attribute', [Attribute])
    _children['{%s}where' % gdata.GDATA_NAMESPACE] = ('where', Where)
    _children['{%s}when' % gdata.GDATA_NAMESPACE] = ('when', When)
+ def __init__(self, attribute=None, when=None, where=None, **kwargs): + gdata.GDataEntry.__init__(self, **kwargs) + self.attribute = attribute or [] + self.when = when + self.where = where + +def EventEntryFromString(xml_string): + return CreateClassFromXMLString(EventEntry, xml_string) + +class EventFeed(gdata.GDataFeed): + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % (atom.ATOM_NAMESPACE)] = ('entry', [EventEntry]) + +def EventFeedFromString(xml_string): + return CreateClassFromXMLString(EventFeed, xml_string) + +class RecommendationEntry(gdata.GDataEntry): + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}attribute' % (DOUBAN_NAMESPACE)] = ('attribute', [Attribute]) + + def __init__(self, attribute=None, **kwargs): + gdata.GDataEntry.__init__(self, **kwargs) + self.attribute = attribute or [] + +def RecommendationEntryFromString(xml_string): + return CreateClassFromXMLString(RecommendationEntry, xml_string) + +class RecommendationFeed(gdata.GDataFeed): + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % (atom.ATOM_NAMESPACE)] = ('entry', [RecommendationEntry]) + +def RecommendationFeedFromString(xml_string): + return CreateClassFromXMLString(RecommendationFeed, xml_string) + +class RecommendationCommentEntry(gdata.GDataEntry): + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + def __init__(self, attribute=None, **kwargs): + gdata.GDataEntry.__init__(self, **kwargs) + +def 
RecommendationCommentEntryFromString(xml_string): + return CreateClassFromXMLString(RecommendationCommentEntry, xml_string) + +class RecommendationCommentFeed(gdata.GDataFeed): + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % (atom.ATOM_NAMESPACE)] = ('entry', [RecommendationEntry]) + +def RecommendationCommentFeedFromString(xml_string): + return CreateClassFromXMLString(RecommendationCommentFeed, xml_string) + +class DoumailEntry(gdata.GDataEntry): + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}attribute' % (DOUBAN_NAMESPACE)] = ('attribute', [Attribute]) + _children['{%s}entity' % (DOUBAN_NAMESPACE)] = ('entity', [Entity]) + + def __init__(self, attribute=None, entity=None, **kwargs): + gdata.GDataEntry.__init__(self, **kwargs) + self.attribute = attribute or [] + self.entity = entity or [] + +def DoumailEntryFromString(xml_string): + print xml_string + return CreateClassFromXMLString(DoumailEntry, xml_string) + +class DoumailFeed(gdata.GDataFeed): + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % (atom.ATOM_NAMESPACE)] = ('entry', [DoumailEntry]) + +def DoumailFeedFromString(xml_string): + return CreateClassFromXMLString(DoumailFeed, xml_string) + + diff --git a/douban/client.py b/douban/client.py new file mode 100644 index 0000000..342b8bd --- /dev/null +++ b/douban/client.py @@ -0,0 +1,110 @@ +# -*- encoding:utf-8 -*- + +import httplib,urlparse,cgi +import time +import oauth + +signature_method = oauth.OAuthSignatureMethod_HMAC_SHA1() + +API_HOST = 'http://api.douban.com' +AUTH_HOST = 'http://www.douban.com' +REQUEST_TOKEN_URL = 
AUTH_HOST+'/service/auth/request_token' +ACCESS_TOKEN_URL = AUTH_HOST+'/service/auth/access_token' +AUTHORIZATION_URL = AUTH_HOST+'/service/auth/authorize' + +class OAuthClient: + def __init__(self, server='www.douban.com', key=None, secret=None): + self.server = server + self.consumer = oauth.OAuthConsumer(key, secret) + self.token = None + + def login(self, key=None, secret=None): + if key and secret: + self.token = oauth.OAuthToken(key, secret) + return True + + key,secret = self.get_request_token() + if not key: + print 'get request token failed' + return + url = self.get_authorization_url(key, secret) + print 'please paste the url in your webbrowser, complete the authorization then come back:' + print url + line = raw_input() + + key, secret, uid = self.get_access_token(key, secret) + if key: + return self.login(key, secret) + else: + print 'get access token failed' + return False + + def fetch_token(self, oauth_request): + connection = httplib.HTTPConnection("%s:%d" % (self.server, 80)) + connection.request('GET', urlparse.urlparse(oauth_request.http_url).path, + headers=oauth_request.to_header()) + response = connection.getresponse() + r = response.read() + try: + token = oauth.OAuthToken.from_string(r) + params = cgi.parse_qs(r, keep_blank_values=False) + user_id = params.get('douban_user_id',[None])[0] + return token.key,token.secret, user_id + except: + return None,None,None + + def get_request_token(self): + oauth_request = oauth.OAuthRequest.from_consumer_and_token(self.consumer, + http_url=REQUEST_TOKEN_URL) + oauth_request.sign_request(signature_method, self.consumer, None) + return self.fetch_token(oauth_request)[:2] + + def get_authorization_url(self, key, secret, callback=None): + token = oauth.OAuthToken(key, secret) + oauth_request = oauth.OAuthRequest.from_token_and_callback(token=token, + http_url=AUTHORIZATION_URL, callback=callback) + return oauth_request.to_url() + + def get_access_token(self, key=None, secret=None, token=None): + if key and 
secret: + token = oauth.OAuthToken(key, secret) + assert token is not None + oauth_request = oauth.OAuthRequest.from_consumer_and_token(self.consumer, + token=token, http_url=ACCESS_TOKEN_URL) + oauth_request.sign_request(signature_method, self.consumer, token) + return self.fetch_token(oauth_request)[:3] + + def get_auth_header(self, method, uri, parameter={}): + if self.token: + if not uri.startswith('http'): + uri = API_HOST + uri + oauth_request = oauth.OAuthRequest.from_consumer_and_token(self.consumer, + token=self.token, http_method=method, http_url=uri, parameters=parameter) + oauth_request.sign_request(signature_method, self.consumer, self.token) + return oauth_request.to_header() + else: + return {} + + def access_resource(self, method, url, body=None): + oauth_request = oauth.OAuthRequest.from_consumer_and_token(self.consumer, + token=self.token, http_url=url) + oauth_request.sign_request(signature_method, self.consumer, self.token) + headers = oauth_request.to_header() + if method in ('POST','PUT'): + headers['Content-Type'] = 'application/atom+xml; charset=utf-8' + connection = httplib.HTTPConnection("%s:%d" % (self.server, 80)) + connection.request(method, url, body=body, + headers=headers) + return connection.getresponse() + + +def test(): + API_KEY = '' + SECRET = '' + client = OAuthClient(key=API_KEY, secret=SECRET) + client.login() + res = client.access_resource('GET', 'http://api.douban.com/test?a=b&c=d').read() + print res + +if __name__ == '__main__': + test() diff --git a/douban/oauth.py b/douban/oauth.py new file mode 100644 index 0000000..b11f1c8 --- /dev/null +++ b/douban/oauth.py @@ -0,0 +1,513 @@ +import cgi +import urllib +import time +import random +import urlparse +import hmac +import base64 + +VERSION = '1.0' # Hi Blaine! 
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'


class OAuthError(RuntimeError):
    """Generic exception raised for any OAuth protocol failure.

    FIX: the original never invoked RuntimeError.__init__, so str(exc)
    was always '' and tracebacks carried no message.  The legacy
    `message` attribute is kept for callers that read it directly.
    """
    def __init__(self, message='OAuth error occurred'):
        RuntimeError.__init__(self, message)
        self.message = message


def build_authenticate_header(realm=''):
    """Optional WWW-Authenticate header for a 401 response."""
    return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}


def escape(s):
    """Percent-encode *s* for OAuth signing.

    Only '~' is left unescaped, so '/' is escaped too, as the OAuth 1.0
    parameter-encoding rules require.
    """
    return urllib.quote(s, safe='~')


def generate_timestamp():
    """Return the current time as integer seconds since the epoch (UTC)."""
    return int(time.time())


def generate_nonce(length=8):
    """Return a pseudorandom nonce of *length* decimal digits.

    NOTE(review): uses `random`, not a CSPRNG; acceptable for a nonce but
    not for anything secret.
    """
    return ''.join(str(random.randint(0, 9)) for i in range(length))


class OAuthConsumer(object):
    """Identity of the Consumer via its shared secret with the Provider."""
    key = None
    secret = None

    def __init__(self, key, secret):
        self.key = key
        self.secret = secret

# OAuthToken is a data type that represents an End User via either an access
# or request token.
+class OAuthToken(object): + # access tokens and request tokens + key = None + secret = None + + ''' + key = the token + secret = the token secret + ''' + def __init__(self, key, secret): + self.key = key + self.secret = secret + + def to_string(self): + return urllib.urlencode({'oauth_token': self.key, 'oauth_token_secret': self.secret}) + + # return a token from something like: + # oauth_token_secret=digg&oauth_token=digg + @staticmethod + def from_string(s): + params = cgi.parse_qs(s, keep_blank_values=False) + key = params['oauth_token'][0] + secret = params['oauth_token_secret'][0] + return OAuthToken(key, secret) + + def __str__(self): + return self.to_string() + +# OAuthRequest represents the request and can be serialized +class OAuthRequest(object): + ''' + OAuth parameters: + - oauth_consumer_key + - oauth_token + - oauth_signature_method + - oauth_signature + - oauth_timestamp + - oauth_nonce + - oauth_version + ... any additional parameters, as defined by the Service Provider. 
+ ''' + parameters = None # oauth parameters + http_method = HTTP_METHOD + http_url = None + version = VERSION + + def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None): + self.http_method = http_method + self.http_url = http_url + self.parameters = parameters or {} + + def set_parameter(self, parameter, value): + self.parameters[parameter] = value + + def get_parameter(self, parameter): + try: + return self.parameters[parameter] + except: + raise OAuthError('Parameter not found: %s' % parameter) + + def _get_timestamp_nonce(self): + return self.get_parameter('oauth_timestamp'), self.get_parameter('oauth_nonce') + + # get any non-oauth parameters + def get_nonoauth_parameters(self): + parameters = {} + for k, v in self.parameters.iteritems(): + # ignore oauth parameters + if k.find('oauth_') < 0: + parameters[k] = v + return parameters + + # serialize as a header for an HTTPAuth request + def to_header(self, realm=''): + auth_header = 'OAuth realm="%s"' % realm + # add the oauth parameters + if self.parameters: + for k, v in self.parameters.iteritems(): + auth_header += ', %s="%s"' % (k, v) + return {'Authorization': auth_header} + + # serialize as post data for a POST request + def to_postdata(self): + return '&'.join('%s=%s' % (escape(str(k)), escape(str(v))) for k, v in self.parameters.iteritems()) + + # serialize as a url for a GET request + def to_url(self): + return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata()) + + # return a string that consists of all the parameters that need to be signed + def get_normalized_parameters(self): + params = self.parameters + + param_str = urlparse.urlparse(self.http_url).query + params.update(OAuthRequest._split_url_string(param_str)) + + try: + # exclude the signature if it exists + del params['oauth_signature'] + except: + pass + key_values = params.items() + # sort lexicographically, first after key, then after value + key_values.sort() + # combine key value pairs in string and escape 
+ return '&'.join('%s=%s' % (escape(str(k)), escape(str(v))) for k, v in key_values) + + # just uppercases the http method + def get_normalized_http_method(self): + return self.http_method.upper() + + # parses the url and rebuilds it to be scheme://host/path + def get_normalized_http_url(self): + parts = urlparse.urlparse(self.http_url) + url_string = '%s://%s%s' % (parts[0], parts[1], parts[2]) # scheme, netloc, path + return url_string + + # set the signature parameter to the result of build_signature + def sign_request(self, signature_method, consumer, token): + # set the signature method + self.set_parameter('oauth_signature_method', signature_method.get_name()) + # set the signature + self.set_parameter('oauth_signature', self.build_signature(signature_method, consumer, token)) + + def build_signature(self, signature_method, consumer, token): + # call the build signature method within the signature method + return signature_method.build_signature(self, consumer, token) + + @staticmethod + def from_request(http_method, http_url, headers=None, postdata=None, parameters=None): + + # let the library user override things however they'd like, if they know + # which parameters to use then go for it, for example XMLRPC might want to + # do this + if parameters is not None: + return OAuthRequest(http_method, http_url, parameters) + + # from the headers + if headers is not None: + try: + auth_header = headers['Authorization'] + # check that the authorization header is OAuth + auth_header.index('OAuth') + # get the parameters from the header + parameters = OAuthRequest._split_header(auth_header) + return OAuthRequest(http_method, http_url, parameters) + except: + pass + + # from the parameter string (post body) + if http_method == 'POST' and postdata is not None: + parameters = OAuthRequest._split_url_string(postdata) + + # from the url string + elif http_method == 'GET': + param_str = urlparse.urlparse(http_url).query + parameters = 
OAuthRequest._split_url_string(param_str) + + if parameters: + return OAuthRequest(http_method, http_url, parameters) + + raise OAuthError('Missing all OAuth parameters. OAuth parameters must be in the headers, post body, or url.') + + @staticmethod + def from_consumer_and_token(oauth_consumer, token=None, http_method=HTTP_METHOD, http_url=None, parameters=None): + if not parameters: + parameters = {} + + defaults = { + 'oauth_consumer_key': oauth_consumer.key, + 'oauth_timestamp': generate_timestamp(), + 'oauth_nonce': generate_nonce(), + 'oauth_version': OAuthRequest.version, + } + + defaults.update(parameters) + parameters = defaults + + if token: + parameters['oauth_token'] = token.key + + return OAuthRequest(http_method, http_url, parameters) + + @staticmethod + def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD, http_url=None, parameters=None): + if not parameters: + parameters = {} + + parameters['oauth_token'] = token.key + + if callback: + parameters['oauth_callback'] = escape(callback) + + return OAuthRequest(http_method, http_url, parameters) + + # util function: turn Authorization: header into parameters, has to do some unescaping + @staticmethod + def _split_header(header): + params = {} + parts = header.split(',') + for param in parts: + # ignore realm parameter + if param.find('OAuth realm') > -1: + continue + # remove whitespace + param = param.strip() + # split key-value + param_parts = param.split('=', 1) + # remove quotes and unescape the value + params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"')) + return params + + # util function: turn url string into parameters, has to do some unescaping + @staticmethod + def _split_url_string(param_str): + parameters = cgi.parse_qs(param_str, keep_blank_values=False) + for k, v in parameters.iteritems(): + parameters[k] = urllib.unquote(v[0]) + return parameters + +# OAuthServer is a worker to check a requests validity against a data store +class OAuthServer(object): + 
# NOTE(review): these are members of `class OAuthServer` reconstructed from a
# newline-mangled diff; the enclosing class header lies outside this span, so
# the members are emitted at this line's original column.
timestamp_threshold = 300  # in seconds, five minutes
version = VERSION
signature_methods = None
data_store = None

def __init__(self, data_store=None, signature_methods=None):
    self.data_store = data_store
    self.signature_methods = signature_methods or {}

def set_data_store(self, oauth_data_store):
    # FIX: the original assigned the undefined name `data_store`, raising
    # NameError whenever this setter was called; use the parameter.
    self.data_store = oauth_data_store

def get_data_store(self):
    return self.data_store

def add_signature_method(self, signature_method):
    """Register *signature_method* under its name; returns the registry."""
    self.signature_methods[signature_method.get_name()] = signature_method
    return self.signature_methods

def fetch_request_token(self, oauth_request):
    """Process a request_token request; returns the request token on success."""
    try:
        # get the request token for authorization
        token = self._get_token(oauth_request, 'request')
    # NOTE(review): bare except kept to preserve the original fallback
    # semantics; the expected failure here is OAuthError from _get_token.
    except:
        # no token required for the initial token request
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        self._check_signature(oauth_request, consumer, None)
        # fetch a new token
        token = self.data_store.fetch_request_token(consumer)
    return token

def fetch_access_token(self, oauth_request):
    """Process an access_token request; returns the access token on success."""
    version = self._get_version(oauth_request)
    consumer = self._get_consumer(oauth_request)
    # exchange happens against the *request* token
    token = self._get_token(oauth_request, 'request')
    self._check_signature(oauth_request, consumer, token)
    new_token = self.data_store.fetch_access_token(consumer, token)
    return new_token

def verify_request(self, oauth_request):
    """Verify an API call; returns (consumer, token, non-oauth parameters)."""
    version = self._get_version(oauth_request)
    consumer = self._get_consumer(oauth_request)
    # an *access* token is required for resource calls
    token = self._get_token(oauth_request, 'access')
    self._check_signature(oauth_request, consumer, token)
    parameters = oauth_request.get_nonoauth_parameters()
    return consumer, token, parameters

# authorize a request token
def authorize_token(self, token, user): + return self.data_store.authorize_request_token(token, user) + + # get the callback url + def get_callback(self, oauth_request): + return oauth_request.get_parameter('oauth_callback') + + # optional support for the authenticate header + def build_authenticate_header(self, realm=''): + return {'WWW-Authenticate': 'OAuth realm="%s"' % realm} + + # verify the correct version request for this server + def _get_version(self, oauth_request): + try: + version = oauth_request.get_parameter('oauth_version') + except: + version = VERSION + if version and version != self.version: + raise OAuthError('OAuth version %s not supported' % str(version)) + return version + + # figure out the signature with some defaults + def _get_signature_method(self, oauth_request): + try: + signature_method = oauth_request.get_parameter('oauth_signature_method') + except: + signature_method = SIGNATURE_METHOD + try: + # get the signature method object + signature_method = self.signature_methods[signature_method] + except: + signature_method_names = ', '.join(self.signature_methods.keys()) + raise OAuthError('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names)) + + return signature_method + + def _get_consumer(self, oauth_request): + consumer_key = oauth_request.get_parameter('oauth_consumer_key') + if not consumer_key: + raise OAuthError('Invalid consumer key') + consumer = self.data_store.lookup_consumer(consumer_key) + if not consumer: + raise OAuthError('Invalid consumer') + return consumer + + # try to find the token for the provided request token key + def _get_token(self, oauth_request, token_type='access'): + token_field = oauth_request.get_parameter('oauth_token') + token = self.data_store.lookup_token(token_type, token_field) + if not token: + raise OAuthError('Invalid %s token: %s' % (token_type, token_field)) + return token + + def _check_signature(self, oauth_request, consumer, token): + 
timestamp, nonce = oauth_request._get_timestamp_nonce() + self._check_timestamp(timestamp) + self._check_nonce(consumer, token, nonce) + signature_method = self._get_signature_method(oauth_request) + try: + signature = oauth_request.get_parameter('oauth_signature') + except: + raise OAuthError('Missing signature') + # attempt to construct the same signature + built = signature_method.build_signature(oauth_request, consumer, token) + if signature != built: + raise OAuthError('Signature does not match. Expected: %s Got: %s' % (built, signature)) + + def _check_timestamp(self, timestamp): + # verify that timestamp is recentish + timestamp = int(timestamp) + now = int(time.time()) + lapsed = now - timestamp + if lapsed > self.timestamp_threshold: + raise OAuthError('Expired timestamp: given %d and now %s has a greater difference than threshold %d' % (timestamp, now, self.timestamp_threshold)) + + def _check_nonce(self, consumer, token, nonce): + # verify that the nonce is uniqueish + try: + self.data_store.lookup_nonce(consumer, token, nonce) + raise OAuthError('Nonce already used: %s' % str(nonce)) + except: + pass + +# OAuthClient is a worker to attempt to execute a request +class OAuthClient(object): + consumer = None + token = None + + def __init__(self, oauth_consumer, oauth_token): + self.consumer = oauth_consumer + self.token = oauth_token + + def get_consumer(self): + return self.consumer + + def get_token(self): + return self.token + + def fetch_request_token(self, oauth_request): + # -> OAuthToken + raise NotImplementedError + + def fetch_access_token(self, oauth_request): + # -> OAuthToken + raise NotImplementedError + + def access_resource(self, oauth_request): + # -> some protected resource + raise NotImplementedError + +# OAuthDataStore is a database abstraction used to lookup consumers and tokens +class OAuthDataStore(object): + + def lookup_consumer(self, key): + # -> OAuthConsumer + raise NotImplementedError + + def lookup_token(self, oauth_consumer, 
token_type, token_token): + # -> OAuthToken + raise NotImplementedError + + def lookup_nonce(self, oauth_consumer, oauth_token, nonce, timestamp): + # -> OAuthToken + raise NotImplementedError + + def fetch_request_token(self, oauth_consumer): + # -> OAuthToken + raise NotImplementedError + + def fetch_access_token(self, oauth_consumer, oauth_token): + # -> OAuthToken + raise NotImplementedError + + def authorize_request_token(self, oauth_token, user): + # -> OAuthToken + raise NotImplementedError + +# OAuthSignatureMethod is a strategy class that implements a signature method +class OAuthSignatureMethod(object): + def get_name(): + # -> str + raise NotImplementedError + + def build_signature(oauth_request, oauth_consumer, oauth_token): + # -> str + raise NotImplementedError + +class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod): + + def get_name(self): + return 'HMAC-SHA1' + + def build_signature(self, oauth_request, consumer, token): + sig = ( + escape(oauth_request.get_normalized_http_method()), + escape(oauth_request.get_normalized_http_url()), + escape(oauth_request.get_normalized_parameters()), + ) + + key = '%s&' % escape(consumer.secret) + if token: + key += escape(token.secret) + raw = '&'.join(sig) + + # hmac object + try: + import hashlib # 2.5 + hashed = hmac.new(key, raw, hashlib.sha1) + except: + import sha # deprecated + hashed = hmac.new(key, raw, sha) + + # calculate the digest base 64 + return base64.b64encode(hashed.digest()) + +class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod): + + def get_name(self): + return 'PLAINTEXT' + + def build_signature(self, oauth_request, consumer, token): + # concatenate the consumer key and secret + sig = escape(consumer.secret) + if token: + sig = '&'.join((sig, escape(token.secret))) + return sig diff --git a/douban/service.py b/douban/service.py new file mode 100644 index 0000000..b24a6ad --- /dev/null +++ b/douban/service.py @@ -0,0 +1,437 @@ +# encoding: UTF-8 + +import atom +import gdata 
import gdata.service
import douban
import urllib
# one import per line (was: ``import oauth, client``)
import oauth
import client


class DoubanService(gdata.service.GDataService):
    """GData-style client for the douban.com API, OAuth-signed via `client`."""

    def __init__(self, api_key=None, secret=None,
                 source='douban-python', server='api.douban.com',
                 additional_headers=None):
        self.api_key = api_key
        self.client = client.OAuthClient(key=api_key, secret=secret)
        gdata.service.GDataService.__init__(
            self, service='douban', source=source,
            server=server, additional_headers=additional_headers)

    def GetAuthorizationURL(self, key, secret, callback=None):
        """Return the URL where the user authorizes the request token."""
        return self.client.get_authorization_url(key, secret, callback)

    def ProgrammaticLogin(self, token_key=None, token_secret=None):
        """Log in with a previously obtained access token pair."""
        return self.client.login(token_key, token_secret)

    def Get(self, uri, extra_headers=None, *args, **kwargs):
        """HTTP GET with OAuth header, or apikey query param when unsigned.

        BUG FIX: the original declared ``extra_headers={}`` (mutable default)
        and then mutated it with ``.update()``, so auth headers leaked
        between unrelated calls. Callers passing their own dict see no change.
        """
        if extra_headers is None:
            extra_headers = {}
        auth_header = self.client.get_auth_header('GET', uri)
        if auth_header:
            extra_headers.update(auth_header)
        elif self.api_key:
            # unauthenticated call: identify ourselves via apikey parameter
            param = urllib.urlencode([('apikey', self.api_key)])
            if '?' in uri:
                uri += '&' + param
            else:
                uri += '?' + param
        return gdata.service.GDataService.Get(self, uri, extra_headers, *args, **kwargs)

    def Post(self, data, uri, extra_headers=None, url_params=None, *args, **kwargs):
        """HTTP POST with an OAuth Authorization header added."""
        if extra_headers is None:
            extra_headers = {}
        extra_headers.update(self.client.get_auth_header('POST', uri, url_params))
        return gdata.service.GDataService.Post(self, data, uri,
            extra_headers, url_params, *args, **kwargs)

    def Put(self, data, uri, extra_headers=None, url_params=None, *args, **kwargs):
        """HTTP PUT with an OAuth Authorization header added."""
        if extra_headers is None:
            extra_headers = {}
        extra_headers.update(self.client.get_auth_header('PUT', uri, url_params))
        return gdata.service.GDataService.Put(self, data, uri,
            extra_headers, url_params, *args, **kwargs)

    def Delete(self, uri, extra_headers=None, url_params=None, *args, **kwargs):
        """HTTP DELETE with an OAuth Authorization header added."""
        if extra_headers is None:
            extra_headers = {}
        extra_headers.update(self.client.get_auth_header('DELETE', uri, url_params))
        return gdata.service.GDataService.Delete(self,
            uri, extra_headers, url_params, *args, **kwargs)

    # --- people -----------------------------------------------------------
    def GetPeople(self, uri):
        return self.Get(uri, converter=douban.PeopleEntryFromString)

    def GetPeopleFeed(self, uri):
        return self.Get(uri, converter=douban.PeopleFeedFromString)

    def SearchPeople(self, text_query, start_index=None, max_results=None):
        query = Query('/people/', text_query, start_index=start_index,
                      max_results=max_results)
        return self.GetPeopleFeed(query.ToUri())

    def GetFriends(self, uri):
        return self.Get(uri, converter=douban.PeopleFeedFromString)

    def GetContacts(self, uri):
        return self.Get(uri, converter=douban.PeopleFeedFromString)

    def GetAuthorizedUID(self, uri):
        # uri may contain characters (e.g. '@me') that need quoting
        return self.Get(urllib.quote(uri), converter=douban.PeopleEntryFromString)

    # --- books / movies / music ------------------------------------------
    def GetBook(self, uri):
        return self.Get(uri, converter=douban.BookEntryFromString)

    def GetBookFeed(self, uri):
        return self.Get(uri, converter=douban.BookFeedFromString)

    def SearchBook(self, text_query, start_index=None, max_results=None):
        query = Query('/book/subjects', text_query=text_query,
                      start_index=start_index, max_results=max_results)
        return self.GetBookFeed(query.ToUri())

    def QueryBookByTag(self, tag, start_index=None, max_results=None):
        query = Query('/book/subjects', text_query=None,
                      start_index=start_index, max_results=max_results, tag=tag)
        return self.GetBookFeed(query.ToUri())

    def GetMovie(self, uri):
        return self.Get(uri, converter=douban.MovieEntryFromString)

    def GetMovieFeed(self, uri):
        return self.Get(uri, converter=douban.MovieFeedFromString)

    def SearchMovie(self, text_query, start_index=None, max_results=None):
        query = Query('/movie/subjects', text_query=text_query,
                      start_index=start_index, max_results=max_results)
        return self.GetMovieFeed(query.ToUri())

    def QueryMovieByTag(self, tag, start_index=None, max_results=None):
        query = Query('/movie/subjects', text_query=None,
                      start_index=start_index, max_results=max_results, tag=tag)
        return self.GetMovieFeed(query.ToUri())

    def GetMusic(self, uri):
        return self.Get(uri, converter=douban.MusicEntryFromString)

    def GetMusicFeed(self, uri):
        return self.Get(uri, converter=douban.MusicFeedFromString)

    def SearchMusic(self, text_query, start_index=None, max_results=None):
        query = Query('/music/subjects', text_query=text_query,
                      start_index=start_index, max_results=max_results)
        return self.GetMusicFeed(query.ToUri())

    def QueryMusicByTag(self, tag, start_index=None, max_results=None):
        query = Query('/music/subjects', text_query=None,
                      start_index=start_index, max_results=max_results, tag=tag)
        return self.GetMusicFeed(query.ToUri())

    # --- reviews ----------------------------------------------------------
    def GetReview(self, uri):
        return self.Get(uri, converter=douban.ReviewEntryFromString)

    def GetReviewFeed(self, uri, orderby='score'):
        query = Query(uri, text_query=None, start_index=None,
                      max_results=None, orderby=orderby)
        return self.Get(query.ToUri(), converter=douban.ReviewFeedFromString)

    def CreateReview(self, title, content, subject, rating=None):
        subject = douban.Subject(atom_id=subject.id)
        entry = douban.ReviewEntry(subject=subject)
        if rating:
            entry.rating = douban.Rating(value=rating)
        entry.title = atom.Title(text=title)
        entry.content = atom.Content(text=content)
        return self.Post(entry, '/reviews',
                         converter=douban.ReviewEntryFromString)

    def UpdateReview(self, entry, title, content, rating=None):
        # accept either an entry object or its URI
        if isinstance(entry, (str, unicode)):
            entry = self.Get(entry, douban.ReviewEntryFromString)
        entry.title = atom.Title(text=title)
        entry.content = atom.Content(text=content)
        if rating:
            entry.rating = douban.Rating(value=rating)
        uri = entry.GetSelfLink().href
        return self.Put(entry, uri, converter=douban.ReviewEntryFromString)

    def DeleteReview(self, entry):
        uri = entry.GetSelfLink().href
        return self.Delete(uri)

    # --- collections ------------------------------------------------------
    def GetCollection(self, uri):
        return self.Get(uri, converter=douban.CollectionEntryFromString)

    def GetCollectionFeed(self, uri):
        return self.Get(uri, converter=douban.CollectionFeedFromString)

    def GetMyCollection(self, url, cat=None, tag=None, status=None, start_index=None,
                        max_results=None, updated_max=None, updated_min=None):
        # Only pass the optional filters that were actually supplied;
        # updated_max/updated_min are only honoured as a pair.
        if updated_max and updated_min and status:
            query = Query(url, text_query=None, start_index=start_index,
                          max_results=max_results, status=status, tag=tag, cat=cat,
                          updated_max=updated_max, updated_min=updated_min)
        elif status:
            query = Query(url, text_query=None, start_index=start_index,
                          max_results=max_results, tag=tag, cat=cat, status=status)
        elif updated_max and updated_min:
            query = Query(url, text_query=None, start_index=start_index,
                          max_results=max_results, tag=tag, cat=cat,
                          updated_max=updated_max, updated_min=updated_min)
        else:
            query = Query(url, text_query=None, start_index=start_index,
                          max_results=max_results, tag=tag, cat=cat)
        return self.GetCollectionFeed(query.ToUri())

    def AddCollection(self, status, subject, rating=None, tag=(), private=True):
        # NOTE: default changed from mutable [] to (); it is only iterated.
        subject = douban.Subject(atom_id=subject.id)
        entry = douban.CollectionEntry(subject=subject,
                                       status=douban.Status(status))
        if rating:
            entry.rating = douban.Rating(rating)
        if isinstance(tag, (str, unicode)):
            tag = filter(None, tag.split(' '))
        if private:
            attribute = douban.Attribute('privacy', 'private')
            entry.attribute.append(attribute)
        else:
            attribute = douban.Attribute('privacy', 'public')
            entry.attribute.append(attribute)
        entry.tags = [douban.Tag(name=t) for t in tag]
        return self.Post(entry, '/collection',
                         converter=douban.CollectionEntryFromString)

    def UpdateCollection(self, entry, status, tag=(), rating=None, private=False):
        # NOTE: default changed from mutable [] to (); it is only iterated.
        if isinstance(entry, (str, unicode)):
            entry = self.Get(entry, douban.CollectionEntryFromString)
        entry.status = douban.Status(status)
        if rating:
            entry.rating = douban.Rating(rating)
        if tag:
            if isinstance(tag, (str, unicode)):
                tag = filter(None, tag.split(' '))
            entry.tags = [douban.Tag(name=t) for t in tag]
        else:
            # re-materialize existing tags so the PUT payload is complete
            entry.tags = [douban.Tag(name=t.name) for t in entry.tags]
        if private:
            attribute = douban.Attribute('privacy', 'private')
            entry.attribute.append(attribute)
        else:
            attribute = douban.Attribute('privacy', 'public')
            entry.attribute.append(attribute)
        uri = entry.GetSelfLink().href
        return self.Put(entry, uri, converter=douban.CollectionEntryFromString)

    def DeleteCollection(self, entry):
        uri = entry.GetSelfLink().href
        return self.Delete(uri)

    # --- tags / broadcastings --------------------------------------------
    def GetTagFeed(self, uri):
        return self.Get(uri, converter=douban.TagFeedFromString)

    def GetBroadcastingFeed(self, uri, start_index=None, max_results=None):
        query = Query(uri, text_query=None,
                      start_index=start_index, max_results=max_results)
        return self.Get(query.ToUri(), converter=douban.BroadcastingFeedFromString)

    def GetContactsBroadcastingFeed(self, uri, start_index=None, max_results=None):
        query = Query(uri, text_query=None,
                      start_index=start_index, max_results=max_results)
        return self.Get(query.ToUri(), converter=douban.BroadcastingFeedFromString)

    def AddBroadcasting(self, uri, entry):
        return self.Post(entry, uri, converter=douban.BroadcastingEntryFromString)

    def DeleteBroadcasting(self, entry):
        uri = entry.id.text
        return self.Delete(uri)

    # --- notes ------------------------------------------------------------
    def GetNote(self, uri):
        return self.Get(uri, converter=douban.NoteEntryFromString)

    def GetMyNotes(self, uri, start_index=None, max_results=None):
        query = Query(uri, text_query=None,
                      start_index=start_index, max_results=max_results)
        return self.Get(query.ToUri(), converter=douban.NoteFeedFromString)

    def AddNote(self, uri, entry, private=False, can_reply=True):
        entry.attribute.append(
            douban.Attribute('privacy', private and 'private' or 'public'))
        entry.attribute.append(
            douban.Attribute('can_reply', can_reply and 'yes' or 'no'))
        return self.Post(entry, uri, converter=douban.NoteEntryFromString)

    def UpdateNote(self, entry, content, title, private=True, can_reply=True):
        if isinstance(entry, (str, unicode)):
            entry = self.Get(entry, douban.NoteEntryFromString)
        entry.title = atom.Title(text=title)
        entry.content = atom.Content(text=content)
        entry.attribute.append(
            douban.Attribute('privacy', private and 'private' or 'public'))
        entry.attribute.append(
            douban.Attribute('can_reply', can_reply and 'yes' or 'no'))
        uri = entry.id.text
        return self.Put(entry, uri, converter=douban.NoteEntryFromString)

    def DeleteNote(self, entry):
        uri = entry.id.text
        return self.Delete(uri)

    # --- events -----------------------------------------------------------
    def GetEvent(self, uri):
        return self.Get(uri, converter=douban.EventEntryFromString)

    def GetEventFeed(self, uri):
        return self.Get(uri, converter=douban.EventFeedFromString)

    def SearchEvent(self, text_query, location, start_index=None, max_results=None):
        query = Query('/events', text_query, location=location,
                      start_index=start_index, max_results=max_results)
        return self.GetEventFeed(query.ToUri())

    def GetLocationEvents(self, location, type=None, start_index=None, max_results=None):
        # `type` kept as the parameter name (shadows the builtin) for
        # backward compatibility with keyword callers.
        query = Query('/event/location/%s' % location, type=type or 'all',
                      start_index=start_index, max_results=max_results)
        return self.GetEventFeed(query.ToUri())

    def GetEvents(self, uri, start_index=None, max_results=None, status=None):
        query = Query('%s%s' % (uri, status and '/%s' % status or ''),
                      start_index=start_index, max_results=max_results)
        return self.Get(query.ToUri(), converter=douban.EventFeedFromString)

    def GetEventWishers(self, uri, start_index=None, max_results=None):
        query = Query('%s/wishers' % uri, start_index=start_index,
                      max_results=max_results)
        return self.Get(query.ToUri(), converter=douban.PeopleFeedFromString)

    def GetEventParticipants(self, uri, start_index=None, max_results=None):
        query = Query('%s/participants' % uri, start_index=start_index,
                      max_results=max_results)
        return self.Get(query.ToUri(), converter=douban.PeopleFeedFromString)

    def DeleteEventWisher(self, uri):
        return self.Delete('%s/wishers' % uri)

    def DeleteEventParticipants(self, uri):
        return self.Delete('%s/participants' % uri)

    def AddEvent(self, uri, entry, invite_only=False, can_invite=True):
        entry.attribute.append(
            douban.Attribute('invite_only', invite_only and 'yes' or 'no'))
        entry.attribute.append(
            douban.Attribute('can_invite', can_invite and 'yes' or 'no'))
        return self.Post(entry, uri, converter=douban.EventEntryFromString)

    def UpdateEvent(self, entry, content, title, invite_only=False, can_invite=True):
        if isinstance(entry, (str, unicode)):
            entry = self.Get(entry, douban.EventEntryFromString)
        entry.title = atom.Title(text=title)
        entry.content = atom.Content(text=content)
        entry.attribute.append(
            douban.Attribute('invite_only', invite_only and 'yes' or 'no'))
        entry.attribute.append(
            douban.Attribute('can_invite', can_invite and 'yes' or 'no'))
        uri = entry.GetSelfLink().href
        return self.Put(entry, uri, converter=douban.EventEntryFromString)

    def DeleteEvent(self, entry, reason):
        # deletion is a POST to <self>/delete carrying the reason text
        uri = entry.GetSelfLink().href + '/delete'
        entry = gdata.GDataEntry()
        entry.content = atom.Content(text=reason)
        return self.Post(entry, uri)

    # --- recommendations --------------------------------------------------
    def GetRecommendation(self, uri):
        return self.Get(uri, converter=douban.RecommendationEntryFromString)

    def GetRecommendations(self, uri, start_index=None, max_results=None):
        query = Query(uri, text_query=None,
                      start_index=start_index, max_results=max_results)
        return self.Get(query.ToUri(), converter=douban.RecommendationFeedFromString)

    def AddRecommendation(self, title, url, comment=""):
        entry = douban.RecommendationEntry()
        entry.title = atom.Title(text=title)
        entry.link = atom.Link(href=url, rel="related")
        attribute = douban.Attribute('comment', comment)
        entry.attribute.append(attribute)
        return self.Post(entry, '/recommendations',
                         converter=douban.RecommendationEntryFromString)

    def DeleteRecommendation(self, entry):
        return self.Delete(entry.id.text)

    def GetRecommendationComments(self, uri):
        return self.Get(uri, converter=douban.RecommendationCommentFeedFromString)

    def AddRecommendationComment(self, entry, comment):
        uri = entry.id.text + '/comments'
        entry = gdata.GDataEntry()
        entry.content = atom.Content(text=comment)
        return self.Post(entry, uri)

    def DeleteRecommendationComment(self, entry):
        return self.Delete(entry.id.text)

    # --- doumail ----------------------------------------------------------
    def GetDoumail(self, uri):
        return self.Get(uri, converter=douban.DoumailEntryFromString)

    def GetDoumailFeed(self, uri):
        return self.Get(uri, converter=douban.DoumailFeedFromString)

    def AddDoumail(self, receiverURI, subject, body):
        entry = douban.DoumailEntry()
        entry.entity.append(douban.Entity(
            'receiver', "", extension_elements=[atom.Uri(text=receiverURI)]))
        entry.title = atom.Title(text=subject)
        entry.content = atom.Content(text=body)
        return self.Post(entry, '/doumails', converter=douban.DoumailEntryFromString)

    def AddCaptchaDoumail(self, receiverURI, subject, body, captcha_token, captacha_string):
        # NOTE(review): `captacha_string` is a typo but is kept so existing
        # keyword callers keep working.
        entry = douban.DoumailEntry()
        entry.entity.append(douban.Entity(
            'receiver', "", extension_elements=[atom.Uri(text=receiverURI)]))
        entry.title = atom.Title(text=subject)
        entry.content = atom.Content(text=body)
        entry.attribute = []
        entry.attribute.append(douban.Attribute('captcha_string', captacha_string))
        entry.attribute.append(douban.Attribute('captcha_token', captcha_token))
        return self.Post(entry, '/doumails', converter=douban.DoumailEntryFromString)

    def DeleteDoumail(self, entry):
        uri = entry.GetSelfLink().href
        return self.Delete(uri)

    def DeleteDoumails(self, uris):
        # batch delete: POST a feed of entry ids
        feed = gdata.GDataFeed()
        for uri in uris:
            entry = gdata.GDataEntry()
            entry.id = atom.Id(text=uri)
            feed.entry.append(entry)
        return self.Post(feed, '/doumail/delete')

    def MarkDoumailRead(self, uris):
        # batch mark-as-read: PUT a feed with unread=false on each entry
        feed = gdata.GDataFeed()
        for uri in uris:
            entry = gdata.GDataEntry()
            entry.id = atom.Id(text=uri)
            entry.attribute = []
            entry.attribute.append(douban.Attribute('unread', 'false'))
            feed.entry.append(entry)
        return self.Put(feed, '/doumail/')


class Query(gdata.service.Query):
    """gdata Query with douban's start_index/max_results conventions."""

    def __init__(self, feed=None, text_query=None, start_index=None,
                 max_results=None, **params):
        gdata.service.Query.__init__(self, feed=feed, text_query=text_query,
                                     params=params)
        if start_index is not None:
            self.start_index = start_index
        if max_results is not None:
            self.max_results = max_results
diff --git
a/gdata.py-1.2.3/INSTALL.txt b/gdata.py-1.2.3/INSTALL.txt new file mode 100644 index 0000000..19f2327 --- /dev/null +++ b/gdata.py-1.2.3/INSTALL.txt @@ -0,0 +1,247 @@ +To get started using the library, you need to make sure that the library +and dependencies can be imported. Short instructions: + +sudo python setup.py install + +or + +python setup.py install --home=~ +and set your PYTHONPATH to include your home directory. + +Long instructions copied from the following article from Aug. 2007: + +Getting Started with the Google Data Python Library +http://code.google.com/support/bin/answer.py?answer=75582 + +==Introduction== + +So you've decided to use the Google data Python client library to write an +application using one of the many Google data services. Excellent choice! +My aim with this short tutorial is to quickly get you started in using the +client library to develop your application. + +You probably want to jump in and start creating your application right +away. First though, you may need to configure your development environment +and set up the tools you'll need to run the modules included in the client +library. Follow the steps below and you'll be running code in no time. + +==Installing Python== + +If you're going to be developing with the Python client library, you'll +need a working version of Python 2.2 or higher. Many operating systems +come with a version of Python included, so you may be able to skip the +installation step. To see which version of Python you have, run +python -V in a command line terminal. (Note: the V is uppercase.) This +should result in something like: + +Python 2.4.3 + +If you see version 2.2 or higher, then you can start installing dependencies. +Otherwise, look below to find installation/upgrade instructions for your +operating system. 
+ +--Installing Python on Windows-- + +There are quite a few implementations of Python to choose from in Windows, +but for purposes of this guide, I'll be using the .msi installer found on +python.org. + + 1. Begin by downloading the installer from the Python download page. + http://www.python.org/download/ + 2. Run the installer ? you can accept all the default settings + 3. To see if your install is working as expected, open a command prompt and + run python -V. + +--Installing Python on Mac OS X-- + +The list of downloads on python.org has .dmg installers for the Mac users out +there. Here are the steps to install one of them: + + 1. Navigate to http://www.python.org/download/mac/ + 2. From this page, download the installer for the appropriate version of + Mac OS X. Note: The Python installation page for Mac OS X 10.3.8 and + below is different than newer versions of Mac OS X. To find your OS X + version, choose About This Mac from the Apple menu in the top-left + corner of your screen. + 3. After the download finishes, double-click the new disk image file + (ex. python-2.5-macosx.dmg) to mount it. If you're running Safari, this + has already been done for you. + 4. Open the mounted image and double-click the installer package inside. + 5. Follow the installation instructions and read the information and + license agreements as they're presented to you. Again, the default + settings will work fine here. + 6. Verify the installation by opening Terminal.app + (in /Applications/Utilities) and running python -V. The installation's + version should appear. + +--Installing Python on Linux-- + +To install on Linux and other *nix style operating systems, I prefer to +download the source code and compile it. However, you may be able to use your +favorite package manager to install Python. (For example, on Ubuntu this can +be as easy as running sudo apt-get install python on the command line.) To +install from source, follow these steps: + + 1. 
Download the source tarball from the Python download page. + http://python.org/download/ + 2. Once you've downloaded the package, unpack it using the command line. + You can use the following + + tar zxvf Python-2..tgz + + 3. Next, you'll need to compile and install the source code for the Python + interpreter. In the decompressed directory, run ./configure to generate + a makefile. + 4. Then, run make. This will create a working Python executable file in + the local directory. If you don't have root permission or you just want + to use Python from your home directory, you can stop here. You'll be + able to run Python from this directory, so you might want to add it to + your PATH environment variable. + 5. I prefer to have Python installed in /usr/bin/ where most Python + scripts look for the interpreter. If you have root access, then run + make install as root. This will install Python in the default location + and it will be usable by everyone on your machine. + 6. Check to see if your install is working as expected by opening a + terminal and running python -V. + +==Installing Dependencies== + +Currently, the only external dependency is an XML library named ElementTree. +If you are using Python version 2.5 or higher, you won't need to install +ElementTree since it comes with the Python package. + +To see if ElementTree is already present on your system, do the following: + + 1. Run the Python interpreter. I usually do this by executing python on + the command line. + 2. Try importing the ElementTree module. If you are using Python 2.5 or + higher, enter the following in the interpreter: + + from xml.etree import ElementTree + + For older versions, enter: + + from elementtree import ElementTree + + 3. If the import fails, then you will need to continue reading this + section. If it works, then you can skip to Installing the Google + data library. + 4. Download a version which is appropriate for your operating system. 
+ For example, if you are using Windows, download + elementtree-1.2.6-20050316.win32.exe. For other operating systems, + I recommend downloading a compressed version. + 5. If you are using a .tar.gz or .zip version of the library, first + unpack, then install it by running ./setup.py install. + +Running ./setup.py install attempts to compile the library and place it in +the system directory for your Python modules. If you do not have root access, +you can install the modules in your home directory or an alternate location by +running ./setup.py install --home=~. This will place the code in your home +directory. + +There is another option which avoids installing altogether. Once you +decompress the download, you will find a directory named elementtree. This +directory contains the modules which you will need to import. When you call +import from within Python, it looks for a module with the desired name in +several places. The first place it looks is in the current directory, so +if you are always running your code from one directory, you could just put +the elementtree directory there. Python will also look at the directories +listed in your PYTHONPATH environment variable. For instructions on +editing your PYTHONPATH, see the Appendix at the end of this article. +I recommend using ./setup.py install for elementtree. + +==Installing the Google Data Library== + +Download the Google data Python library if you haven't done so. Look for the +latest version on the Python project's downloads page. + +After downloading the library, unpack it using unzip or tar zxvf depending +on the type of download you chose. + +Now you are ready to install the library modules so that they can be imported +into Python. There are several ways you can do this: + + * If you have the ability to install packages for all users to access, + you can run ./setup.py install from the unpacked archive's main + directory. 
+ * If you want to install these modules for use in your home directory, + you can run ./setup.py install --home=. + +In some cases, you want to avoid installing the modules altogether. To do +that, modify your PYTHONPATH environment variable to include a directory +which contains the gdata and atom directories for the Google data Python +client library. For instructions on modifying your PYTHONPATH, see the +Appendix at the end of this article. + + * One final option that I'll mention, is copying the gdata and atom + directories from the src directory into whatever directory you are + in when you execute python. Python will look in the current directory + when you do an import, but I don't recommend this method unless you + are creating something quick and simple. + +Once you've installed the Google data library, you're ready to take the +library for a test drive. + +==Running Tests and Samples== + +The Google data Python client library distributions include some test cases +which are used in the development of the library. They can also serve as a +quick check to make sure that your dependencies and library installation are +working. From the top level directory where you've unpacked your copy of the +library, try running: + +./tests/run_data_tests.py + +If this script runs correctly, you should see output on the command line +like this: + +Running all tests in module gdata_test +....... +---------------------------------------------------------------------- +Ran 7 tests in 0.025s + +OK + +Running all tests in module atom_test +.......................................... +---------------------------------------------------------------------- +Ran 42 tests in 0.016s + +OK + +... + +If you did not see any errors as the tests execute, then you have probably set +up your environment correctly. Congratulations! 
+ +For further information see the original article: +http://code.google.com/support/bin/answer.py?answer=75582 + +==Appendix: Modifying the PYTHONPATH== + +When you import a package or module in Python, the interpreter looks for the +file in a series of locations including all of the directories listed in the +PYTHONPATH environment variable. I often modify my PYTHONPATH to point to +modules where I have copied the source code for a library I am using. This +prevents the need to install a module each time it is modified because +Python will load the module directly from directory which contains the +modified source code. + +I recommend the PYTHONPATH approach if you are making changes to the client +library code, or if you do not have admin rights on your system. By editing +the PYTHONPATH, you can put the required modules anywhere you like. + +I modified my PYTHONPATH on a *nix and Mac OS X system by setting it in my +.bashrc shell configuration file. If you are using the bash shell, you can +set the variable by adding the following line to your ~/.bashrc file. + +export PYTHONPATH=$PYTHONPATH:/home//svn/gdata-python-client/src + +You can then apply these changes to your current shell session by executing + +source ~/.bashrc. + +For Windows XP, pull up the Environment Variables for your profile: +Control Panel > System Properties > Advanced > Environment Variables. From +there, you can either create or edit the PYTHONPATH variable and add the +location of your local library copy. 
diff --git a/gdata.py-1.2.3/MANIFEST b/gdata.py-1.2.3/MANIFEST new file mode 100644 index 0000000..230f8b8 --- /dev/null +++ b/gdata.py-1.2.3/MANIFEST @@ -0,0 +1,267 @@ +README.txt +RELEASE_NOTES.txt +INSTALL.txt +MANIFEST +setup.py +src/atom/__init__.py +src/atom/core.py +src/atom/http.py +src/atom/http_interface.py +src/atom/mock_http.py +src/atom/mock_service.py +src/atom/service.py +src/atom/token_store.py +src/atom/url.py +src/gdata/__init__.py +src/gdata/auth.py +src/gdata/client.py +src/gdata/service.py +src/gdata/test_data.py +src/gdata/urlfetch.py +src/gdata/alt/__init__.py +src/gdata/alt/appengine.py +src/gdata/apps/__init__.py +src/gdata/apps/service.py +src/gdata/apps/emailsettings/__init__.py +src/gdata/apps/emailsettings/service.py +src/gdata/apps/migration/__init__.py +src/gdata/apps/migration/service.py +src/gdata/base/__init__.py +src/gdata/base/service.py +src/gdata/blogger/__init__.py +src/gdata/blogger/service.py +src/gdata/calendar/__init__.py +src/gdata/calendar/service.py +src/gdata/codesearch/__init__.py +src/gdata/codesearch/service.py +src/gdata/contacts/__init__.py +src/gdata/contacts/service.py +src/gdata/docs/__init__.py +src/gdata/docs/service.py +src/gdata/exif/__init__.py +src/gdata/geo/__init__.py +src/gdata/media/__init__.py +src/gdata/photos/__init__.py +src/gdata/photos/service.py +src/gdata/spreadsheet/__init__.py +src/gdata/spreadsheet/service.py +src/gdata/spreadsheet/text_db.py +src/gdata/webmastertools/__init__.py +src/gdata/webmastertools/service.py +src/gdata/youtube/__init__.py +src/gdata/youtube/service.py +src/gdata/oauth/__init__.py +src/gdata/oauth/CHANGES.txt +src/gdata/oauth/rsa.py +src/gdata/Crypto/Protocol/__init__.py +src/gdata/Crypto/Protocol/AllOrNothing.py +src/gdata/Crypto/Protocol/Chaffing.py +src/gdata/Crypto/Hash/MD4.pyd +src/gdata/Crypto/Hash/HMAC.py +src/gdata/Crypto/Hash/SHA.py +src/gdata/Crypto/Hash/SHA256.pyd +src/gdata/Crypto/Hash/__init__.py +src/gdata/Crypto/Hash/RIPEMD.pyd 
+src/gdata/Crypto/Hash/MD5.py +src/gdata/Crypto/Hash/MD2.pyd +src/gdata/Crypto/PublicKey +src/gdata/Crypto/PublicKey/qNEW.py +src/gdata/Crypto/PublicKey/__init__.py +src/gdata/Crypto/PublicKey/pubkey.py +src/gdata/Crypto/PublicKey/ElGamal.py +src/gdata/Crypto/PublicKey/RSA.py +src/gdata/Crypto/PublicKey/DSA.py +src/gdata/Crypto/Cipher/ARC2.pyd +src/gdata/Crypto/Cipher/AES.pyd +src/gdata/Crypto/Cipher/XOR.pyd +src/gdata/Crypto/Cipher/ARC4.pyd +src/gdata/Crypto/Cipher/RC5.pyd +src/gdata/Crypto/Cipher/CAST.pyd +src/gdata/Crypto/Cipher/DES.pyd +src/gdata/Crypto/Cipher/__init__.py +src/gdata/Crypto/Cipher/Blowfish.pyd +src/gdata/Crypto/Cipher/DES3.pyd +src/gdata/Crypto/Cipher/IDEA.pyd +src/gdata/Crypto/Util/RFC1751.py +src/gdata/Crypto/Util/number.py +src/gdata/Crypto/Util/randpool.py +src/gdata/Crypto/Util/__init__.py +src/gdata/Crypto/Util/test.py +src/gdata/Crypto/test.py +src/gdata/Crypto/__init__.py +src/gdata/tlslite/integration/TLSTwistedProtocolWrapper.py +src/gdata/tlslite/integration/TLSAsyncDispatcherMixIn.py +src/gdata/tlslite/integration/IMAP4_TLS.py +src/gdata/tlslite/integration/ClientHelper.py +src/gdata/tlslite/integration/__init__.py +src/gdata/tlslite/integration/HTTPTLSConnection.py +src/gdata/tlslite/integration/XMLRPCTransport.py +src/gdata/tlslite/integration/AsyncStateMachine.py +src/gdata/tlslite/integration/POP3_TLS.py +src/gdata/tlslite/integration/IntegrationHelper.py +src/gdata/tlslite/integration/SMTP_TLS.py +src/gdata/tlslite/integration/TLSSocketServerMixIn.py +src/gdata/tlslite/utils/RSAKey.py +src/gdata/tlslite/utils/__init__.py +src/gdata/tlslite/utils/Cryptlib_RC4.py +src/gdata/tlslite/utils/PyCrypto_RC4.py +src/gdata/tlslite/utils/Cryptlib_TripleDES.py +src/gdata/tlslite/utils/PyCrypto_TripleDES.py +src/gdata/tlslite/utils/PyCrypto_RSAKey.py +src/gdata/tlslite/utils/dateFuncs.py +src/gdata/tlslite/utils/codec.py +src/gdata/tlslite/utils/ASN1Parser.py +src/gdata/tlslite/utils/Cryptlib_AES.py +src/gdata/tlslite/utils/keyfactory.py 
+src/gdata/tlslite/utils/PyCrypto_AES.py +src/gdata/tlslite/utils/RC4.py +src/gdata/tlslite/utils/rijndael.py +src/gdata/tlslite/utils/Python_RC4.py +src/gdata/tlslite/utils/cryptomath.py +src/gdata/tlslite/utils/entropy.c +src/gdata/tlslite/utils/OpenSSL_RC4.py +src/gdata/tlslite/utils/TripleDES.py +src/gdata/tlslite/utils/OpenSSL_TripleDES.py +src/gdata/tlslite/utils/Python_RSAKey.py +src/gdata/tlslite/utils/OpenSSL_RSAKey.py +src/gdata/tlslite/utils/cipherfactory.py +src/gdata/tlslite/utils/AES.py +src/gdata/tlslite/utils/hmac.py +src/gdata/tlslite/utils/Python_AES.py +src/gdata/tlslite/utils/OpenSSL_AES.py +src/gdata/tlslite/utils/jython_compat.py +src/gdata/tlslite/utils/xmltools.py +src/gdata/tlslite/utils/compat.py +src/gdata/tlslite/utils/win32prng.c +src/gdata/tlslite/TLSRecordLayer.py +src/gdata/tlslite/__init__.py +src/gdata/tlslite/Checker.py +src/gdata/tlslite/X509.py +src/gdata/tlslite/FileObject.py +src/gdata/tlslite/messages.py +src/gdata/tlslite/HandshakeSettings.py +src/gdata/tlslite/SessionCache.py +src/gdata/tlslite/api.py +src/gdata/tlslite/mathtls.py +src/gdata/tlslite/constants.py +src/gdata/tlslite/errors.py +src/gdata/tlslite/BaseDB.py +src/gdata/tlslite/VerifierDB.py +src/gdata/tlslite/TLSConnection.py +src/gdata/tlslite/Session.py +src/gdata/tlslite/SharedKeyDB.py +src/gdata/tlslite/X509CertChain.py +samples/base/baseQueryExample.py +samples/base/dryRunInsert.py +samples/blogger/BloggerExample.py +samples/calendar/calendarExample.py +samples/contacts/contacts_example.py +samples/docs/docs_example.py +samples/mashups/birthdaySample.py +samples/spreadsheets/spreadsheetExample.py +samples/webmastertools/AddDeleteExampleDotCom.py +samples/webmastertools/SitemapsFeedSummary.py +samples/webmastertools/SitesFeedSummary.py +samples/oauth/oauth_example.py +samples/authsub/secure_authsub.py +tests/__init__.py +tests/atom_test.py +tests/gdata_test.py +tests/module_test_runner.py +tests/run_all_tests.py +tests/run_data_tests.py 
+tests/run_service_tests.py +tests/testimage.jpg +tests/atom_tests/__init__.py +tests/atom_tests/http_interface_test.py +tests/atom_tests/mock_http_test.py +tests/atom_tests/mock_server_test.py +tests/atom_tests/service_test.py +tests/atom_tests/token_store_test.py +tests/atom_tests/url_test.py +tests/atom_tests/mock_client_test.py +tests/atom_tests/core_test.py +tests/gdata_tests/__init__.py +tests/gdata_tests/apps_test.py +tests/gdata_tests/auth_test.py +tests/gdata_tests/base_test.py +tests/gdata_tests/blogger_test.py +tests/gdata_tests/calendar_test.py +tests/gdata_tests/client_online_test.py +tests/gdata_tests/client_test.py +tests/gdata_tests/codesearch_test.py +tests/gdata_tests/contacts_test.py +tests/gdata_tests/docs_test.py +tests/gdata_tests/photos_test.py +tests/gdata_tests/service_test.py +tests/gdata_tests/spreadsheet_test.py +tests/gdata_tests/webmastertools_test.py +tests/gdata_tests/youtube_test.py +tests/gdata_tests/apps/__init__.py +tests/gdata_tests/apps/service_test.py +tests/gdata_tests/apps/emailsettings/__init__.py +tests/gdata_tests/apps/emailsettings/service_test.py +tests/gdata_tests/apps/migration/__init__.py +tests/gdata_tests/apps/migration/service_test.py +tests/gdata_tests/base/__init__.py +tests/gdata_tests/base/service_test.py +tests/gdata_tests/blogger/__init__.py +tests/gdata_tests/blogger/service_test.py +tests/gdata_tests/calendar/__init__.py +tests/gdata_tests/calendar/calendar_acl_test.py +tests/gdata_tests/calendar/service_test.py +tests/gdata_tests/contacts/__init__.py +tests/gdata_tests/contacts/service_test.py +tests/gdata_tests/docs/__init__.py +tests/gdata_tests/docs/service_test.py +tests/gdata_tests/photos/__init__.py +tests/gdata_tests/photos/service_test.py +tests/gdata_tests/spreadsheet/__init__.py +tests/gdata_tests/spreadsheet/service_test.py +tests/gdata_tests/spreadsheet/text_db_test.py +tests/gdata_tests/youtube/__init__.py +tests/gdata_tests/youtube/service_test.py +pydocs/atom.html 
+pydocs/atom.http_interface.html +pydocs/atom.mock_http.html +pydocs/atom.mock_service.html +pydocs/atom.service.html +pydocs/atom.token_store.html +pydocs/atom.url.html +pydocs/gdata.alt.appengine.html +pydocs/gdata.apps.emailsettings.html +pydocs/gdata.apps.emailsettings.service.html +pydocs/gdata.apps.html +pydocs/gdata.apps.migration.html +pydocs/gdata.apps.migration.service.html +pydocs/gdata.apps.service.html +pydocs/gdata.auth.html +pydocs/gdata.base.html +pydocs/gdata.base.service.html +pydocs/gdata.blogger.html +pydocs/gdata.blogger.service.html +pydocs/gdata.calendar.html +pydocs/gdata.calendar.service.html +pydocs/gdata.client.html +pydocs/gdata.codesearch.html +pydocs/gdata.codesearch.service.html +pydocs/gdata.contacts.html +pydocs/gdata.contacts.service.html +pydocs/gdata.docs.html +pydocs/gdata.docs.service.html +pydocs/gdata.exif.html +pydocs/gdata.geo.html +pydocs/gdata.html +pydocs/gdata.media.html +pydocs/gdata.photos.html +pydocs/gdata.photos.service.html +pydocs/gdata.service.html +pydocs/gdata.spreadsheet.html +pydocs/gdata.spreadsheet.service.html +pydocs/gdata.spreadsheet.text_db.html +pydocs/gdata.urlfetch.html +pydocs/gdata.webmastertools.html +pydocs/gdata.webmastertools.service.html +pydocs/gdata.youtube.html +pydocs/gdata.youtube.service.html +pydocs/generate_docs diff --git a/gdata.py-1.2.3/PKG-INFO b/gdata.py-1.2.3/PKG-INFO new file mode 100644 index 0000000..8a97587 --- /dev/null +++ b/gdata.py-1.2.3/PKG-INFO @@ -0,0 +1,32 @@ +Metadata-Version: 1.0 +Name: gdata.py +Version: 1.2.3 +Summary: Python client library for Google data APIs +Home-page: http://code.google.com/p/gdata-python-client/ +Author: Jeffrey Scudder +Author-email: api.jscudder@gmail.com +License: Apache 2.0 +Description: The Google data Python client library makes it easy to access data + through the Google data APIs. 
This library provides data model and + service modules for the following Google data services: + - Google Calendar data API + - Google Contacts data API + - Google Spreadsheets data API + - Google Document List data APIs + - Google Base data API + - Google Apps Provisioning API + - Google Apps Email Migration API + - Google Apps Email Settings API + - Picasa Web Albums Data API + - Google Code Search Data API + - YouTube Data API + - Google Webmaster Tools Data API + - Blogger Data API + - core Google data API functionality + The core Google data code provides sufficient functionality to use this + library with any Google data API (even if a module hasn't been written for + it yet). For example, this client can be used with the Notebook API. + This library may also be used with any Atom Publishing Protocol + service. + +Platform: UNKNOWN diff --git a/gdata.py-1.2.3/README.txt b/gdata.py-1.2.3/README.txt new file mode 100644 index 0000000..dfae49d --- /dev/null +++ b/gdata.py-1.2.3/README.txt @@ -0,0 +1,28 @@ + Copyright (C) 2006 Google Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + For more information on the GData Python client library, please see the +project on code.google.com's hosting service here: +http://code.google.com/p/gdata-python-client/ + + Dependency Modules + + ElementTree - For XML parsing, download here: +http://effbot.org/zone/element-index.htm + + httplib - Part of the core Python library since version 2.0, so it should +already be present. 
+ + urllib - For URL creation and URL encoding, should already be present in +current versions of Python. diff --git a/gdata.py-1.2.3/RELEASE_NOTES.txt b/gdata.py-1.2.3/RELEASE_NOTES.txt new file mode 100644 index 0000000..b9521ab --- /dev/null +++ b/gdata.py-1.2.3/RELEASE_NOTES.txt @@ -0,0 +1,350 @@ +=== 1.2.3 === +Dec 3, 2008 (revision 585) + +- Added support for OAuth (thanks to Kunal Shah!). Your client can now obtain + an authorization token using the OAuth protocol. +- Added support for Secure AuthSub (thanks Eric Bidelman!). Your client can + digitally sign requests using RSA allowing Google service to verify that + the request came from your application. +- Added a new module for parsing XML which will be used in future versions to + support version of the Google Data APIs protocol. This new library handles + versioning of XML schemas. +- The Google Contacts API sample now pages through results. +- Added phone number rel types using in the Google Contacts API. +- The YouTube service module will use cElementTree if it is available. + Improves XML parsing speed. +- Fixed typo in gdata.geo, changed longtitude to longitude but kept an alias + for backwards compatibility. +- Fixed Blogger's GetBlogId regular expressions to extract the ID from + multiple kinds of entries. +- Fixed type check in atom.http to allow unicode URL strings. +- Added webmastertools test to the packaged download which fixed failures + when running all data tests. +- Improved compatibility of unit tests with Python2.3. +- Added copies of tlslite and dependencies to support secure AuthSub and + OAuth. +- Changed the default host for Google Apps API requests to + apps-apis.google.com. 
+ + +=== 1.2.2 === +Oct 15, 2008 (revision 556) + +- Added support for the following APIs: + Google Apps Email Migration API + Google Apps Email Settings API + Google Webmaster Tools Data API + Some modules for the above are not yet fully tested, so please file an + issue if you notice something is not working as expected. +- Restored support for gdata.http_request_handler when using App Engine to + preserve backwards compatibility. +- Simplified auth token management by adding a current_token member to + service classes. Also added settings to control when the token_store + is updated when using SetXToken() methods. The token_store will only be + queried if there is no current_token. +- Fixed issue with requests to HTTPS URLs in which an AuthSub token was seen + as invalid because the request contained the default port number (443). + The library no longer includes the port in the Host header if it is using + the default. +- Resolved issues with YouTube token scopes. +- Fixed issue which appeared when the Calendar API issues a redirect to a + PUT request. The library now correctly retries with a PUT (instead of + a POST). +- Added workaround for differences in how the App Engine SDK handles + redirects. +- Fixed typo in gdata.EntryLink declaration. +- Fixed invalid host errors seen when using some HTTP proxies. + + +=== 1.2.1 === +Sep 15, 2008 (revision 529) + +- The gdata.alt.appengine module now replaces a Service object's token_store + with an AppEngineTokenStore which automatically associates auth tokens + with the current user and stores the auth tokens in the datastore. +- Streamlined the gdata.auth module. There are now six recommended functions + for using Google auth mechanisms. GDataService now uses these six + functions. +- Added an override_token in Service objects (AtomService, GDataService) + which bypasses the token_store. This member is helpful when using a + single use AuthSub token which is only valid for one request and so + would not be reused. 
+- gdata.alt.appengine.run_on_appengine will now return the modified Service + object. This allows statements like: + client = gdata.alt.appengine.run_on_appengine( + gdata.blogger.service.BloggerService()) +- Fixed content length header issue in gdata.alt.appengine which caused + errors when running on App Engine when HTTP headers are set to + non-string values. +- Fixed function naming issue in gdata.alt.appengine, a "private" function + was not accessible in some import styles. +- Fixed issue in atom.http which surfaces when passing in a string for the + port number. Ports are now cast to ints. +- Updated pydocs. + +=== 1.2.0 === +Sep 9, 2008 (revision 522) + +- Refactored HTTP request mechanisms and the way that Authorization headers + are created and applied to requests. These changes make it easier to + swap out the HTTP layer to allow the library to be used in different + environments (like Google App Engine). The changes in Auth token logic + should make it much easier to add support for secure AuthSub and OAuth. + For more details on this change, see the following wiki page: + http://code.google.com/p/gdata-python-client/wiki/HTTPRequestRefactoring +- Fixed issues with token string modification which caused certain AuthSub + token values to become invalid. +- Created a new module for parsing and constructing URLs (atom.url). +- Created a module for token storage which will lookup the correct auth token + to be used for a particular URL. +- Auth tokens are now represented as objects and contain information about the + scopes (URLs) for which they can be used. The token object is responsible + for adding the Authorization header to the request. +- Added new functions to gdata.auth for extracting the raw token string from + a response to an AuthSub token upgrade request. +- Added support for the location query parameter in YouTube queries. +- Added groups functionality to the Contacts API library. +- Batch request support added for Contacts API operations. 
+- Added default behavior to text_db GetTables: when no worksheet name or ID + is specified, all tables are retrieved. +- Fixed client login for YouTube. +- Fixed issue in YouTube service when setting the developer key. +- Fixed bug in YouTube service which raised an exception when inserting a + video using a file-like object. +- Added a method to Feed and Entry classes to find the "previous" link. +- A failure when attempting to upgrade a single use AuthSub token to a session + token will now raise an exception. +- AppsForYourDomainException now uses Python2.5 style exception construction. +- Moved the logic for using this library on Google App Engine into a new + module: gdata.alt.appengine. + +=== 1.1.1 === +June 13, 2008 (revision 421) + +- Added support for new feeds available in the Google Contacts API. Adding + contact groups, querying for contacts in a group, specifying extended + properties, and setting the contact's photo are now supported. +- The ExtendedProperty class can now contain a blob of XML instead of just + an attribute value. This is used in the Google Contacts Data API. +- The YouTube service now has methods for updating playlists and removing a + video from a playlist. +- Added geo-location to the YouTube service class. +- When using ClientLogin with Blogger, the default account type is now set + to 'GOOGLE'. This prevents the rare problem of a user having a Google + Apps account which conflicts with his or her Google Account used for + Blogger. +- Added support for the in-reply-to element which appears in Blogger + comments. + +=== 1.1.0 === +June 6, 2008 (revision 403) + +- Added modules for YouTube. +- Added modules for Blogger to simplify using the Blogger API. +- Updated pydocs for all modules. +- New member in service object to allow the ClientLogin server to be changed. + Required to be able to use ClientLogin with YouTube. +- Iterating over Google Apps Feeds can now be accomplished using a generator. 
+- New unit tests for the Google Apps Provisioning API. +- Bug fixes to the mock_service module used in unit tests. +- Fixed a bug in Query string generation when using category queries. +- Improved ease of use for Calendar query construction, URL components are + now escaped automatically by default. +- Bug fix, added timezone element to CalendarEventFeed. +- Improved docstrings to describe token access methods (specifically + GetAuthSubToken). +- Moved ExtendedProperty class into gdata module from gdata.calendar since + it is now also used in the Google Contacts Data API. + +=== 1.0.13 === +May 8, 2008 (revision 357) + +- Added sample code illustrating the use of the Contacts API modules. +- Added a mock_service module which will allow for faster test execution + by simulating a server and replaying recordings instead of making + HTTP requests to a remote server. +- Fixed a bug in the gdata.auth module which could cause certain characters + at the start of the token to be removed incorrectly. +- Fixed a bug in the gdata.apps.service module which caused an import to + fail when loading ElementTree in some cases. +- Service unit tests should run correctly when using run_service_tests + and run_all_tests now that the subtests have been made into packages. + +=== 1.0.12.1 === +April 10, 2008 (revision 341) + +- Added new import statements so that xml.etree.ElementTree can be used. + Needed for the library to run on Google App Engine. + +=== 1.0.12 === +April 8, 2008 (revision 334) + +- Added support for the Google Contacts API. +- Added a gdata.urlfetch module which allows this library to be used in + Google App Engine. This module uses urlfetch instead of httplib to + perform HTTP requests. +- Refactored atom.service and gdata.service to make it easier to swap out the + HTTP communication layer. +- Fixed a bug in the Push method of gdata.spreadsheet.text_db. +- Sped up InsertRow method in gdata.spreadsheet.service. 
+- Improved XML parsing efficiency in gdata.calendar.service. +- Added new attributes for the Google Apps login element. +- Improved UpdatePhotoBlob in gdata.photo.service. +- Added pydocs for Google Contacts API and updates in other modules. + +=== 1.0.11.1 === +March 13, 2008 (revision 314) + +- Added text_db module to gdata.spreadsheet which makes using the Spreadsheets + API feel more like using a simple database. This module streamlines use + of the list feed. +- Compatibility fixes for geo and photos modules to run on Python 2.2 and 2.3. +- Added the orderby parameter to gdata.service's Query class. +- Changed the blogger sample to use orderby=updated so that queries on + updated min or max will work. +- Fix to string encoding in the atom core when converting text members to XML. + +=== 1.0.11 === +February 11, 2008 (revision 290) + +- Modified the gdata.service.GDataService to include a handler member to + which all HTTP requests are delegated. This will allow the underlying + HTTP client to be swapped out and pave the way for use of a mock server + in unit tests. +- Fixed character encoding problems when parsing non ASCII XML. +- Added pydocs for gdata's geo, exif, and media packages. + + +=== 1.0.10.1 === +December 20, 2007 (revision 265) + +- Photos packages for working with the Picasa Web Albums Data API. +- New modules for handling media, geo, and exif XML elements. +- Packages for the Google Code Search API. +- New PyDoc HTML pages generated from the source code. +- Extracted authentication code into a separate module which can be used in + other applications. +- The core XML parser should now fully support UTF-8 strings, there are also + options to change the default encoding. +- Bugfixes in Atom service's proxy support when authenticating with the proxy. +- Spreadsheets UpdateCell can now take integers for the row and column + parameters. +- Added INSTALL.txt to explain the installation process. 
The content was + copied from the getting started article here: + http://code.google.com/support/bin/answer.py?answer=75582 +- You can now create update and delete worksheets within a Google + Spreadsheet. +- Added convenience methods to service object to ease the process of + iterating through a feed. +- For Document List queries, added the ability to construct schema-qualified + named folder queries. + +=== 1.0.9 === +October 15, 2007 (revision 213) + +- Added support for batch requests for the Google Spreadsheets cells feed, + Google Calendar's events feed, and Google Base's items feed. +- Authentication requests can now be sent to a different server by modifying + a module variable in gdata.service. +- Fixed the birthdaySample mashup. +- User Agent headers now fit the pattern used in other Google data client + libraries. +- Made it easier to access custom elements for Google Base items. + +=== 1.0.8 === +August 31, 2007 (revision 192) + +- Major refactoring of the data model classes. Improved XML parsing + performance. +- Created a script to run all of the tests, one to run the data model tests, + and one to run all service tests. +- Changes to MediaSource to handle uploading files on Windows. +- All of the sample code should now work in Python 2.2 and higher. I removed + 2.4 dependencies. +- Minor change to the Blogger sample; it now uses entry.GetSelfLink(). +- Added fix to prevent socket.sslerror and redirect failures when using a + proxy. +- Google Calendar event entries can now contain a reference to the original + event. The original event element is present when describing a recurrence + exception. +- Added timezone parameter to Google Calendar queries. This allows the client + to request that event times be expressed in a specific timezone. + +=== 1.0.7 === +August 7, 2007 + +- Added new test for the Documents List data API's service module. +- Updated setup.py to include the docs package in the installation. 
+ +=== 1.0.6 === +Aug 2, 2007 + +- Support for Documents List API added along with sample code. + +=== 1.0.5 === +July 31, 2007 + +- XML parsing can now use cElementTree if it is available, this should speed + up XML parsing. +- Redirects now include all params in the original HTTP request. +- Added support for WebContent and QuickAdd features of Google Calendar data + API. +- Proxy support added. +- Binary MIME uploads now work for Windows files when performing media uploads. +- New calendar functionality for add/edit/delete calendar and add/edit/delete + subscriptions. Also, added new functionality for Calendar Gadgets. + +=== 1.0.4 === +June 23, 2007 + +- Added data and service modules for Google Apps. +- XML parsing can now be configured in gdata module service calls to avoid + multiparsing to convert to the desired type. +- Fixed UTF-8 support in the Atom modules. +- The spreadsheets example now works with UTF-8 column names. +- Fixed a typo in the Google Base dry-run sample. +- Calendar's Who element no longer requires a name when converting to XML. + +=== 1.0.3 === +May 30, 2007 + +- Added a mashup sample using the spreadsheets and calendar services. +- Reduced the number of unnecessary namespace declarations in the XML produced + by objects. +- Changed type of custom elements in a spreadsheets list entry. +- Fixed bugs in spreadsheetsExample.py. +- Spreadsheet entries now inherit from GDataEntry and have gdata specific + convenience methods. + +=== 1.0.2 === +May 4, 2007 + +- Added support for media. Services can now upload images, the tests for this + service use the Picasa Web Albums data API. +- Added example code illustrating Blogger operations. +- Fixed host name processing for URLs which have -'s in the host name. +- Added a debug setting for service classes. +- Added user agent header. +- Made finding links of a specific type easier when dealing with Atom elements. + Atom Entries now have GetXLink methods. 
+- Simplified finding license links in Atom Entries. + +=== 1.0.1 === +April 20, 2007 + +- Rearranged package structure, tests are in their own directory and Google + data API extension classes are nested under the gdata package. +- Simplified accessing extension elements by adding FindExtesions. +- Provided a setup.py file for distutils based installation. +- Added support for the app:draft element. +- Imports ElementTree from XML libraries in Python 2.5 +- Fixed _EntryLinkFromElementTree in the calendar data module. +- Fixed various _TakeChildFromElementTree methods in calendar data module. +- Fixed delete and update operations in the spreadsheetExample script. +- Fixed setting sort order and order by in calendar's service module. +- Added HTTP BasicAuth for AtomService. + +=== 1.0 === +March 26, 2007 + +Initial release diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/__init__.py new file mode 100644 index 0000000..247b271 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/__init__.py @@ -0,0 +1,1395 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Contains classes representing Atom elements. + + Module objective: provide data classes for Atom constructs. These classes hide + the XML-ness of Atom and provide a set of native Python classes to interact + with. 
+ + Conversions to and from XML should only be necessary when the Atom classes + "touch the wire" and are sent over HTTP. For this reason this module + provides methods and functions to convert Atom classes to and from strings. + + For more information on the Atom data model, see RFC 4287 + (http://www.ietf.org/rfc/rfc4287.txt) + + AtomBase: A foundation class on which Atom classes are built. It + handles the parsing of attributes and children which are common to all + Atom classes. By default, the AtomBase class translates all XML child + nodes into ExtensionElements. + + ExtensionElement: Atom allows Atom objects to contain XML which is not part + of the Atom specification, these are called extension elements. If a + classes parser encounters an unexpected XML construct, it is translated + into an ExtensionElement instance. ExtensionElement is designed to fully + capture the information in the XML. Child nodes in an XML extension are + turned into ExtensionElements as well. +""" + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree + + +# XML namespaces which are often used in Atom entities. +ATOM_NAMESPACE = 'http://www.w3.org/2005/Atom' +ELEMENT_TEMPLATE = '{http://www.w3.org/2005/Atom}%s' +APP_NAMESPACE = 'http://purl.org/atom/app#' +APP_TEMPLATE = '{http://purl.org/atom/app#}%s' + +# This encoding is used for converting strings before translating the XML +# into an object. +XML_STRING_ENCODING = 'utf-8' +# The desired string encoding for object members. +MEMBER_STRING_ENCODING = 'utf-8' + + +def CreateClassFromXMLString(target_class, xml_string, string_encoding=None): + """Creates an instance of the target class from the string contents. 
+ + Args: + target_class: class The class which will be instantiated and populated + with the contents of the XML. This class must have a _tag and a + _namespace class variable. + xml_string: str A string which contains valid XML. The root element + of the XML string should match the tag and namespace of the desired + class. + string_encoding: str The character encoding which the xml_string should + be converted to before it is interpreted and translated into + objects. The default is None in which case the string encoding + is not changed. + + Returns: + An instance of the target class with members assigned according to the + contents of the XML - or None if the root XML tag and namespace did not + match those of the target class. + """ + encoding = string_encoding or XML_STRING_ENCODING + if encoding and isinstance(xml_string, unicode): + xml_string = xml_string.encode(encoding) + tree = ElementTree.fromstring(xml_string) + return _CreateClassFromElementTree(target_class, tree) + + +def _CreateClassFromElementTree(target_class, tree, namespace=None, tag=None): + """Instantiates the class and populates members according to the tree. + + Note: Only use this function with classes that have _namespace and _tag + class members. + + Args: + target_class: class The class which will be instantiated and populated + with the contents of the XML. + tree: ElementTree An element tree whose contents will be converted into + members of the new target_class instance. + namespace: str (optional) The namespace which the XML tree's root node must + match. If omitted, the namespace defaults to the _namespace of the + target class. + tag: str (optional) The tag which the XML tree's root node must match. If + omitted, the tag defaults to the _tag class member of the target + class. + + Returns: + An instance of the target class - or None if the tag and namespace of + the XML tree's root node did not match the desired namespace and tag. 
+ """ + if namespace is None: + namespace = target_class._namespace + if tag is None: + tag = target_class._tag + if tree.tag == '{%s}%s' % (namespace, tag): + target = target_class() + target._HarvestElementTree(tree) + return target + else: + return None + + +class ExtensionContainer(object): + + def __init__(self, extension_elements=None, extension_attributes=None, + text=None): + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.text = text + + # Three methods to create an object from an ElementTree + def _HarvestElementTree(self, tree): + # Fill in the instance members from the contents of the XML tree. + for child in tree: + self._ConvertElementTreeToMember(child) + for attribute, value in tree.attrib.iteritems(): + self._ConvertElementAttributeToMember(attribute, value) + # Encode the text string according to the desired encoding type. (UTF-8) + if tree.text: + self.text = tree.text.encode(MEMBER_STRING_ENCODING) + + def _ConvertElementTreeToMember(self, child_tree, current_class=None): + self.extension_elements.append(_ExtensionElementFromElementTree( + child_tree)) + + def _ConvertElementAttributeToMember(self, attribute, value): + # Encode the attribute value's string with the desired type Default UTF-8 + if value: + self.extension_attributes[attribute] = value.encode( + MEMBER_STRING_ENCODING) + + # One method to create an ElementTree from an object + def _AddMembersToElementTree(self, tree): + for child in self.extension_elements: + child._BecomeChildElement(tree) + for attribute, value in self.extension_attributes.iteritems(): + if value: + # Decode the value from the desired encoding (default UTF-8). 
+ if not isinstance(value, unicode): + tree.attrib[attribute] = value.decode(MEMBER_STRING_ENCODING) + else: + tree.attrib[attribute] = value + if self.text and not isinstance(self.text, unicode): + tree.text = self.text.decode(MEMBER_STRING_ENCODING) + else: + tree.text = self.text + + def FindExtensions(self, tag=None, namespace=None): + """Searches extension elements for child nodes with the desired name. + + Returns a list of extension elements within this object whose tag + and/or namespace match those passed in. To find all extensions in + a particular namespace, specify the namespace but not the tag name. + If you specify only the tag, the result list may contain extension + elements in multiple namespaces. + + Args: + tag: str (optional) The desired tag + namespace: str (optional) The desired namespace + + Returns: + A list of elements whose tag and/or namespace match the parameters + values + """ + + results = [] + + if tag and namespace: + for element in self.extension_elements: + if element.tag == tag and element.namespace == namespace: + results.append(element) + elif tag and not namespace: + for element in self.extension_elements: + if element.tag == tag: + results.append(element) + elif namespace and not tag: + for element in self.extension_elements: + if element.namespace == namespace: + results.append(element) + else: + for element in self.extension_elements: + results.append(element) + + return results + + +class AtomBase(ExtensionContainer): + + _children = {} + _attributes = {} + + def __init__(self, extension_elements=None, extension_attributes=None, + text=None): + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.text = text + + def _ConvertElementTreeToMember(self, child_tree): + # Find the element's tag in this class's list of child members + if self.__class__._children.has_key(child_tree.tag): + member_name = self.__class__._children[child_tree.tag][0] + member_class = 
self.__class__._children[child_tree.tag][1] + # If the class member is supposed to contain a list, make sure the + # matching member is set to a list, then append the new member + # instance to the list. + if isinstance(member_class, list): + if getattr(self, member_name) is None: + setattr(self, member_name, []) + getattr(self, member_name).append(_CreateClassFromElementTree( + member_class[0], child_tree)) + else: + setattr(self, member_name, + _CreateClassFromElementTree(member_class, child_tree)) + else: + ExtensionContainer._ConvertElementTreeToMember(self, child_tree) + + def _ConvertElementAttributeToMember(self, attribute, value): + # Find the attribute in this class's list of attributes. + if self.__class__._attributes.has_key(attribute): + # Find the member of this class which corresponds to the XML attribute + # (lookup in current_class._attributes) and set this member to the + # desired value (using self.__dict__). + if value: + # Encode the string to capture non-ascii characters (default UTF-8) + setattr(self, self.__class__._attributes[attribute], + value.encode(MEMBER_STRING_ENCODING)) + else: + ExtensionContainer._ConvertElementAttributeToMember(self, attribute, + value) + + # Three methods to create an ElementTree from an object + def _AddMembersToElementTree(self, tree): + # Convert the members of this class which are XML child nodes. + # This uses the class's _children dictionary to find the members which + # should become XML child nodes. + member_node_names = [values[0] for tag, values in + self.__class__._children.iteritems()] + for member_name in member_node_names: + member = getattr(self, member_name) + if member is None: + pass + elif isinstance(member, list): + for instance in member: + instance._BecomeChildElement(tree) + else: + member._BecomeChildElement(tree) + # Convert the members of this class which are XML attributes. 
+ for xml_attribute, member_name in self.__class__._attributes.iteritems(): + member = getattr(self, member_name) + if member is not None: + if not isinstance(member, unicode): + tree.attrib[xml_attribute] = member.decode(MEMBER_STRING_ENCODING) + else: + tree.attrib[xml_attribute] = member + # Lastly, call the ExtensionContainers's _AddMembersToElementTree to + # convert any extension attributes. + ExtensionContainer._AddMembersToElementTree(self, tree) + + + def _BecomeChildElement(self, tree): + """ + + Note: Only for use with classes that have a _tag and _namespace class + member. It is in AtomBase so that it can be inherited but it should + not be called on instances of AtomBase. + + """ + new_child = ElementTree.Element('') + tree.append(new_child) + new_child.tag = '{%s}%s' % (self.__class__._namespace, + self.__class__._tag) + self._AddMembersToElementTree(new_child) + + def _ToElementTree(self): + """ + + Note, this method is designed to be used only with classes that have a + _tag and _namespace. It is placed in AtomBase for inheritance but should + not be called on this class. 
+ + """ + new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace, + self.__class__._tag)) + self._AddMembersToElementTree(new_tree) + return new_tree + + def ToString(self, string_encoding='UTF-8'): + """Converts the Atom object to a string containing XML.""" + return ElementTree.tostring(self._ToElementTree(), encoding=string_encoding) + + def __str__(self): + return self.ToString() + + +class Name(AtomBase): + """The atom:name element""" + + _tag = 'name' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Name + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def NameFromString(xml_string): + return CreateClassFromXMLString(Name, xml_string) + + +class Email(AtomBase): + """The atom:email element""" + + _tag = 'email' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Email + + Args: + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + text: str The text data in the this element + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def EmailFromString(xml_string): + return CreateClassFromXMLString(Email, xml_string) + + +class Uri(AtomBase): + """The atom:uri element""" + + _tag = 'uri' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + 
_attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Uri + + Args: + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + text: str The text data in the this element + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def UriFromString(xml_string): + return CreateClassFromXMLString(Uri, xml_string) + + +class Person(AtomBase): + """A foundation class from which atom:author and atom:contributor extend. + + A person contains information like name, email address, and web page URI for + an author or contributor to an Atom feed. + """ + + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _children['{%s}name' % (ATOM_NAMESPACE)] = ('name', Name) + _children['{%s}email' % (ATOM_NAMESPACE)] = ('email', Email) + _children['{%s}uri' % (ATOM_NAMESPACE)] = ('uri', Uri) + + def __init__(self, name=None, email=None, uri=None, + extension_elements=None, extension_attributes=None, text=None): + """Foundation from which author and contributor are derived. + + The constructor is provided for illustrative purposes, you should not + need to instantiate a Person. + + Args: + name: Name The person's name + email: Email The person's email address + uri: Uri The URI of the person's webpage + extension_elements: list A list of ExtensionElement instances which are + children of this element. + extension_attributes: dict A dictionary of strings which are the values + for additional XML attributes of this element. + text: String The text contents of the element. This is the contents + of the Entry's XML text node. 
(Example: This is the text) + """ + + self.name = name + self.email = email + self.uri = uri + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.text = text + + +class Author(Person): + """The atom:author element + + An author is a required element in Feed. + """ + + _tag = 'author' + _namespace = ATOM_NAMESPACE + _children = Person._children.copy() + _attributes = Person._attributes.copy() + #_children = {} + #_attributes = {} + + def __init__(self, name=None, email=None, uri=None, + extension_elements=None, extension_attributes=None, text=None): + """Constructor for Author + + Args: + name: Name + email: Email + uri: Uri + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + text: str The text data in the this element + """ + + self.name = name + self.email = email + self.uri = uri + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.text = text + + +def AuthorFromString(xml_string): + return CreateClassFromXMLString(Author, xml_string) + + +class Contributor(Person): + """The atom:contributor element""" + + _tag = 'contributor' + _namespace = ATOM_NAMESPACE + _children = Person._children.copy() + _attributes = Person._attributes.copy() + + def __init__(self, name=None, email=None, uri=None, + extension_elements=None, extension_attributes=None, text=None): + """Constructor for Contributor + + Args: + name: Name + email: Email + uri: Uri + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + text: str The text data in the this element + """ + + self.name = name + self.email = email + self.uri = uri + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.text = text + + +def ContributorFromString(xml_string): + 
return CreateClassFromXMLString(Contributor, xml_string) + + +class Link(AtomBase): + """The atom:link element""" + + _tag = 'link' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _attributes['rel'] = 'rel' + _attributes['href'] = 'href' + _attributes['type'] = 'type' + _attributes['title'] = 'title' + _attributes['length'] = 'length' + _attributes['hreflang'] = 'hreflang' + + def __init__(self, href=None, rel=None, link_type=None, hreflang=None, + title=None, length=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Link + + Args: + href: string The href attribute of the link + rel: string + type: string + hreflang: string The language for the href + title: string + length: string The length of the href's destination + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + text: str The text data in the this element + """ + + self.href = href + self.rel = rel + self.type = link_type + self.hreflang = hreflang + self.title = title + self.length = length + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def LinkFromString(xml_string): + return CreateClassFromXMLString(Link, xml_string) + + +class Generator(AtomBase): + """The atom:generator element""" + + _tag = 'generator' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _attributes['uri'] = 'uri' + _attributes['version'] = 'version' + + def __init__(self, uri=None, version=None, text=None, + extension_elements=None, extension_attributes=None): + """Constructor for Generator + + Args: + uri: string + version: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute 
value string pairs + """ + + self.uri = uri + self.version = version + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +def GeneratorFromString(xml_string): + return CreateClassFromXMLString(Generator, xml_string) + + +class Text(AtomBase): + """A foundation class from which atom:title, summary, etc. extend. + + This class should never be instantiated. + """ + + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _attributes['type'] = 'type' + + def __init__(self, text_type=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Text + + Args: + text_type: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.type = text_type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Title(Text): + """The atom:title element""" + + _tag = 'title' + _namespace = ATOM_NAMESPACE + _children = Text._children.copy() + _attributes = Text._attributes.copy() + + def __init__(self, title_type=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Title + + Args: + title_type: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.type = title_type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def TitleFromString(xml_string): + return CreateClassFromXMLString(Title, xml_string) + + +class Subtitle(Text): + """The atom:subtitle element""" + + _tag = 'subtitle' + _namespace = ATOM_NAMESPACE + _children = Text._children.copy() + 
_attributes = Text._attributes.copy() + + def __init__(self, subtitle_type=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Subtitle + + Args: + subtitle_type: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.type = subtitle_type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SubtitleFromString(xml_string): + return CreateClassFromXMLString(Subtitle, xml_string) + + +class Rights(Text): + """The atom:rights element""" + + _tag = 'rights' + _namespace = ATOM_NAMESPACE + _children = Text._children.copy() + _attributes = Text._attributes.copy() + + def __init__(self, rights_type=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Rights + + Args: + rights_type: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.type = rights_type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def RightsFromString(xml_string): + return CreateClassFromXMLString(Rights, xml_string) + + +class Summary(Text): + """The atom:summary element""" + + _tag = 'summary' + _namespace = ATOM_NAMESPACE + _children = Text._children.copy() + _attributes = Text._attributes.copy() + + def __init__(self, summary_type=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Summary + + Args: + summary_type: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + 
self.type = summary_type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SummaryFromString(xml_string): + return CreateClassFromXMLString(Summary, xml_string) + + +class Content(Text): + """The atom:content element""" + + _tag = 'content' + _namespace = ATOM_NAMESPACE + _children = Text._children.copy() + _attributes = Text._attributes.copy() + _attributes['src'] = 'src' + + def __init__(self, content_type=None, src=None, text=None, + extension_elements=None, extension_attributes=None): + """Constructor for Content + + Args: + content_type: string + src: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.type = content_type + self.src = src + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +def ContentFromString(xml_string): + return CreateClassFromXMLString(Content, xml_string) + + +class Category(AtomBase): + """The atom:category element""" + + _tag = 'category' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _attributes['term'] = 'term' + _attributes['scheme'] = 'scheme' + _attributes['label'] = 'label' + + def __init__(self, term=None, scheme=None, label=None, text=None, + extension_elements=None, extension_attributes=None): + """Constructor for Category + + Args: + term: str + scheme: str + label: str + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.term = term + self.scheme = scheme + self.label = label + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes 
or {} + + +def CategoryFromString(xml_string): + return CreateClassFromXMLString(Category, xml_string) + + +class Id(AtomBase): + """The atom:id element.""" + + _tag = 'id' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Id + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def IdFromString(xml_string): + return CreateClassFromXMLString(Id, xml_string) + + +class Icon(AtomBase): + """The atom:icon element.""" + + _tag = 'icon' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Icon + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def IconFromString(xml_string): + return CreateClassFromXMLString(Icon, xml_string) + + +class Logo(AtomBase): + """The atom:logo element.""" + + _tag = 'logo' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Logo + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A 
dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def LogoFromString(xml_string): + return CreateClassFromXMLString(Logo, xml_string) + + +class Draft(AtomBase): + """The app:draft element which indicates if this entry should be public.""" + + _tag = 'draft' + _namespace = APP_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for app:draft + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def DraftFromString(xml_string): + return CreateClassFromXMLString(Draft, xml_string) + + +class Control(AtomBase): + """The app:control element indicating restrictions on publication. + + The APP control element may contain a draft element indicating whether or + not this entry should be publicly available. 
+ """ + + _tag = 'control' + _namespace = APP_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _children['{%s}draft' % APP_NAMESPACE] = ('draft', Draft) + + def __init__(self, draft=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for app:control""" + + self.draft = draft + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def ControlFromString(xml_string): + return CreateClassFromXMLString(Control, xml_string) + + +class Date(AtomBase): + """A parent class for atom:updated, published, etc.""" + + #TODO Add text to and from time conversion methods to allow users to set + # the contents of a Date to a python DateTime object. + + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Updated(Date): + """The atom:updated element.""" + + _tag = 'updated' + _namespace = ATOM_NAMESPACE + _children = Date._children.copy() + _attributes = Date._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Updated + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def UpdatedFromString(xml_string): + return CreateClassFromXMLString(Updated, xml_string) + + +class Published(Date): + """The atom:published element.""" + + _tag = 'published' + _namespace = ATOM_NAMESPACE + _children = Date._children.copy() + 
_attributes = Date._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Published + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def PublishedFromString(xml_string): + return CreateClassFromXMLString(Published, xml_string) + + +class LinkFinder(object): + """An "interface" providing methods to find link elements + + Entry elements often contain multiple links which differ in the rel + attribute or content type. Often, developers are interested in a specific + type of link so this class provides methods to find specific classes of + links. + + This class is used as a mixin in Atom entries and feeds. + """ + + def GetSelfLink(self): + """Find the first link with rel set to 'self' + + Returns: + An atom.Link or none if none of the links had rel equal to 'self' + """ + + for a_link in self.link: + if a_link.rel == 'self': + return a_link + return None + + def GetEditLink(self): + for a_link in self.link: + if a_link.rel == 'edit': + return a_link + return None + + def GetNextLink(self): + for a_link in self.link: + if a_link.rel == 'next': + return a_link + return None + + def GetLicenseLink(self): + for a_link in self.link: + if a_link.rel == 'license': + return a_link + return None + + def GetAlternateLink(self): + for a_link in self.link: + if a_link.rel == 'alternate': + return a_link + return None + + +class FeedEntryParent(AtomBase, LinkFinder): + """A super class for atom:feed and entry, contains shared attributes""" + + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _children['{%s}author' % ATOM_NAMESPACE] = ('author', [Author]) + _children['{%s}category' % 
ATOM_NAMESPACE] = ('category', [Category]) + _children['{%s}contributor' % ATOM_NAMESPACE] = ('contributor', [Contributor]) + _children['{%s}id' % ATOM_NAMESPACE] = ('id', Id) + _children['{%s}link' % ATOM_NAMESPACE] = ('link', [Link]) + _children['{%s}rights' % ATOM_NAMESPACE] = ('rights', Rights) + _children['{%s}title' % ATOM_NAMESPACE] = ('title', Title) + _children['{%s}updated' % ATOM_NAMESPACE] = ('updated', Updated) + + def __init__(self, author=None, category=None, contributor=None, + atom_id=None, link=None, rights=None, title=None, updated=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.rights = rights + self.title = title + self.updated = updated + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Source(FeedEntryParent): + """The atom:source element""" + + _tag = 'source' + _namespace = ATOM_NAMESPACE + _children = FeedEntryParent._children.copy() + _attributes = FeedEntryParent._attributes.copy() + _children['{%s}generator' % ATOM_NAMESPACE] = ('generator', Generator) + _children['{%s}icon' % ATOM_NAMESPACE] = ('icon', Icon) + _children['{%s}logo' % ATOM_NAMESPACE] = ('logo', Logo) + _children['{%s}subtitle' % ATOM_NAMESPACE] = ('subtitle', Subtitle) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, text=None, + extension_elements=None, extension_attributes=None): + """Constructor for Source + + Args: + author: list (optional) A list of Author instances which belong to this + class. 
+ category: list (optional) A list of Category instances + contributor: list (optional) A list on Contributor instances + generator: Generator (optional) + icon: Icon (optional) + id: Id (optional) The entry's Id element + link: list (optional) A list of Link instances + logo: Logo (optional) + rights: Rights (optional) The entry's Rights element + subtitle: Subtitle (optional) The entry's subtitle element + title: Title (optional) the entry's title element + updated: Updated (optional) the entry's updated element + text: String (optional) The text contents of the element. This is the + contents of the Entry's XML text node. + (Example: This is the text) + extension_elements: list (optional) A list of ExtensionElement instances + which are children of this element. + extension_attributes: dict (optional) A dictionary of strings which are + the values for additional XML attributes of this element. + """ + + self.author = author or [] + self.category = category or [] + self.contributor = contributor or [] + self.generator = generator + self.icon = icon + self.id = atom_id + self.link = link or [] + self.logo = logo + self.rights = rights + self.subtitle = subtitle + self.title = title + self.updated = updated + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SourceFromString(xml_string): + return CreateClassFromXMLString(Source, xml_string) + + +class Entry(FeedEntryParent): + """The atom:entry element""" + + _tag = 'entry' + _namespace = ATOM_NAMESPACE + _children = FeedEntryParent._children.copy() + _attributes = FeedEntryParent._attributes.copy() + _children['{%s}content' % ATOM_NAMESPACE] = ('content', Content) + _children['{%s}published' % ATOM_NAMESPACE] = ('published', Published) + _children['{%s}source' % ATOM_NAMESPACE] = ('source', Source) + _children['{%s}summary' % ATOM_NAMESPACE] = ('summary', Summary) + _children['{%s}control' % APP_NAMESPACE] = ('control', Control) 
+ + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, control=None, title=None, updated=None, + extension_elements=None, extension_attributes=None, text=None): + """Constructor for atom:entry + + Args: + author: list A list of Author instances which belong to this class. + category: list A list of Category instances + content: Content The entry's Content + contributor: list A list on Contributor instances + id: Id The entry's Id element + link: list A list of Link instances + published: Published The entry's Published element + rights: Rights The entry's Rights element + source: Source the entry's source element + summary: Summary the entry's summary element + title: Title the entry's title element + updated: Updated the entry's updated element + control: The entry's app:control element which can be used to mark an + entry as a draft which should not be publicly viewable. + text: String The text contents of the element. This is the contents + of the Entry's XML text node. (Example: This is the text) + extension_elements: list A list of ExtensionElement instances which are + children of this element. + extension_attributes: dict A dictionary of strings which are the values + for additional XML attributes of this element. 
+    """Constructor for Feed
+    """Constructor for ExtensionElement
+
+    Args:
+      namespace: string (optional) The XML namespace for this element.
+      tag: string (optional) The tag (without the namespace qualifier) for
+          this element. To reconstruct the full qualified name of the element,
+          combine this tag with the namespace.
+      attributes: dict (optional) The attribute value string pairs for the XML
+          attributes of this element.
+ """ + + self.namespace = namespace + self.tag = tag + self.attributes = attributes or {} + self.children = children or [] + self.text = text + + def ToString(self): + element_tree = self._TransferToElementTree(ElementTree.Element('')) + return ElementTree.tostring(element_tree, encoding="UTF-8") + + def _TransferToElementTree(self, element_tree): + if self.tag is None: + return None + + if self.namespace is not None: + element_tree.tag = '{%s}%s' % (self.namespace, self.tag) + else: + element_tree.tag = self.tag + + for key, value in self.attributes.iteritems(): + element_tree.attrib[key] = value + + for child in self.children: + child._BecomeChildElement(element_tree) + + element_tree.text = self.text + + return element_tree + + def _BecomeChildElement(self, element_tree): + """Converts this object into an etree element and adds it as a child node. + + Adds self to the ElementTree. This method is required to avoid verbose XML + which constantly redefines the namespace. + + Args: + element_tree: ElementTree._Element The element to which this object's XML + will be added. + """ + new_element = ElementTree.Element('') + element_tree.append(new_element) + self._TransferToElementTree(new_element) + + def FindChildren(self, tag=None, namespace=None): + """Searches child nodes for objects with the desired tag/namespace. + + Returns a list of extension elements within this object whose tag + and/or namespace match those passed in. To find all children in + a particular namespace, specify the namespace but not the tag name. + If you specify only the tag, the result list may contain extension + elements in multiple namespaces. 
+ + Args: + tag: str (optional) The desired tag + namespace: str (optional) The desired namespace + + Returns: + A list of elements whose tag and/or namespace match the parameters + values + """ + + results = [] + + if tag and namespace: + for element in self.children: + if element.tag == tag and element.namespace == namespace: + results.append(element) + elif tag and not namespace: + for element in self.children: + if element.tag == tag: + results.append(element) + elif namespace and not tag: + for element in self.children: + if element.namespace == namespace: + results.append(element) + else: + for element in self.children: + results.append(element) + + return results + + +def ExtensionElementFromString(xml_string): + element_tree = ElementTree.fromstring(xml_string) + return _ExtensionElementFromElementTree(element_tree) + + +def _ExtensionElementFromElementTree(element_tree): + element_tag = element_tree.tag + if '}' in element_tag: + namespace = element_tag[1:element_tag.index('}')] + tag = element_tag[element_tag.index('}')+1:] + else: + namespace = None + tag = element_tag + extension = ExtensionElement(namespace=namespace, tag=tag) + for key, value in element_tree.attrib.iteritems(): + extension.attributes[key] = value + for child in element_tree: + extension.children.append(_ExtensionElementFromElementTree(child)) + extension.text = element_tree.text + return extension diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/core.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/core.py new file mode 100644 index 0000000..b37a54e --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/core.py @@ -0,0 +1,382 @@ +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import inspect +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree + + +class XmlElement(object): + _qname = None + _other_elements = None + _other_attributes = None + _rule_set = None + _members = None + text = None + + def __init__(self, text=None, *args, **kwargs): + if ('_members' not in self.__class__.__dict__ + or self.__class__._members is None): + self.__class__._members = tuple(self.__class__._list_xml_members()) + for member_name, member_type in self.__class__._members: + if member_name in kwargs: + setattr(self, member_name, kwargs[member_name]) + else: + if isinstance(member_type, list): + setattr(self, member_name, []) + else: + setattr(self, member_name, None) + self._other_elements = [] + self._other_attributes = {} + if text is not None: + self.text = text + + def _list_xml_members(cls): + """Generator listing all members which are XML elements or attributes. 
+        attribute with the qname 'att1' but version 2 of the parsing rules will
+          # This member points to a single occurrence element.
+ + To find all elements in this object, call get_elements with the tag and + namespace both set to None (the default). This method searches through + the object's members and the elements stored in _other_elements which + did not match any of the XML parsing rules for this class. + + Args: + tag: str + namespace: str + version: int Specifies the version of the XML rules to be used when + searching for matching elements. + + Returns: + A list of the matching XmlElements. + """ + matches = [] + ignored1, elements, ignored2 = self.__class__._get_rules(version) + if elements: + for qname, element_def in elements.iteritems(): + member = getattr(self, element_def[0]) + if member: + if _qname_matches(tag, namespace, qname): + if element_def[2]: + # If this is a repeating element, copy all instances into the + # result list. + matches.extend(member) + else: + matches.append(member) + for element in self._other_elements: + if _qname_matches(tag, namespace, element._qname): + matches.append(element) + return matches + + def get_attributes(self, tag=None, namespace=None, version=1): + """Find all attributes which match the tag and namespace. + + To find all attributes in this object, call get_attributes with the tag + and namespace both set to None (the default). This method searches + through the object's members and the attributes stored in + _other_attributes which did not fit any of the XML parsing rules for this + class. + + Args: + tag: str + namespace: str + version: int Specifies the version of the XML rules to be used when + searching for matching attributes. + + Returns: + A list of XmlAttribute objects for the matching attributes. 
+ """ + matches = [] + ignored1, ignored2, attributes = self.__class__._get_rules(version) + if attributes: + for qname, attribute_def in attributes.iteritems(): + member = getattr(self, attribute_def[0]) + if member: + if _qname_matches(tag, namespace, qname): + matches.append(XmlAttribute(qname, member)) + for qname, value in self._other_attributes.iteritems(): + if _qname_matches(tag, namespace, qname): + matches.append(XmlAttribute(qname, value)) + return matches + + def _harvest_tree(self, tree, version=1): + """Populates object members from the data in the tree Element.""" + qname, elements, attributes = self.__class__._get_rules(version) + for element in tree: + if elements and element.tag in elements: + definition = elements[element.tag] + # If this is a repeating element, make sure the member is set to a + # list. + if definition[2]: + if getattr(self, definition[0]) is None: + setattr(self, definition[0], []) + getattr(self, definition[0]).append(_xml_element_from_tree(element, + definition[1])) + else: + setattr(self, definition[0], _xml_element_from_tree(element, + definition[1])) + else: + self._other_elements.append(_xml_element_from_tree(element, XmlElement)) + for attrib, value in tree.attrib.iteritems(): + if attributes and attrib in attributes: + setattr(self, attributes[attrib], value) + else: + self._other_attributes[attrib] = value + if tree.text: + self.text = tree.text + + def _to_tree(self, version=1): + new_tree = ElementTree.Element(_get_qname(self, version)) + self._attach_members(new_tree, version) + return new_tree + + def _attach_members(self, tree, version=1): + """Convert members to XML elements/attributes and add them to the tree. + + Args: + tree: An ElementTree.Element which will be modified. The members of + this object will be added as child elements or attributes + according to the rules described in _expected_elements and + _expected_attributes. 
The elements and attributes stored in
+              other_attributes and other_elements are also added as children
+              of this tree.
+      version: int Ignored in this method but used by VersionedElement.
+  Args:
+    tag: string The expected local tag name.
+    namespace: string The expected namespace.
+ """ +# xml_string = xml_string.encode('UTF-8') + tree = ElementTree.fromstring(xml_string) + #tree = ElementTree.fromstring(unicode(xml_string, encoding)) + return _xml_element_from_tree(tree, target_class, version) + + +def _xml_element_from_tree(tree, target_class, version=1): + if target_class._qname is None: + instance = target_class() + instance._qname = tree.tag + instance._harvest_tree(tree, version) + return instance + # TODO handle the namespace-only case + # Namespace only will be used with Google Spreadsheets rows and + # Google Base item attributes. + elif tree.tag == target_class._qname: + instance = target_class() + instance._harvest_tree(tree, version) + return instance + return None + + +class XmlAttribute(object): + + def __init__(self, qname, value): + self._qname = qname + self.value = value diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/http.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/http.py new file mode 100644 index 0000000..c40b394 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/http.py @@ -0,0 +1,286 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""HttpClients in this module use httplib to make HTTP requests. + +This module make HTTP requests based on httplib, but there are environments +in which an httplib based approach will not work (if running in Google App +Engine for example). 
In those cases, higher level classes (like AtomService +and GDataService) can swap out the HttpClient to transparently use a +different mechanism for making HTTP requests. + + HttpClient: Contains a request method which performs an HTTP call to the + server. + + ProxiedHttpClient: Contains a request method which connects to a proxy using + settings stored in operating system environment variables then + performs an HTTP call to the endpoint server. +""" + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import types +import os +import httplib +import atom.url +import atom.http_interface +import socket +import base64 + + +class ProxyError(atom.http_interface.Error): + pass + + +DEFAULT_CONTENT_TYPE = 'application/atom+xml' + + +class HttpClient(atom.http_interface.GenericHttpClient): + def __init__(self, headers=None): + self.debug = False + self.headers = headers or {} + + def request(self, operation, url, data=None, headers=None): + """Performs an HTTP call to the server, supports GET, POST, PUT, and + DELETE. + + Usage example, perform and HTTP GET on http://www.google.com/: + import atom.http + client = atom.http.HttpClient() + http_response = client.request('GET', 'http://www.google.com/') + + Args: + operation: str The HTTP operation to be performed. This is usually one + of 'GET', 'POST', 'PUT', or 'DELETE' + data: filestream, list of parts, or other object which can be converted + to a string. Should be set to None when performing a GET or DELETE. + If data is a file-like object which can be read, this method will + read a chunk of 100K bytes at a time and send them. + If the data is a list of parts to be sent, each part will be + evaluated and sent. + url: The full URL to which the request should be sent. Can be a string + or atom.url.Url. + headers: dict of strings. HTTP headers which should be sent + in the request. 
+ """ + if not isinstance(url, atom.url.Url): + if isinstance(url, types.StringTypes): + url = atom.url.parse_url(url) + else: + raise atom.http_interface.UnparsableUrlObject('Unable to parse url ' + 'parameter because it was not a string or atom.url.Url') + + all_headers = self.headers.copy() + if headers: + all_headers.update(headers) + + connection = self._prepare_connection(url, all_headers) + + if self.debug: + connection.debuglevel = 1 + + connection.putrequest(operation, self._get_access_url(url), + skip_host=True) + connection.putheader('Host', url.host) + + # Overcome a bug in Python 2.4 and 2.5 + # httplib.HTTPConnection.putrequest adding + # HTTP request header 'Host: www.google.com:443' instead of + # 'Host: www.google.com', and thus resulting the error message + # 'Token invalid - AuthSub token has wrong scope' in the HTTP response. + if (url.protocol == 'https' and int(url.port or 443) == 443 and + hasattr(connection, '_buffer') and + isinstance(connection._buffer, list)): + header_line = 'Host: %s:443' % url.host + replacement_header_line = 'Host: %s' % url.host + try: + connection._buffer[connection._buffer.index(header_line)] = ( + replacement_header_line) + except ValueError: # header_line missing from connection._buffer + pass + + # If the list of headers does not include a Content-Length, attempt to + # calculate it based on the data object. + if data and 'Content-Length' not in all_headers: + if isinstance(data, types.StringTypes): + all_headers['Content-Length'] = len(data) + else: + raise atom.http_interface.ContentLengthRequired('Unable to calculate ' + 'the length of the data parameter. Specify a value for ' + 'Content-Length') + + # Set the content type to the default value if none was set. + if 'Content-Type' not in all_headers: + all_headers['Content-Type'] = DEFAULT_CONTENT_TYPE + + # Send the HTTP headers. 
+  The proxy settings are obtained from environment variables. The URL of the
+      # Initialize httplib and replace with the proxy socket.
+ return HttpClient._prepare_connection(self, url, headers) + + def _get_access_url(self, url): + return url.to_string() + + +def _get_proxy_auth(): + proxy_username = os.environ.get('proxy-username') + if not proxy_username: + proxy_username = os.environ.get('proxy_username') + proxy_password = os.environ.get('proxy-password') + if not proxy_password: + proxy_password = os.environ.get('proxy_password') + if proxy_username: + user_auth = base64.encodestring('%s:%s' % (proxy_username, + proxy_password)) + return 'Basic %s\r\n' % (user_auth.strip()) + else: + return '' + + +def _send_data_part(data, connection): + if isinstance(data, types.StringTypes): + connection.send(data) + return + # Check to see if data is a file-like object that has a read method. + elif hasattr(data, 'read'): + # Read the file and send it a chunk at a time. + while 1: + binarydata = data.read(100000) + if binarydata == '': break + connection.send(binarydata) + return + else: + # The data object was not a file. + # Try to convert to a string and send the data. + connection.send(str(data)) + return diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/http_interface.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/http_interface.py new file mode 100644 index 0000000..36e8d96 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/http_interface.py @@ -0,0 +1,158 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""This module provides a common interface for all HTTP requests. + + HttpResponse: Represents the server's response to an HTTP request. Provides + an interface identical to httplib.HTTPResponse which is the response + expected from higher level classes which use HttpClient.request. + + GenericHttpClient: Provides an interface (superclass) for an object + responsible for making HTTP requests. Subclasses of this object are + used in AtomService and GDataService to make requests to the server. By + changing the http_client member object, the AtomService is able to make + HTTP requests using different logic (for example, when running on + Google App Engine, the http_client makes requests using the App Engine + urlfetch API). +""" + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import StringIO + + +USER_AGENT = '%s GData-Python/1.2.2' + + +class Error(Exception): + pass + + +class UnparsableUrlObject(Error): + pass + + +class ContentLengthRequired(Error): + pass + + +class HttpResponse(object): + def __init__(self, body=None, status=None, reason=None, headers=None): + """Constructor for an HttpResponse object. + + HttpResponse represents the server's response to an HTTP request from + the client. The HttpClient.request method returns a httplib.HTTPResponse + object and this HttpResponse class is designed to mirror the interface + exposed by httplib.HTTPResponse. + + Args: + body: A file like object, with a read() method. The body could also + be a string, and the constructor will wrap it so that + HttpResponse.read(self) will return the full string. + status: The HTTP status code as an int. Example: 200, 201, 404. + reason: The HTTP status message which follows the code. Example: + OK, Created, Not Found + headers: A dictionary containing the HTTP headers in the server's + response. A common header in the response is Content-Length. 
+ """ + if body: + if hasattr(body, 'read'): + self._body = body + else: + self._body = StringIO.StringIO(body) + else: + self._body = None + if status is not None: + self.status = int(status) + else: + self.status = None + self.reason = reason + self._headers = headers or {} + + def getheader(self, name, default=None): + if name in self._headers: + return self._headers[name] + else: + return default + + def read(self, amt=None): + if not amt: + return self._body.read() + else: + return self._body.read(amt) + + +class GenericHttpClient(object): + debug = False + + def __init__(self, http_client, headers=None): + """ + + Args: + http_client: An object which provides a request method to make an HTTP + request. The request method in GenericHttpClient performs a + call-through to the contained HTTP client object. + headers: A dictionary containing HTTP headers which should be included + in every HTTP request. Common persistent headers include + 'User-Agent'. + """ + self.http_client = http_client + self.headers = headers or {} + + def request(self, operation, url, data=None, headers=None): + all_headers = self.headers.copy() + if headers: + all_headers.update(headers) + return self.http_client.request(operation, url, data=data, + headers=all_headers) + + def get(self, url, headers=None): + return self.request('GET', url, headers=headers) + + def post(self, url, data, headers=None): + return self.request('POST', url, data=data, headers=headers) + + def put(self, url, data, headers=None): + return self.request('PUT', url, data=data, headers=headers) + + def delete(self, url, headers=None): + return self.request('DELETE', url, headers=headers) + + +class GenericToken(object): + """Represents an Authorization token to be added to HTTP requests. + + Some Authorization headers included calculated fields (digital + signatures for example) which are based on the parameters of the HTTP + request. 
Therefore the token is responsible for signing the request + and adding the Authorization header. + """ + def perform_request(self, http_client, operation, url, data=None, + headers=None): + """For the GenericToken, no Authorization token is set.""" + return http_client.request(operation, url, data=data, headers=headers) + + def valid_for_scope(self, url): + """Tells the caller if the token authorizes access to the desired URL. + + Since the generic token doesn't add an auth header, it is not valid for + any scope. + """ + return False + + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/mock_http.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/mock_http.py new file mode 100644 index 0000000..c420f37 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/mock_http.py @@ -0,0 +1,132 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import atom.http_interface +import atom.url + + +class Error(Exception): + pass + + +class NoRecordingFound(Error): + pass + + +class MockRequest(object): + """Holds parameters of an HTTP request for matching against future requests. 
+ """ + def __init__(self, operation, url, data=None, headers=None): + self.operation = operation + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + self.url = url + self.data = data + self.headers = headers + + +class MockResponse(atom.http_interface.HttpResponse): + """Simulates an httplib.HTTPResponse object.""" + def __init__(self, body=None, status=None, reason=None, headers=None): + if body and hasattr(body, 'read'): + self.body = body.read() + else: + self.body = body + if status is not None: + self.status = int(status) + else: + self.status = None + self.reason = reason + self._headers = headers or {} + + def read(self): + return self.body + + +class MockHttpClient(atom.http_interface.GenericHttpClient): + def __init__(self, headers=None, recordings=None, real_client=None): + """An HttpClient which responds to request with stored data. + + The request-response pairs are stored as tuples in a member list named + recordings. + + The MockHttpClient can be switched from replay mode to record mode by + setting the real_client member to an instance of an HttpClient which will + make real HTTP requests and store the server's response in list of + recordings. + + Args: + headers: dict containing HTTP headers which should be included in all + HTTP requests. + recordings: The initial recordings to be used for responses. This list + contains tuples in the form: (MockRequest, MockResponse) + real_client: An HttpClient which will make a real HTTP request. The + response will be converted into a MockResponse and stored in + recordings. + """ + self.recordings = recordings or [] + self.real_client = real_client + self.headers = headers or {} + + def add_response(self, response, operation, url, data=None, headers=None): + """Adds a request-response pair to the recordings list. + + After the recording is added, future matching requests will receive the + response. 
+ + Args: + response: MockResponse + operation: str + url: str + data: str, Currently the data is ignored when looking for matching + requests. + headers: dict of strings: Currently the headers are ignored when + looking for matching requests. + """ + request = MockRequest(operation, url, data=data, headers=headers) + self.recordings.append((request, response)) + + def request(self, operation, url, data=None, headers=None): + """Returns a matching MockResponse from the recordings. + + If the real_client is set, the request will be passed along and the + server's response will be added to the recordings and also returned. + + If there is no match, a NoRecordingFound error will be raised. + """ + if self.real_client is None: + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + for recording in self.recordings: + if recording[0].operation == operation and recording[0].url == url: + return recording[1] + raise NoRecordingFound('No recodings found for %s %s' % ( + operation, url)) + else: + # There is a real HTTP client, so make the request, and record the + # response. + response = self.real_client.request(operation, url, data=data, + headers=headers) + # TODO: copy the headers + stored_response = MockResponse(body=response, status=response.status, + reason=response.reason) + self.add_response(stored_response, operation, url, data=data, + headers=headers) + return stored_response diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/mock_service.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/mock_service.py new file mode 100644 index 0000000..601b68a --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/mock_service.py @@ -0,0 +1,243 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""MockService provides CRUD ops. for mocking calls to AtomPub services. + + MockService: Exposes the publicly used methods of AtomService to provide + a mock interface which can be used in unit tests. +""" + +import atom.service +import pickle + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + + +# Recordings contains pairings of HTTP MockRequest objects with MockHttpResponse objects. +recordings = [] +# If set, the mock service HttpRequest are actually made through this object. +real_request_handler = None + +def ConcealValueWithSha(source): + import sha + return sha.new(source[:-5]).hexdigest() + +def DumpRecordings(conceal_func=ConcealValueWithSha): + if conceal_func: + for recording_pair in recordings: + recording_pair[0].ConcealSecrets(conceal_func) + return pickle.dumps(recordings) + +def LoadRecordings(recordings_file_or_string): + if isinstance(recordings_file_or_string, str): + atom.mock_service.recordings = pickle.loads(recordings_file_or_string) + elif hasattr(recordings_file_or_string, 'read'): + atom.mock_service.recordings = pickle.loads( + recordings_file_or_string.read()) + +def HttpRequest(service, operation, data, uri, extra_headers=None, + url_params=None, escape_params=True, content_type='application/atom+xml'): + """Simulates an HTTP call to the server, makes an actual HTTP request if + real_request_handler is set. + + This function operates in two different modes depending on if + real_request_handler is set or not. 
If real_request_handler is not set, + HttpRequest will look in this module's recordings list to find a response + which matches the parameters in the function call. If real_request_handler + is set, this function will call real_request_handler.HttpRequest, add the + response to the recordings list, and respond with the actual response. + + Args: + service: atom.AtomService object which contains some of the parameters + needed to make the request. The following members are used to + construct the HTTP call: server (str), additional_headers (dict), + port (int), and ssl (bool). + operation: str The HTTP operation to be performed. This is usually one of + 'GET', 'POST', 'PUT', or 'DELETE' + data: ElementTree, filestream, list of parts, or other object which can be + converted to a string. + Should be set to None when performing a GET or PUT. + If data is a file-like object which can be read, this method will read + a chunk of 100K bytes at a time and send them. + If the data is a list of parts to be sent, each part will be evaluated + and sent. + uri: The beginning of the URL to which the request should be sent. + Examples: '/', '/base/feeds/snippets', + '/m8/feeds/contacts/default/base' + extra_headers: dict of strings. HTTP headers which should be sent + in the request. These headers are in addition to those stored in + service.additional_headers. + url_params: dict of strings. Key value pairs to be added to the URL as + URL parameters. For example {'foo':'bar', 'test':'param'} will + become ?foo=bar&test=param. + escape_params: bool default True. If true, the keys and values in + url_params will be URL escaped when the form is constructed + (Special characters converted to %XX form.) + content_type: str The MIME type for the data being sent. Defaults to + 'application/atom+xml', this is only used if data is set. 
+ """ + full_uri = atom.service.BuildUri(uri, url_params, escape_params) + (server, port, ssl, uri) = atom.service.ProcessUrl(service, uri) + current_request = MockRequest(operation, full_uri, host=server, ssl=ssl, + data=data, extra_headers=extra_headers, url_params=url_params, + escape_params=escape_params, content_type=content_type) + # If the request handler is set, we should actually make the request using + # the request handler and record the response to replay later. + if real_request_handler: + response = real_request_handler.HttpRequest(service, operation, data, uri, + extra_headers=extra_headers, url_params=url_params, + escape_params=escape_params, content_type=content_type) + # TODO: need to copy the HTTP headers from the real response into the + # recorded_response. + recorded_response = MockHttpResponse(body=response.read(), + status=response.status, reason=response.reason) + # Insert a tuple which maps the request to the response object returned + # when making an HTTP call using the real_request_handler. + recordings.append((current_request, recorded_response)) + return recorded_response + else: + # Look through available recordings to see if one matches the current + # request. + for request_response_pair in recordings: + if request_response_pair[0].IsMatch(current_request): + return request_response_pair[1] + return None + + +class MockRequest(object): + """Represents a request made to an AtomPub server. + + These objects are used to determine if a client request matches a recorded + HTTP request to determine what the mock server's response will be. + """ + + def __init__(self, operation, uri, host=None, ssl=False, port=None, + data=None, extra_headers=None, url_params=None, escape_params=True, + content_type='application/atom+xml'): + """Constructor for a MockRequest + + Args: + operation: str One of 'GET', 'POST', 'PUT', or 'DELETE' this is the + HTTP operation requested on the resource. 
+ uri: str The URL describing the resource to be modified or feed to be + retrieved. This should include the protocol (http/https) and the host + (aka domain). For example, these are some valud full_uris: + 'http://example.com', 'https://www.google.com/accounts/ClientLogin' + host: str (optional) The server name which will be placed at the + beginning of the URL if the uri parameter does not begin with 'http'. + Examples include 'example.com', 'www.google.com', 'www.blogger.com'. + ssl: boolean (optional) If true, the request URL will begin with https + instead of http. + data: ElementTree, filestream, list of parts, or other object which can be + converted to a string. (optional) + Should be set to None when performing a GET or PUT. + If data is a file-like object which can be read, the constructor + will read the entire file into memory. If the data is a list of + parts to be sent, each part will be evaluated and stored. + extra_headers: dict (optional) HTTP headers included in the request. + url_params: dict (optional) Key value pairs which should be added to + the URL as URL parameters in the request. For example uri='/', + url_parameters={'foo':'1','bar':'2'} could become '/?foo=1&bar=2'. + escape_params: boolean (optional) Perform URL escaping on the keys and + values specified in url_params. Defaults to True. + content_type: str (optional) Provides the MIME type of the data being + sent. 
+ """ + self.operation = operation + self.uri = _ConstructFullUrlBase(uri, host=host, ssl=ssl) + self.data = data + self.extra_headers = extra_headers + self.url_params = url_params or {} + self.escape_params = escape_params + self.content_type = content_type + + def ConcealSecrets(self, conceal_func): + """Conceal secret data in this request.""" + if self.extra_headers.has_key('Authorization'): + self.extra_headers['Authorization'] = conceal_func( + self.extra_headers['Authorization']) + + def IsMatch(self, other_request): + """Check to see if the other_request is equivalent to this request. + + Used to determine if a recording matches an incoming request so that a + recorded response should be sent to the client. + + The matching is not exact, only the operation and URL are examined + currently. + + Args: + other_request: MockRequest The request which we want to check this + (self) MockRequest against to see if they are equivalent. + """ + # More accurate matching logic will likely be required. + return (self.operation == other_request.operation and self.uri == + other_request.uri) + + +def _ConstructFullUrlBase(uri, host=None, ssl=False): + """Puts URL components into the form http(s)://full.host.strinf/uri/path + + Used to construct a roughly canonical URL so that URLs which begin with + 'http://example.com/' can be compared to a uri of '/' when the host is + set to 'example.com' + + If the uri contains 'http://host' already, the host and ssl parameters + are ignored. + + Args: + uri: str The path component of the URL, examples include '/' + host: str (optional) The host name which should prepend the URL. Example: + 'example.com' + ssl: boolean (optional) If true, the returned URL will begin with https + instead of http. 
+ + Returns: + String which has the form http(s)://example.com/uri/string/contents + """ + if uri.startswith('http'): + return uri + if ssl: + return 'https://%s%s' % (host, uri) + else: + return 'http://%s%s' % (host, uri) + + +class MockHttpResponse(object): + """Returned from MockService crud methods as the server's response.""" + + def __init__(self, body=None, status=None, reason=None, headers=None): + """Construct a mock HTTPResponse and set members. + + Args: + body: str (optional) The HTTP body of the server's response. + status: int (optional) + reason: str (optional) + headers: dict (optional) + """ + self.body = body + self.status = status + self.reason = reason + self.headers = headers or {} + + def read(self): + return self.body + + def getheader(self, header_name): + return self.headers[header_name] + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/service.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/service.py new file mode 100644 index 0000000..cf322a1 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/service.py @@ -0,0 +1,726 @@ +#!/usr/bin/python +# +# Copyright (C) 2006, 2007, 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""AtomService provides CRUD ops. in line with the Atom Publishing Protocol. + + AtomService: Encapsulates the ability to perform insert, update and delete + operations with the Atom Publishing Protocol on which GData is + based. 
An instance can perform query, insertion, deletion, and + update. + + HttpRequest: Function that performs a GET, POST, PUT, or DELETE HTTP request + to the specified end point. An AtomService object or a subclass can be + used to specify information about the request. +""" + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import atom.http_interface +import atom.url +import atom.http +import atom.token_store + +import os +import httplib +import urllib +import re +import base64 +import socket +import warnings +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree + + +class AtomService(object): + """Performs Atom Publishing Protocol CRUD operations. + + The AtomService contains methods to perform HTTP CRUD operations. + """ + + # Default values for members + port = 80 + ssl = False + # Set the current_token to force the AtomService to use this token + # instead of searching for an appropriate token in the token_store. + current_token = None + auto_store_tokens = True + auto_set_current_token = True + + def _get_override_token(self): + return self.current_token + + def _set_override_token(self, token): + self.current_token = token + + override_token = property(_get_override_token, _set_override_token) + + def __init__(self, server=None, additional_headers=None, + application_name='', http_client=None, token_store=None): + """Creates a new AtomService client. + + Args: + server: string (optional) The start of a URL for the server + to which all operations should be directed. Example: + 'www.google.com' + additional_headers: dict (optional) Any additional HTTP headers which + should be included with CRUD operations. + http_client: An object responsible for making HTTP requests using a + request method. 
If none is provided, a new instance of + atom.http.ProxiedHttpClient will be used. + token_store: Keeps a collection of authorization tokens which can be + applied to requests for a specific URLs. Critical methods are + find_token based on a URL (atom.url.Url or a string), add_token, + and remove_token. + """ + self.http_client = http_client or atom.http.ProxiedHttpClient() + self.token_store = token_store or atom.token_store.TokenStore() + self.server = server + self.additional_headers = additional_headers or {} + self.additional_headers['User-Agent'] = atom.http_interface.USER_AGENT % ( + application_name,) + # If debug is True, the HTTPConnection will display debug information + self._set_debug(False) + + def _get_debug(self): + return self.http_client.debug + + def _set_debug(self, value): + self.http_client.debug = value + + debug = property(_get_debug, _set_debug, + doc='If True, HTTP debug information is printed.') + + def use_basic_auth(self, username, password, scopes=None): + if username is not None and password is not None: + if scopes is None: + scopes = [atom.token_store.SCOPE_ALL] + base_64_string = base64.encodestring('%s:%s' % (username, password)) + token = BasicAuthToken('Basic %s' % base_64_string.strip(), + scopes=[atom.token_store.SCOPE_ALL]) + if self.auto_set_current_token: + self.current_token = token + if self.auto_store_tokens: + return self.token_store.add_token(token) + return True + return False + + def UseBasicAuth(self, username, password, for_proxy=False): + """Sets an Authenticaiton: Basic HTTP header containing plaintext. + + Deprecated, use use_basic_auth instead. + + The username and password are base64 encoded and added to an HTTP header + which will be included in each request. Note that your username and + password are sent in plaintext. 
+ + Args: + username: str + password: str + """ + self.use_basic_auth(username, password) + + def request(self, operation, url, data=None, headers=None, + url_params=None): + if isinstance(url, str): + if not url.startswith('http') and self.ssl: + url = atom.url.parse_url('https://%s%s' % (self.server, url)) + elif not url.startswith('http'): + url = atom.url.parse_url('http://%s%s' % (self.server, url)) + else: + url = atom.url.parse_url(url) + + if url_params: + for name, value in url_params.iteritems(): + url.params[name] = value + + all_headers = self.additional_headers.copy() + if headers: + all_headers.update(headers) + + # If the list of headers does not include a Content-Length, attempt to + # calculate it based on the data object. + if data and 'Content-Length' not in all_headers: + content_length = CalculateDataLength(data) + if content_length: + all_headers['Content-Length'] = str(content_length) + + # Find an Authorization token for this URL if one is available. + if self.override_token: + auth_token = self.override_token + else: + auth_token = self.token_store.find_token(url) + return auth_token.perform_request(self.http_client, operation, url, + data=data, headers=all_headers) + + # CRUD operations + def Get(self, uri, extra_headers=None, url_params=None, escape_params=True): + """Query the APP server with the given URI + + The uri is the portion of the URI after the server value + (server example: 'www.google.com'). + + Example use: + To perform a query against Google Base, set the server to + 'base.google.com' and set the uri to '/base/feeds/...', where ... is + your query. For example, to find snippets for all digital cameras uri + should be set to: '/base/feeds/snippets?bq=digital+camera' + + Args: + uri: string The query in the form of a URI. Example: + '/base/feeds/snippets?bq=digital+camera'. + extra_headers: dicty (optional) Extra HTTP headers to be included + in the GET request. 
These headers are in addition to + those stored in the client's additional_headers property. + The client automatically sets the Content-Type and + Authorization headers. + url_params: dict (optional) Additional URL parameters to be included + in the query. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + + Returns: + httplib.HTTPResponse The server's response to the GET request. + """ + return self.request('GET', uri, data=None, headers=extra_headers, + url_params=url_params) + + def Post(self, data, uri, extra_headers=None, url_params=None, + escape_params=True, content_type='application/atom+xml'): + """Insert data into an APP server at the given URI. + + Args: + data: string, ElementTree._Element, or something with a __str__ method + The XML to be sent to the uri. + uri: string The location (feed) to which the data should be inserted. + Example: '/base/feeds/items'. + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type, + Authorization, and Content-Length headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + + Returns: + httplib.HTTPResponse Server's response to the POST request. 
+ """ + if extra_headers is None: + extra_headers = {} + if content_type: + extra_headers['Content-Type'] = content_type + return self.request('POST', uri, data=data, headers=extra_headers, + url_params=url_params) + + def Put(self, data, uri, extra_headers=None, url_params=None, + escape_params=True, content_type='application/atom+xml'): + """Updates an entry at the given URI. + + Args: + data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The + XML containing the updated data. + uri: string A URI indicating entry to which the update will be applied. + Example: '/base/feeds/items/ITEM-ID' + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type, + Authorization, and Content-Length headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + + Returns: + httplib.HTTPResponse Server's response to the PUT request. + """ + if extra_headers is None: + extra_headers = {} + if content_type: + extra_headers['Content-Type'] = content_type + return self.request('PUT', uri, data=data, headers=extra_headers, + url_params=url_params) + + def Delete(self, uri, extra_headers=None, url_params=None, + escape_params=True): + """Deletes the entry at the given URI. + + Args: + uri: string The URI of the entry to be deleted. Example: + '/base/feeds/items/ITEM-ID' + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type and + Authorization headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. 
These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + + Returns: + httplib.HTTPResponse Server's response to the DELETE request. + """ + return self.request('DELETE', uri, data=None, headers=extra_headers, + url_params=url_params) + + +class BasicAuthToken(atom.http_interface.GenericToken): + def __init__(self, auth_header, scopes=None): + """Creates a token used to add Basic Auth headers to HTTP requests. + + Args: + auth_header: str The value for the Authorization header. + scopes: list of str or atom.url.Url specifying the beginnings of URLs + for which this token can be used. For example, if scopes contains + 'http://example.com/foo', then this token can be used for a request to + 'http://example.com/foo/bar' but it cannot be used for a request to + 'http://example.com/baz' + """ + self.auth_header = auth_header + self.scopes = scopes or [] + + def perform_request(self, http_client, operation, url, data=None, + headers=None): + """Sets the Authorization header to the basic auth string.""" + if headers is None: + headers = {'Authorization':self.auth_header} + else: + headers['Authorization'] = self.auth_header + return http_client.request(operation, url, data=data, headers=headers) + + def __str__(self): + return self.auth_header + + def valid_for_scope(self, url): + """Tells the caller if the token authorizes access to the desired URL. 
+ """ + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + for scope in self.scopes: + if scope == atom.token_store.SCOPE_ALL: + return True + if isinstance(scope, (str, unicode)): + scope = atom.url.parse_url(scope) + if scope == url: + return True + # Check the host and the path, but ignore the port and protocol. + elif scope.host == url.host and not scope.path: + return True + elif scope.host == url.host and scope.path and not url.path: + continue + elif scope.host == url.host and url.path.startswith(scope.path): + return True + return False + + +def PrepareConnection(service, full_uri): + """Opens a connection to the server based on the full URI. + + This method is deprecated, instead use atom.http.HttpClient.request. + + Examines the target URI and the proxy settings, which are set as + environment variables, to open a connection with the server. This + connection is used to make an HTTP request. + + Args: + service: atom.AtomService or a subclass. It must have a server string which + represents the server host to which the request should be made. It may also + have a dictionary of additional_headers to send in the HTTP request. + full_uri: str Which is the target relative (lacks protocol and host) or + absolute URL to be opened. Example: + 'https://www.google.com/accounts/ClientLogin' or + 'base/feeds/snippets' where the server is set to www.google.com. + + Returns: + A tuple containing the httplib.HTTPConnection and the full_uri for the + request. 
+ """ + deprecation('calling deprecated function PrepareConnection') + (server, port, ssl, partial_uri) = ProcessUrl(service, full_uri) + if ssl: + # destination is https + proxy = os.environ.get('https_proxy') + if proxy: + (p_server, p_port, p_ssl, p_uri) = ProcessUrl(service, proxy, True) + proxy_username = os.environ.get('proxy-username') + if not proxy_username: + proxy_username = os.environ.get('proxy_username') + proxy_password = os.environ.get('proxy-password') + if not proxy_password: + proxy_password = os.environ.get('proxy_password') + if proxy_username: + user_auth = base64.encodestring('%s:%s' % (proxy_username, + proxy_password)) + proxy_authorization = ('Proxy-authorization: Basic %s\r\n' % ( + user_auth.strip())) + else: + proxy_authorization = '' + proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (server, port) + user_agent = 'User-Agent: %s\r\n' % ( + service.additional_headers['User-Agent']) + proxy_pieces = (proxy_connect + proxy_authorization + user_agent + + '\r\n') + + #now connect, very simple recv and error checking + p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM) + p_sock.connect((p_server,p_port)) + p_sock.sendall(proxy_pieces) + response = '' + + # Wait for the full response. + while response.find("\r\n\r\n") == -1: + response += p_sock.recv(8192) + + p_status=response.split()[1] + if p_status!=str(200): + raise 'Error status=',str(p_status) + + # Trivial setup for ssl socket. + ssl = socket.ssl(p_sock, None, None) + fake_sock = httplib.FakeSocket(p_sock, ssl) + + # Initalize httplib and replace with the proxy socket. 
+ connection = httplib.HTTPConnection(server) 
+ connection.sock=fake_sock 
+ full_uri = partial_uri 
+ 
+ else: 
+ connection = httplib.HTTPSConnection(server, port) 
+ full_uri = partial_uri 
+ 
+ else: 
+ # destination is http 
+ proxy = os.environ.get('http_proxy') 
+ if proxy: 
+ (p_server, p_port, p_ssl, p_uri) = ProcessUrl(service.server, proxy, True) 
+ proxy_username = os.environ.get('proxy-username') 
+ if not proxy_username: 
+ proxy_username = os.environ.get('proxy_username') 
+ proxy_password = os.environ.get('proxy-password') 
+ if not proxy_password: 
+ proxy_password = os.environ.get('proxy_password') 
+ if proxy_username: 
+ UseBasicAuth(service, proxy_username, proxy_password, True) 
+ connection = httplib.HTTPConnection(p_server, p_port) 
+ if not full_uri.startswith("http://"): 
+ if full_uri.startswith("/"): 
+ full_uri = "http://%s%s" % (service.server, full_uri) 
+ else: 
+ full_uri = "http://%s/%s" % (service.server, full_uri) 
+ else: 
+ connection = httplib.HTTPConnection(server, port) 
+ full_uri = partial_uri 
+ 
+ return (connection, full_uri) 
+ 
+ 
+def UseBasicAuth(service, username, password, for_proxy=False): 
+ """Sets an Authentication: Basic HTTP header containing plaintext. 
+ 
+ Deprecated, use AtomService.use_basic_auth instead. 
+ 
+ The username and password are base64 encoded and added to an HTTP header 
+ which will be included in each request. Note that your username and 
+ password are sent in plaintext. The auth header is added to the 
+ additional_headers dictionary in the service object. 
+ 
+ Args: 
+ service: atom.AtomService or a subclass which has an 
+ additional_headers dict as a member. 
+ username: str + password: str + """ + deprecation('calling deprecated function UseBasicAuth') + base_64_string = base64.encodestring('%s:%s' % (username, password)) + base_64_string = base_64_string.strip() + if for_proxy: + header_name = 'Proxy-Authorization' + else: + header_name = 'Authorization' + service.additional_headers[header_name] = 'Basic %s' % (base_64_string,) + + +def ProcessUrl(service, url, for_proxy=False): + """Processes a passed URL. If the URL does not begin with https?, then + the default value for server is used + + This method is deprecated, use atom.url.parse_url instead. + """ + if not isinstance(url, atom.url.Url): + url = atom.url.parse_url(url) + + server = url.host + ssl = False + port = 80 + + if not server: + if hasattr(service, 'server'): + server = service.server + else: + server = service + if not url.protocol and hasattr(service, 'ssl'): + ssl = service.ssl + if hasattr(service, 'port'): + port = service.port + else: + if url.protocol == 'https': + ssl = True + elif url.protocol == 'http': + ssl = False + if url.port: + port = int(url.port) + elif port == 80 and ssl: + port = 443 + + return (server, port, ssl, url.get_request_uri()) + +def DictionaryToParamList(url_parameters, escape_params=True): + """Convert a dictionary of URL arguments into a URL parameter string. + + This function is deprcated, use atom.url.Url instead. + + Args: + url_parameters: The dictionaty of key-value pairs which will be converted + into URL parameters. For example, + {'dry-run': 'true', 'foo': 'bar'} + will become ['dry-run=true', 'foo=bar']. + + Returns: + A list which contains a string for each key-value pair. The strings are + ready to be incorporated into a URL by using '&'.join([] + parameter_list) + """ + # Choose which function to use when modifying the query and parameters. + # Use quote_plus when escape_params is true. 
+ transform_op = [str, urllib.quote_plus][bool(escape_params)] + # Create a list of tuples containing the escaped version of the + # parameter-value pairs. + parameter_tuples = [(transform_op(param), transform_op(value)) + for param, value in (url_parameters or {}).items()] + # Turn parameter-value tuples into a list of strings in the form + # 'PARAMETER=VALUE'. + return ['='.join(x) for x in parameter_tuples] + + +def BuildUri(uri, url_params=None, escape_params=True): + """Converts a uri string and a collection of parameters into a URI. + + This function is deprcated, use atom.url.Url instead. + + Args: + uri: string + url_params: dict (optional) + escape_params: boolean (optional) + uri: string The start of the desired URI. This string can alrady contain + URL parameters. Examples: '/base/feeds/snippets', + '/base/feeds/snippets?bq=digital+camera' + url_parameters: dict (optional) Additional URL parameters to be included + in the query. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + + Returns: + string The URI consisting of the escaped URL parameters appended to the + initial uri string. + """ + # Prepare URL parameters for inclusion into the GET request. + parameter_list = DictionaryToParamList(url_params, escape_params) + + # Append the URL parameters to the URL. + if parameter_list: + if uri.find('?') != -1: + # If there are already URL parameters in the uri string, add the + # parameters after a new & character. + full_uri = '&'.join([uri] + parameter_list) + else: + # The uri string did not have any URL parameters (no ? character) + # so put a ? between the uri and URL parameters. 
+ full_uri = '%s%s' % (uri, '?%s' % ('&'.join([] + parameter_list))) + else: + full_uri = uri + + return full_uri + + +def HttpRequest(service, operation, data, uri, extra_headers=None, + url_params=None, escape_params=True, content_type='application/atom+xml'): + """Performs an HTTP call to the server, supports GET, POST, PUT, and DELETE. + + This method is deprecated, use atom.http.HttpClient.request instead. + + Usage example, perform and HTTP GET on http://www.google.com/: + import atom.service + client = atom.service.AtomService() + http_response = client.Get('http://www.google.com/') + or you could set the client.server to 'www.google.com' and use the + following: + client.server = 'www.google.com' + http_response = client.Get('/') + + Args: + service: atom.AtomService object which contains some of the parameters + needed to make the request. The following members are used to + construct the HTTP call: server (str), additional_headers (dict), + port (int), and ssl (bool). + operation: str The HTTP operation to be performed. This is usually one of + 'GET', 'POST', 'PUT', or 'DELETE' + data: ElementTree, filestream, list of parts, or other object which can be + converted to a string. + Should be set to None when performing a GET or PUT. + If data is a file-like object which can be read, this method will read + a chunk of 100K bytes at a time and send them. + If the data is a list of parts to be sent, each part will be evaluated + and sent. + uri: The beginning of the URL to which the request should be sent. + Examples: '/', '/base/feeds/snippets', + '/m8/feeds/contacts/default/base' + extra_headers: dict of strings. HTTP headers which should be sent + in the request. These headers are in addition to those stored in + service.additional_headers. + url_params: dict of strings. Key value pairs to be added to the URL as + URL parameters. For example {'foo':'bar', 'test':'param'} will + become ?foo=bar&test=param. + escape_params: bool default True. 
If true, the keys and values in + url_params will be URL escaped when the form is constructed + (Special characters converted to %XX form.) + content_type: str The MIME type for the data being sent. Defaults to + 'application/atom+xml', this is only used if data is set. + """ + deprecation('call to deprecated function HttpRequest') + full_uri = BuildUri(uri, url_params, escape_params) + (connection, full_uri) = PrepareConnection(service, full_uri) + + if extra_headers is None: + extra_headers = {} + + # Turn on debug mode if the debug member is set. + if service.debug: + connection.debuglevel = 1 + + connection.putrequest(operation, full_uri) + + # If the list of headers does not include a Content-Length, attempt to + # calculate it based on the data object. + if (data and not service.additional_headers.has_key('Content-Length') and + not extra_headers.has_key('Content-Length')): + content_length = CalculateDataLength(data) + if content_length: + extra_headers['Content-Length'] = str(content_length) + + if content_type: + extra_headers['Content-Type'] = content_type + + # Send the HTTP headers. + if isinstance(service.additional_headers, dict): + for header in service.additional_headers: + connection.putheader(header, service.additional_headers[header]) + if isinstance(extra_headers, dict): + for header in extra_headers: + connection.putheader(header, extra_headers[header]) + connection.endheaders() + + # If there is data, send it in the request. + if data: + if isinstance(data, list): + for data_part in data: + __SendDataPart(data_part, connection) + else: + __SendDataPart(data, connection) + + # Return the HTTP Response from the server. + return connection.getresponse() + + +def __SendDataPart(data, connection): + """This method is deprecated, use atom.http._send_data_part""" + deprecated('call to deprecated function __SendDataPart') + if isinstance(data, str): + #TODO add handling for unicode. 
+ connection.send(data) + return + elif ElementTree.iselement(data): + connection.send(ElementTree.tostring(data)) + return + # Check to see if data is a file-like object that has a read method. + elif hasattr(data, 'read'): + # Read the file and send it a chunk at a time. + while 1: + binarydata = data.read(100000) + if binarydata == '': break + connection.send(binarydata) + return + else: + # The data object was not a file. + # Try to convert to a string and send the data. + connection.send(str(data)) + return + + +def CalculateDataLength(data): + """Attempts to determine the length of the data to send. + + This method will respond with a length only if the data is a string or + and ElementTree element. + + Args: + data: object If this is not a string or ElementTree element this funtion + will return None. + """ + if isinstance(data, str): + return len(data) + elif isinstance(data, list): + return None + elif ElementTree.iselement(data): + return len(ElementTree.tostring(data)) + elif hasattr(data, 'read'): + # If this is a file-like object, don't try to guess the length. + return None + else: + return len(str(data)) + + +def deprecation(message): + warnings.warn(message, DeprecationWarning, stacklevel=2) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/token_store.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/token_store.py new file mode 100644 index 0000000..d618965 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/token_store.py @@ -0,0 +1,117 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This module provides a TokenStore class which is designed to manage +auth tokens required for different services. + +Each token is valid for a set of scopes which is the start of a URL. An HTTP +client will use a token store to find a valid Authorization header to send +in requests to the specified URL. If the HTTP client determines that a token +has expired or been revoked, it can remove the token from the store so that +it will not be used in future requests. +""" + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import atom.http_interface +import atom.url + + +SCOPE_ALL = 'http' + + +class TokenStore(object): + """Manages Authorization tokens which will be sent in HTTP headers.""" + def __init__(self, scoped_tokens=None): + self._tokens = scoped_tokens or {} + + def add_token(self, token): + """Adds a new token to the store (replaces tokens with the same scope). + + Args: + token: A subclass of http_interface.GenericToken. The token object is + responsible for adding the Authorization header to the HTTP request. + The scopes defined in the token are used to determine if the token + is valid for a requested scope when find_token is called. + + Returns: + True if the token was added, False if the token was not added becase + no scopes were provided. + """ + if not hasattr(token, 'scopes') or not token.scopes: + return False + + for scope in token.scopes: + self._tokens[str(scope)] = token + return True + + def find_token(self, url): + """Selects an Authorization header token which can be used for the URL. 
+ 
+ Args: 
+ url: str or atom.url.Url or a list containing the same. 
+ The URL which is going to be requested. All 
+ tokens are examined to see if any scopes match the beginning 
+ of the URL. The first match found is returned. 
+ 
+ Returns: 
+ The token object which should execute the HTTP request. If there was 
+ no token for the url (the url did not begin with any of the token 
+ scopes available), then the atom.http_interface.GenericToken will be 
+ returned because the GenericToken calls through to the http client 
+ without adding an Authorization header. 
+ """ 
+ if url is None: 
+ return None 
+ if isinstance(url, (str, unicode)): 
+ url = atom.url.parse_url(url) 
+ if url in self._tokens: 
+ token = self._tokens[url] 
+ if token.valid_for_scope(url): 
+ return token 
+ else: 
+ del self._tokens[url] 
+ for scope, token in self._tokens.iteritems(): 
+ if token.valid_for_scope(url): 
+ return token 
+ return atom.http_interface.GenericToken() 
+ 
+ def remove_token(self, token): 
+ """Removes the token from the token_store. 
+ 
+ This method is used when a token is determined to be invalid. If the 
+ token was found by find_token, but resulted in a 401 or 403 error stating 
+ that the token was invalid, then the token should be removed to prevent 
+ future use. 
+ 
+ Returns: 
+ True if a token was found and then removed from the token 
+ store. False if the token was not in the TokenStore. 
+ """ + token_found = False + scopes_to_delete = [] + for scope, stored_token in self._tokens.iteritems(): + if stored_token == token: + scopes_to_delete.append(scope) + token_found = True + for scope in scopes_to_delete: + del self._tokens[scope] + return token_found + + def remove_all_tokens(self): + self._tokens = {} diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/url.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/url.py new file mode 100644 index 0000000..0effa10 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/atom/url.py @@ -0,0 +1,139 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import urlparse +import urllib + + +DEFAULT_PROTOCOL = 'http' +DEFAULT_PORT = 80 + + +def parse_url(url_string): + """Creates a Url object which corresponds to the URL string. + + This method can accept partial URLs, but it will leave missing + members of the Url unset. 
+ """ + parts = urlparse.urlparse(url_string) + url = Url() + if parts[0]: + url.protocol = parts[0] + if parts[1]: + host_parts = parts[1].split(':') + if host_parts[0]: + url.host = host_parts[0] + if len(host_parts) > 1: + url.port = host_parts[1] + if parts[2]: + url.path = parts[2] + if parts[4]: + param_pairs = parts[4].split('&') + for pair in param_pairs: + pair_parts = pair.split('=') + if len(pair_parts) > 1: + url.params[urllib.unquote_plus(pair_parts[0])] = ( + urllib.unquote_plus(pair_parts[1])) + elif len(pair_parts) == 1: + url.params[urllib.unquote_plus(pair_parts[0])] = None + return url + +class Url(object): + """Represents a URL and implements comparison logic. + + URL strings which are not identical can still be equivalent, so this object + provides a better interface for comparing and manipulating URLs than + strings. URL parameters are represented as a dictionary of strings, and + defaults are used for the protocol (http) and port (80) if not provided. + """ + def __init__(self, protocol=None, host=None, port=None, path=None, + params=None): + self.protocol = protocol + self.host = host + self.port = port + self.path = path + self.params = params or {} + + def to_string(self): + url_parts = ['', '', '', '', '', ''] + if self.protocol: + url_parts[0] = self.protocol + if self.host: + if self.port: + url_parts[1] = ':'.join((self.host, str(self.port))) + else: + url_parts[1] = self.host + if self.path: + url_parts[2] = self.path + if self.params: + url_parts[4] = self.get_param_string() + return urlparse.urlunparse(url_parts) + + def get_param_string(self): + param_pairs = [] + for key, value in self.params.iteritems(): + param_pairs.append('='.join((urllib.quote_plus(key), + urllib.quote_plus(str(value))))) + return '&'.join(param_pairs) + + def get_request_uri(self): + """Returns the path with the parameters escaped and appended.""" + param_string = self.get_param_string() + if param_string: + return '?'.join([self.path, param_string]) + else: 
+ return self.path + + def __cmp__(self, other): + if not isinstance(other, Url): + return cmp(self.to_string(), str(other)) + difference = 0 + # Compare the protocol + if self.protocol and other.protocol: + difference = cmp(self.protocol, other.protocol) + elif self.protocol and not other.protocol: + difference = cmp(self.protocol, DEFAULT_PROTOCOL) + elif not self.protocol and other.protocol: + difference = cmp(DEFAULT_PROTOCOL, other.protocol) + if difference != 0: + return difference + # Compare the host + difference = cmp(self.host, other.host) + if difference != 0: + return difference + # Compare the port + if self.port and other.port: + difference = cmp(self.port, other.port) + elif self.port and not other.port: + difference = cmp(self.port, DEFAULT_PORT) + elif not self.port and other.port: + difference = cmp(DEFAULT_PORT, other.port) + if difference != 0: + return difference + # Compare the path + difference = cmp(self.path, other.path) + if difference != 0: + return difference + # Compare the parameters + return cmp(self.params, other.params) + + def __str__(self): + return self.to_string() + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Cipher/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Cipher/__init__.py new file mode 100644 index 0000000..3b2f855 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Cipher/__init__.py @@ -0,0 +1,33 @@ +"""Secret-key encryption algorithms. + +Secret-key encryption algorithms transform plaintext in some way that +is dependent on a key, producing ciphertext. This transformation can +easily be reversed, if (and, hopefully, only if) one knows the key. + +The encryption modules here all support the interface described in PEP +272, "API for Block Encryption Algorithms". + +If you don't know which algorithm to choose, use AES because it's +standard and has undergone a fair bit of examination. 
+ +Crypto.Cipher.AES Advanced Encryption Standard +Crypto.Cipher.ARC2 Alleged RC2 +Crypto.Cipher.ARC4 Alleged RC4 +Crypto.Cipher.Blowfish +Crypto.Cipher.CAST +Crypto.Cipher.DES The Data Encryption Standard. Very commonly used + in the past, but today its 56-bit keys are too small. +Crypto.Cipher.DES3 Triple DES. +Crypto.Cipher.IDEA +Crypto.Cipher.RC5 +Crypto.Cipher.XOR The simple XOR cipher. +""" + +__all__ = ['AES', 'ARC2', 'ARC4', + 'Blowfish', 'CAST', 'DES', 'DES3', 'IDEA', 'RC5', + 'XOR' + ] + +__revision__ = "$Id: __init__.py,v 1.7 2003/02/28 15:28:35 akuchling Exp $" + + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Hash/HMAC.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Hash/HMAC.py new file mode 100644 index 0000000..eeb5782 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Hash/HMAC.py @@ -0,0 +1,108 @@ +"""HMAC (Keyed-Hashing for Message Authentication) Python module. + +Implements the HMAC algorithm as described by RFC 2104. + +This is just a copy of the Python 2.2 HMAC module, modified to work when +used on versions of Python before 2.2. +""" + +__revision__ = "$Id: HMAC.py,v 1.5 2002/07/25 17:19:02 z3p Exp $" + +import string + +def _strxor(s1, s2): + """Utility method. XOR the two strings s1 and s2 (must have same length). + """ + return "".join(map(lambda x, y: chr(ord(x) ^ ord(y)), s1, s2)) + +# The size of the digests returned by HMAC depends on the underlying +# hashing module used. +digest_size = None + +class HMAC: + """RFC2104 HMAC class. + + This supports the API for Cryptographic Hash Functions (PEP 247). + """ + + def __init__(self, key, msg = None, digestmod = None): + """Create a new HMAC object. + + key: key for the keyed hash object. + msg: Initial input for the hash, if provided. + digestmod: A module supporting PEP 247. Defaults to the md5 module. 
+ """ + if digestmod == None: + import md5 + digestmod = md5 + + self.digestmod = digestmod + self.outer = digestmod.new() + self.inner = digestmod.new() + try: + self.digest_size = digestmod.digest_size + except AttributeError: + self.digest_size = len(self.outer.digest()) + + blocksize = 64 + ipad = "\x36" * blocksize + opad = "\x5C" * blocksize + + if len(key) > blocksize: + key = digestmod.new(key).digest() + + key = key + chr(0) * (blocksize - len(key)) + self.outer.update(_strxor(key, opad)) + self.inner.update(_strxor(key, ipad)) + if (msg): + self.update(msg) + +## def clear(self): +## raise NotImplementedError, "clear() method not available in HMAC." + + def update(self, msg): + """Update this hashing object with the string msg. + """ + self.inner.update(msg) + + def copy(self): + """Return a separate copy of this hashing object. + + An update to this copy won't affect the original object. + """ + other = HMAC("") + other.digestmod = self.digestmod + other.inner = self.inner.copy() + other.outer = self.outer.copy() + return other + + def digest(self): + """Return the hash value of this hashing object. + + This returns a string containing 8-bit data. The object is + not altered in any way by this function; you can continue + updating the object after calling this function. + """ + h = self.outer.copy() + h.update(self.inner.digest()) + return h.digest() + + def hexdigest(self): + """Like digest(), but returns a string of hexadecimal digits instead. + """ + return "".join([string.zfill(hex(ord(x))[2:], 2) + for x in tuple(self.digest())]) + +def new(key, msg = None, digestmod = None): + """Create a new hashing object and return it. + + key: The starting key for the hash. + msg: if available, will immediately be hashed into the object's starting + state. + + You can now feed arbitrary strings into the object using its update() + method, and can ask for the hash value at any time by calling its digest() + method. 
+ """ + return HMAC(key, msg, digestmod) + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Hash/MD5.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Hash/MD5.py new file mode 100644 index 0000000..b0eba39 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Hash/MD5.py @@ -0,0 +1,13 @@ + +# Just use the MD5 module from the Python standard library + +__revision__ = "$Id: MD5.py,v 1.4 2002/07/11 14:31:19 akuchling Exp $" + +from md5 import * + +import md5 +if hasattr(md5, 'digestsize'): + digest_size = digestsize + del digestsize +del md5 + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Hash/SHA.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Hash/SHA.py new file mode 100644 index 0000000..ea3c6a3 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Hash/SHA.py @@ -0,0 +1,11 @@ + +# Just use the SHA module from the Python standard library + +__revision__ = "$Id: SHA.py,v 1.4 2002/07/11 14:31:19 akuchling Exp $" + +from sha import * +import sha +if hasattr(sha, 'digestsize'): + digest_size = digestsize + del digestsize +del sha diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Hash/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Hash/__init__.py new file mode 100644 index 0000000..920fe74 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Hash/__init__.py @@ -0,0 +1,24 @@ +"""Hashing algorithms + +Hash functions take arbitrary strings as input, and produce an output +of fixed size that is dependent on the input; it should never be +possible to derive the input data given only the hash function's +output. Hash functions can be used simply as a checksum, or, in +association with a public-key algorithm, can be used to implement +digital signatures. + +The hashing modules here all support the interface described in PEP +247, "API for Cryptographic Hash Functions". 
+ +Submodules: +Crypto.Hash.HMAC RFC 2104: Keyed-Hashing for Message Authentication +Crypto.Hash.MD2 +Crypto.Hash.MD4 +Crypto.Hash.MD5 +Crypto.Hash.RIPEMD +Crypto.Hash.SHA +""" + +__all__ = ['HMAC', 'MD2', 'MD4', 'MD5', 'RIPEMD', 'SHA', 'SHA256'] +__revision__ = "$Id: __init__.py,v 1.6 2003/12/19 14:24:25 akuchling Exp $" + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Protocol/AllOrNothing.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Protocol/AllOrNothing.py new file mode 100644 index 0000000..6f3505d --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Protocol/AllOrNothing.py @@ -0,0 +1,295 @@ +"""This file implements all-or-nothing package transformations. + +An all-or-nothing package transformation is one in which some text is +transformed into message blocks, such that all blocks must be obtained before +the reverse transformation can be applied. Thus, if any blocks are corrupted +or lost, the original message cannot be reproduced. + +An all-or-nothing package transformation is not encryption, although a block +cipher algorithm is used. The encryption key is randomly generated and is +extractable from the message blocks. + +This class implements the All-Or-Nothing package transformation algorithm +described in: + +Ronald L. Rivest. "All-Or-Nothing Encryption and The Package Transform" +http://theory.lcs.mit.edu/~rivest/fusion.pdf + +""" + +__revision__ = "$Id: AllOrNothing.py,v 1.8 2003/02/28 15:23:20 akuchling Exp $" + +import operator +import string +from Crypto.Util.number import bytes_to_long, long_to_bytes + + + +class AllOrNothing: + """Class implementing the All-or-Nothing package transform. + + Methods for subclassing: + + _inventkey(key_size): + Returns a randomly generated key. Subclasses can use this to + implement better random key generating algorithms. The default + algorithm is probably not very cryptographically secure. 
+ + """ + + def __init__(self, ciphermodule, mode=None, IV=None): + """AllOrNothing(ciphermodule, mode=None, IV=None) + + ciphermodule is a module implementing the cipher algorithm to + use. It must provide the PEP272 interface. + + Note that the encryption key is randomly generated + automatically when needed. Optional arguments mode and IV are + passed directly through to the ciphermodule.new() method; they + are the feedback mode and initialization vector to use. All + three arguments must be the same for the object used to create + the digest, and to undigest'ify the message blocks. + """ + + self.__ciphermodule = ciphermodule + self.__mode = mode + self.__IV = IV + self.__key_size = ciphermodule.key_size + if self.__key_size == 0: + self.__key_size = 16 + + __K0digit = chr(0x69) + + def digest(self, text): + """digest(text:string) : [string] + + Perform the All-or-Nothing package transform on the given + string. Output is a list of message blocks describing the + transformed text, where each block is a string of bit length equal + to the ciphermodule's block_size. + """ + + # generate a random session key and K0, the key used to encrypt the + # hash blocks. Rivest calls this a fixed, publically-known encryption + # key, but says nothing about the security implications of this key or + # how to choose it. + key = self._inventkey(self.__key_size) + K0 = self.__K0digit * self.__key_size + + # we need two cipher objects here, one that is used to encrypt the + # message blocks and one that is used to encrypt the hashes. The + # former uses the randomly generated key, while the latter uses the + # well-known key. + mcipher = self.__newcipher(key) + hcipher = self.__newcipher(K0) + + # Pad the text so that its length is a multiple of the cipher's + # block_size. Pad with trailing spaces, which will be eliminated in + # the undigest() step. 
+ block_size = self.__ciphermodule.block_size + padbytes = block_size - (len(text) % block_size) + text = text + ' ' * padbytes + + # Run through the algorithm: + # s: number of message blocks (size of text / block_size) + # input sequence: m1, m2, ... ms + # random key K' (`key' in the code) + # Compute output sequence: m'1, m'2, ... m's' for s' = s + 1 + # Let m'i = mi ^ E(K', i) for i = 1, 2, 3, ..., s + # Let m's' = K' ^ h1 ^ h2 ^ ... hs + # where hi = E(K0, m'i ^ i) for i = 1, 2, ... s + # + # The one complication I add is that the last message block is hard + # coded to the number of padbytes added, so that these can be stripped + # during the undigest() step + s = len(text) / block_size + blocks = [] + hashes = [] + for i in range(1, s+1): + start = (i-1) * block_size + end = start + block_size + mi = text[start:end] + assert len(mi) == block_size + cipherblock = mcipher.encrypt(long_to_bytes(i, block_size)) + mticki = bytes_to_long(mi) ^ bytes_to_long(cipherblock) + blocks.append(mticki) + # calculate the hash block for this block + hi = hcipher.encrypt(long_to_bytes(mticki ^ i, block_size)) + hashes.append(bytes_to_long(hi)) + + # Add the padbytes length as a message block + i = i + 1 + cipherblock = mcipher.encrypt(long_to_bytes(i, block_size)) + mticki = padbytes ^ bytes_to_long(cipherblock) + blocks.append(mticki) + + # calculate this block's hash + hi = hcipher.encrypt(long_to_bytes(mticki ^ i, block_size)) + hashes.append(bytes_to_long(hi)) + + # Now calculate the last message block of the sequence 1..s'. This + # will contain the random session key XOR'd with all the hash blocks, + # so that for undigest(), once all the hash blocks are calculated, the + # session key can be trivially extracted. Calculating all the hash + # blocks requires that all the message blocks be received, thus the + # All-or-Nothing algorithm succeeds. 
+ mtick_stick = bytes_to_long(key) ^ reduce(operator.xor, hashes) + blocks.append(mtick_stick) + + # we convert the blocks to strings since in Python, byte sequences are + # always represented as strings. This is more consistent with the + # model that encryption and hash algorithms always operate on strings. + return map(long_to_bytes, blocks) + + + def undigest(self, blocks): + """undigest(blocks : [string]) : string + + Perform the reverse package transformation on a list of message + blocks. Note that the ciphermodule used for both transformations + must be the same. blocks is a list of strings of bit length + equal to the ciphermodule's block_size. + """ + + # better have at least 2 blocks, for the padbytes package and the hash + # block accumulator + if len(blocks) < 2: + raise ValueError, "List must be at least length 2." + + # blocks is a list of strings. We need to deal with them as long + # integers + blocks = map(bytes_to_long, blocks) + + # Calculate the well-known key, to which the hash blocks are + # encrypted, and create the hash cipher. + K0 = self.__K0digit * self.__key_size + hcipher = self.__newcipher(K0) + + # Since we have all the blocks (or this method would have been called + # prematurely), we can calcualte all the hash blocks. + hashes = [] + for i in range(1, len(blocks)): + mticki = blocks[i-1] ^ i + hi = hcipher.encrypt(long_to_bytes(mticki)) + hashes.append(bytes_to_long(hi)) + + # now we can calculate K' (key). 
remember the last block contains + # m's' which we don't include here + key = blocks[-1] ^ reduce(operator.xor, hashes) + + # and now we can create the cipher object + mcipher = self.__newcipher(long_to_bytes(key)) + block_size = self.__ciphermodule.block_size + + # And we can now decode the original message blocks + parts = [] + for i in range(1, len(blocks)): + cipherblock = mcipher.encrypt(long_to_bytes(i, block_size)) + mi = blocks[i-1] ^ bytes_to_long(cipherblock) + parts.append(mi) + + # The last message block contains the number of pad bytes appended to + # the original text string, such that its length was an even multiple + # of the cipher's block_size. This number should be small enough that + # the conversion from long integer to integer should never overflow + padbytes = int(parts[-1]) + text = string.join(map(long_to_bytes, parts[:-1]), '') + return text[:-padbytes] + + def _inventkey(self, key_size): + # TBD: Not a very secure algorithm. Eventually, I'd like to use JHy's + # kernelrand module + import time + from Crypto.Util import randpool + # TBD: key_size * 2 to work around possible bug in RandomPool? + pool = randpool.RandomPool(key_size * 2) + while key_size > pool.entropy: + pool.add_event() + + # we now have enough entropy in the pool to get a key_size'd key + return pool.get_bytes(key_size) + + def __newcipher(self, key): + if self.__mode is None and self.__IV is None: + return self.__ciphermodule.new(key) + elif self.__IV is None: + return self.__ciphermodule.new(key, self.__mode) + else: + return self.__ciphermodule.new(key, self.__mode, self.__IV) + + + +if __name__ == '__main__': + import sys + import getopt + import base64 + + usagemsg = '''\ +Test module usage: %(program)s [-c cipher] [-l] [-h] + +Where: + --cipher module + -c module + Cipher module to use. 
Default: %(ciphermodule)s + + --aslong + -l + Print the encoded message blocks as long integers instead of base64 + encoded strings + + --help + -h + Print this help message +''' + + ciphermodule = 'AES' + aslong = 0 + + def usage(code, msg=None): + if msg: + print msg + print usagemsg % {'program': sys.argv[0], + 'ciphermodule': ciphermodule} + sys.exit(code) + + try: + opts, args = getopt.getopt(sys.argv[1:], + 'c:l', ['cipher=', 'aslong']) + except getopt.error, msg: + usage(1, msg) + + if args: + usage(1, 'Too many arguments') + + for opt, arg in opts: + if opt in ('-h', '--help'): + usage(0) + elif opt in ('-c', '--cipher'): + ciphermodule = arg + elif opt in ('-l', '--aslong'): + aslong = 1 + + # ugly hack to force __import__ to give us the end-path module + module = __import__('Crypto.Cipher.'+ciphermodule, None, None, ['new']) + + a = AllOrNothing(module) + print 'Original text:\n==========' + print __doc__ + print '==========' + msgblocks = a.digest(__doc__) + print 'message blocks:' + for i, blk in map(None, range(len(msgblocks)), msgblocks): + # base64 adds a trailing newline + print ' %3d' % i, + if aslong: + print bytes_to_long(blk) + else: + print base64.encodestring(blk)[:-1] + # + # get a new undigest-only object so there's no leakage + b = AllOrNothing(module) + text = b.undigest(msgblocks) + if text == __doc__: + print 'They match!' + else: + print 'They differ!' diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Protocol/Chaffing.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Protocol/Chaffing.py new file mode 100644 index 0000000..fdfb82d --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Protocol/Chaffing.py @@ -0,0 +1,229 @@ +"""This file implements the chaffing algorithm. + +Winnowing and chaffing is a technique for enhancing privacy without requiring +strong encryption. 
In short, the technique takes a set of authenticated +message blocks (the wheat) and adds a number of chaff blocks which have +randomly chosen data and MAC fields. This means that to an adversary, the +chaff blocks look as valid as the wheat blocks, and so the authentication +would have to be performed on every block. By tailoring the number of chaff +blocks added to the message, the sender can make breaking the message +computationally infeasible. There are many other interesting properties of +the winnow/chaff technique. + +For example, say Alice is sending a message to Bob. She packetizes the +message and performs an all-or-nothing transformation on the packets. Then +she authenticates each packet with a message authentication code (MAC). The +MAC is a hash of the data packet, and there is a secret key which she must +share with Bob (key distribution is an exercise left to the reader). She then +adds a serial number to each packet, and sends the packets to Bob. + +Bob receives the packets, and using the shared secret authentication key, +authenticates the MACs for each packet. Those packets that have bad MACs are +simply discarded. The remainder are sorted by serial number, and passed +through the reverse all-or-nothing transform. The transform means that an +eavesdropper (say Eve) must acquire all the packets before any of the data can +be read. If even one packet is missing, the data is useless. + +There's one twist: by adding chaff packets, Alice and Bob can make Eve's job +much harder, since Eve now has to break the shared secret key, or try every +combination of wheat and chaff packet to read any of the message. The cool +thing is that Bob doesn't need to add any additional code; the chaff packets +are already filtered out because their MACs don't match (in all likelihood -- +since the data and MACs for the chaff packets are randomly chosen it is +possible, but very unlikely that a chaff MAC will match the chaff data). 
And +Alice need not even be the party adding the chaff! She could be completely +unaware that a third party, say Charles, is adding chaff packets to her +messages as they are transmitted. + +For more information on winnowing and chaffing see this paper: + +Ronald L. Rivest, "Chaffing and Winnowing: Confidentiality without Encryption" +http://theory.lcs.mit.edu/~rivest/chaffing.txt + +""" + +__revision__ = "$Id: Chaffing.py,v 1.7 2003/02/28 15:23:21 akuchling Exp $" + +from Crypto.Util.number import bytes_to_long + +class Chaff: + """Class implementing the chaff adding algorithm. + + Methods for subclasses: + + _randnum(size): + Returns a randomly generated number with a byte-length equal + to size. Subclasses can use this to implement better random + data and MAC generating algorithms. The default algorithm is + probably not very cryptographically secure. It is most + important that the chaff data does not contain any patterns + that can be used to discern it from wheat data without running + the MAC. + + """ + + def __init__(self, factor=1.0, blocksper=1): + """Chaff(factor:float, blocksper:int) + + factor is the number of message blocks to add chaff to, + expressed as a percentage between 0.0 and 1.0. blocksper is + the number of chaff blocks to include for each block being + chaffed. Thus the defaults add one chaff block to every + message block. By changing the defaults, you can adjust how + computationally difficult it could be for an adversary to + brute-force crack the message. The difficulty is expressed + as: + + pow(blocksper, int(factor * number-of-blocks)) + + For ease of implementation, when factor < 1.0, only the first + int(factor*number-of-blocks) message blocks are chaffed. 
+ """ + + if not (0.0<=factor<=1.0): + raise ValueError, "'factor' must be between 0.0 and 1.0" + if blocksper < 0: + raise ValueError, "'blocksper' must be zero or more" + + self.__factor = factor + self.__blocksper = blocksper + + + def chaff(self, blocks): + """chaff( [(serial-number:int, data:string, MAC:string)] ) + : [(int, string, string)] + + Add chaff to message blocks. blocks is a list of 3-tuples of the + form (serial-number, data, MAC). + + Chaff is created by choosing a random number of the same + byte-length as data, and another random number of the same + byte-length as MAC. The message block's serial number is + placed on the chaff block and all the packet's chaff blocks + are randomly interspersed with the single wheat block. This + method then returns a list of 3-tuples of the same form. + Chaffed blocks will contain multiple instances of 3-tuples + with the same serial number, but the only way to figure out + which blocks are wheat and which are chaff is to perform the + MAC hash and compare values. + """ + + chaffedblocks = [] + + # count is the number of blocks to add chaff to. blocksper is the + # number of chaff blocks to add per message block that is being + # chaffed. 
+ count = len(blocks) * self.__factor + blocksper = range(self.__blocksper) + for i, wheat in map(None, range(len(blocks)), blocks): + # it shouldn't matter which of the n blocks we add chaff to, so for + # ease of implementation, we'll just add them to the first count + # blocks + if i < count: + serial, data, mac = wheat + datasize = len(data) + macsize = len(mac) + addwheat = 1 + # add chaff to this block + for j in blocksper: + import sys + chaffdata = self._randnum(datasize) + chaffmac = self._randnum(macsize) + chaff = (serial, chaffdata, chaffmac) + # mix up the order, if the 5th bit is on then put the + # wheat on the list + if addwheat and bytes_to_long(self._randnum(16)) & 0x40: + chaffedblocks.append(wheat) + addwheat = 0 + chaffedblocks.append(chaff) + if addwheat: + chaffedblocks.append(wheat) + else: + # just add the wheat + chaffedblocks.append(wheat) + return chaffedblocks + + def _randnum(self, size): + # TBD: Not a very secure algorithm. + # TBD: size * 2 to work around possible bug in RandomPool + from Crypto.Util import randpool + import time + pool = randpool.RandomPool(size * 2) + while size > pool.entropy: + pass + + # we now have enough entropy in the pool to get size bytes of random + # data... well, probably + return pool.get_bytes(size) + + + +if __name__ == '__main__': + text = """\ +We hold these truths to be self-evident, that all men are created equal, that +they are endowed by their Creator with certain unalienable Rights, that among +these are Life, Liberty, and the pursuit of Happiness. That to secure these +rights, Governments are instituted among Men, deriving their just powers from +the consent of the governed. That whenever any Form of Government becomes +destructive of these ends, it is the Right of the People to alter or to +abolish it, and to institute new Government, laying its foundation on such +principles and organizing its powers in such form, as to them shall seem most +likely to effect their Safety and Happiness. 
+""" + print 'Original text:\n==========' + print text + print '==========' + + # first transform the text into packets + blocks = [] ; size = 40 + for i in range(0, len(text), size): + blocks.append( text[i:i+size] ) + + # now get MACs for all the text blocks. The key is obvious... + print 'Calculating MACs...' + from Crypto.Hash import HMAC, SHA + key = 'Jefferson' + macs = [HMAC.new(key, block, digestmod=SHA).digest() + for block in blocks] + + assert len(blocks) == len(macs) + + # put these into a form acceptable as input to the chaffing procedure + source = [] + m = map(None, range(len(blocks)), blocks, macs) + print m + for i, data, mac in m: + source.append((i, data, mac)) + + # now chaff these + print 'Adding chaff...' + c = Chaff(factor=0.5, blocksper=2) + chaffed = c.chaff(source) + + from base64 import encodestring + + # print the chaffed message blocks. meanwhile, separate the wheat from + # the chaff + + wheat = [] + print 'chaffed message blocks:' + for i, data, mac in chaffed: + # do the authentication + h = HMAC.new(key, data, digestmod=SHA) + pmac = h.digest() + if pmac == mac: + tag = '-->' + wheat.append(data) + else: + tag = ' ' + # base64 adds a trailing newline + print tag, '%3d' % i, \ + repr(data), encodestring(mac)[:-1] + + # now decode the message packets and check it against the original text + print 'Undigesting wheat...' + newtext = "".join(wheat) + if newtext == text: + print 'They match!' + else: + print 'They differ!' diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Protocol/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Protocol/__init__.py new file mode 100644 index 0000000..a6d68bc --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Protocol/__init__.py @@ -0,0 +1,17 @@ + +"""Cryptographic protocols + +Implements various cryptographic protocols. (Don't expect to find +network protocols here.) 
+ +Crypto.Protocol.AllOrNothing Transforms a message into a set of message + blocks, such that the blocks can be + recombined to get the message back. + +Crypto.Protocol.Chaffing Takes a set of authenticated message blocks + (the wheat) and adds a number of + randomly generated blocks (the chaff). +""" + +__all__ = ['AllOrNothing', 'Chaffing'] +__revision__ = "$Id: __init__.py,v 1.4 2003/02/28 15:23:21 akuchling Exp $" diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/PublicKey/DSA.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/PublicKey/DSA.py new file mode 100644 index 0000000..7947b6f --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/PublicKey/DSA.py @@ -0,0 +1,238 @@ + +# +# DSA.py : Digital Signature Algorithm +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. 
+# + +__revision__ = "$Id: DSA.py,v 1.16 2004/05/06 12:52:54 akuchling Exp $" + +from Crypto.PublicKey.pubkey import * +from Crypto.Util import number +from Crypto.Util.number import bytes_to_long, long_to_bytes +from Crypto.Hash import SHA + +try: + from Crypto.PublicKey import _fastmath +except ImportError: + _fastmath = None + +class error (Exception): + pass + +def generateQ(randfunc): + S=randfunc(20) + hash1=SHA.new(S).digest() + hash2=SHA.new(long_to_bytes(bytes_to_long(S)+1)).digest() + q = bignum(0) + for i in range(0,20): + c=ord(hash1[i])^ord(hash2[i]) + if i==0: + c=c | 128 + if i==19: + c= c | 1 + q=q*256+c + while (not isPrime(q)): + q=q+2 + if pow(2,159L) < q < pow(2,160L): + return S, q + raise error, 'Bad q value generated' + +def generate(bits, randfunc, progress_func=None): + """generate(bits:int, randfunc:callable, progress_func:callable) + + Generate a DSA key of length 'bits', using 'randfunc' to get + random data and 'progress_func', if present, to display + the progress of the key generation. 
+ """ + + if bits<160: + raise error, 'Key length <160 bits' + obj=DSAobj() + # Generate string S and prime q + if progress_func: + progress_func('p,q\n') + while (1): + S, obj.q = generateQ(randfunc) + n=(bits-1)/160 + C, N, V = 0, 2, {} + b=(obj.q >> 5) & 15 + powb=pow(bignum(2), b) + powL1=pow(bignum(2), bits-1) + while C<4096: + for k in range(0, n+1): + V[k]=bytes_to_long(SHA.new(S+str(N)+str(k)).digest()) + W=V[n] % powb + for k in range(n-1, -1, -1): + W=(W<<160L)+V[k] + X=W+powL1 + p=X-(X%(2*obj.q)-1) + if powL1<=p and isPrime(p): + break + C, N = C+1, N+n+1 + if C<4096: + break + if progress_func: + progress_func('4096 multiples failed\n') + + obj.p = p + power=(p-1)/obj.q + if progress_func: + progress_func('h,g\n') + while (1): + h=bytes_to_long(randfunc(bits)) % (p-1) + g=pow(h, power, p) + if 11: + break + obj.g=g + if progress_func: + progress_func('x,y\n') + while (1): + x=bytes_to_long(randfunc(20)) + if 0 < x < obj.q: + break + obj.x, obj.y = x, pow(g, x, p) + return obj + +def construct(tuple): + """construct(tuple:(long,long,long,long)|(long,long,long,long,long)):DSAobj + Construct a DSA object from a 4- or 5-tuple of numbers. 
+ """ + obj=DSAobj() + if len(tuple) not in [4,5]: + raise error, 'argument for construct() wrong length' + for i in range(len(tuple)): + field = obj.keydata[i] + setattr(obj, field, tuple[i]) + return obj + +class DSAobj(pubkey): + keydata=['y', 'g', 'p', 'q', 'x'] + + def _encrypt(self, s, Kstr): + raise error, 'DSA algorithm cannot encrypt data' + + def _decrypt(self, s): + raise error, 'DSA algorithm cannot decrypt data' + + def _sign(self, M, K): + if (K<2 or self.q<=K): + raise error, 'K is not between 2 and q' + r=pow(self.g, K, self.p) % self.q + s=(inverse(K, self.q)*(M+self.x*r)) % self.q + return (r,s) + + def _verify(self, M, sig): + r, s = sig + if r<=0 or r>=self.q or s<=0 or s>=self.q: + return 0 + w=inverse(s, self.q) + u1, u2 = (M*w) % self.q, (r*w) % self.q + v1 = pow(self.g, u1, self.p) + v2 = pow(self.y, u2, self.p) + v = ((v1*v2) % self.p) + v = v % self.q + if v==r: + return 1 + return 0 + + def size(self): + "Return the maximum number of bits that can be handled by this key." 
+ return number.size(self.p) - 1 + + def has_private(self): + """Return a Boolean denoting whether the object contains + private components.""" + if hasattr(self, 'x'): + return 1 + else: + return 0 + + def can_sign(self): + """Return a Boolean value recording whether this algorithm can generate signatures.""" + return 1 + + def can_encrypt(self): + """Return a Boolean value recording whether this algorithm can encrypt data.""" + return 0 + + def publickey(self): + """Return a new key object containing only the public information.""" + return construct((self.y, self.g, self.p, self.q)) + +object=DSAobj + +generate_py = generate +construct_py = construct + +class DSAobj_c(pubkey): + keydata = ['y', 'g', 'p', 'q', 'x'] + + def __init__(self, key): + self.key = key + + def __getattr__(self, attr): + if attr in self.keydata: + return getattr(self.key, attr) + else: + if self.__dict__.has_key(attr): + self.__dict__[attr] + else: + raise AttributeError, '%s instance has no attribute %s' % (self.__class__, attr) + + def __getstate__(self): + d = {} + for k in self.keydata: + if hasattr(self.key, k): + d[k]=getattr(self.key, k) + return d + + def __setstate__(self, state): + y,g,p,q = state['y'], state['g'], state['p'], state['q'] + if not state.has_key('x'): + self.key = _fastmath.dsa_construct(y,g,p,q) + else: + x = state['x'] + self.key = _fastmath.dsa_construct(y,g,p,q,x) + + def _sign(self, M, K): + return self.key._sign(M, K) + + def _verify(self, M, (r, s)): + return self.key._verify(M, r, s) + + def size(self): + return self.key.size() + + def has_private(self): + return self.key.has_private() + + def publickey(self): + return construct_c((self.key.y, self.key.g, self.key.p, self.key.q)) + + def can_sign(self): + return 1 + + def can_encrypt(self): + return 0 + +def generate_c(bits, randfunc, progress_func=None): + obj = generate_py(bits, randfunc, progress_func) + y,g,p,q,x = obj.y, obj.g, obj.p, obj.q, obj.x + return construct_c((y,g,p,q,x)) + +def 
construct_c(tuple): + key = apply(_fastmath.dsa_construct, tuple) + return DSAobj_c(key) + +if _fastmath: + #print "using C version of DSA" + generate = generate_c + construct = construct_c + error = _fastmath.error diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/PublicKey/ElGamal.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/PublicKey/ElGamal.py new file mode 100644 index 0000000..026881c --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/PublicKey/ElGamal.py @@ -0,0 +1,132 @@ +# +# ElGamal.py : ElGamal encryption/decryption and signatures +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: ElGamal.py,v 1.9 2003/04/04 19:44:26 akuchling Exp $" + +from Crypto.PublicKey.pubkey import * +from Crypto.Util import number + +class error (Exception): + pass + +# Generate an ElGamal key with N bits +def generate(bits, randfunc, progress_func=None): + """generate(bits:int, randfunc:callable, progress_func:callable) + + Generate an ElGamal key of length 'bits', using 'randfunc' to get + random data and 'progress_func', if present, to display + the progress of the key generation. 
+ """ + obj=ElGamalobj() + # Generate prime p + if progress_func: + progress_func('p\n') + obj.p=bignum(getPrime(bits, randfunc)) + # Generate random number g + if progress_func: + progress_func('g\n') + size=bits-1-(ord(randfunc(1)) & 63) # g will be from 1--64 bits smaller than p + if size<1: + size=bits-1 + while (1): + obj.g=bignum(getPrime(size, randfunc)) + if obj.g < obj.p: + break + size=(size+1) % bits + if size==0: + size=4 + # Generate random number x + if progress_func: + progress_func('x\n') + while (1): + size=bits-1-ord(randfunc(1)) # x will be from 1 to 256 bits smaller than p + if size>2: + break + while (1): + obj.x=bignum(getPrime(size, randfunc)) + if obj.x < obj.p: + break + size = (size+1) % bits + if size==0: + size=4 + if progress_func: + progress_func('y\n') + obj.y = pow(obj.g, obj.x, obj.p) + return obj + +def construct(tuple): + """construct(tuple:(long,long,long,long)|(long,long,long,long,long))) + : ElGamalobj + Construct an ElGamal key from a 3- or 4-tuple of numbers. 
+ """ + + obj=ElGamalobj() + if len(tuple) not in [3,4]: + raise error, 'argument for construct() wrong length' + for i in range(len(tuple)): + field = obj.keydata[i] + setattr(obj, field, tuple[i]) + return obj + +class ElGamalobj(pubkey): + keydata=['p', 'g', 'y', 'x'] + + def _encrypt(self, M, K): + a=pow(self.g, K, self.p) + b=( M*pow(self.y, K, self.p) ) % self.p + return ( a,b ) + + def _decrypt(self, M): + if (not hasattr(self, 'x')): + raise error, 'Private key not available in this object' + ax=pow(M[0], self.x, self.p) + plaintext=(M[1] * inverse(ax, self.p ) ) % self.p + return plaintext + + def _sign(self, M, K): + if (not hasattr(self, 'x')): + raise error, 'Private key not available in this object' + p1=self.p-1 + if (GCD(K, p1)!=1): + raise error, 'Bad K value: GCD(K,p-1)!=1' + a=pow(self.g, K, self.p) + t=(M-self.x*a) % p1 + while t<0: t=t+p1 + b=(t*inverse(K, p1)) % p1 + return (a, b) + + def _verify(self, M, sig): + v1=pow(self.y, sig[0], self.p) + v1=(v1*pow(sig[0], sig[1], self.p)) % self.p + v2=pow(self.g, M, self.p) + if v1==v2: + return 1 + return 0 + + def size(self): + "Return the maximum number of bits that can be handled by this key." 
+ return number.size(self.p) - 1 + + def has_private(self): + """Return a Boolean denoting whether the object contains + private components.""" + if hasattr(self, 'x'): + return 1 + else: + return 0 + + def publickey(self): + """Return a new key object containing only the public information.""" + return construct((self.p, self.g, self.y)) + + +object=ElGamalobj diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/PublicKey/RSA.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/PublicKey/RSA.py new file mode 100644 index 0000000..e0e877e --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/PublicKey/RSA.py @@ -0,0 +1,256 @@ +# +# RSA.py : RSA encryption/decryption +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: RSA.py,v 1.20 2004/05/06 12:52:54 akuchling Exp $" + +from Crypto.PublicKey import pubkey +from Crypto.Util import number + +try: + from Crypto.PublicKey import _fastmath +except ImportError: + _fastmath = None + +class error (Exception): + pass + +def generate(bits, randfunc, progress_func=None): + """generate(bits:int, randfunc:callable, progress_func:callable) + + Generate an RSA key of length 'bits', using 'randfunc' to get + random data and 'progress_func', if present, to display + the progress of the key generation. 
+ """ + obj=RSAobj() + + # Generate the prime factors of n + if progress_func: + progress_func('p,q\n') + p = q = 1L + while number.size(p*q) < bits: + p = pubkey.getPrime(bits/2, randfunc) + q = pubkey.getPrime(bits/2, randfunc) + + # p shall be smaller than q (for calc of u) + if p > q: + (p, q)=(q, p) + obj.p = p + obj.q = q + + if progress_func: + progress_func('u\n') + obj.u = pubkey.inverse(obj.p, obj.q) + obj.n = obj.p*obj.q + + obj.e = 65537L + if progress_func: + progress_func('d\n') + obj.d=pubkey.inverse(obj.e, (obj.p-1)*(obj.q-1)) + + assert bits <= 1+obj.size(), "Generated key is too small" + + return obj + +def construct(tuple): + """construct(tuple:(long,) : RSAobj + Construct an RSA object from a 2-, 3-, 5-, or 6-tuple of numbers. + """ + + obj=RSAobj() + if len(tuple) not in [2,3,5,6]: + raise error, 'argument for construct() wrong length' + for i in range(len(tuple)): + field = obj.keydata[i] + setattr(obj, field, tuple[i]) + if len(tuple) >= 5: + # Ensure p is smaller than q + if obj.p>obj.q: + (obj.p, obj.q)=(obj.q, obj.p) + + if len(tuple) == 5: + # u not supplied, so we're going to have to compute it. 
+ obj.u=pubkey.inverse(obj.p, obj.q) + + return obj + +class RSAobj(pubkey.pubkey): + keydata = ['n', 'e', 'd', 'p', 'q', 'u'] + def _encrypt(self, plaintext, K=''): + if self.n<=plaintext: + raise error, 'Plaintext too large' + return (pow(plaintext, self.e, self.n),) + + def _decrypt(self, ciphertext): + if (not hasattr(self, 'd')): + raise error, 'Private key not available in this object' + if self.n<=ciphertext[0]: + raise error, 'Ciphertext too large' + return pow(ciphertext[0], self.d, self.n) + + def _sign(self, M, K=''): + return (self._decrypt((M,)),) + + def _verify(self, M, sig): + m2=self._encrypt(sig[0]) + if m2[0]==M: + return 1 + else: return 0 + + def _blind(self, M, B): + tmp = pow(B, self.e, self.n) + return (M * tmp) % self.n + + def _unblind(self, M, B): + tmp = pubkey.inverse(B, self.n) + return (M * tmp) % self.n + + def can_blind (self): + """can_blind() : bool + Return a Boolean value recording whether this algorithm can + blind data. (This does not imply that this + particular key object has the private information required to + to blind a message.) + """ + return 1 + + def size(self): + """size() : int + Return the maximum number of bits that can be handled by this key. + """ + return number.size(self.n) - 1 + + def has_private(self): + """has_private() : bool + Return a Boolean denoting whether the object contains + private components. + """ + if hasattr(self, 'd'): + return 1 + else: return 0 + + def publickey(self): + """publickey(): RSAobj + Return a new key object containing only the public key information. 
+ """ + return construct((self.n, self.e)) + +class RSAobj_c(pubkey.pubkey): + keydata = ['n', 'e', 'd', 'p', 'q', 'u'] + + def __init__(self, key): + self.key = key + + def __getattr__(self, attr): + if attr in self.keydata: + return getattr(self.key, attr) + else: + if self.__dict__.has_key(attr): + self.__dict__[attr] + else: + raise AttributeError, '%s instance has no attribute %s' % (self.__class__, attr) + + def __getstate__(self): + d = {} + for k in self.keydata: + if hasattr(self.key, k): + d[k]=getattr(self.key, k) + return d + + def __setstate__(self, state): + n,e = state['n'], state['e'] + if not state.has_key('d'): + self.key = _fastmath.rsa_construct(n,e) + else: + d = state['d'] + if not state.has_key('q'): + self.key = _fastmath.rsa_construct(n,e,d) + else: + p, q, u = state['p'], state['q'], state['u'] + self.key = _fastmath.rsa_construct(n,e,d,p,q,u) + + def _encrypt(self, plain, K): + return (self.key._encrypt(plain),) + + def _decrypt(self, cipher): + return self.key._decrypt(cipher[0]) + + def _sign(self, M, K): + return (self.key._sign(M),) + + def _verify(self, M, sig): + return self.key._verify(M, sig[0]) + + def _blind(self, M, B): + return self.key._blind(M, B) + + def _unblind(self, M, B): + return self.key._unblind(M, B) + + def can_blind (self): + return 1 + + def size(self): + return self.key.size() + + def has_private(self): + return self.key.has_private() + + def publickey(self): + return construct_c((self.key.n, self.key.e)) + +def generate_c(bits, randfunc, progress_func = None): + # Generate the prime factors of n + if progress_func: + progress_func('p,q\n') + + p = q = 1L + while number.size(p*q) < bits: + p = pubkey.getPrime(bits/2, randfunc) + q = pubkey.getPrime(bits/2, randfunc) + + # p shall be smaller than q (for calc of u) + if p > q: + (p, q)=(q, p) + if progress_func: + progress_func('u\n') + u=pubkey.inverse(p, q) + n=p*q + + e = 65537L + if progress_func: + progress_func('d\n') + d=pubkey.inverse(e, (p-1)*(q-1)) + key 
= _fastmath.rsa_construct(n,e,d,p,q,u) + obj = RSAobj_c(key) + +## print p +## print q +## print number.size(p), number.size(q), number.size(q*p), +## print obj.size(), bits + assert bits <= 1+obj.size(), "Generated key is too small" + return obj + + +def construct_c(tuple): + key = apply(_fastmath.rsa_construct, tuple) + return RSAobj_c(key) + +object = RSAobj + +generate_py = generate +construct_py = construct + +if _fastmath: + #print "using C version of RSA" + generate = generate_c + construct = construct_c + error = _fastmath.error diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/PublicKey/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/PublicKey/__init__.py new file mode 100644 index 0000000..ad1c80c --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/PublicKey/__init__.py @@ -0,0 +1,17 @@ +"""Public-key encryption and signature algorithms. + +Public-key encryption uses two different keys, one for encryption and +one for decryption. The encryption key can be made public, and the +decryption key is kept private. Many public-key algorithms can also +be used to sign messages, and some can *only* be used for signatures. + +Crypto.PublicKey.DSA Digital Signature Algorithm. 
(Signature only) +Crypto.PublicKey.ElGamal (Signing and encryption) +Crypto.PublicKey.RSA (Signing, encryption, and blinding) +Crypto.PublicKey.qNEW (Signature only) + +""" + +__all__ = ['RSA', 'DSA', 'ElGamal', 'qNEW'] +__revision__ = "$Id: __init__.py,v 1.4 2003/04/03 20:27:13 akuchling Exp $" + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/PublicKey/pubkey.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/PublicKey/pubkey.py new file mode 100644 index 0000000..5c75c3e --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/PublicKey/pubkey.py @@ -0,0 +1,172 @@ +# +# pubkey.py : Internal functions for public key operations +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: pubkey.py,v 1.11 2003/04/03 20:36:14 akuchling Exp $" + +import types, warnings +from Crypto.Util.number import * + +# Basic public key class +class pubkey: + def __init__(self): + pass + + def __getstate__(self): + """To keep key objects platform-independent, the key data is + converted to standard Python long integers before being + written out. 
It will then be reconverted as necessary on + restoration.""" + d=self.__dict__ + for key in self.keydata: + if d.has_key(key): d[key]=long(d[key]) + return d + + def __setstate__(self, d): + """On unpickling a key object, the key data is converted to the big +number representation being used, whether that is Python long +integers, MPZ objects, or whatever.""" + for key in self.keydata: + if d.has_key(key): self.__dict__[key]=bignum(d[key]) + + def encrypt(self, plaintext, K): + """encrypt(plaintext:string|long, K:string|long) : tuple + Encrypt the string or integer plaintext. K is a random + parameter required by some algorithms. + """ + wasString=0 + if isinstance(plaintext, types.StringType): + plaintext=bytes_to_long(plaintext) ; wasString=1 + if isinstance(K, types.StringType): + K=bytes_to_long(K) + ciphertext=self._encrypt(plaintext, K) + if wasString: return tuple(map(long_to_bytes, ciphertext)) + else: return ciphertext + + def decrypt(self, ciphertext): + """decrypt(ciphertext:tuple|string|long): string + Decrypt 'ciphertext' using this key. + """ + wasString=0 + if not isinstance(ciphertext, types.TupleType): + ciphertext=(ciphertext,) + if isinstance(ciphertext[0], types.StringType): + ciphertext=tuple(map(bytes_to_long, ciphertext)) ; wasString=1 + plaintext=self._decrypt(ciphertext) + if wasString: return long_to_bytes(plaintext) + else: return plaintext + + def sign(self, M, K): + """sign(M : string|long, K:string|long) : tuple + Return a tuple containing the signature for the message M. + K is a random parameter required by some algorithms. 
+ """ + if (not self.has_private()): + raise error, 'Private key not available in this object' + if isinstance(M, types.StringType): M=bytes_to_long(M) + if isinstance(K, types.StringType): K=bytes_to_long(K) + return self._sign(M, K) + + def verify (self, M, signature): + """verify(M:string|long, signature:tuple) : bool + Verify that the signature is valid for the message M; + returns true if the signature checks out. + """ + if isinstance(M, types.StringType): M=bytes_to_long(M) + return self._verify(M, signature) + + # alias to compensate for the old validate() name + def validate (self, M, signature): + warnings.warn("validate() method name is obsolete; use verify()", + DeprecationWarning) + + def blind(self, M, B): + """blind(M : string|long, B : string|long) : string|long + Blind message M using blinding factor B. + """ + wasString=0 + if isinstance(M, types.StringType): + M=bytes_to_long(M) ; wasString=1 + if isinstance(B, types.StringType): B=bytes_to_long(B) + blindedmessage=self._blind(M, B) + if wasString: return long_to_bytes(blindedmessage) + else: return blindedmessage + + def unblind(self, M, B): + """unblind(M : string|long, B : string|long) : string|long + Unblind message M using blinding factor B. + """ + wasString=0 + if isinstance(M, types.StringType): + M=bytes_to_long(M) ; wasString=1 + if isinstance(B, types.StringType): B=bytes_to_long(B) + unblindedmessage=self._unblind(M, B) + if wasString: return long_to_bytes(unblindedmessage) + else: return unblindedmessage + + + # The following methods will usually be left alone, except for + # signature-only algorithms. They both return Boolean values + # recording whether this key's algorithm can sign and encrypt. + def can_sign (self): + """can_sign() : bool + Return a Boolean value recording whether this algorithm can + generate signatures. (This does not imply that this + particular key object has the private information required to + to generate a signature.) 
+ """ + return 1 + + def can_encrypt (self): + """can_encrypt() : bool + Return a Boolean value recording whether this algorithm can + encrypt data. (This does not imply that this + particular key object has the private information required to + to decrypt a message.) + """ + return 1 + + def can_blind (self): + """can_blind() : bool + Return a Boolean value recording whether this algorithm can + blind data. (This does not imply that this + particular key object has the private information required to + to blind a message.) + """ + return 0 + + # The following methods will certainly be overridden by + # subclasses. + + def size (self): + """size() : int + Return the maximum number of bits that can be handled by this key. + """ + return 0 + + def has_private (self): + """has_private() : bool + Return a Boolean denoting whether the object contains + private components. + """ + return 0 + + def publickey (self): + """publickey(): object + Return a new key object containing only the public information. + """ + return self + + def __eq__ (self, other): + """__eq__(other): 0, 1 + Compare us to other for equality. + """ + return self.__getstate__() == other.__getstate__() diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/PublicKey/qNEW.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/PublicKey/qNEW.py new file mode 100644 index 0000000..65f8ae3 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/PublicKey/qNEW.py @@ -0,0 +1,170 @@ +# +# qNEW.py : The q-NEW signature algorithm. +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. 
+# + +__revision__ = "$Id: qNEW.py,v 1.8 2003/04/04 15:13:35 akuchling Exp $" + +from Crypto.PublicKey import pubkey +from Crypto.Util.number import * +from Crypto.Hash import SHA + +class error (Exception): + pass + +HASHBITS = 160 # Size of SHA digests + +def generate(bits, randfunc, progress_func=None): + """generate(bits:int, randfunc:callable, progress_func:callable) + + Generate a qNEW key of length 'bits', using 'randfunc' to get + random data and 'progress_func', if present, to display + the progress of the key generation. + """ + obj=qNEWobj() + + # Generate prime numbers p and q. q is a 160-bit prime + # number. p is another prime number (the modulus) whose bit + # size is chosen by the caller, and is generated so that p-1 + # is a multiple of q. + # + # Note that only a single seed is used to + # generate p and q; if someone generates a key for you, you can + # use the seed to duplicate the key generation. This can + # protect you from someone generating values of p,q that have + # some special form that's easy to break. + if progress_func: + progress_func('p,q\n') + while (1): + obj.q = getPrime(160, randfunc) + # assert pow(2, 159L)1. g is kept; h can be discarded. + if progress_func: + progress_func('h,g\n') + while (1): + h=bytes_to_long(randfunc(bits)) % (p-1) + g=pow(h, power, p) + if 11: + break + obj.g=g + + # x is the private key information, and is + # just a random number between 0 and q. + # y=g**x mod p, and is part of the public information. + if progress_func: + progress_func('x,y\n') + while (1): + x=bytes_to_long(randfunc(20)) + if 0 < x < obj.q: + break + obj.x, obj.y=x, pow(g, x, p) + + return obj + +# Construct a qNEW object +def construct(tuple): + """construct(tuple:(long,long,long,long)|(long,long,long,long,long) + Construct a qNEW object from a 4- or 5-tuple of numbers. 
+ """ + obj=qNEWobj() + if len(tuple) not in [4,5]: + raise error, 'argument for construct() wrong length' + for i in range(len(tuple)): + field = obj.keydata[i] + setattr(obj, field, tuple[i]) + return obj + +class qNEWobj(pubkey.pubkey): + keydata=['p', 'q', 'g', 'y', 'x'] + + def _sign(self, M, K=''): + if (self.q<=K): + raise error, 'K is greater than q' + if M<0: + raise error, 'Illegal value of M (<0)' + if M>=pow(2,161L): + raise error, 'Illegal value of M (too large)' + r=pow(self.g, K, self.p) % self.q + s=(K- (r*M*self.x % self.q)) % self.q + return (r,s) + def _verify(self, M, sig): + r, s = sig + if r<=0 or r>=self.q or s<=0 or s>=self.q: + return 0 + if M<0: + raise error, 'Illegal value of M (<0)' + if M<=0 or M>=pow(2,161L): + return 0 + v1 = pow(self.g, s, self.p) + v2 = pow(self.y, M*r, self.p) + v = ((v1*v2) % self.p) + v = v % self.q + if v==r: + return 1 + return 0 + + def size(self): + "Return the maximum number of bits that can be handled by this key." + return 160 + + def has_private(self): + """Return a Boolean denoting whether the object contains + private components.""" + return hasattr(self, 'x') + + def can_sign(self): + """Return a Boolean value recording whether this algorithm can generate signatures.""" + return 1 + + def can_encrypt(self): + """Return a Boolean value recording whether this algorithm can encrypt data.""" + return 0 + + def publickey(self): + """Return a new key object containing only the public information.""" + return construct((self.p, self.q, self.g, self.y)) + +object = qNEWobj + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Util/RFC1751.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Util/RFC1751.py new file mode 100644 index 0000000..0a47952 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Util/RFC1751.py @@ -0,0 +1,342 @@ +#!/usr/local/bin/python +# rfc1751.py : Converts between 128-bit strings and a human-readable +# sequence of words, as defined in 
RFC1751: "A Convention for +# Human-Readable 128-bit Keys", by Daniel L. McDonald. + +__revision__ = "$Id: RFC1751.py,v 1.6 2003/04/04 15:15:10 akuchling Exp $" + + +import string, binascii + +binary={0:'0000', 1:'0001', 2:'0010', 3:'0011', 4:'0100', 5:'0101', + 6:'0110', 7:'0111', 8:'1000', 9:'1001', 10:'1010', 11:'1011', + 12:'1100', 13:'1101', 14:'1110', 15:'1111'} + +def _key2bin(s): + "Convert a key into a string of binary digits" + kl=map(lambda x: ord(x), s) + kl=map(lambda x: binary[x/16]+binary[x&15], kl) + return ''.join(kl) + +def _extract(key, start, length): + """Extract a bitstring from a string of binary digits, and return its + numeric value.""" + k=key[start:start+length] + return reduce(lambda x,y: x*2+ord(y)-48, k, 0) + +def key_to_english (key): + """key_to_english(key:string) : string + Transform an arbitrary key into a string containing English words. + The key length must be a multiple of 8. + """ + english='' + for index in range(0, len(key), 8): # Loop over 8-byte subkeys + subkey=key[index:index+8] + # Compute the parity of the key + skbin=_key2bin(subkey) ; p=0 + for i in range(0, 64, 2): p=p+_extract(skbin, i, 2) + # Append parity bits to the subkey + skbin=_key2bin(subkey+chr((p<<6) & 255)) + for i in range(0, 64, 11): + english=english+wordlist[_extract(skbin, i, 11)]+' ' + + return english[:-1] # Remove the trailing space + +def english_to_key (str): + """english_to_key(string):string + Transform a string into a corresponding key. + The string must contain words separated by whitespace; the number + of words must be a multiple of 6. 
+ """ + + L=string.split(string.upper(str)) ; key='' + for index in range(0, len(L), 6): + sublist=L[index:index+6] ; char=9*[0] ; bits=0 + for i in sublist: + index = wordlist.index(i) + shift = (8-(bits+11)%8) %8 + y = index << shift + cl, cc, cr = (y>>16), (y>>8)&0xff, y & 0xff + if (shift>5): + char[bits/8] = char[bits/8] | cl + char[bits/8+1] = char[bits/8+1] | cc + char[bits/8+2] = char[bits/8+2] | cr + elif shift>-3: + char[bits/8] = char[bits/8] | cc + char[bits/8+1] = char[bits/8+1] | cr + else: char[bits/8] = char[bits/8] | cr + bits=bits+11 + subkey=reduce(lambda x,y:x+chr(y), char, '') + + # Check the parity of the resulting key + skbin=_key2bin(subkey) + p=0 + for i in range(0, 64, 2): p=p+_extract(skbin, i, 2) + if (p&3) != _extract(skbin, 64, 2): + raise ValueError, "Parity error in resulting key" + key=key+subkey[0:8] + return key + +wordlist=[ "A", "ABE", "ACE", "ACT", "AD", "ADA", "ADD", + "AGO", "AID", "AIM", "AIR", "ALL", "ALP", "AM", "AMY", "AN", "ANA", + "AND", "ANN", "ANT", "ANY", "APE", "APS", "APT", "ARC", "ARE", "ARK", + "ARM", "ART", "AS", "ASH", "ASK", "AT", "ATE", "AUG", "AUK", "AVE", + "AWE", "AWK", "AWL", "AWN", "AX", "AYE", "BAD", "BAG", "BAH", "BAM", + "BAN", "BAR", "BAT", "BAY", "BE", "BED", "BEE", "BEG", "BEN", "BET", + "BEY", "BIB", "BID", "BIG", "BIN", "BIT", "BOB", "BOG", "BON", "BOO", + "BOP", "BOW", "BOY", "BUB", "BUD", "BUG", "BUM", "BUN", "BUS", "BUT", + "BUY", "BY", "BYE", "CAB", "CAL", "CAM", "CAN", "CAP", "CAR", "CAT", + "CAW", "COD", "COG", "COL", "CON", "COO", "COP", "COT", "COW", "COY", + "CRY", "CUB", "CUE", "CUP", "CUR", "CUT", "DAB", "DAD", "DAM", "DAN", + "DAR", "DAY", "DEE", "DEL", "DEN", "DES", "DEW", "DID", "DIE", "DIG", + "DIN", "DIP", "DO", "DOE", "DOG", "DON", "DOT", "DOW", "DRY", "DUB", + "DUD", "DUE", "DUG", "DUN", "EAR", "EAT", "ED", "EEL", "EGG", "EGO", + "ELI", "ELK", "ELM", "ELY", "EM", "END", "EST", "ETC", "EVA", "EVE", + "EWE", "EYE", "FAD", "FAN", "FAR", "FAT", "FAY", "FED", "FEE", "FEW", + "FIB", 
"FIG", "FIN", "FIR", "FIT", "FLO", "FLY", "FOE", "FOG", "FOR", + "FRY", "FUM", "FUN", "FUR", "GAB", "GAD", "GAG", "GAL", "GAM", "GAP", + "GAS", "GAY", "GEE", "GEL", "GEM", "GET", "GIG", "GIL", "GIN", "GO", + "GOT", "GUM", "GUN", "GUS", "GUT", "GUY", "GYM", "GYP", "HA", "HAD", + "HAL", "HAM", "HAN", "HAP", "HAS", "HAT", "HAW", "HAY", "HE", "HEM", + "HEN", "HER", "HEW", "HEY", "HI", "HID", "HIM", "HIP", "HIS", "HIT", + "HO", "HOB", "HOC", "HOE", "HOG", "HOP", "HOT", "HOW", "HUB", "HUE", + "HUG", "HUH", "HUM", "HUT", "I", "ICY", "IDA", "IF", "IKE", "ILL", + "INK", "INN", "IO", "ION", "IQ", "IRA", "IRE", "IRK", "IS", "IT", + "ITS", "IVY", "JAB", "JAG", "JAM", "JAN", "JAR", "JAW", "JAY", "JET", + "JIG", "JIM", "JO", "JOB", "JOE", "JOG", "JOT", "JOY", "JUG", "JUT", + "KAY", "KEG", "KEN", "KEY", "KID", "KIM", "KIN", "KIT", "LA", "LAB", + "LAC", "LAD", "LAG", "LAM", "LAP", "LAW", "LAY", "LEA", "LED", "LEE", + "LEG", "LEN", "LEO", "LET", "LEW", "LID", "LIE", "LIN", "LIP", "LIT", + "LO", "LOB", "LOG", "LOP", "LOS", "LOT", "LOU", "LOW", "LOY", "LUG", + "LYE", "MA", "MAC", "MAD", "MAE", "MAN", "MAO", "MAP", "MAT", "MAW", + "MAY", "ME", "MEG", "MEL", "MEN", "MET", "MEW", "MID", "MIN", "MIT", + "MOB", "MOD", "MOE", "MOO", "MOP", "MOS", "MOT", "MOW", "MUD", "MUG", + "MUM", "MY", "NAB", "NAG", "NAN", "NAP", "NAT", "NAY", "NE", "NED", + "NEE", "NET", "NEW", "NIB", "NIL", "NIP", "NIT", "NO", "NOB", "NOD", + "NON", "NOR", "NOT", "NOV", "NOW", "NU", "NUN", "NUT", "O", "OAF", + "OAK", "OAR", "OAT", "ODD", "ODE", "OF", "OFF", "OFT", "OH", "OIL", + "OK", "OLD", "ON", "ONE", "OR", "ORB", "ORE", "ORR", "OS", "OTT", + "OUR", "OUT", "OVA", "OW", "OWE", "OWL", "OWN", "OX", "PA", "PAD", + "PAL", "PAM", "PAN", "PAP", "PAR", "PAT", "PAW", "PAY", "PEA", "PEG", + "PEN", "PEP", "PER", "PET", "PEW", "PHI", "PI", "PIE", "PIN", "PIT", + "PLY", "PO", "POD", "POE", "POP", "POT", "POW", "PRO", "PRY", "PUB", + "PUG", "PUN", "PUP", "PUT", "QUO", "RAG", "RAM", "RAN", "RAP", "RAT", + "RAW", "RAY", "REB", 
"RED", "REP", "RET", "RIB", "RID", "RIG", "RIM", + "RIO", "RIP", "ROB", "ROD", "ROE", "RON", "ROT", "ROW", "ROY", "RUB", + "RUE", "RUG", "RUM", "RUN", "RYE", "SAC", "SAD", "SAG", "SAL", "SAM", + "SAN", "SAP", "SAT", "SAW", "SAY", "SEA", "SEC", "SEE", "SEN", "SET", + "SEW", "SHE", "SHY", "SIN", "SIP", "SIR", "SIS", "SIT", "SKI", "SKY", + "SLY", "SO", "SOB", "SOD", "SON", "SOP", "SOW", "SOY", "SPA", "SPY", + "SUB", "SUD", "SUE", "SUM", "SUN", "SUP", "TAB", "TAD", "TAG", "TAN", + "TAP", "TAR", "TEA", "TED", "TEE", "TEN", "THE", "THY", "TIC", "TIE", + "TIM", "TIN", "TIP", "TO", "TOE", "TOG", "TOM", "TON", "TOO", "TOP", + "TOW", "TOY", "TRY", "TUB", "TUG", "TUM", "TUN", "TWO", "UN", "UP", + "US", "USE", "VAN", "VAT", "VET", "VIE", "WAD", "WAG", "WAR", "WAS", + "WAY", "WE", "WEB", "WED", "WEE", "WET", "WHO", "WHY", "WIN", "WIT", + "WOK", "WON", "WOO", "WOW", "WRY", "WU", "YAM", "YAP", "YAW", "YE", + "YEA", "YES", "YET", "YOU", "ABED", "ABEL", "ABET", "ABLE", "ABUT", + "ACHE", "ACID", "ACME", "ACRE", "ACTA", "ACTS", "ADAM", "ADDS", + "ADEN", "AFAR", "AFRO", "AGEE", "AHEM", "AHOY", "AIDA", "AIDE", + "AIDS", "AIRY", "AJAR", "AKIN", "ALAN", "ALEC", "ALGA", "ALIA", + "ALLY", "ALMA", "ALOE", "ALSO", "ALTO", "ALUM", "ALVA", "AMEN", + "AMES", "AMID", "AMMO", "AMOK", "AMOS", "AMRA", "ANDY", "ANEW", + "ANNA", "ANNE", "ANTE", "ANTI", "AQUA", "ARAB", "ARCH", "AREA", + "ARGO", "ARID", "ARMY", "ARTS", "ARTY", "ASIA", "ASKS", "ATOM", + "AUNT", "AURA", "AUTO", "AVER", "AVID", "AVIS", "AVON", "AVOW", + "AWAY", "AWRY", "BABE", "BABY", "BACH", "BACK", "BADE", "BAIL", + "BAIT", "BAKE", "BALD", "BALE", "BALI", "BALK", "BALL", "BALM", + "BAND", "BANE", "BANG", "BANK", "BARB", "BARD", "BARE", "BARK", + "BARN", "BARR", "BASE", "BASH", "BASK", "BASS", "BATE", "BATH", + "BAWD", "BAWL", "BEAD", "BEAK", "BEAM", "BEAN", "BEAR", "BEAT", + "BEAU", "BECK", "BEEF", "BEEN", "BEER", + "BEET", "BELA", "BELL", "BELT", "BEND", "BENT", "BERG", "BERN", + "BERT", "BESS", "BEST", "BETA", "BETH", "BHOY", "BIAS", 
"BIDE", + "BIEN", "BILE", "BILK", "BILL", "BIND", "BING", "BIRD", "BITE", + "BITS", "BLAB", "BLAT", "BLED", "BLEW", "BLOB", "BLOC", "BLOT", + "BLOW", "BLUE", "BLUM", "BLUR", "BOAR", "BOAT", "BOCA", "BOCK", + "BODE", "BODY", "BOGY", "BOHR", "BOIL", "BOLD", "BOLO", "BOLT", + "BOMB", "BONA", "BOND", "BONE", "BONG", "BONN", "BONY", "BOOK", + "BOOM", "BOON", "BOOT", "BORE", "BORG", "BORN", "BOSE", "BOSS", + "BOTH", "BOUT", "BOWL", "BOYD", "BRAD", "BRAE", "BRAG", "BRAN", + "BRAY", "BRED", "BREW", "BRIG", "BRIM", "BROW", "BUCK", "BUDD", + "BUFF", "BULB", "BULK", "BULL", "BUNK", "BUNT", "BUOY", "BURG", + "BURL", "BURN", "BURR", "BURT", "BURY", "BUSH", "BUSS", "BUST", + "BUSY", "BYTE", "CADY", "CAFE", "CAGE", "CAIN", "CAKE", "CALF", + "CALL", "CALM", "CAME", "CANE", "CANT", "CARD", "CARE", "CARL", + "CARR", "CART", "CASE", "CASH", "CASK", "CAST", "CAVE", "CEIL", + "CELL", "CENT", "CERN", "CHAD", "CHAR", "CHAT", "CHAW", "CHEF", + "CHEN", "CHEW", "CHIC", "CHIN", "CHOU", "CHOW", "CHUB", "CHUG", + "CHUM", "CITE", "CITY", "CLAD", "CLAM", "CLAN", "CLAW", "CLAY", + "CLOD", "CLOG", "CLOT", "CLUB", "CLUE", "COAL", "COAT", "COCA", + "COCK", "COCO", "CODA", "CODE", "CODY", "COED", "COIL", "COIN", + "COKE", "COLA", "COLD", "COLT", "COMA", "COMB", "COME", "COOK", + "COOL", "COON", "COOT", "CORD", "CORE", "CORK", "CORN", "COST", + "COVE", "COWL", "CRAB", "CRAG", "CRAM", "CRAY", "CREW", "CRIB", + "CROW", "CRUD", "CUBA", "CUBE", "CUFF", "CULL", "CULT", "CUNY", + "CURB", "CURD", "CURE", "CURL", "CURT", "CUTS", "DADE", "DALE", + "DAME", "DANA", "DANE", "DANG", "DANK", "DARE", "DARK", "DARN", + "DART", "DASH", "DATA", "DATE", "DAVE", "DAVY", "DAWN", "DAYS", + "DEAD", "DEAF", "DEAL", "DEAN", "DEAR", "DEBT", "DECK", "DEED", + "DEEM", "DEER", "DEFT", "DEFY", "DELL", "DENT", "DENY", "DESK", + "DIAL", "DICE", "DIED", "DIET", "DIME", "DINE", "DING", "DINT", + "DIRE", "DIRT", "DISC", "DISH", "DISK", "DIVE", "DOCK", "DOES", + "DOLE", "DOLL", "DOLT", "DOME", "DONE", "DOOM", "DOOR", "DORA", + "DOSE", 
"DOTE", "DOUG", "DOUR", "DOVE", "DOWN", "DRAB", "DRAG", + "DRAM", "DRAW", "DREW", "DRUB", "DRUG", "DRUM", "DUAL", "DUCK", + "DUCT", "DUEL", "DUET", "DUKE", "DULL", "DUMB", "DUNE", "DUNK", + "DUSK", "DUST", "DUTY", "EACH", "EARL", "EARN", "EASE", "EAST", + "EASY", "EBEN", "ECHO", "EDDY", "EDEN", "EDGE", "EDGY", "EDIT", + "EDNA", "EGAN", "ELAN", "ELBA", "ELLA", "ELSE", "EMIL", "EMIT", + "EMMA", "ENDS", "ERIC", "EROS", "EVEN", "EVER", "EVIL", "EYED", + "FACE", "FACT", "FADE", "FAIL", "FAIN", "FAIR", "FAKE", "FALL", + "FAME", "FANG", "FARM", "FAST", "FATE", "FAWN", "FEAR", "FEAT", + "FEED", "FEEL", "FEET", "FELL", "FELT", "FEND", "FERN", "FEST", + "FEUD", "FIEF", "FIGS", "FILE", "FILL", "FILM", "FIND", "FINE", + "FINK", "FIRE", "FIRM", "FISH", "FISK", "FIST", "FITS", "FIVE", + "FLAG", "FLAK", "FLAM", "FLAT", "FLAW", "FLEA", "FLED", "FLEW", + "FLIT", "FLOC", "FLOG", "FLOW", "FLUB", "FLUE", "FOAL", "FOAM", + "FOGY", "FOIL", "FOLD", "FOLK", "FOND", "FONT", "FOOD", "FOOL", + "FOOT", "FORD", "FORE", "FORK", "FORM", "FORT", "FOSS", "FOUL", + "FOUR", "FOWL", "FRAU", "FRAY", "FRED", "FREE", "FRET", "FREY", + "FROG", "FROM", "FUEL", "FULL", "FUME", "FUND", "FUNK", "FURY", + "FUSE", "FUSS", "GAFF", "GAGE", "GAIL", "GAIN", "GAIT", "GALA", + "GALE", "GALL", "GALT", "GAME", "GANG", "GARB", "GARY", "GASH", + "GATE", "GAUL", "GAUR", "GAVE", "GAWK", "GEAR", "GELD", "GENE", + "GENT", "GERM", "GETS", "GIBE", "GIFT", "GILD", "GILL", "GILT", + "GINA", "GIRD", "GIRL", "GIST", "GIVE", "GLAD", "GLEE", "GLEN", + "GLIB", "GLOB", "GLOM", "GLOW", "GLUE", "GLUM", "GLUT", "GOAD", + "GOAL", "GOAT", "GOER", "GOES", "GOLD", "GOLF", "GONE", "GONG", + "GOOD", "GOOF", "GORE", "GORY", "GOSH", "GOUT", "GOWN", "GRAB", + "GRAD", "GRAY", "GREG", "GREW", "GREY", "GRID", "GRIM", "GRIN", + "GRIT", "GROW", "GRUB", "GULF", "GULL", "GUNK", "GURU", "GUSH", + "GUST", "GWEN", "GWYN", "HAAG", "HAAS", "HACK", "HAIL", "HAIR", + "HALE", "HALF", "HALL", "HALO", "HALT", "HAND", "HANG", "HANK", + "HANS", "HARD", "HARK", 
"HARM", "HART", "HASH", "HAST", "HATE", + "HATH", "HAUL", "HAVE", "HAWK", "HAYS", "HEAD", "HEAL", "HEAR", + "HEAT", "HEBE", "HECK", "HEED", "HEEL", "HEFT", "HELD", "HELL", + "HELM", "HERB", "HERD", "HERE", "HERO", "HERS", "HESS", "HEWN", + "HICK", "HIDE", "HIGH", "HIKE", "HILL", "HILT", "HIND", "HINT", + "HIRE", "HISS", "HIVE", "HOBO", "HOCK", "HOFF", "HOLD", "HOLE", + "HOLM", "HOLT", "HOME", "HONE", "HONK", "HOOD", "HOOF", "HOOK", + "HOOT", "HORN", "HOSE", "HOST", "HOUR", "HOVE", "HOWE", "HOWL", + "HOYT", "HUCK", "HUED", "HUFF", "HUGE", "HUGH", "HUGO", "HULK", + "HULL", "HUNK", "HUNT", "HURD", "HURL", "HURT", "HUSH", "HYDE", + "HYMN", "IBIS", "ICON", "IDEA", "IDLE", "IFFY", "INCA", "INCH", + "INTO", "IONS", "IOTA", "IOWA", "IRIS", "IRMA", "IRON", "ISLE", + "ITCH", "ITEM", "IVAN", "JACK", "JADE", "JAIL", "JAKE", "JANE", + "JAVA", "JEAN", "JEFF", "JERK", "JESS", "JEST", "JIBE", "JILL", + "JILT", "JIVE", "JOAN", "JOBS", "JOCK", "JOEL", "JOEY", "JOHN", + "JOIN", "JOKE", "JOLT", "JOVE", "JUDD", "JUDE", "JUDO", "JUDY", + "JUJU", "JUKE", "JULY", "JUNE", "JUNK", "JUNO", "JURY", "JUST", + "JUTE", "KAHN", "KALE", "KANE", "KANT", "KARL", "KATE", "KEEL", + "KEEN", "KENO", "KENT", "KERN", "KERR", "KEYS", "KICK", "KILL", + "KIND", "KING", "KIRK", "KISS", "KITE", "KLAN", "KNEE", "KNEW", + "KNIT", "KNOB", "KNOT", "KNOW", "KOCH", "KONG", "KUDO", "KURD", + "KURT", "KYLE", "LACE", "LACK", "LACY", "LADY", "LAID", "LAIN", + "LAIR", "LAKE", "LAMB", "LAME", "LAND", "LANE", "LANG", "LARD", + "LARK", "LASS", "LAST", "LATE", "LAUD", "LAVA", "LAWN", "LAWS", + "LAYS", "LEAD", "LEAF", "LEAK", "LEAN", "LEAR", "LEEK", "LEER", + "LEFT", "LEND", "LENS", "LENT", "LEON", "LESK", "LESS", "LEST", + "LETS", "LIAR", "LICE", "LICK", "LIED", "LIEN", "LIES", "LIEU", + "LIFE", "LIFT", "LIKE", "LILA", "LILT", "LILY", "LIMA", "LIMB", + "LIME", "LIND", "LINE", "LINK", "LINT", "LION", "LISA", "LIST", + "LIVE", "LOAD", "LOAF", "LOAM", "LOAN", "LOCK", "LOFT", "LOGE", + "LOIS", "LOLA", "LONE", "LONG", "LOOK", 
"LOON", "LOOT", "LORD", + "LORE", "LOSE", "LOSS", "LOST", "LOUD", "LOVE", "LOWE", "LUCK", + "LUCY", "LUGE", "LUKE", "LULU", "LUND", "LUNG", "LURA", "LURE", + "LURK", "LUSH", "LUST", "LYLE", "LYNN", "LYON", "LYRA", "MACE", + "MADE", "MAGI", "MAID", "MAIL", "MAIN", "MAKE", "MALE", "MALI", + "MALL", "MALT", "MANA", "MANN", "MANY", "MARC", "MARE", "MARK", + "MARS", "MART", "MARY", "MASH", "MASK", "MASS", "MAST", "MATE", + "MATH", "MAUL", "MAYO", "MEAD", "MEAL", "MEAN", "MEAT", "MEEK", + "MEET", "MELD", "MELT", "MEMO", "MEND", "MENU", "MERT", "MESH", + "MESS", "MICE", "MIKE", "MILD", "MILE", "MILK", "MILL", "MILT", + "MIMI", "MIND", "MINE", "MINI", "MINK", "MINT", "MIRE", "MISS", + "MIST", "MITE", "MITT", "MOAN", "MOAT", "MOCK", "MODE", "MOLD", + "MOLE", "MOLL", "MOLT", "MONA", "MONK", "MONT", "MOOD", "MOON", + "MOOR", "MOOT", "MORE", "MORN", "MORT", "MOSS", "MOST", "MOTH", + "MOVE", "MUCH", "MUCK", "MUDD", "MUFF", "MULE", "MULL", "MURK", + "MUSH", "MUST", "MUTE", "MUTT", "MYRA", "MYTH", "NAGY", "NAIL", + "NAIR", "NAME", "NARY", "NASH", "NAVE", "NAVY", "NEAL", "NEAR", + "NEAT", "NECK", "NEED", "NEIL", "NELL", "NEON", "NERO", "NESS", + "NEST", "NEWS", "NEWT", "NIBS", "NICE", "NICK", "NILE", "NINA", + "NINE", "NOAH", "NODE", "NOEL", "NOLL", "NONE", "NOOK", "NOON", + "NORM", "NOSE", "NOTE", "NOUN", "NOVA", "NUDE", "NULL", "NUMB", + "OATH", "OBEY", "OBOE", "ODIN", "OHIO", "OILY", "OINT", "OKAY", + "OLAF", "OLDY", "OLGA", "OLIN", "OMAN", "OMEN", "OMIT", "ONCE", + "ONES", "ONLY", "ONTO", "ONUS", "ORAL", "ORGY", "OSLO", "OTIS", + "OTTO", "OUCH", "OUST", "OUTS", "OVAL", "OVEN", "OVER", "OWLY", + "OWNS", "QUAD", "QUIT", "QUOD", "RACE", "RACK", "RACY", "RAFT", + "RAGE", "RAID", "RAIL", "RAIN", "RAKE", "RANK", "RANT", "RARE", + "RASH", "RATE", "RAVE", "RAYS", "READ", "REAL", "REAM", "REAR", + "RECK", "REED", "REEF", "REEK", "REEL", "REID", "REIN", "RENA", + "REND", "RENT", "REST", "RICE", "RICH", "RICK", "RIDE", "RIFT", + "RILL", "RIME", "RING", "RINK", "RISE", "RISK", "RITE", 
"ROAD", + "ROAM", "ROAR", "ROBE", "ROCK", "RODE", "ROIL", "ROLL", "ROME", + "ROOD", "ROOF", "ROOK", "ROOM", "ROOT", "ROSA", "ROSE", "ROSS", + "ROSY", "ROTH", "ROUT", "ROVE", "ROWE", "ROWS", "RUBE", "RUBY", + "RUDE", "RUDY", "RUIN", "RULE", "RUNG", "RUNS", "RUNT", "RUSE", + "RUSH", "RUSK", "RUSS", "RUST", "RUTH", "SACK", "SAFE", "SAGE", + "SAID", "SAIL", "SALE", "SALK", "SALT", "SAME", "SAND", "SANE", + "SANG", "SANK", "SARA", "SAUL", "SAVE", "SAYS", "SCAN", "SCAR", + "SCAT", "SCOT", "SEAL", "SEAM", "SEAR", "SEAT", "SEED", "SEEK", + "SEEM", "SEEN", "SEES", "SELF", "SELL", "SEND", "SENT", "SETS", + "SEWN", "SHAG", "SHAM", "SHAW", "SHAY", "SHED", "SHIM", "SHIN", + "SHOD", "SHOE", "SHOT", "SHOW", "SHUN", "SHUT", "SICK", "SIDE", + "SIFT", "SIGH", "SIGN", "SILK", "SILL", "SILO", "SILT", "SINE", + "SING", "SINK", "SIRE", "SITE", "SITS", "SITU", "SKAT", "SKEW", + "SKID", "SKIM", "SKIN", "SKIT", "SLAB", "SLAM", "SLAT", "SLAY", + "SLED", "SLEW", "SLID", "SLIM", "SLIT", "SLOB", "SLOG", "SLOT", + "SLOW", "SLUG", "SLUM", "SLUR", "SMOG", "SMUG", "SNAG", "SNOB", + "SNOW", "SNUB", "SNUG", "SOAK", "SOAR", "SOCK", "SODA", "SOFA", + "SOFT", "SOIL", "SOLD", "SOME", "SONG", "SOON", "SOOT", "SORE", + "SORT", "SOUL", "SOUR", "SOWN", "STAB", "STAG", "STAN", "STAR", + "STAY", "STEM", "STEW", "STIR", "STOW", "STUB", "STUN", "SUCH", + "SUDS", "SUIT", "SULK", "SUMS", "SUNG", "SUNK", "SURE", "SURF", + "SWAB", "SWAG", "SWAM", "SWAN", "SWAT", "SWAY", "SWIM", "SWUM", + "TACK", "TACT", "TAIL", "TAKE", "TALE", "TALK", "TALL", "TANK", + "TASK", "TATE", "TAUT", "TEAL", "TEAM", "TEAR", "TECH", "TEEM", + "TEEN", "TEET", "TELL", "TEND", "TENT", "TERM", "TERN", "TESS", + "TEST", "THAN", "THAT", "THEE", "THEM", "THEN", "THEY", "THIN", + "THIS", "THUD", "THUG", "TICK", "TIDE", "TIDY", "TIED", "TIER", + "TILE", "TILL", "TILT", "TIME", "TINA", "TINE", "TINT", "TINY", + "TIRE", "TOAD", "TOGO", "TOIL", "TOLD", "TOLL", "TONE", "TONG", + "TONY", "TOOK", "TOOL", "TOOT", "TORE", "TORN", "TOTE", "TOUR", + "TOUT", 
"TOWN", "TRAG", "TRAM", "TRAY", "TREE", "TREK", "TRIG", + "TRIM", "TRIO", "TROD", "TROT", "TROY", "TRUE", "TUBA", "TUBE", + "TUCK", "TUFT", "TUNA", "TUNE", "TUNG", "TURF", "TURN", "TUSK", + "TWIG", "TWIN", "TWIT", "ULAN", "UNIT", "URGE", "USED", "USER", + "USES", "UTAH", "VAIL", "VAIN", "VALE", "VARY", "VASE", "VAST", + "VEAL", "VEDA", "VEIL", "VEIN", "VEND", "VENT", "VERB", "VERY", + "VETO", "VICE", "VIEW", "VINE", "VISE", "VOID", "VOLT", "VOTE", + "WACK", "WADE", "WAGE", "WAIL", "WAIT", "WAKE", "WALE", "WALK", + "WALL", "WALT", "WAND", "WANE", "WANG", "WANT", "WARD", "WARM", + "WARN", "WART", "WASH", "WAST", "WATS", "WATT", "WAVE", "WAVY", + "WAYS", "WEAK", "WEAL", "WEAN", "WEAR", "WEED", "WEEK", "WEIR", + "WELD", "WELL", "WELT", "WENT", "WERE", "WERT", "WEST", "WHAM", + "WHAT", "WHEE", "WHEN", "WHET", "WHOA", "WHOM", "WICK", "WIFE", + "WILD", "WILL", "WIND", "WINE", "WING", "WINK", "WINO", "WIRE", + "WISE", "WISH", "WITH", "WOLF", "WONT", "WOOD", "WOOL", "WORD", + "WORE", "WORK", "WORM", "WORN", "WOVE", "WRIT", "WYNN", "YALE", + "YANG", "YANK", "YARD", "YARN", "YAWL", "YAWN", "YEAH", "YEAR", + "YELL", "YOGA", "YOKE" ] + +if __name__=='__main__': + data = [('EB33F77EE73D4053', 'TIDE ITCH SLOW REIN RULE MOT'), + ('CCAC2AED591056BE4F90FD441C534766', + 'RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE'), + ('EFF81F9BFBC65350920CDD7416DE8009', + 'TROD MUTE TAIL WARM CHAR KONG HAAG CITY BORE O TEAL AWL') + ] + + for key, words in data: + print 'Trying key', key + key=binascii.a2b_hex(key) + w2=key_to_english(key) + if w2!=words: + print 'key_to_english fails on key', repr(key), ', producing', str(w2) + k2=english_to_key(words) + if k2!=key: + print 'english_to_key fails on key', repr(key), ', producing', repr(k2) + + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Util/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Util/__init__.py new file mode 100644 index 0000000..0d14768 --- /dev/null +++ 
b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Util/__init__.py @@ -0,0 +1,16 @@ +"""Miscellaneous modules + +Contains useful modules that don't belong into any of the +other Crypto.* subpackages. + +Crypto.Util.number Number-theoretic functions (primality testing, etc.) +Crypto.Util.randpool Random number generation +Crypto.Util.RFC1751 Converts between 128-bit keys and human-readable + strings of words. + +""" + +__all__ = ['randpool', 'RFC1751', 'number'] + +__revision__ = "$Id: __init__.py,v 1.4 2003/02/28 15:26:00 akuchling Exp $" + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Util/number.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Util/number.py new file mode 100644 index 0000000..9d50563 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Util/number.py @@ -0,0 +1,201 @@ +# +# number.py : Number-theoretic functions +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: number.py,v 1.13 2003/04/04 18:21:07 akuchling Exp $" + +bignum = long +try: + from Crypto.PublicKey import _fastmath +except ImportError: + _fastmath = None + +# Commented out and replaced with faster versions below +## def long2str(n): +## s='' +## while n>0: +## s=chr(n & 255)+s +## n=n>>8 +## return s + +## import types +## def str2long(s): +## if type(s)!=types.StringType: return s # Integers will be left alone +## return reduce(lambda x,y : x*256+ord(y), s, 0L) + +def size (N): + """size(N:long) : int + Returns the size of the number N in bits. 
+ """ + bits, power = 0,1L + while N >= power: + bits += 1 + power = power << 1 + return bits + +def getRandomNumber(N, randfunc): + """getRandomNumber(N:int, randfunc:callable):long + Return an N-bit random number.""" + + S = randfunc(N/8) + odd_bits = N % 8 + if odd_bits != 0: + char = ord(randfunc(1)) >> (8-odd_bits) + S = chr(char) + S + value = bytes_to_long(S) + value |= 2L ** (N-1) # Ensure high bit is set + assert size(value) >= N + return value + +def GCD(x,y): + """GCD(x:long, y:long): long + Return the GCD of x and y. + """ + x = abs(x) ; y = abs(y) + while x > 0: + x, y = y % x, x + return y + +def inverse(u, v): + """inverse(u:long, u:long):long + Return the inverse of u mod v. + """ + u3, v3 = long(u), long(v) + u1, v1 = 1L, 0L + while v3 > 0: + q=u3 / v3 + u1, v1 = v1, u1 - v1*q + u3, v3 = v3, u3 - v3*q + while u1<0: + u1 = u1 + v + return u1 + +# Given a number of bits to generate and a random generation function, +# find a prime number of the appropriate size. + +def getPrime(N, randfunc): + """getPrime(N:int, randfunc:callable):long + Return a random N-bit prime number. + """ + + number=getRandomNumber(N, randfunc) | 1 + while (not isPrime(number)): + number=number+2 + return number + +def isPrime(N): + """isPrime(N:long):bool + Return true if N is prime. + """ + if N == 1: + return 0 + if N in sieve: + return 1 + for i in sieve: + if (N % i)==0: + return 0 + + # Use the accelerator if available + if _fastmath is not None: + return _fastmath.isPrime(N) + + # Compute the highest bit that's set in N + N1 = N - 1L + n = 1L + while (n> 1L + + # Rabin-Miller test + for c in sieve[:7]: + a=long(c) ; d=1L ; t=n + while (t): # Iterate over the bits in N1 + x=(d*d) % N + if x==1L and d!=1L and d!=N1: + return 0 # Square root of 1 found + if N1 & t: + d=(x*a) % N + else: + d=x + t = t >> 1L + if d!=1L: + return 0 + return 1 + +# Small primes used for checking primality; these are all the primes +# less than 256. 
This should be enough to eliminate most of the odd +# numbers before needing to do a Rabin-Miller test at all. + +sieve=[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, + 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, + 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, + 197, 199, 211, 223, 227, 229, 233, 239, 241, 251] + +# Improved conversion functions contributed by Barry Warsaw, after +# careful benchmarking + +import struct + +def long_to_bytes(n, blocksize=0): + """long_to_bytes(n:long, blocksize:int) : string + Convert a long integer to a byte string. + + If optional blocksize is given and greater than zero, pad the front of the + byte string with binary zeros so that the length is a multiple of + blocksize. + """ + # after much testing, this algorithm was deemed to be the fastest + s = '' + n = long(n) + pack = struct.pack + while n > 0: + s = pack('>I', n & 0xffffffffL) + s + n = n >> 32 + # strip off leading zeros + for i in range(len(s)): + if s[i] != '\000': + break + else: + # only happens when n == 0 + s = '\000' + i = 0 + s = s[i:] + # add back some pad bytes. this could be done more efficiently w.r.t. the + # de-padding being done above, but sigh... + if blocksize > 0 and len(s) % blocksize: + s = (blocksize - len(s) % blocksize) * '\000' + s + return s + +def bytes_to_long(s): + """bytes_to_long(string) : long + Convert a byte string to a long integer. + + This is (essentially) the inverse of long_to_bytes(). + """ + acc = 0L + unpack = struct.unpack + length = len(s) + if length % 4: + extra = (4 - length % 4) + s = '\000' * extra + s + length = length + extra + for i in range(0, length, 4): + acc = (acc << 32) + unpack('>I', s[i:i+4])[0] + return acc + +# For backwards compatibility... 
+import warnings +def long2str(n, blocksize=0): + warnings.warn("long2str() has been replaced by long_to_bytes()") + return long_to_bytes(n, blocksize) +def str2long(s): + warnings.warn("str2long() has been replaced by bytes_to_long()") + return bytes_to_long(s) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Util/randpool.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Util/randpool.py new file mode 100644 index 0000000..467501c --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Util/randpool.py @@ -0,0 +1,421 @@ +# +# randpool.py : Cryptographically strong random number generation +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: randpool.py,v 1.14 2004/05/06 12:56:54 akuchling Exp $" + +import time, array, types, warnings, os.path +from Crypto.Util.number import long_to_bytes +try: + import Crypto.Util.winrandom as winrandom +except: + winrandom = None + +STIRNUM = 3 + +class RandomPool: + """randpool.py : Cryptographically strong random number generation. + + The implementation here is similar to the one in PGP. To be + cryptographically strong, it must be difficult to determine the RNG's + output, whether in the future or the past. This is done by using + a cryptographic hash function to "stir" the random data. + + Entropy is gathered in the same fashion as PGP; the highest-resolution + clock around is read and the data is added to the random number pool. + A conservative estimate of the entropy is then kept. + + If a cryptographically secure random source is available (/dev/urandom + on many Unixes, Windows CryptGenRandom on most Windows), then use + it. 
+ + Instance Attributes: + bits : int + Maximum size of pool in bits + bytes : int + Maximum size of pool in bytes + entropy : int + Number of bits of entropy in this pool. + + Methods: + add_event([s]) : add some entropy to the pool + get_bytes(int) : get N bytes of random data + randomize([N]) : get N bytes of randomness from external source + """ + + + def __init__(self, numbytes = 160, cipher=None, hash=None): + if hash is None: + from Crypto.Hash import SHA as hash + + # The cipher argument is vestigial; it was removed from + # version 1.1 so RandomPool would work even in the limited + # exportable subset of the code + if cipher is not None: + warnings.warn("'cipher' parameter is no longer used") + + if isinstance(hash, types.StringType): + # ugly hack to force __import__ to give us the end-path module + hash = __import__('Crypto.Hash.'+hash, + None, None, ['new']) + warnings.warn("'hash' parameter should now be a hashing module") + + self.bytes = numbytes + self.bits = self.bytes*8 + self.entropy = 0 + self._hash = hash + + # Construct an array to hold the random pool, + # initializing it to 0. + self._randpool = array.array('B', [0]*self.bytes) + + self._event1 = self._event2 = 0 + self._addPos = 0 + self._getPos = hash.digest_size + self._lastcounter=time.time() + self.__counter = 0 + + self._measureTickSize() # Estimate timer resolution + self._randomize() + + def _updateEntropyEstimate(self, nbits): + self.entropy += nbits + if self.entropy < 0: + self.entropy = 0 + elif self.entropy > self.bits: + self.entropy = self.bits + + def _randomize(self, N = 0, devname = '/dev/urandom'): + """_randomize(N, DEVNAME:device-filepath) + collects N bits of randomness from some entropy source (e.g., + /dev/urandom on Unixes that have it, Windows CryptoAPI + CryptGenRandom, etc) + DEVNAME is optional, defaults to /dev/urandom. You can change it + to /dev/random if you want to block till you get enough + entropy. 
+ """ + data = '' + if N <= 0: + nbytes = int((self.bits - self.entropy)/8+0.5) + else: + nbytes = int(N/8+0.5) + if winrandom: + # Windows CryptGenRandom provides random data. + data = winrandom.new().get_bytes(nbytes) + elif os.path.exists(devname): + # Many OSes support a /dev/urandom device + try: + f=open(devname) + data=f.read(nbytes) + f.close() + except IOError, (num, msg): + if num!=2: raise IOError, (num, msg) + # If the file wasn't found, ignore the error + if data: + self._addBytes(data) + # Entropy estimate: The number of bits of + # data obtained from the random source. + self._updateEntropyEstimate(8*len(data)) + self.stir_n() # Wash the random pool + + def randomize(self, N=0): + """randomize(N:int) + use the class entropy source to get some entropy data. + This is overridden by KeyboardRandomize(). + """ + return self._randomize(N) + + def stir_n(self, N = STIRNUM): + """stir_n(N) + stirs the random pool N times + """ + for i in xrange(N): + self.stir() + + def stir (self, s = ''): + """stir(s:string) + Mix up the randomness pool. This will call add_event() twice, + but out of paranoia the entropy attribute will not be + increased. The optional 's' parameter is a string that will + be hashed with the randomness pool. + """ + + entropy=self.entropy # Save inital entropy value + self.add_event() + + # Loop over the randomness pool: hash its contents + # along with a counter, and add the resulting digest + # back into the pool. + for i in range(self.bytes / self._hash.digest_size): + h = self._hash.new(self._randpool) + h.update(str(self.__counter) + str(i) + str(self._addPos) + s) + self._addBytes( h.digest() ) + self.__counter = (self.__counter + 1) & 0xFFFFffffL + + self._addPos, self._getPos = 0, self._hash.digest_size + self.add_event() + + # Restore the old value of the entropy. + self.entropy=entropy + + + def get_bytes (self, N): + """get_bytes(N:int) : string + Return N bytes of random data. 
+ """ + + s='' + i, pool = self._getPos, self._randpool + h=self._hash.new() + dsize = self._hash.digest_size + num = N + while num > 0: + h.update( self._randpool[i:i+dsize] ) + s = s + h.digest() + num = num - dsize + i = (i + dsize) % self.bytes + if i>1, bits+1 + if bits>8: bits=8 + + self._event1, self._event2 = event, self._event1 + + self._updateEntropyEstimate(bits) + return bits + + # Private functions + def _noise(self): + # Adds a bit of noise to the random pool, by adding in the + # current time and CPU usage of this process. + # The difference from the previous call to _noise() is taken + # in an effort to estimate the entropy. + t=time.time() + delta = (t - self._lastcounter)/self._ticksize*1e6 + self._lastcounter = t + self._addBytes(long_to_bytes(long(1000*time.time()))) + self._addBytes(long_to_bytes(long(1000*time.clock()))) + self._addBytes(long_to_bytes(long(1000*time.time()))) + self._addBytes(long_to_bytes(long(delta))) + + # Reduce delta to a maximum of 8 bits so we don't add too much + # entropy as a result of this call. + delta=delta % 0xff + return int(delta) + + + def _measureTickSize(self): + # _measureTickSize() tries to estimate a rough average of the + # resolution of time that you can see from Python. It does + # this by measuring the time 100 times, computing the delay + # between measurements, and taking the median of the resulting + # list. 
(We also hash all the times and add them to the pool) + interval = [None] * 100 + h = self._hash.new(`(id(self),id(interval))`) + + # Compute 100 differences + t=time.time() + h.update(`t`) + i = 0 + j = 0 + while i < 100: + t2=time.time() + h.update(`(i,j,t2)`) + j += 1 + delta=int((t2-t)*1e6) + if delta: + interval[i] = delta + i += 1 + t=t2 + + # Take the median of the array of intervals + interval.sort() + self._ticksize=interval[len(interval)/2] + h.update(`(interval,self._ticksize)`) + # mix in the measurement times and wash the random pool + self.stir(h.digest()) + + def _addBytes(self, s): + "XOR the contents of the string S into the random pool" + i, pool = self._addPos, self._randpool + for j in range(0, len(s)): + pool[i]=pool[i] ^ ord(s[j]) + i=(i+1) % self.bytes + self._addPos = i + + # Deprecated method names: remove in PCT 2.1 or later. + def getBytes(self, N): + warnings.warn("getBytes() method replaced by get_bytes()", + DeprecationWarning) + return self.get_bytes(N) + + def addEvent (self, event, s=""): + warnings.warn("addEvent() method replaced by add_event()", + DeprecationWarning) + return self.add_event(s + str(event)) + +class PersistentRandomPool (RandomPool): + def __init__ (self, filename=None, *args, **kwargs): + RandomPool.__init__(self, *args, **kwargs) + self.filename = filename + if filename: + try: + # the time taken to open and read the file might have + # a little disk variability, modulo disk/kernel caching... + f=open(filename, 'rb') + self.add_event() + data = f.read() + self.add_event() + # mix in the data from the file and wash the random pool + self.stir(data) + f.close() + except IOError: + # Oh, well; the file doesn't exist or is unreadable, so + # we'll just ignore it. + pass + + def save(self): + if self.filename == "": + raise ValueError, "No filename set for this object" + # wash the random pool before save, provides some forward secrecy for + # old values of the pool. 
+ self.stir_n() + f=open(self.filename, 'wb') + self.add_event() + f.write(self._randpool.tostring()) + f.close() + self.add_event() + # wash the pool again, provide some protection for future values + self.stir() + +# non-echoing Windows keyboard entry +_kb = 0 +if not _kb: + try: + import msvcrt + class KeyboardEntry: + def getch(self): + c = msvcrt.getch() + if c in ('\000', '\xe0'): + # function key + c += msvcrt.getch() + return c + def close(self, delay = 0): + if delay: + time.sleep(delay) + while msvcrt.kbhit(): + msvcrt.getch() + _kb = 1 + except: + pass + +# non-echoing Posix keyboard entry +if not _kb: + try: + import termios + class KeyboardEntry: + def __init__(self, fd = 0): + self._fd = fd + self._old = termios.tcgetattr(fd) + new = termios.tcgetattr(fd) + new[3]=new[3] & ~termios.ICANON & ~termios.ECHO + termios.tcsetattr(fd, termios.TCSANOW, new) + def getch(self): + termios.tcflush(0, termios.TCIFLUSH) # XXX Leave this in? + return os.read(self._fd, 1) + def close(self, delay = 0): + if delay: + time.sleep(delay) + termios.tcflush(self._fd, termios.TCIFLUSH) + termios.tcsetattr(self._fd, termios.TCSAFLUSH, self._old) + _kb = 1 + except: + pass + +class KeyboardRandomPool (PersistentRandomPool): + def __init__(self, *args, **kwargs): + PersistentRandomPool.__init__(self, *args, **kwargs) + + def randomize(self, N = 0): + "Adds N bits of entropy to random pool. If N is 0, fill up pool." + import os, string, time + if N <= 0: + bits = self.bits - self.entropy + else: + bits = N*8 + if bits == 0: + return + print bits,'bits of entropy are now required. Please type on the keyboard' + print 'until enough randomness has been accumulated.' + kb = KeyboardEntry() + s='' # We'll save the characters typed and add them to the pool. 
+ hash = self._hash + e = 0 + try: + while e < bits: + temp=str(bits-e).rjust(6) + os.write(1, temp) + s=s+kb.getch() + e += self.add_event(s) + os.write(1, 6*chr(8)) + self.add_event(s+hash.new(s).digest() ) + finally: + kb.close() + print '\n\007 Enough. Please wait a moment.\n' + self.stir_n() # wash the random pool. + kb.close(4) + +if __name__ == '__main__': + pool = RandomPool() + print 'random pool entropy', pool.entropy, 'bits' + pool.add_event('something') + print `pool.get_bytes(100)` + import tempfile, os + fname = tempfile.mktemp() + pool = KeyboardRandomPool(filename=fname) + print 'keyboard random pool entropy', pool.entropy, 'bits' + pool.randomize() + print 'keyboard random pool entropy', pool.entropy, 'bits' + pool.randomize(128) + pool.save() + saved = open(fname, 'rb').read() + print 'saved', `saved` + print 'pool ', `pool._randpool.tostring()` + newpool = PersistentRandomPool(fname) + print 'persistent random pool entropy', pool.entropy, 'bits' + os.remove(fname) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Util/test.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Util/test.py new file mode 100644 index 0000000..7b23e9f --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/Util/test.py @@ -0,0 +1,453 @@ +# +# test.py : Functions used for testing the modules +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: test.py,v 1.16 2004/08/13 22:24:18 akuchling Exp $" + +import binascii +import string +import testdata + +from Crypto.Cipher import * + +def die(string): + import sys + print '***ERROR: ', string +# sys.exit(0) # Will default to continuing onward... 
+ +def print_timing (size, delta, verbose): + if verbose: + if delta == 0: + print 'Unable to measure time -- elapsed time too small' + else: + print '%.2f K/sec' % (size/delta) + +def exerciseBlockCipher(cipher, verbose): + import string, time + try: + ciph = eval(cipher) + except NameError: + print cipher, 'module not available' + return None + print cipher+ ':' + str='1' # Build 128K of test data + for i in xrange(0, 17): + str=str+str + if ciph.key_size==0: ciph.key_size=16 + password = 'password12345678Extra text for password'[0:ciph.key_size] + IV = 'Test IV Test IV Test IV Test'[0:ciph.block_size] + + if verbose: print ' ECB mode:', + obj=ciph.new(password, ciph.MODE_ECB) + if obj.block_size != ciph.block_size: + die("Module and cipher object block_size don't match") + + text='1234567812345678'[0:ciph.block_size] + c=obj.encrypt(text) + if (obj.decrypt(c)!=text): die('Error encrypting "'+text+'"') + text='KuchlingKuchling'[0:ciph.block_size] + c=obj.encrypt(text) + if (obj.decrypt(c)!=text): die('Error encrypting "'+text+'"') + text='NotTodayNotEver!'[0:ciph.block_size] + c=obj.encrypt(text) + if (obj.decrypt(c)!=text): die('Error encrypting "'+text+'"') + + start=time.time() + s=obj.encrypt(str) + s2=obj.decrypt(s) + end=time.time() + if (str!=s2): + die('Error in resulting plaintext from ECB mode') + print_timing(256, end-start, verbose) + del obj + + if verbose: print ' CFB mode:', + obj1=ciph.new(password, ciph.MODE_CFB, IV) + obj2=ciph.new(password, ciph.MODE_CFB, IV) + start=time.time() + ciphertext=obj1.encrypt(str[0:65536]) + plaintext=obj2.decrypt(ciphertext) + end=time.time() + if (plaintext!=str[0:65536]): + die('Error in resulting plaintext from CFB mode') + print_timing(64, end-start, verbose) + del obj1, obj2 + + if verbose: print ' CBC mode:', + obj1=ciph.new(password, ciph.MODE_CBC, IV) + obj2=ciph.new(password, ciph.MODE_CBC, IV) + start=time.time() + ciphertext=obj1.encrypt(str) + plaintext=obj2.decrypt(ciphertext) + end=time.time() + if 
(plaintext!=str): + die('Error in resulting plaintext from CBC mode') + print_timing(256, end-start, verbose) + del obj1, obj2 + + if verbose: print ' PGP mode:', + obj1=ciph.new(password, ciph.MODE_PGP, IV) + obj2=ciph.new(password, ciph.MODE_PGP, IV) + start=time.time() + ciphertext=obj1.encrypt(str) + plaintext=obj2.decrypt(ciphertext) + end=time.time() + if (plaintext!=str): + die('Error in resulting plaintext from PGP mode') + print_timing(256, end-start, verbose) + del obj1, obj2 + + if verbose: print ' OFB mode:', + obj1=ciph.new(password, ciph.MODE_OFB, IV) + obj2=ciph.new(password, ciph.MODE_OFB, IV) + start=time.time() + ciphertext=obj1.encrypt(str) + plaintext=obj2.decrypt(ciphertext) + end=time.time() + if (plaintext!=str): + die('Error in resulting plaintext from OFB mode') + print_timing(256, end-start, verbose) + del obj1, obj2 + + def counter(length=ciph.block_size): + return length * 'a' + + if verbose: print ' CTR mode:', + obj1=ciph.new(password, ciph.MODE_CTR, counter=counter) + obj2=ciph.new(password, ciph.MODE_CTR, counter=counter) + start=time.time() + ciphertext=obj1.encrypt(str) + plaintext=obj2.decrypt(ciphertext) + end=time.time() + if (plaintext!=str): + die('Error in resulting plaintext from CTR mode') + print_timing(256, end-start, verbose) + del obj1, obj2 + + # Test the IV handling + if verbose: print ' Testing IV handling' + obj1=ciph.new(password, ciph.MODE_CBC, IV) + plaintext='Test'*(ciph.block_size/4)*3 + ciphertext1=obj1.encrypt(plaintext) + obj1.IV=IV + ciphertext2=obj1.encrypt(plaintext) + if ciphertext1!=ciphertext2: + die('Error in setting IV') + + # Test keyword arguments + obj1=ciph.new(key=password) + obj1=ciph.new(password, mode=ciph.MODE_CBC) + obj1=ciph.new(mode=ciph.MODE_CBC, key=password) + obj1=ciph.new(IV=IV, mode=ciph.MODE_CBC, key=password) + + return ciph + +def exerciseStreamCipher(cipher, verbose): + import string, time + try: + ciph = eval(cipher) + except (NameError): + print cipher, 'module not available' 
+ return None + print cipher + ':', + str='1' # Build 128K of test data + for i in xrange(0, 17): + str=str+str + key_size = ciph.key_size or 16 + password = 'password12345678Extra text for password'[0:key_size] + + obj1=ciph.new(password) + obj2=ciph.new(password) + if obj1.block_size != ciph.block_size: + die("Module and cipher object block_size don't match") + if obj1.key_size != ciph.key_size: + die("Module and cipher object key_size don't match") + + text='1234567812345678Python' + c=obj1.encrypt(text) + if (obj2.decrypt(c)!=text): die('Error encrypting "'+text+'"') + text='B1FF I2 A R3A11Y |<00L D00D!!!!!' + c=obj1.encrypt(text) + if (obj2.decrypt(c)!=text): die('Error encrypting "'+text+'"') + text='SpamSpamSpamSpamSpamSpamSpamSpamSpam' + c=obj1.encrypt(text) + if (obj2.decrypt(c)!=text): die('Error encrypting "'+text+'"') + + start=time.time() + s=obj1.encrypt(str) + str=obj2.decrypt(s) + end=time.time() + print_timing(256, end-start, verbose) + del obj1, obj2 + + return ciph + +def TestStreamModules(args=['arc4', 'XOR'], verbose=1): + import sys, string + args=map(string.lower, args) + + if 'arc4' in args: + # Test ARC4 stream cipher + arc4=exerciseStreamCipher('ARC4', verbose) + if (arc4!=None): + for entry in testdata.arc4: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=arc4.new(key) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('ARC4 failed on entry '+`entry`) + + if 'xor' in args: + # Test XOR stream cipher + XOR=exerciseStreamCipher('XOR', verbose) + if (XOR!=None): + for entry in testdata.xor: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=XOR.new(key) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('XOR failed on entry '+`entry`) + + +def TestBlockModules(args=['aes', 'arc2', 'des', 'blowfish', 'cast', 'des3', + 'idea', 'rc5'], + verbose=1): + import string + 
args=map(string.lower, args) + if 'aes' in args: + ciph=exerciseBlockCipher('AES', verbose) # AES + if (ciph!=None): + if verbose: print ' Verifying against test suite...' + for entry in testdata.aes: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('AES failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + + for entry in testdata.aes_modes: + mode, key, plain, cipher, kw = entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, mode, **kw) + obj2=ciph.new(key, mode, **kw) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('AES encrypt failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + + plain2=obj2.decrypt(ciphertext) + if plain2!=plain: + die('AES decrypt failed on entry '+`entry`) + for i in plain2: + if verbose: print hex(ord(i)), + if verbose: print + + + if 'arc2' in args: + ciph=exerciseBlockCipher('ARC2', verbose) # Alleged RC2 + if (ciph!=None): + if verbose: print ' Verifying against test suite...' + for entry in testdata.arc2: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('ARC2 failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + print + + if 'blowfish' in args: + ciph=exerciseBlockCipher('Blowfish',verbose)# Bruce Schneier's Blowfish cipher + if (ciph!=None): + if verbose: print ' Verifying against test suite...' 
+ for entry in testdata.blowfish: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('Blowfish failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + + if 'cast' in args: + ciph=exerciseBlockCipher('CAST', verbose) # CAST-128 + if (ciph!=None): + if verbose: print ' Verifying against test suite...' + for entry in testdata.cast: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('CAST failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + + if 0: + # The full-maintenance test; it requires 4 million encryptions, + # and correspondingly is quite time-consuming. I've disabled + # it; it's faster to compile block/cast.c with -DTEST and run + # the resulting program. 
+ a = b = '\x01\x23\x45\x67\x12\x34\x56\x78\x23\x45\x67\x89\x34\x56\x78\x9A' + + for i in range(0, 1000000): + obj = cast.new(b, cast.MODE_ECB) + a = obj.encrypt(a[:8]) + obj.encrypt(a[-8:]) + obj = cast.new(a, cast.MODE_ECB) + b = obj.encrypt(b[:8]) + obj.encrypt(b[-8:]) + + if a!="\xEE\xA9\xD0\xA2\x49\xFD\x3B\xA6\xB3\x43\x6F\xB8\x9D\x6D\xCA\x92": + if verbose: print 'CAST test failed: value of "a" doesn\'t match' + if b!="\xB2\xC9\x5E\xB0\x0C\x31\xAD\x71\x80\xAC\x05\xB8\xE8\x3D\x69\x6E": + if verbose: print 'CAST test failed: value of "b" doesn\'t match' + + if 'des' in args: + # Test/benchmark DES block cipher + des=exerciseBlockCipher('DES', verbose) + if (des!=None): + # Various tests taken from the DES library packaged with Kerberos V4 + obj=des.new(binascii.a2b_hex('0123456789abcdef'), des.MODE_ECB) + s=obj.encrypt('Now is t') + if (s!=binascii.a2b_hex('3fa40e8a984d4815')): + die('DES fails test 1') + obj=des.new(binascii.a2b_hex('08192a3b4c5d6e7f'), des.MODE_ECB) + s=obj.encrypt('\000\000\000\000\000\000\000\000') + if (s!=binascii.a2b_hex('25ddac3e96176467')): + die('DES fails test 2') + obj=des.new(binascii.a2b_hex('0123456789abcdef'), des.MODE_CBC, + binascii.a2b_hex('1234567890abcdef')) + s=obj.encrypt("Now is the time for all ") + if (s!=binascii.a2b_hex('e5c7cdde872bf27c43e934008c389c0f683788499a7c05f6')): + die('DES fails test 3') + obj=des.new(binascii.a2b_hex('0123456789abcdef'), des.MODE_CBC, + binascii.a2b_hex('fedcba9876543210')) + s=obj.encrypt("7654321 Now is the time for \000\000\000\000") + if (s!=binascii.a2b_hex("ccd173ffab2039f4acd8aefddfd8a1eb468e91157888ba681d269397f7fe62b4")): + die('DES fails test 4') + del obj,s + + # R. 
Rivest's test: see http://theory.lcs.mit.edu/~rivest/destest.txt + x=binascii.a2b_hex('9474B8E8C73BCA7D') + for i in range(0, 16): + obj=des.new(x, des.MODE_ECB) + if (i & 1): x=obj.decrypt(x) + else: x=obj.encrypt(x) + if x!=binascii.a2b_hex('1B1A2DDB4C642438'): + die("DES fails Rivest's test") + + if verbose: print ' Verifying against test suite...' + for entry in testdata.des: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=des.new(key, des.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('DES failed on entry '+`entry`) + for entry in testdata.des_cbc: + key, iv, plain, cipher=entry + key, iv, cipher=binascii.a2b_hex(key),binascii.a2b_hex(iv),binascii.a2b_hex(cipher) + obj1=des.new(key, des.MODE_CBC, iv) + obj2=des.new(key, des.MODE_CBC, iv) + ciphertext=obj1.encrypt(plain) + if (ciphertext!=cipher): + die('DES CBC mode failed on entry '+`entry`) + + if 'des3' in args: + ciph=exerciseBlockCipher('DES3', verbose) # Triple DES + if (ciph!=None): + if verbose: print ' Verifying against test suite...' + for entry in testdata.des3: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('DES3 failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + for entry in testdata.des3_cbc: + key, iv, plain, cipher=entry + key, iv, cipher=binascii.a2b_hex(key),binascii.a2b_hex(iv),binascii.a2b_hex(cipher) + obj1=ciph.new(key, ciph.MODE_CBC, iv) + obj2=ciph.new(key, ciph.MODE_CBC, iv) + ciphertext=obj1.encrypt(plain) + if (ciphertext!=cipher): + die('DES3 CBC mode failed on entry '+`entry`) + + if 'idea' in args: + ciph=exerciseBlockCipher('IDEA', verbose) # IDEA block cipher + if (ciph!=None): + if verbose: print ' Verifying against test suite...' 
+ for entry in testdata.idea: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('IDEA failed on entry '+`entry`) + + if 'rc5' in args: + # Ronald Rivest's RC5 algorithm + ciph=exerciseBlockCipher('RC5', verbose) + if (ciph!=None): + if verbose: print ' Verifying against test suite...' + for entry in testdata.rc5: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key[4:], ciph.MODE_ECB, + version =ord(key[0]), + word_size=ord(key[1]), + rounds =ord(key[2]) ) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('RC5 failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + + + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/__init__.py new file mode 100644 index 0000000..2324ae8 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/__init__.py @@ -0,0 +1,25 @@ + +"""Python Cryptography Toolkit + +A collection of cryptographic modules implementing various algorithms +and protocols. + +Subpackages: +Crypto.Cipher Secret-key encryption algorithms (AES, DES, ARC4) +Crypto.Hash Hashing algorithms (MD5, SHA, HMAC) +Crypto.Protocol Cryptographic protocols (Chaffing, all-or-nothing + transform). This package does not contain any + network protocols. 
+Crypto.PublicKey Public-key encryption and signature algorithms + (RSA, DSA) +Crypto.Util Various useful modules and functions (long-to-string + conversion, random number generation, number + theoretic functions) +""" + +__all__ = ['Cipher', 'Hash', 'Protocol', 'PublicKey', 'Util'] + +__version__ = '2.0.1' +__revision__ = "$Id: __init__.py,v 1.12 2005/06/14 01:20:22 akuchling Exp $" + + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/test.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/test.py new file mode 100644 index 0000000..c5ed061 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/Crypto/test.py @@ -0,0 +1,38 @@ +# +# Test script for the Python Cryptography Toolkit. +# + +__revision__ = "$Id: test.py,v 1.7 2002/07/11 14:31:19 akuchling Exp $" + +import os, sys + + +# Add the build directory to the front of sys.path +from distutils.util import get_platform +s = "build/lib.%s-%.3s" % (get_platform(), sys.version) +s = os.path.join(os.getcwd(), s) +sys.path.insert(0, s) +s = os.path.join(os.getcwd(), 'test') +sys.path.insert(0, s) + +from Crypto.Util import test + +args = sys.argv[1:] +quiet = "--quiet" in args +if quiet: args.remove('--quiet') + +if not quiet: + print '\nStream Ciphers:' + print '===============' + +if args: test.TestStreamModules(args, verbose= not quiet) +else: test.TestStreamModules(verbose= not quiet) + +if not quiet: + print '\nBlock Ciphers:' + print '==============' + +if args: test.TestBlockModules(args, verbose= not quiet) +else: test.TestBlockModules(verbose= not quiet) + + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/__init__.py new file mode 100644 index 0000000..2335b2a --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/__init__.py @@ -0,0 +1,835 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Contains classes representing Google Data elements. + + Extends Atom classes to add Google Data specific elements. +""" + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + +import os +import atom +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree + + +# XML namespaces which are often used in GData entities. +GDATA_NAMESPACE = 'http://schemas.google.com/g/2005' +GDATA_TEMPLATE = '{http://schemas.google.com/g/2005}%s' +OPENSEARCH_NAMESPACE = 'http://a9.com/-/spec/opensearchrss/1.0/' +OPENSEARCH_TEMPLATE = '{http://a9.com/-/spec/opensearchrss/1.0/}%s' +BATCH_NAMESPACE = 'http://schemas.google.com/gdata/batch' +GACL_NAMESPACE = 'http://schemas.google.com/acl/2007' +GACL_TEMPLATE = '{http://schemas.google.com/acl/2007}%s' + + +# Labels used in batch request entries to specify the desired CRUD operation. +BATCH_INSERT = 'insert' +BATCH_UPDATE = 'update' +BATCH_DELETE = 'delete' +BATCH_QUERY = 'query' + +class Error(Exception): + pass + + +class MissingRequiredParameters(Error): + pass + + +class MediaSource(object): + """GData Entries can refer to media sources, so this class provides a + place to store references to these objects along with some metadata. 
+ """ + + def __init__(self, file_handle=None, content_type=None, content_length=None, + file_path=None, file_name=None): + """Creates an object of type MediaSource. + + Args: + file_handle: A file handle pointing to the file to be encapsulated in the + MediaSource + content_type: string The MIME type of the file. Required if a file_handle + is given. + content_length: int The size of the file. Required if a file_handle is + given. + file_path: string (optional) A full path name to the file. Used in + place of a file_handle. + file_name: string The name of the file without any path information. + Required if a file_handle is given. + """ + self.file_handle = file_handle + self.content_type = content_type + self.content_length = content_length + self.file_name = file_name + + if (file_handle is None and content_type is not None and + file_path is not None): + self.setFile(file_path, content_type) + + def setFile(self, file_name, content_type): + """A helper function which can create a file handle from a given filename + and set the content type and length all at once. + + Args: + file_name: string The path and file name to the file containing the media + content_type: string A MIME type representing the type of the media + """ + + self.file_handle = open(file_name, 'rb') + self.content_type = content_type + self.content_length = os.path.getsize(file_name) + self.file_name = os.path.basename(file_name) + + +class LinkFinder(atom.LinkFinder): + """An "interface" providing methods to find link elements + + GData Entry elements often contain multiple links which differ in the rel + attribute or content type. Often, developers are interested in a specific + type of link so this class provides methods to find specific classes of + links. + + This class is used as a mixin in GData entries. 
+ """ + + def GetSelfLink(self): + """Find the first link with rel set to 'self' + + Returns: + An atom.Link or none if none of the links had rel equal to 'self' + """ + + for a_link in self.link: + if a_link.rel == 'self': + return a_link + return None + + def GetEditLink(self): + for a_link in self.link: + if a_link.rel == 'edit': + return a_link + return None + + def GetEditMediaLink(self): + """The Picasa API mistakenly returns media-edit rather than edit-media, but + this may change soon. + """ + for a_link in self.link: + if a_link.rel == 'edit-media': + return a_link + if a_link.rel == 'media-edit': + return a_link + return None + + def GetHtmlLink(self): + """Find the first link with rel of alternate and type of text/html + + Returns: + An atom.Link or None if no links matched + """ + for a_link in self.link: + if a_link.rel == 'alternate' and a_link.type == 'text/html': + return a_link + return None + + def GetPostLink(self): + """Get a link containing the POST target URL. + + The POST target URL is used to insert new entries. + + Returns: + A link object with a rel matching the POST type. 
+ """ + for a_link in self.link: + if a_link.rel == 'http://schemas.google.com/g/2005#post': + return a_link + return None + + def GetAclLink(self): + for a_link in self.link: + if a_link.rel == 'http://schemas.google.com/acl/2007#accessControlList': + return a_link + return None + + def GetFeedLink(self): + for a_link in self.link: + if a_link.rel == 'http://schemas.google.com/g/2005#feed': + return a_link + return None + + def GetNextLink(self): + for a_link in self.link: + if a_link.rel == 'next': + return a_link + return None + + def GetPrevLink(self): + for a_link in self.link: + if a_link.rel == 'previous': + return a_link + return None + + +class TotalResults(atom.AtomBase): + """opensearch:TotalResults for a GData feed""" + + _tag = 'totalResults' + _namespace = OPENSEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def TotalResultsFromString(xml_string): + return atom.CreateClassFromXMLString(TotalResults, xml_string) + + +class StartIndex(atom.AtomBase): + """The opensearch:startIndex element in GData feed""" + + _tag = 'startIndex' + _namespace = OPENSEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def StartIndexFromString(xml_string): + return atom.CreateClassFromXMLString(StartIndex, xml_string) + + +class ItemsPerPage(atom.AtomBase): + """The opensearch:itemsPerPage element in GData feed""" + + _tag = 'itemsPerPage' + _namespace = OPENSEARCH_NAMESPACE + _children = 
atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def ItemsPerPageFromString(xml_string): + return atom.CreateClassFromXMLString(ItemsPerPage, xml_string) + + +class ExtendedProperty(atom.AtomBase): + """The Google Data extendedProperty element. + + Used to store arbitrary key-value information specific to your + application. The value can either be a text string stored as an XML + attribute (.value), or an XML node (XmlBlob) as a child element. + + This element is used in the Google Calendar data API and the Google + Contacts data API. + """ + + _tag = 'extendedProperty' + _namespace = GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + _attributes['value'] = 'value' + + def __init__(self, name=None, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + def GetXmlBlobExtensionElement(self): + """Returns the XML blob as an atom.ExtensionElement. + + Returns: + An atom.ExtensionElement representing the blob's XML, or None if no + blob was set. + """ + if len(self.extension_elements) < 1: + return None + else: + return self.extension_elements[0] + + def GetXmlBlobString(self): + """Returns the XML blob as a string. + + Returns: + A string containing the blob's XML, or None if no blob was set. + """ + blob = self.GetXmlBlobExtensionElement() + if blob: + return blob.ToString() + return None + + def SetXmlBlob(self, blob): + """Sets the contents of the extendedProperty to XML as a child node. 
+ + Since the extendedProperty is only allowed one child element as an XML + blob, setting the XML blob will erase any preexisting extension elements + in this object. + + Args: + blob: str, ElementTree Element or atom.ExtensionElement representing + the XML blob stored in the extendedProperty. + """ + # Erase any existing extension_elements, clears the child nodes from the + # extendedProperty. + self.extension_elements = [] + if isinstance(blob, atom.ExtensionElement): + self.extension_elements.append(blob) + elif ElementTree.iselement(blob): + self.extension_elements.append(atom._ExtensionElementFromElementTree( + blob)) + else: + self.extension_elements.append(atom.ExtensionElementFromString(blob)) + + +def ExtendedPropertyFromString(xml_string): + return atom.CreateClassFromXMLString(ExtendedProperty, xml_string) + + +class GDataEntry(atom.Entry, LinkFinder): + """Extends Atom Entry to provide data processing""" + + _tag = atom.Entry._tag + _namespace = atom.Entry._namespace + _children = atom.Entry._children.copy() + _attributes = atom.Entry._attributes.copy() + + def __GetId(self): + return self.__id + + # This method was created to strip the unwanted whitespace from the id's + # text node. + def __SetId(self, id): + self.__id = id + if id is not None and id.text is not None: + self.__id.text = id.text.strip() + + id = property(__GetId, __SetId) + + def IsMedia(self): + """Determines whether or not an entry is a GData Media entry. + """ + if (self.GetEditMediaLink()): + return True + else: + return False + + def GetMediaURL(self): + """Returns the URL to the media content, if the entry is a media entry. + Otherwise returns None. 
+ """ + if not self.IsMedia(): + return None + else: + return self.content.src + + +def GDataEntryFromString(xml_string): + """Creates a new GDataEntry instance given a string of XML.""" + return atom.CreateClassFromXMLString(GDataEntry, xml_string) + + +class GDataFeed(atom.Feed, LinkFinder): + """A Feed from a GData service""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = atom.Feed._children.copy() + _attributes = atom.Feed._attributes.copy() + _children['{%s}totalResults' % OPENSEARCH_NAMESPACE] = ('total_results', + TotalResults) + _children['{%s}startIndex' % OPENSEARCH_NAMESPACE] = ('start_index', + StartIndex) + _children['{%s}itemsPerPage' % OPENSEARCH_NAMESPACE] = ('items_per_page', + ItemsPerPage) + # Add a conversion rule for atom:entry to make it into a GData + # Entry. + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GDataEntry]) + + def __GetId(self): + return self.__id + + def __SetId(self, id): + self.__id = id + if id is not None and id.text is not None: + self.__id.text = id.text.strip() + + id = property(__GetId, __SetId) + + def __GetGenerator(self): + return self.__generator + + def __SetGenerator(self, generator): + self.__generator = generator + if generator is not None: + self.__generator.text = generator.text.strip() + + generator = property(__GetGenerator, __SetGenerator) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, entry=None, + total_results=None, start_index=None, items_per_page=None, + extension_elements=None, extension_attributes=None, text=None): + """Constructor for Source + + Args: + author: list (optional) A list of Author instances which belong to this + class. 
+ category: list (optional) A list of Category instances + contributor: list (optional) A list on Contributor instances + generator: Generator (optional) + icon: Icon (optional) + id: Id (optional) The entry's Id element + link: list (optional) A list of Link instances + logo: Logo (optional) + rights: Rights (optional) The entry's Rights element + subtitle: Subtitle (optional) The entry's subtitle element + title: Title (optional) the entry's title element + updated: Updated (optional) the entry's updated element + entry: list (optional) A list of the Entry instances contained in the + feed. + text: String (optional) The text contents of the element. This is the + contents of the Entry's XML text node. + (Example: This is the text) + extension_elements: list (optional) A list of ExtensionElement instances + which are children of this element. + extension_attributes: dict (optional) A dictionary of strings which are + the values for additional XML attributes of this element. + """ + + self.author = author or [] + self.category = category or [] + self.contributor = contributor or [] + self.generator = generator + self.icon = icon + self.id = atom_id + self.link = link or [] + self.logo = logo + self.rights = rights + self.subtitle = subtitle + self.title = title + self.updated = updated + self.entry = entry or [] + self.total_results = total_results + self.start_index = start_index + self.items_per_page = items_per_page + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def GDataFeedFromString(xml_string): + return atom.CreateClassFromXMLString(GDataFeed, xml_string) + + +class BatchId(atom.AtomBase): + _tag = 'id' + _namespace = BATCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + +def BatchIdFromString(xml_string): + return atom.CreateClassFromXMLString(BatchId, xml_string) + + +class BatchOperation(atom.AtomBase): + _tag 
= 'operation' + _namespace = BATCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['type'] = 'type' + + def __init__(self, op_type=None, extension_elements=None, + extension_attributes=None, + text=None): + self.type = op_type + atom.AtomBase.__init__(self, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def BatchOperationFromString(xml_string): + return atom.CreateClassFromXMLString(BatchOperation, xml_string) + + +class BatchStatus(atom.AtomBase): + """The batch:status element present in a batch response entry. + + A status element contains the code (HTTP response code) and + reason as elements. In a single request these fields would + be part of the HTTP response, but in a batch request each + Entry operation has a corresponding Entry in the response + feed which includes status information. + + See http://code.google.com/apis/gdata/batch.html#Handling_Errors + """ + + _tag = 'status' + _namespace = BATCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['code'] = 'code' + _attributes['reason'] = 'reason' + _attributes['content-type'] = 'content_type' + + def __init__(self, code=None, reason=None, content_type=None, + extension_elements=None, extension_attributes=None, text=None): + self.code = code + self.reason = reason + self.content_type = content_type + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def BatchStatusFromString(xml_string): + return atom.CreateClassFromXMLString(BatchStatus, xml_string) + + +class BatchEntry(GDataEntry): + """An atom:entry for use in batch requests. + + The BatchEntry contains additional members to specify the operation to be + performed on this entry and a batch ID so that the server can reference + individual operations in the response feed. 
For more information, see: + http://code.google.com/apis/gdata/batch.html + """ + + _tag = GDataEntry._tag + _namespace = GDataEntry._namespace + _children = GDataEntry._children.copy() + _children['{%s}operation' % BATCH_NAMESPACE] = ('batch_operation', BatchOperation) + _children['{%s}id' % BATCH_NAMESPACE] = ('batch_id', BatchId) + _children['{%s}status' % BATCH_NAMESPACE] = ('batch_status', BatchStatus) + _attributes = GDataEntry._attributes.copy() + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, control=None, title=None, updated=None, + batch_operation=None, batch_id=None, batch_status=None, + extension_elements=None, extension_attributes=None, text=None): + self.batch_operation = batch_operation + self.batch_id = batch_id + self.batch_status = batch_status + GDataEntry.__init__(self, author=author, category=category, + content=content, contributor=contributor, atom_id=atom_id, link=link, + published=published, rights=rights, source=source, summary=summary, + control=control, title=title, updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +def BatchEntryFromString(xml_string): + return atom.CreateClassFromXMLString(BatchEntry, xml_string) + + +class BatchInterrupted(atom.AtomBase): + """The batch:interrupted element sent if batch request was interrupted. + + Only appears in a feed if some of the batch entries could not be processed. 
+ See: http://code.google.com/apis/gdata/batch.html#Handling_Errors + """ + + _tag = 'interrupted' + _namespace = BATCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['reason'] = 'reason' + _attributes['success'] = 'success' + _attributes['failures'] = 'failures' + _attributes['parsed'] = 'parsed' + + def __init__(self, reason=None, success=None, failures=None, parsed=None, + extension_elements=None, extension_attributes=None, text=None): + self.reason = reason + self.success = success + self.failures = failures + self.parsed = parsed + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def BatchInterruptedFromString(xml_string): + return atom.CreateClassFromXMLString(BatchInterrupted, xml_string) + + +class BatchFeed(GDataFeed): + """A feed containing a list of batch request entries.""" + + _tag = GDataFeed._tag + _namespace = GDataFeed._namespace + _children = GDataFeed._children.copy() + _attributes = GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BatchEntry]) + _children['{%s}interrupted' % BATCH_NAMESPACE] = ('interrupted', BatchInterrupted) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, entry=None, + total_results=None, start_index=None, items_per_page=None, + interrupted=None, + extension_elements=None, extension_attributes=None, text=None): + self.interrupted = interrupted + GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, start_index=start_index, + items_per_page=items_per_page, + 
extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + def AddBatchEntry(self, entry=None, id_url_string=None, + batch_id_string=None, operation_string=None): + """Logic for populating members of a BatchEntry and adding to the feed. + + + If the entry is not a BatchEntry, it is converted to a BatchEntry so + that the batch specific members will be present. + + The id_url_string can be used in place of an entry if the batch operation + applies to a URL. For example query and delete operations require just + the URL of an entry, no body is sent in the HTTP request. If an + id_url_string is sent instead of an entry, a BatchEntry is created and + added to the feed. + + This method also assigns the desired batch id to the entry so that it + can be referenced in the server's response. If the batch_id_string is + None, this method will assign a batch_id to be the index at which this + entry will be in the feed's entry list. + + Args: + entry: BatchEntry, atom.Entry, or another Entry flavor (optional) The + entry which will be sent to the server as part of the batch request. + The item must have a valid atom id so that the server knows which + entry this request references. + id_url_string: str (optional) The URL of the entry to be acted on. You + can find this URL in the text member of the atom id for an entry. + If an entry is not sent, this id will be used to construct a new + BatchEntry which will be added to the request feed. + batch_id_string: str (optional) The batch ID to be used to reference + this batch operation in the results feed. If this parameter is None, + the current length of the feed's entry array will be used as a + count. Note that batch_ids should either always be specified or + never, mixing could potentially result in duplicate batch ids. + operation_string: str (optional) The desired batch operation which will + set the batch_operation.type member of the entry. 
Options are + 'insert', 'update', 'delete', and 'query' + + Raises: + MissingRequiredParameters: Raised if neither an id_ url_string nor an + entry are provided in the request. + + Returns: + The added entry. + """ + if entry is None and id_url_string is None: + raise MissingRequiredParameters('supply either an entry or URL string') + if entry is None and id_url_string is not None: + entry = BatchEntry(atom_id=atom.Id(text=id_url_string)) + # TODO: handle cases in which the entry lacks batch_... members. + #if not isinstance(entry, BatchEntry): + # Convert the entry to a batch entry. + if batch_id_string is not None: + entry.batch_id = BatchId(text=batch_id_string) + elif entry.batch_id is None or entry.batch_id.text is None: + entry.batch_id = BatchId(text=str(len(self.entry))) + if operation_string is not None: + entry.batch_operation = BatchOperation(op_type=operation_string) + self.entry.append(entry) + return entry + + def AddInsert(self, entry, batch_id_string=None): + """Add an insert request to the operations in this batch request feed. + + If the entry doesn't yet have an operation or a batch id, these will + be set to the insert operation and a batch_id specified as a parameter. + + Args: + entry: BatchEntry The entry which will be sent in the batch feed as an + insert request. + batch_id_string: str (optional) The batch ID to be used to reference + this batch operation in the results feed. If this parameter is None, + the current length of the feed's entry array will be used as a + count. Note that batch_ids should either always be specified or + never, mixing could potentially result in duplicate batch ids. + """ + entry = self.AddBatchEntry(entry=entry, batch_id_string=batch_id_string, + operation_string=BATCH_INSERT) + + def AddUpdate(self, entry, batch_id_string=None): + """Add an update request to the list of batch operations in this feed. 
+ + Sets the operation type of the entry to insert if it is not already set + and assigns the desired batch id to the entry so that it can be + referenced in the server's response. + + Args: + entry: BatchEntry The entry which will be sent to the server as an + update (HTTP PUT) request. The item must have a valid atom id + so that the server knows which entry to replace. + batch_id_string: str (optional) The batch ID to be used to reference + this batch operation in the results feed. If this parameter is None, + the current length of the feed's entry array will be used as a + count. See also comments for AddInsert. + """ + entry = self.AddBatchEntry(entry=entry, batch_id_string=batch_id_string, + operation_string=BATCH_UPDATE) + + def AddDelete(self, url_string=None, entry=None, batch_id_string=None): + """Adds a delete request to the batch request feed. + + This method takes either the url_string which is the atom id of the item + to be deleted, or the entry itself. The atom id of the entry must be + present so that the server knows which entry should be deleted. + + Args: + url_string: str (optional) The URL of the entry to be deleted. You can + find this URL in the text member of the atom id for an entry. + entry: BatchEntry (optional) The entry to be deleted. + batch_id_string: str (optional) + + Raises: + MissingRequiredParameters: Raised if neither a url_string nor an entry + are provided in the request. + """ + entry = self.AddBatchEntry(entry=entry, id_url_string=url_string, + batch_id_string=batch_id_string, + operation_string=BATCH_DELETE) + + def AddQuery(self, url_string=None, entry=None, batch_id_string=None): + """Adds a query request to the batch request feed. + + This method takes either the url_string which is the query URL + whose results will be added to the result feed. The query URL will + be encapsulated in a BatchEntry, and you may pass in the BatchEntry + with a query URL instead of sending a url_string. 
+ + Args: + url_string: str (optional) + entry: BatchEntry (optional) + batch_id_string: str (optional) + + Raises: + MissingRequiredParameters + """ + entry = self.AddBatchEntry(entry=entry, id_url_string=url_string, + batch_id_string=batch_id_string, + operation_string=BATCH_QUERY) + + def GetBatchLink(self): + for link in self.link: + if link.rel == 'http://schemas.google.com/g/2005#batch': + return link + return None + + +def BatchFeedFromString(xml_string): + return atom.CreateClassFromXMLString(BatchFeed, xml_string) + + +class EntryLink(atom.AtomBase): + """The gd:entryLink element""" + + _tag = 'entryLink' + _namespace = GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + # The entry used to be an atom.Entry, now it is a GDataEntry. + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', GDataEntry) + _attributes['rel'] = 'rel' + _attributes['readOnly'] = 'read_only' + _attributes['href'] = 'href' + + def __init__(self, href=None, read_only=None, rel=None, + entry=None, extension_elements=None, + extension_attributes=None, text=None): + self.href = href + self.read_only = read_only + self.rel = rel + self.entry = entry + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def EntryLinkFromString(xml_string): + return atom.CreateClassFromXMLString(EntryLink, xml_string) + + +class FeedLink(atom.AtomBase): + """The gd:feedLink element""" + + _tag = 'feedLink' + _namespace = GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}feed' % atom.ATOM_NAMESPACE] = ('feed', GDataFeed) + _attributes['rel'] = 'rel' + _attributes['readOnly'] = 'read_only' + _attributes['countHint'] = 'count_hint' + _attributes['href'] = 'href' + + def __init__(self, count_hint=None, href=None, read_only=None, rel=None, + feed=None, extension_elements=None, 
extension_attributes=None, + text=None): + self.count_hint = count_hint + self.href = href + self.read_only = read_only + self.rel = rel + self.feed = feed + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def FeedLinkFromString(xml_string): + return atom.CreateClassFromXMLString(EntryLink, xml_string) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/alt/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/alt/__init__.py new file mode 100644 index 0000000..742980e --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/alt/__init__.py @@ -0,0 +1,20 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""This package's modules adapt the gdata library to run in other environments + +The first example is the appengine module which contains functions and +classes which modify a GDataService object to run on Google App Engine. +""" diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/alt/appengine.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/alt/appengine.py new file mode 100644 index 0000000..54a620f --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/alt/appengine.py @@ -0,0 +1,302 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Provides HTTP functions for gdata.service to use on Google App Engine + +AppEngineHttpClient: Provides an HTTP request method which uses App Engine's + urlfetch API. Set the http_client member of a GDataService object to an + instance of an AppEngineHttpClient to allow the gdata library to run on + Google App Engine. + +run_on_appengine: Function which will modify an existing GDataService object + to allow it to run on App Engine. It works by creating a new instance of + the AppEngineHttpClient and replacing the GDataService object's + http_client. +""" + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import StringIO +import pickle +import atom.http_interface +import atom.token_store +from google.appengine.api import urlfetch +from google.appengine.ext import db +from google.appengine.api import users +from google.appengine.api import memcache + + +def run_on_appengine(gdata_service, store_tokens=True, + single_user_mode=False): + """Modifies a GDataService object to allow it to run on App Engine. + + Args: + gdata_service: An instance of AtomService, GDataService, or any + of their subclasses which has an http_client member and a + token_store member. + store_tokens: Boolean, defaults to True. If True, the gdata_service + will attempt to add each token to it's token_store when + SetClientLoginToken or SetAuthSubToken is called. If False + the tokens will not automatically be added to the + token_store. + single_user_mode: Boolean, defaults to False. 
If True, the current_token + member of gdata_service will be set when + SetClientLoginToken or SetAuthTubToken is called. If set + to True, the current_token is set in the gdata_service + and anyone who accesses the object will use the same + token. + + Note: If store_tokens is set to False and + single_user_mode is set to False, all tokens will be + ignored, since the library assumes: the tokens should not + be stored in the datastore and they should not be stored + in the gdata_service object. This will make it + impossible to make requests which require authorization. + """ + gdata_service.http_client = AppEngineHttpClient() + gdata_service.token_store = AppEngineTokenStore() + gdata_service.auto_store_tokens = store_tokens + gdata_service.auto_set_current_token = single_user_mode + return gdata_service + + +class AppEngineHttpClient(atom.http_interface.GenericHttpClient): + def __init__(self, headers=None): + self.debug = False + self.headers = headers or {} + + def request(self, operation, url, data=None, headers=None): + """Performs an HTTP call to the server, supports GET, POST, PUT, and + DELETE. + + Usage example, perform and HTTP GET on http://www.google.com/: + import atom.http + client = atom.http.HttpClient() + http_response = client.request('GET', 'http://www.google.com/') + + Args: + operation: str The HTTP operation to be performed. This is usually one + of 'GET', 'POST', 'PUT', or 'DELETE' + data: filestream, list of parts, or other object which can be converted + to a string. Should be set to None when performing a GET or DELETE. + If data is a file-like object which can be read, this method will + read a chunk of 100K bytes at a time and send them. + If the data is a list of parts to be sent, each part will be + evaluated and sent. + url: The full URL to which the request should be sent. Can be a string + or atom.url.Url. + headers: dict of strings. HTTP headers which should be sent + in the request. 
+ """ + all_headers = self.headers.copy() + if headers: + all_headers.update(headers) + + # Construct the full payload. + # Assume that data is None or a string. + data_str = data + if data: + if isinstance(data, list): + # If data is a list of different objects, convert them all to strings + # and join them together. + converted_parts = [_convert_data_part(x) for x in data] + data_str = ''.join(converted_parts) + else: + data_str = _convert_data_part(data) + + # If the list of headers does not include a Content-Length, attempt to + # calculate it based on the data object. + if data and 'Content-Length' not in all_headers: + all_headers['Content-Length'] = str(len(data_str)) + + # Set the content type to the default value if none was set. + if 'Content-Type' not in all_headers: + all_headers['Content-Type'] = 'application/atom+xml' + + # Lookup the urlfetch operation which corresponds to the desired HTTP verb. + if operation == 'GET': + method = urlfetch.GET + elif operation == 'POST': + method = urlfetch.POST + elif operation == 'PUT': + method = urlfetch.PUT + elif operation == 'DELETE': + method = urlfetch.DELETE + else: + method = None + return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str, + method=method, headers=all_headers, follow_redirects=False)) + + +def _convert_data_part(data): + if not data or isinstance(data, str): + return data + elif hasattr(data, 'read'): + # data is a file like object, so read it completely. + return data.read() + # The data object was not a file. + # Try to convert to a string and send the data. + return str(data) + + +class HttpResponse(object): + """Translates a urlfetch resoinse to look like an hhtplib resoinse. + + Used to allow the resoinse from HttpRequest to be usable by gdata.service + methods. 
+ """ + + def __init__(self, urlfetch_response): + self.body = StringIO.StringIO(urlfetch_response.content) + self.headers = urlfetch_response.headers + self.status = urlfetch_response.status_code + self.reason = '' + + def read(self, length=None): + if not length: + return self.body.read() + else: + return self.body.read(length) + + def getheader(self, name): + if not self.headers.has_key(name): + return self.headers[name.lower()] + return self.headers[name] + + +class TokenCollection(db.Model): + """Datastore Model which associates auth tokens with the current user.""" + user = db.UserProperty() + pickled_tokens = db.BlobProperty() + + +class AppEngineTokenStore(atom.token_store.TokenStore): + """Stores the user's auth tokens in the App Engine datastore. + + Tokens are only written to the datastore if a user is signed in (if + users.get_current_user() returns a user object). + """ + def __init__(self): + pass + + def add_token(self, token): + """Associates the token with the current user and stores it. + + If there is no current user, the token will not be stored. + + Returns: + False if the token was not stored. + """ + tokens = load_auth_tokens() + if not hasattr(token, 'scopes') or not token.scopes: + return False + for scope in token.scopes: + tokens[str(scope)] = token + key = save_auth_tokens(tokens) + if key: + return True + return False + + def find_token(self, url): + """Searches the current user's collection of token for a token which can + be used for a request to the url. + + Returns: + The stored token which belongs to the current user and is valid for the + desired URL. If there is no current user, or there is no valid user + token in the datastore, a atom.http_interface.GenericToken is returned. 
+ """ + if url is None: + return None + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + tokens = load_auth_tokens() + if url in tokens: + token = tokens[url] + if token.valid_for_scope(url): + return token + else: + del tokens[url] + save_auth_tokens(tokens) + for scope, token in tokens.iteritems(): + if token.valid_for_scope(url): + return token + return atom.http_interface.GenericToken() + + def remove_token(self, token): + """Removes the token from the current user's collection in the datastore. + + Returns: + False if the token was not removed, this could be because the token was + not in the datastore, or because there is no current user. + """ + token_found = False + scopes_to_delete = [] + tokens = load_auth_tokens() + for scope, stored_token in tokens.iteritems(): + if stored_token == token: + scopes_to_delete.append(scope) + token_found = True + for scope in scopes_to_delete: + del tokens[scope] + if token_found: + save_auth_tokens(tokens) + return token_found + + def remove_all_tokens(self): + """Removes all of the current user's tokens from the datastore.""" + save_auth_tokens({}) + + +def save_auth_tokens(token_dict): + """Associates the tokens with the current user and writes to the datastore. + + If there us no current user, the tokens are not written and this function + returns None. + + Returns: + The key of the datastore entity containing the user's tokens, or None if + there was no current user. + """ + if users.get_current_user() is None: + return None + user_tokens = TokenCollection.all().filter('user =', users.get_current_user()).get() + if user_tokens: + user_tokens.pickled_tokens = pickle.dumps(token_dict) + return user_tokens.put() + else: + user_tokens = TokenCollection( + user=users.get_current_user(), + pickled_tokens=pickle.dumps(token_dict)) + return user_tokens.put() + + +def load_auth_tokens(): + """Reads a dictionary of the current user's tokens from the datastore. 
+ + If there is no current user (a user is not signed in to the app) or the user + does not have any tokens, an empty dictionary is returned. + """ + if users.get_current_user() is None: + return {} + user_tokens = TokenCollection.all().filter('user =', users.get_current_user()).get() + if user_tokens: + return pickle.loads(user_tokens.pickled_tokens) + return {} + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/apps/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/apps/__init__.py new file mode 100644 index 0000000..682e7c8 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/apps/__init__.py @@ -0,0 +1,496 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 SIOS Technology, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains objects used with Google Apps.""" + +__author__ = 'tmatsuo@sios.com (Takashi MATSUO)' + + +import atom +import gdata + + +# XML namespaces which are often used in Google Apps entity. 
+APPS_NAMESPACE = 'http://schemas.google.com/apps/2006' +APPS_TEMPLATE = '{http://schemas.google.com/apps/2006}%s' + + +class EmailList(atom.AtomBase): + """The Google Apps EmailList element""" + + _tag = 'emailList' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +def EmailListFromString(xml_string): + return atom.CreateClassFromXMLString(EmailList, xml_string) + + +class Who(atom.AtomBase): + """The Google Apps Who element""" + + _tag = 'who' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['rel'] = 'rel' + _attributes['email'] = 'email' + + def __init__(self, rel=None, email=None, extension_elements=None, + extension_attributes=None, text=None): + self.rel = rel + self.email = email + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +def WhoFromString(xml_string): + return atom.CreateClassFromXMLString(Who, xml_string) + + +class Login(atom.AtomBase): + """The Google Apps Login element""" + + _tag = 'login' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['userName'] = 'user_name' + _attributes['password'] = 'password' + _attributes['suspended'] = 'suspended' + _attributes['admin'] = 'admin' + _attributes['changePasswordAtNextLogin'] = 'change_password' + _attributes['agreedToTerms'] = 'agreed_to_terms' + _attributes['ipWhitelisted'] = 'ip_whitelisted' + _attributes['hashFunctionName'] = 'hash_function_name' + + def __init__(self, user_name=None, 
password=None, suspended=None, + ip_whitelisted=None, hash_function_name=None, + admin=None, change_password=None, agreed_to_terms=None, + extension_elements=None, extension_attributes=None, + text=None): + self.user_name = user_name + self.password = password + self.suspended = suspended + self.admin = admin + self.change_password = change_password + self.agreed_to_terms = agreed_to_terms + self.ip_whitelisted = ip_whitelisted + self.hash_function_name = hash_function_name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def LoginFromString(xml_string): + return atom.CreateClassFromXMLString(Login, xml_string) + + +class Quota(atom.AtomBase): + """The Google Apps Quota element""" + + _tag = 'quota' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['limit'] = 'limit' + + def __init__(self, limit=None, extension_elements=None, + extension_attributes=None, text=None): + self.limit = limit + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def QuotaFromString(xml_string): + return atom.CreateClassFromXMLString(Quota, xml_string) + + +class Name(atom.AtomBase): + """The Google Apps Name element""" + + _tag = 'name' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['familyName'] = 'family_name' + _attributes['givenName'] = 'given_name' + + def __init__(self, family_name=None, given_name=None, + extension_elements=None, extension_attributes=None, text=None): + self.family_name = family_name + self.given_name = given_name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def NameFromString(xml_string): + return atom.CreateClassFromXMLString(Name, 
xml_string) + + +class Nickname(atom.AtomBase): + """The Google Apps Nickname element""" + + _tag = 'nickname' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + + def __init__(self, name=None, + extension_elements=None, extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def NicknameFromString(xml_string): + return atom.CreateClassFromXMLString(Nickname, xml_string) + + +class NicknameEntry(gdata.GDataEntry): + """A Google Apps flavor of an Atom Entry for Nickname""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}login' % APPS_NAMESPACE] = ('login', Login) + _children['{%s}nickname' % APPS_NAMESPACE] = ('nickname', Nickname) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + login=None, nickname=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.login = login + self.nickname = nickname + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def NicknameEntryFromString(xml_string): + return atom.CreateClassFromXMLString(NicknameEntry, xml_string) + + +class NicknameFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Apps Nickname feed flavor of an Atom Feed""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + 
_attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [NicknameEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def NicknameFeedFromString(xml_string): + return atom.CreateClassFromXMLString(NicknameFeed, xml_string) + + +class UserEntry(gdata.GDataEntry): + """A Google Apps flavor of an Atom Entry""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}login' % APPS_NAMESPACE] = ('login', Login) + _children['{%s}name' % APPS_NAMESPACE] = ('name', Name) + _children['{%s}quota' % APPS_NAMESPACE] = ('quota', Quota) + # This child may already be defined in GDataEntry, confirm before removing. 
+ _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', Who) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + login=None, name=None, quota=None, who=None, feed_link=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.login = login + self.name = name + self.quota = quota + self.who = who + self.feed_link = feed_link or [] + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def UserEntryFromString(xml_string): + return atom.CreateClassFromXMLString(UserEntry, xml_string) + + +class UserFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Apps User feed flavor of an Atom Feed""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [UserEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + 
items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def UserFeedFromString(xml_string): + return atom.CreateClassFromXMLString(UserFeed, xml_string) + + +class EmailListEntry(gdata.GDataEntry): + """A Google Apps EmailList flavor of an Atom Entry""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}emailList' % APPS_NAMESPACE] = ('email_list', EmailList) + # Might be able to remove this _children entry. + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + email_list=None, feed_link=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.email_list = email_list + self.feed_link = feed_link or [] + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def EmailListEntryFromString(xml_string): + return atom.CreateClassFromXMLString(EmailListEntry, xml_string) + + +class EmailListFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Apps EmailList feed flavor of an Atom Feed""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [EmailListEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, 
subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def EmailListFeedFromString(xml_string): + return atom.CreateClassFromXMLString(EmailListFeed, xml_string) + + +class EmailListRecipientEntry(gdata.GDataEntry): + """A Google Apps EmailListRecipient flavor of an Atom Entry""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', Who) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + who=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.who = who + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def EmailListRecipientEntryFromString(xml_string): + return atom.CreateClassFromXMLString(EmailListRecipientEntry, xml_string) + + +class EmailListRecipientFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Apps EmailListRecipient feed flavor of an Atom Feed""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE 
+ _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [EmailListRecipientEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def EmailListRecipientFeedFromString(xml_string): + return atom.CreateClassFromXMLString(EmailListRecipientFeed, xml_string) + + +class Property(atom.AtomBase): + """The Google Apps Property element""" + + _tag = 'property' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + _attributes['value'] = 'value' + + def __init__(self, name=None, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def PropertyFromString(xml_string): + return atom.CreateClassFromXMLString(Property, xml_string) + + +class PropertyEntry(gdata.GDataEntry): + """A Google Apps Property flavor of an Atom Entry""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = 
gdata.GDataEntry._attributes.copy() + _children['{%s}property' % APPS_NAMESPACE] = ('property', [Property]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + property=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.property = property + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def PropertyEntryFromString(xml_string): + return atom.CreateClassFromXMLString(PropertyEntry, xml_string) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/apps/emailsettings/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/apps/emailsettings/__init__.py new file mode 100644 index 0000000..275c6a0 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/apps/emailsettings/__init__.py @@ -0,0 +1,15 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/apps/emailsettings/service.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/apps/emailsettings/service.py new file mode 100644 index 0000000..1bf3b0d --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/apps/emailsettings/service.py @@ -0,0 +1,250 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Allow Google Apps domain administrators to set users' email settings. + + EmailSettingsService: Set various email settings. +""" + +__author__ = 'google-apps-apis@googlegroups.com' + + +import gdata.apps +import gdata.apps.service +import gdata.service + + +API_VER='2.0' +# Forwarding and POP3 options +KEEP='KEEP' +ARCHIVE='ARCHIVE' +DELETE='DELETE' +ALL_MAIL='ALL_MAIL' +MAIL_FROM_NOW_ON='MAIL_FROM_NOW_ON' + + +class EmailSettingsService(gdata.apps.service.PropertyService): + """Client for the Google Apps Email Settings service.""" + + def _serviceUrl(self, setting_id, username, domain=None): + if domain is None: + domain = self.domain + return '/a/feeds/emailsettings/%s/%s/%s/%s' % (API_VER, domain, username, + setting_id) + + def _bool2str(self, b): + if b is None: + return None + return str(b is True).lower() + + def CreateLabel(self, username, label): + """Create a label. + + Args: + username: User to create label for. + label: Label to create. + + Returns: + A dict containing the result of the create operation. 
+ """ + uri = self._serviceUrl('label', username) + properties = {'label': label} + return self._PostProperties(uri, properties) + + def CreateFilter(self, username, from_=None, to=None, subject=None, + has_the_word=None, does_not_have_the_word=None, + has_attachment=None, label=None, should_mark_as_read=None, + should_archive=None): + """Create a filter. + + Args: + username: User to create filter for. + from_: Filter from string. + to: Filter to string. + subject: Filter subject. + has_the_word: Words to filter in. + does_not_have_the_word: Words to filter out. + has_attachment: Boolean for message having attachment. + label: Label to apply. + should_mark_as_read: Boolean for marking message as read. + should_archive: Boolean for archiving message. + + Returns: + A dict containing the result of the create operation. + """ + uri = self._serviceUrl('filter', username) + properties = {} + properties['from'] = from_ + properties['to'] = to + properties['subject'] = subject + properties['hasTheWord'] = has_the_word + properties['doesNotHaveTheWord'] = does_not_have_the_word + properties['hasAttachment'] = self._bool2str(has_attachment) + properties['label'] = label + properties['shouldMarkAsRead'] = self._bool2str(should_mark_as_read) + properties['shouldArchive'] = self._bool2str(should_archive) + return self._PostProperties(uri, properties) + + def CreateSendAsAlias(self, username, name, address, reply_to=None, + make_default=None): + """Create alias to send mail as. + + Args: + username: User to create alias for. + name: Name of alias. + address: Email address to send from. + reply_to: Email address to reply to. + make_default: Boolean for whether this is the new default sending alias. + + Returns: + A dict containing the result of the create operation. 
+ """ + uri = self._serviceUrl('sendas', username) + properties = {} + properties['name'] = name + properties['address'] = address + properties['replyTo'] = reply_to + properties['makeDefault'] = self._bool2str(make_default) + return self._PostProperties(uri, properties) + + def UpdateForwarding(self, username, enable, forward_to=None, action=None): + """Update forwarding settings. + + Args: + username: User to update forwarding for. + enable: Boolean whether to enable this forwarding rule. + forward_to: Email address to forward to. + action: Action to take after forwarding. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('forwarding', username) + properties = {} + properties['enable'] = self._bool2str(enable) + if enable is True: + properties['forwardTo'] = forward_to + properties['action'] = action + return self._PutProperties(uri, properties) + + def UpdatePop(self, username, enable, enable_for=None, action=None): + """Update POP3 settings. + + Args: + username: User to update POP3 settings for. + enable: Boolean whether to enable POP3. + enable_for: Which messages to make available via POP3. + action: Action to take after user retrieves email via POP3. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('pop', username) + properties = {} + properties['enable'] = self._bool2str(enable) + if enable is True: + properties['enableFor'] = enable_for + properties['action'] = action + return self._PutProperties(uri, properties) + + def UpdateImap(self, username, enable): + """Update IMAP settings. + + Args: + username: User to update IMAP settings for. + enable: Boolean whether to enable IMAP. + + Returns: + A dict containing the result of the update operation. 
+ """ + uri = self._serviceUrl('imap', username) + properties = {'enable': self._bool2str(enable)} + return self._PutProperties(uri, properties) + + def UpdateVacation(self, username, enable, subject=None, message=None, + contacts_only=None): + """Update vacation settings. + + Args: + username: User to update vacation settings for. + enable: Boolean whether to enable vacation responses. + subject: Vacation message subject. + message: Vacation message body. + contacts_only: Boolean whether to send message only to contacts. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('vacation', username) + properties = {} + properties['enable'] = self._bool2str(enable) + if enable is True: + properties['subject'] = subject + properties['message'] = message + properties['contactsOnly'] = self._bool2str(contacts_only) + return self._PutProperties(uri, properties) + + def UpdateSignature(self, username, signature): + """Update signature. + + Args: + username: User to update signature for. + signature: Signature string. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('signature', username) + properties = {'signature': signature} + return self._PutProperties(uri, properties) + + def UpdateLanguage(self, username, language): + """Update user interface language. + + Args: + username: User to update language for. + language: Language code. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('language', username) + properties = {'language': language} + return self._PutProperties(uri, properties) + + def UpdateGeneral(self, username, page_size=None, shortcuts=None, arrows=None, + snippets=None, unicode=None): + """Update general settings. + + Args: + username: User to update general settings for. + page_size: Number of messages to show. + shortcuts: Boolean whether shortcuts are enabled. + arrows: Boolean whether arrows are enabled. 
+ snippets: Boolean whether snippets are enabled. + unicode: Wheter unicode is enabled. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('general', username) + properties = {} + properties['pageSize'] = str(page_size) + properties['shortcuts'] = self._bool2str(shortcuts) + properties['arrows'] = self._bool2str(arrows) + properties['snippets'] = self._bool2str(snippets) + properties['unicode'] = self._bool2str(unicode) + return self._PutProperties(uri, properties) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/apps/migration/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/apps/migration/__init__.py new file mode 100644 index 0000000..9892671 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/apps/migration/__init__.py @@ -0,0 +1,212 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains objects used with Google Apps.""" + +__author__ = 'google-apps-apis@googlegroups.com' + + +import atom +import gdata + + +# XML namespaces which are often used in Google Apps entity. 
+APPS_NAMESPACE = 'http://schemas.google.com/apps/2006' +APPS_TEMPLATE = '{http://schemas.google.com/apps/2006}%s' + + +class Rfc822Msg(atom.AtomBase): + """The Migration rfc822Msg element.""" + + _tag = 'rfc822Msg' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['encoding'] = 'encoding' + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.encoding = 'base64' + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def Rfc822MsgFromString(xml_string): + """Parse in the Rfc822 message from the XML definition.""" + + return atom.CreateClassFromXMLString(Rfc822Msg, xml_string) + + +class MailItemProperty(atom.AtomBase): + """The Migration mailItemProperty element.""" + + _tag = 'mailItemProperty' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def MailItemPropertyFromString(xml_string): + """Parse in the MailItemProperty from the XML definition.""" + + return atom.CreateClassFromXMLString(MailItemProperty, xml_string) + + +class Label(atom.AtomBase): + """The Migration label element.""" + + _tag = 'label' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['labelName'] = 'label_name' + + def __init__(self, label_name=None, + extension_elements=None, extension_attributes=None, + text=None): + self.label_name = label_name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = 
extension_attributes or {} + + +def LabelFromString(xml_string): + """Parse in the mailItemProperty from the XML definition.""" + + return atom.CreateClassFromXMLString(Label, xml_string) + + +class MailEntry(gdata.GDataEntry): + """A Google Migration flavor of an Atom Entry.""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}rfc822Msg' % APPS_NAMESPACE] = ('rfc822_msg', Rfc822Msg) + _children['{%s}mailItemProperty' % APPS_NAMESPACE] = ('mail_item_property', + [MailItemProperty]) + _children['{%s}label' % APPS_NAMESPACE] = ('label', [Label]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + rfc822_msg=None, mail_item_property=None, label=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.rfc822_msg = rfc822_msg + self.mail_item_property = mail_item_property + self.label = label + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def MailEntryFromString(xml_string): + """Parse in the MailEntry from the XML definition.""" + + return atom.CreateClassFromXMLString(MailEntry, xml_string) + + +class BatchMailEntry(gdata.BatchEntry): + """A Google Migration flavor of an Atom Entry.""" + + _tag = gdata.BatchEntry._tag + _namespace = gdata.BatchEntry._namespace + _children = gdata.BatchEntry._children.copy() + _attributes = gdata.BatchEntry._attributes.copy() + _children['{%s}rfc822Msg' % APPS_NAMESPACE] = ('rfc822_msg', Rfc822Msg) + _children['{%s}mailItemProperty' % APPS_NAMESPACE] = ('mail_item_property', + 
[MailItemProperty]) + _children['{%s}label' % APPS_NAMESPACE] = ('label', [Label]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + rfc822_msg=None, mail_item_property=None, label=None, + batch_operation=None, batch_id=None, batch_status=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.BatchEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + batch_operation=batch_operation, + batch_id=batch_id, batch_status=batch_status, + title=title, updated=updated) + self.rfc822_msg = rfc822_msg or None + self.mail_item_property = mail_item_property or [] + self.label = label or [] + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def BatchMailEntryFromString(xml_string): + """Parse in the BatchMailEntry from the XML definition.""" + + return atom.CreateClassFromXMLString(BatchMailEntry, xml_string) + + +class BatchMailEventFeed(gdata.BatchFeed): + """A Migration event feed flavor of an Atom Feed.""" + + _tag = gdata.BatchFeed._tag + _namespace = gdata.BatchFeed._namespace + _children = gdata.BatchFeed._children.copy() + _attributes = gdata.BatchFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BatchMailEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, interrupted=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.BatchFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, 
atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + interrupted=interrupted, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def BatchMailEventFeedFromString(xml_string): + """Parse in the BatchMailEventFeed from the XML definition.""" + + return atom.CreateClassFromXMLString(BatchMailEventFeed, xml_string) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/apps/migration/service.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/apps/migration/service.py new file mode 100644 index 0000000..6319995 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/apps/migration/service.py @@ -0,0 +1,129 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains the methods to import mail via Google Apps Email Migration API. + + MigrationService: Provides methids to import mail. +""" + +__author__ = 'google-apps-apis@googlegroups.com' + + +import base64 +import gdata +import gdata.apps.service +import gdata.service +from gdata.apps import migration + + +API_VER = '2.0' + + +class MigrationService(gdata.apps.service.AppsService): + """Client for the EMAPI migration service. 
Use either ImportMail to import + one message at a time, or AddBatchEntry and SubmitBatch to import a batch of + messages at a time. + """ + def __init__(self, email=None, password=None, domain=None, source=None, + server='apps-apis.google.com', additional_headers=None): + gdata.apps.service.AppsService.__init__( + self, email=email, password=password, domain=domain, source=source, + server=server, additional_headers=additional_headers) + self.mail_batch = migration.BatchMailEventFeed() + + def _BaseURL(self): + return '/a/feeds/migration/%s/%s' % (API_VER, self.domain) + + def ImportMail(self, user_name, mail_message, mail_item_properties, + mail_labels): + """Import a single mail message. + + Args: + user_name: The username to import messages to. + mail_message: An RFC822 format email message. + mail_item_properties: A list of Gmail properties to apply to the message. + mail_labels: A list of labels to apply to the message. + + Returns: + A MailEntry representing the successfully imported message. + + Raises: + AppsForYourDomainException: An error occurred importing the message. + """ + uri = '%s/%s/mail' % (self._BaseURL(), user_name) + + mail_entry = migration.MailEntry() + mail_entry.rfc822_msg = migration.Rfc822Msg(text=(base64.b64encode( + mail_message))) + mail_entry.rfc822_msg.encoding = 'base64' + mail_entry.mail_item_property = map( + lambda x: migration.MailItemProperty(value=x), mail_item_properties) + mail_entry.label = map(lambda x: migration.Label(label_name=x), + mail_labels) + + try: + return migration.MailEntryFromString(str(self.Post(mail_entry, uri))) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + + def AddBatchEntry(self, mail_message, mail_item_properties, + mail_labels): + """Add a message to the current batch that you later will submit. + + Args: + mail_message: An RFC822 format email message. + mail_item_properties: A list of Gmail properties to apply to the message. 
+ mail_labels: A list of labels to apply to the message. + + Returns: + The length of the MailEntry representing the message. + """ + mail_entry = migration.BatchMailEntry() + mail_entry.rfc822_msg = migration.Rfc822Msg(text=(base64.b64encode( + mail_message))) + mail_entry.rfc822_msg.encoding = 'base64' + mail_entry.mail_item_property = map( + lambda x: migration.MailItemProperty(value=x), mail_item_properties) + mail_entry.label = map(lambda x: migration.Label(label_name=x), + mail_labels) + + self.mail_batch.AddBatchEntry(mail_entry) + + return len(str(mail_entry)) + + def SubmitBatch(self, user_name): + """Send a all the mail items you have added to the batch to the server. + + Args: + user_name: The username to import messages to. + + Returns: + A HTTPResponse from the web service call. + + Raises: + AppsForYourDomainException: An error occurred importing the batch. + """ + uri = '%s/%s/mail/batch' % (self._BaseURL(), user_name) + + try: + self.result = self.Post(self.mail_batch, uri, + converter=migration.BatchMailEventFeedFromString) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + + self.mail_batch = migration.BatchMailEventFeed() + + return self.result diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/apps/service.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/apps/service.py new file mode 100644 index 0000000..fdfb712 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/apps/service.py @@ -0,0 +1,436 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 SIOS Technology, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__author__ = 'tmatsuo@sios.com (Takashi MATSUO)' + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import urllib +import gdata +import atom.service +import gdata.service +import gdata.apps +import atom + +API_VER="2.0" +HTTP_OK=200 + +UNKOWN_ERROR=1000 +USER_DELETED_RECENTLY=1100 +USER_SUSPENDED=1101 +DOMAIN_USER_LIMIT_EXCEEDED=1200 +DOMAIN_ALIAS_LIMIT_EXCEEDED=1201 +DOMAIN_SUSPENDED=1202 +DOMAIN_FEATURE_UNAVAILABLE=1203 +ENTITY_EXISTS=1300 +ENTITY_DOES_NOT_EXIST=1301 +ENTITY_NAME_IS_RESERVED=1302 +ENTITY_NAME_NOT_VALID=1303 +INVALID_GIVEN_NAME=1400 +INVALID_FAMILY_NAME=1401 +INVALID_PASSWORD=1402 +INVALID_USERNAME=1403 +INVALID_HASH_FUNCTION_NAME=1404 +INVALID_HASH_DIGGEST_LENGTH=1405 +INVALID_EMAIL_ADDRESS=1406 +INVALID_QUERY_PARAMETER_VALUE=1407 +TOO_MANY_RECIPIENTS_ON_EMAIL_LIST=1500 + +DEFAULT_QUOTA_LIMIT='2048' + +class Error(Exception): + pass + +class AppsForYourDomainException(Error): + + def __init__(self, response): + + Error.__init__(self, response) + try: + self.element_tree = ElementTree.fromstring(response['body']) + self.error_code = int(self.element_tree[0].attrib['errorCode']) + self.reason = self.element_tree[0].attrib['reason'] + self.invalidInput = self.element_tree[0].attrib['invalidInput'] + except: + self.error_code = UNKOWN_ERROR + +class AppsService(gdata.service.GDataService): + """Client for the Google Apps Provisioning 
service.""" + + def __init__(self, email=None, password=None, domain=None, source=None, + server='apps-apis.google.com', additional_headers=None): + gdata.service.GDataService.__init__(self, email=email, password=password, + service='apps', source=source, + server=server, + additional_headers=additional_headers) + self.ssl = True + self.port = 443 + self.domain = domain + + def _baseURL(self): + return "/a/feeds/%s" % self.domain + + def GetGeneratorFromLinkFinder(self, link_finder, func): + """returns a generator for pagination""" + yield link_finder + next = link_finder.GetNextLink() + while next is not None: + next_feed = func(str(self.Get(next.href))) + yield next_feed + next = next_feed.GetNextLink() + + def AddAllElementsFromAllPages(self, link_finder, func): + """retrieve all pages and add all elements""" + next = link_finder.GetNextLink() + while next is not None: + next_feed = func(str(self.Get(next.href))) + for a_entry in next_feed.entry: + link_finder.entry.append(a_entry) + next = next_feed.GetNextLink() + return link_finder + + def RetrievePageOfEmailLists(self, start_email_list_name=None): + """Retrieve one page of email list""" + + uri = "%s/emailList/%s" % (self._baseURL(), API_VER) + if start_email_list_name is not None: + uri += "?startEmailListName=%s" % start_email_list_name + try: + return gdata.apps.EmailListFeedFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrieveAllEmailLists(self): + """Retrieve all email list of a domain.""" + + ret = self.RetrievePageOfEmailLists() + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.EmailListFeedFromString) + + def RetrieveEmailList(self, list_name): + """Retreive a single email list by the list's name.""" + + uri = "%s/emailList/%s/%s" % ( + self._baseURL(), API_VER, list_name) + try: + return self.Get(uri, converter=gdata.apps.EmailListEntryFromString) + except gdata.service.RequestError, e: + raise 
AppsForYourDomainException(e.args[0]) + + def RetrieveEmailLists(self, recipient): + """Retrieve All Email List Subscriptions for an Email Address.""" + + uri = "%s/emailList/%s?recipient=%s" % ( + self._baseURL(), API_VER, recipient) + try: + ret = gdata.apps.EmailListFeedFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.EmailListFeedFromString) + + def RemoveRecipientFromEmailList(self, recipient, list_name): + """Remove recipient from email list.""" + + uri = "%s/emailList/%s/%s/recipient/%s" % ( + self._baseURL(), API_VER, list_name, recipient) + try: + self.Delete(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrievePageOfRecipients(self, list_name, start_recipient=None): + """Retrieve one page of recipient of an email list. """ + + uri = "%s/emailList/%s/%s/recipient" % ( + self._baseURL(), API_VER, list_name) + + if start_recipient is not None: + uri += "?startRecipient=%s" % start_recipient + try: + return gdata.apps.EmailListRecipientFeedFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrieveAllRecipients(self, list_name): + """Retrieve all recipient of an email list.""" + + ret = self.RetrievePageOfRecipients(list_name) + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.EmailListRecipientFeedFromString) + + def AddRecipientToEmailList(self, recipient, list_name): + """Add a recipient to a email list.""" + + uri = "%s/emailList/%s/%s/recipient" % ( + self._baseURL(), API_VER, list_name) + recipient_entry = gdata.apps.EmailListRecipientEntry() + recipient_entry.who = gdata.apps.Who(email=recipient) + + try: + return gdata.apps.EmailListRecipientEntryFromString( + str(self.Post(recipient_entry, uri))) + except gdata.service.RequestError, e: + raise 
AppsForYourDomainException(e.args[0]) + + def DeleteEmailList(self, list_name): + """Delete a email list""" + + uri = "%s/emailList/%s/%s" % (self._baseURL(), API_VER, list_name) + try: + self.Delete(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def CreateEmailList(self, list_name): + """Create a email list. """ + + uri = "%s/emailList/%s" % (self._baseURL(), API_VER) + email_list_entry = gdata.apps.EmailListEntry() + email_list_entry.email_list = gdata.apps.EmailList(name=list_name) + try: + return gdata.apps.EmailListEntryFromString( + str(self.Post(email_list_entry, uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def DeleteNickname(self, nickname): + """Delete a nickname""" + + uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname) + try: + self.Delete(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrievePageOfNicknames(self, start_nickname=None): + """Retrieve one page of nicknames in the domain""" + + uri = "%s/nickname/%s" % (self._baseURL(), API_VER) + if start_nickname is not None: + uri += "?startNickname=%s" % start_nickname + try: + return gdata.apps.NicknameFeedFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrieveAllNicknames(self): + """Retrieve all nicknames in the domain""" + + ret = self.RetrievePageOfNicknames() + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.NicknameFeedFromString) + + def RetrieveNicknames(self, user_name): + """Retrieve nicknames of the user""" + + uri = "%s/nickname/%s?username=%s" % (self._baseURL(), API_VER, user_name) + try: + ret = gdata.apps.NicknameFeedFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + # pagination + return self.AddAllElementsFromAllPages( + ret, 
gdata.apps.NicknameFeedFromString) + + def RetrieveNickname(self, nickname): + """Retrieve a nickname. + + Args: + nickname: string The nickname to retrieve + + Returns: + gdata.apps.NicknameEntry + """ + + uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname) + try: + return gdata.apps.NicknameEntryFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def CreateNickname(self, user_name, nickname): + """Create a nickname""" + + uri = "%s/nickname/%s" % (self._baseURL(), API_VER) + nickname_entry = gdata.apps.NicknameEntry() + nickname_entry.login = gdata.apps.Login(user_name=user_name) + nickname_entry.nickname = gdata.apps.Nickname(name=nickname) + + try: + return gdata.apps.NicknameEntryFromString( + str(self.Post(nickname_entry, uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def DeleteUser(self, user_name): + """Delete a user account""" + + uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name) + try: + return self.Delete(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def UpdateUser(self, user_name, user_entry): + """Update a user account.""" + + uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name) + try: + return gdata.apps.UserEntryFromString(str(self.Put(user_entry, uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def CreateUser(self, user_name, family_name, given_name, password, + suspended='false', quota_limit=None, + password_hash_function=None): + """Create a user account. 
""" + + uri = "%s/user/%s" % (self._baseURL(), API_VER) + user_entry = gdata.apps.UserEntry() + user_entry.login = gdata.apps.Login( + user_name=user_name, password=password, suspended=suspended, + hash_function_name=password_hash_function) + user_entry.name = gdata.apps.Name(family_name=family_name, + given_name=given_name) + if quota_limit is not None: + user_entry.quota = gdata.apps.Quota(limit=str(quota_limit)) + + try: + return gdata.apps.UserEntryFromString(str(self.Post(user_entry, uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def SuspendUser(self, user_name): + user_entry = self.RetrieveUser(user_name) + if user_entry.login.suspended != 'true': + user_entry.login.suspended = 'true' + user_entry = self.UpdateUser(user_name, user_entry) + return user_entry + + def RestoreUser(self, user_name): + user_entry = self.RetrieveUser(user_name) + if user_entry.login.suspended != 'false': + user_entry.login.suspended = 'false' + user_entry = self.UpdateUser(user_name, user_entry) + return user_entry + + def RetrieveUser(self, user_name): + """Retrieve an user account. 
+ + Args: + user_name: string The user name to retrieve + + Returns: + gdata.apps.UserEntry + """ + + uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name) + try: + return gdata.apps.UserEntryFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrievePageOfUsers(self, start_username=None): + """Retrieve one page of users in this domain.""" + + uri = "%s/user/%s" % (self._baseURL(), API_VER) + if start_username is not None: + uri += "?startUsername=%s" % start_username + try: + return gdata.apps.UserFeedFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def GetGeneratorForAllUsers(self): + """Retrieve a generator for all users in this domain.""" + first_page = self.RetrievePageOfUsers() + return self.GetGeneratorFromLinkFinder(first_page, + gdata.apps.UserFeedFromString) + + def RetrieveAllUsers(self): + """Retrieve all users in this domain. 
OBSOLETE""" + + ret = self.RetrievePageOfUsers() + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.UserFeedFromString) + + +class PropertyService(gdata.service.GDataService): + """Client for the Google Apps Property service.""" + + def __init__(self, email=None, password=None, domain=None, source=None, + server='apps-apis.google.com', additional_headers=None): + gdata.service.GDataService.__init__(self, email=email, password=password, + service='apps', source=source, + server=server, + additional_headers=additional_headers) + self.ssl = True + self.port = 443 + self.domain = domain + + def _GetPropertyEntry(self, properties): + property_entry = gdata.apps.PropertyEntry() + property = [] + for name, value in properties.iteritems(): + if name is not None and value is not None: + property.append(gdata.apps.Property(name=name, value=value)) + property_entry.property = property + return property_entry + + def _PropertyEntry2Dict(self, property_entry): + properties = {} + for i, property in enumerate(property_entry.property): + properties[property.name] = property.value + return properties + + def _GetProperties(self, uri): + try: + return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString( + str(self.Get(uri)))) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + + def _PostProperties(self, uri, properties): + property_entry = self._GetPropertyEntry(properties) + try: + return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString( + str(self.Post(property_entry, uri)))) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + + def _PutProperties(self, uri, properties): + property_entry = self._GetPropertyEntry(properties) + try: + return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString( + str(self.Put(property_entry, uri)))) + except gdata.service.RequestError, e: + raise 
gdata.apps.service.AppsForYourDomainException(e.args[0]) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/auth.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/auth.py new file mode 100644 index 0000000..803c247 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/auth.py @@ -0,0 +1,929 @@ +#!/usr/bin/python +# +# Copyright (C) 2007, 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import cgi +import math +import random +import re +import time +import types +import urllib +import atom.http_interface +import atom.token_store +import atom.url +import gdata.oauth as oauth +import gdata.oauth.rsa as oauth_rsa +import gdata.tlslite.utils.keyfactory as keyfactory +import gdata.tlslite.utils.cryptomath as cryptomath + +__author__ = 'api.jscudder (Jeff Scudder)' + + +PROGRAMMATIC_AUTH_LABEL = 'GoogleLogin auth=' +AUTHSUB_AUTH_LABEL = 'AuthSub token=' + + +"""This module provides functions and objects used with Google authentication. + +Details on Google authorization mechanisms used with the Google Data APIs can +be found here: +http://code.google.com/apis/gdata/auth.html +http://code.google.com/apis/accounts/ + +The essential functions are the following. +Related to ClientLogin: + generate_client_login_request_body: Constructs the body of an HTTP request to + obtain a ClientLogin token for a specific + service. 
+ extract_client_login_token: Creates a ClientLoginToken with the token from a + success response to a ClientLogin request. + get_captcha_challenge: If the server responded to the ClientLogin request + with a CAPTCHA challenge, this method extracts the + CAPTCHA URL and identifying CAPTCHA token. + +Related to AuthSub: + generate_auth_sub_url: Constructs a full URL for a AuthSub request. The + user's browser must be sent to this Google Accounts + URL and redirected back to the app to obtain the + AuthSub token. + extract_auth_sub_token_from_url: Once the user's browser has been + redirected back to the web app, use this + function to create an AuthSubToken with + the correct authorization token and scope. + token_from_http_body: Extracts the AuthSubToken value string from the + server's response to an AuthSub session token upgrade + request. +""" + +def generate_client_login_request_body(email, password, service, source, + account_type='HOSTED_OR_GOOGLE', captcha_token=None, + captcha_response=None): + """Creates the body of the autentication request + + See http://code.google.com/apis/accounts/AuthForInstalledApps.html#Request + for more details. + + Args: + email: str + password: str + service: str + source: str + account_type: str (optional) Defaul is 'HOSTED_OR_GOOGLE', other valid + values are 'GOOGLE' and 'HOSTED' + captcha_token: str (optional) + captcha_response: str (optional) + + Returns: + The HTTP body to send in a request for a client login token. + """ + # Create a POST body containing the user's credentials. + request_fields = {'Email': email, + 'Passwd': password, + 'accountType': account_type, + 'service': service, + 'source': source} + if captcha_token and captcha_response: + # Send the captcha token and response as part of the POST body if the + # user is responding to a captch challenge. 
+ request_fields['logintoken'] = captcha_token + request_fields['logincaptcha'] = captcha_response + return urllib.urlencode(request_fields) + + +GenerateClientLoginRequestBody = generate_client_login_request_body + + +def GenerateClientLoginAuthToken(http_body): + """Returns the token value to use in Authorization headers. + + Reads the token from the server's response to a Client Login request and + creates header value to use in requests. + + Args: + http_body: str The body of the server's HTTP response to a Client Login + request + + Returns: + The value half of an Authorization header. + """ + token = get_client_login_token(http_body) + if token: + return 'GoogleLogin auth=%s' % token + return None + + +def get_client_login_token(http_body): + """Returns the token value for a ClientLoginToken. + + Reads the token from the server's response to a Client Login request and + creates the token value string to use in requests. + + Args: + http_body: str The body of the server's HTTP response to a Client Login + request + + Returns: + The token value string for a ClientLoginToken. + """ + for response_line in http_body.splitlines(): + if response_line.startswith('Auth='): + # Strip off the leading Auth= and return the Authorization value. + return response_line[5:] + return None + + +def extract_client_login_token(http_body, scopes): + """Parses the server's response and returns a ClientLoginToken. + + Args: + http_body: str The body of the server's HTTP response to a Client Login + request. It is assumed that the login request was successful. + scopes: list containing atom.url.Urls or strs. The scopes list contains + all of the partial URLs under which the client login token is + valid. For example, if scopes contains ['http://example.com/foo'] + then the client login token would be valid for + http://example.com/foo/bar/baz + + Returns: + A ClientLoginToken which is valid for the specified scopes. 
+ """ + token_string = get_client_login_token(http_body) + token = ClientLoginToken(scopes=scopes) + token.set_token_string(token_string) + return token + + +def get_captcha_challenge(http_body, + captcha_base_url='http://www.google.com/accounts/'): + """Returns the URL and token for a CAPTCHA challenge issued by the server. + + Args: + http_body: str The body of the HTTP response from the server which + contains the CAPTCHA challenge. + captcha_base_url: str This function returns a full URL for viewing the + challenge image which is built from the server's response. This + base_url is used as the beginning of the URL because the server + only provides the end of the URL. For example the server provides + 'Captcha?ctoken=Hi...N' and the URL for the image is + 'http://www.google.com/accounts/Captcha?ctoken=Hi...N' + + Returns: + A dictionary containing the information needed to repond to the CAPTCHA + challenge, the image URL and the ID token of the challenge. The + dictionary is in the form: + {'token': string identifying the CAPTCHA image, + 'url': string containing the URL of the image} + Returns None if there was no CAPTCHA challenge in the response. 
+ """ + contains_captcha_challenge = False + captcha_parameters = {} + for response_line in http_body.splitlines(): + if response_line.startswith('Error=CaptchaRequired'): + contains_captcha_challenge = True + elif response_line.startswith('CaptchaToken='): + # Strip off the leading CaptchaToken= + captcha_parameters['token'] = response_line[13:] + elif response_line.startswith('CaptchaUrl='): + captcha_parameters['url'] = '%s%s' % (captcha_base_url, + response_line[11:]) + if contains_captcha_challenge: + return captcha_parameters + else: + return None + + +GetCaptchaChallenge = get_captcha_challenge + + +def GenerateOAuthRequestTokenUrl( + oauth_input_params, scopes, + request_token_url='https://www.google.com/accounts/OAuthGetRequestToken', + extra_parameters=None): + """Generate a URL at which a request for OAuth request token is to be sent. + + Args: + oauth_input_params: OAuthInputParams OAuth input parameters. + scopes: list of strings The URLs of the services to be accessed. + request_token_url: string The beginning of the request token URL. This is + normally 'https://www.google.com/accounts/OAuthGetRequestToken' or + '/accounts/OAuthGetRequestToken' + extra_parameters: dict (optional) key-value pairs as any additional + parameters to be included in the URL and signature while making a + request for fetching an OAuth request token. All the OAuth parameters + are added by default. But if provided through this argument, any + default parameters will be overwritten. For e.g. a default parameter + oauth_version 1.0 can be overwritten if + extra_parameters = {'oauth_version': '2.0'} + + Returns: + atom.url.Url OAuth request token URL. 
+ """ + scopes_string = ' '.join([str(scope) for scope in scopes]) + parameters = {'scope': scopes_string} + if extra_parameters: + parameters.update(extra_parameters) + oauth_request = oauth.OAuthRequest.from_consumer_and_token( + oauth_input_params.GetConsumer(), http_url=request_token_url, + parameters=parameters) + oauth_request.sign_request(oauth_input_params.GetSignatureMethod(), + oauth_input_params.GetConsumer(), None) + return atom.url.parse_url(oauth_request.to_url()) + + +def GenerateOAuthAuthorizationUrl( + request_token, + authorization_url='https://www.google.com/accounts/OAuthAuthorizeToken', + callback_url=None, extra_params=None, + include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope'): + """Generates URL at which user will login to authorize the request token. + + Args: + request_token: gdata.auth.OAuthToken OAuth request token. + authorization_url: string The beginning of the authorization URL. This is + normally 'https://www.google.com/accounts/OAuthAuthorizeToken' or + '/accounts/OAuthAuthorizeToken' + callback_url: string (optional) The URL user will be sent to after + logging in and granting access. + extra_params: dict (optional) Additional parameters to be sent. + include_scopes_in_callback: Boolean (default=False) if set to True, and + if 'callback_url' is present, the 'callback_url' will be modified to + include the scope(s) from the request token as a URL parameter. The + key for the 'callback' URL's scope parameter will be + OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as + a parameter to the 'callback' URL, is that the page which receives + the OAuth token will be able to tell which URLs the token grants + access to. + scopes_param_prefix: string (default='oauth_token_scope') The URL + parameter key which maps to the list of valid scopes for the token. + This URL parameter will be included in the callback URL along with + the scopes of the token as value if include_scopes_in_callback=True. 
+ + Returns: + atom.url.Url OAuth authorization URL. + """ + scopes = request_token.scopes + if isinstance(scopes, list): + scopes = ' '.join(scopes) + if include_scopes_in_callback and callback_url: + if callback_url.find('?') > -1: + callback_url += '&' + else: + callback_url += '?' + callback_url += urllib.urlencode({scopes_param_prefix:scopes}) + oauth_token = oauth.OAuthToken(request_token.key, request_token.secret) + oauth_request = oauth.OAuthRequest.from_token_and_callback( + token=oauth_token, callback=callback_url, + http_url=authorization_url, parameters=extra_params) + return atom.url.parse_url(oauth_request.to_url()) + + +def GenerateOAuthAccessTokenUrl( + authorized_request_token, + oauth_input_params, + access_token_url='https://www.google.com/accounts/OAuthGetAccessToken', + oauth_version='1.0'): + """Generates URL at which user will login to authorize the request token. + + Args: + authorized_request_token: gdata.auth.OAuthToken OAuth authorized request + token. + oauth_input_params: OAuthInputParams OAuth input parameters. + access_token_url: string The beginning of the authorization URL. This is + normally 'https://www.google.com/accounts/OAuthGetAccessToken' or + '/accounts/OAuthGetAccessToken' + oauth_version: str (default='1.0') oauth_version parameter. + + Returns: + atom.url.Url OAuth access token URL. 
+ """ + oauth_token = oauth.OAuthToken(authorized_request_token.key, + authorized_request_token.secret) + oauth_request = oauth.OAuthRequest.from_consumer_and_token( + oauth_input_params.GetConsumer(), token=oauth_token, + http_url=access_token_url, parameters={'oauth_version': oauth_version}) + oauth_request.sign_request(oauth_input_params.GetSignatureMethod(), + oauth_input_params.GetConsumer(), oauth_token) + return atom.url.parse_url(oauth_request.to_url()) + + +def GenerateAuthSubUrl(next, scope, secure=False, session=True, + request_url='https://www.google.com/accounts/AuthSubRequest', + domain='default'): + """Generate a URL at which the user will login and be redirected back. + + Users enter their credentials on a Google login page and a token is sent + to the URL specified in next. See documentation for AuthSub login at: + http://code.google.com/apis/accounts/AuthForWebApps.html + + Args: + request_url: str The beginning of the request URL. This is normally + 'http://www.google.com/accounts/AuthSubRequest' or + '/accounts/AuthSubRequest' + next: string The URL user will be sent to after logging in. + scope: string The URL of the service to be accessed. + secure: boolean (optional) Determines whether or not the issued token + is a secure token. + session: boolean (optional) Determines whether or not the issued token + can be upgraded to a session token. + domain: str (optional) The Google Apps domain for this account. If this + is not a Google Apps account, use 'default' which is the default + value. + """ + # Translate True/False values for parameters into numeric values acceoted + # by the AuthSub service. 
+ if secure: + secure = 1 + else: + secure = 0 + + if session: + session = 1 + else: + session = 0 + + request_params = urllib.urlencode({'next': next, 'scope': scope, + 'secure': secure, 'session': session, + 'hd': domain}) + if request_url.find('?') == -1: + return '%s?%s' % (request_url, request_params) + else: + # The request URL already contained url parameters so we should add + # the parameters using the & seperator + return '%s&%s' % (request_url, request_params) + + +def generate_auth_sub_url(next, scopes, secure=False, session=True, + request_url='https://www.google.com/accounts/AuthSubRequest', + domain='default', scopes_param_prefix='auth_sub_scopes'): + """Constructs a URL string for requesting a multiscope AuthSub token. + + The generated token will contain a URL parameter to pass along the + requested scopes to the next URL. When the Google Accounts page + redirects the broswser to the 'next' URL, it appends the single use + AuthSub token value to the URL as a URL parameter with the key 'token'. + However, the information about which scopes were requested is not + included by Google Accounts. This method adds the scopes to the next + URL before making the request so that the redirect will be sent to + a page, and both the token value and the list of scopes can be + extracted from the request URL. + + Args: + next: atom.url.URL or string The URL user will be sent to after + authorizing this web application to access their data. + scopes: list containint strings The URLs of the services to be accessed. + secure: boolean (optional) Determines whether or not the issued token + is a secure token. + session: boolean (optional) Determines whether or not the issued token + can be upgraded to a session token. + request_url: atom.url.Url or str The beginning of the request URL. This + is normally 'http://www.google.com/accounts/AuthSubRequest' or + '/accounts/AuthSubRequest' + domain: The domain which the account is part of. 
This is used for Google + Apps accounts, the default value is 'default' which means that the + requested account is a Google Account (@gmail.com for example) + scopes_param_prefix: str (optional) The requested scopes are added as a + URL parameter to the next URL so that the page at the 'next' URL can + extract the token value and the valid scopes from the URL. The key + for the URL parameter defaults to 'auth_sub_scopes' + + Returns: + An atom.url.Url which the user's browser should be directed to in order + to authorize this application to access their information. + """ + if isinstance(next, (str, unicode)): + next = atom.url.parse_url(next) + scopes_string = ' '.join([str(scope) for scope in scopes]) + next.params[scopes_param_prefix] = scopes_string + + if isinstance(request_url, (str, unicode)): + request_url = atom.url.parse_url(request_url) + request_url.params['next'] = str(next) + request_url.params['scope'] = scopes_string + if session: + request_url.params['session'] = 1 + else: + request_url.params['session'] = 0 + if secure: + request_url.params['secure'] = 1 + else: + request_url.params['secure'] = 0 + request_url.params['hd'] = domain + return request_url + + +def AuthSubTokenFromUrl(url): + """Extracts the AuthSub token from the URL. + + Used after the AuthSub redirect has sent the user to the 'next' page and + appended the token to the URL. This function returns the value to be used + in the Authorization header. + + Args: + url: str The URL of the current page which contains the AuthSub token as + a URL parameter. + """ + token = TokenFromUrl(url) + if token: + return 'AuthSub token=%s' % token + return None + + +def TokenFromUrl(url): + """Extracts the AuthSub token from the URL. + + Returns the raw token value. + + Args: + url: str The URL or the query portion of the URL string (after the ?) of + the current page which contains the AuthSub token as a URL parameter. 
+ """ + if url.find('?') > -1: + query_params = url.split('?')[1] + else: + query_params = url + for pair in query_params.split('&'): + if pair.startswith('token='): + return pair[6:] + return None + + +def extract_auth_sub_token_from_url(url, + scopes_param_prefix='auth_sub_scopes', rsa_key=None): + """Creates an AuthSubToken and sets the token value and scopes from the URL. + + After the Google Accounts AuthSub pages redirect the user's broswer back to + the web application (using the 'next' URL from the request) the web app must + extract the token from the current page's URL. The token is provided as a + URL parameter named 'token' and if generate_auth_sub_url was used to create + the request, the token's valid scopes are included in a URL parameter whose + name is specified in scopes_param_prefix. + + Args: + url: atom.url.Url or str representing the current URL. The token value + and valid scopes should be included as URL parameters. + scopes_param_prefix: str (optional) The URL parameter key which maps to + the list of valid scopes for the token. + + Returns: + An AuthSubToken with the token value from the URL and set to be valid for + the scopes passed in on the URL. If no scopes were included in the URL, + the AuthSubToken defaults to being valid for no scopes. If there was no + 'token' parameter in the URL, this function returns None. + """ + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + if 'token' not in url.params: + return None + scopes = [] + if scopes_param_prefix in url.params: + scopes = url.params[scopes_param_prefix].split(' ') + token_value = url.params['token'] + if rsa_key: + token = SecureAuthSubToken(rsa_key, scopes=scopes) + else: + token = AuthSubToken(scopes=scopes) + token.set_token_string(token_value) + return token + + +def AuthSubTokenFromHttpBody(http_body): + """Extracts the AuthSub token from an HTTP body string. 
+ + Used to find the new session token after making a request to upgrade a + single use AuthSub token. + + Args: + http_body: str The repsonse from the server which contains the AuthSub + key. For example, this function would find the new session token + from the server's response to an upgrade token request. + + Returns: + The header value to use for Authorization which contains the AuthSub + token. + """ + token_value = token_from_http_body(http_body) + if token_value: + return '%s%s' % (AUTHSUB_AUTH_LABEL, token_value) + return None + + +def token_from_http_body(http_body): + """Extracts the AuthSub token from an HTTP body string. + + Used to find the new session token after making a request to upgrade a + single use AuthSub token. + + Args: + http_body: str The repsonse from the server which contains the AuthSub + key. For example, this function would find the new session token + from the server's response to an upgrade token request. + + Returns: + The raw token value to use in an AuthSubToken object. + """ + for response_line in http_body.splitlines(): + if response_line.startswith('Token='): + # Strip off Token= and return the token value string. + return response_line[6:] + return None + + +TokenFromHttpBody = token_from_http_body + + +def OAuthTokenFromUrl(url, scopes_param_prefix='oauth_token_scope'): + """Creates an OAuthToken and sets token key and scopes (if present) from URL. + + After the Google Accounts OAuth pages redirect the user's broswer back to + the web application (using the 'callback' URL from the request) the web app + can extract the token from the current page's URL. The token is same as the + request token, but it is either authorized (if user grants access) or + unauthorized (if user denies access). 
The token is provided as a + URL parameter named 'oauth_token' and if it was chosen to use + GenerateOAuthAuthorizationUrl with include_scopes_in_param=True, the token's + valid scopes are included in a URL parameter whose name is specified in + scopes_param_prefix. + + Args: + url: atom.url.Url or str representing the current URL. The token value + and valid scopes should be included as URL parameters. + scopes_param_prefix: str (optional) The URL parameter key which maps to + the list of valid scopes for the token. + + Returns: + An OAuthToken with the token key from the URL and set to be valid for + the scopes passed in on the URL. If no scopes were included in the URL, + the OAuthToken defaults to being valid for no scopes. If there was no + 'oauth_token' parameter in the URL, this function returns None. + """ + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + if 'oauth_token' not in url.params: + return None + scopes = [] + if scopes_param_prefix in url.params: + scopes = url.params[scopes_param_prefix].split(' ') + token_key = url.params['oauth_token'] + token = OAuthToken(key=token_key, scopes=scopes) + return token + + +def OAuthTokenFromHttpBody(http_body): + """Parses the HTTP response body and returns an OAuth token. + + The returned OAuth token will just have key and secret parameters set. + It won't have any knowledge about the scopes or oauth_input_params. It is + your responsibility to make it aware of the remaining parameters. + + Returns: + OAuthToken OAuth token. + """ + token = oauth.OAuthToken.from_string(http_body) + oauth_token = OAuthToken(key=token.key, secret=token.secret) + return oauth_token + + +class OAuthSignatureMethod(object): + """Holds valid OAuth signature methods. + + RSA_SHA1: Class to build signature according to RSA-SHA1 algorithm. + HMAC_SHA1: Class to build signature according to HMAC-SHA1 algorithm. 
+ """ + + HMAC_SHA1 = oauth.OAuthSignatureMethod_HMAC_SHA1 + + class RSA_SHA1(oauth_rsa.OAuthSignatureMethod_RSA_SHA1): + """Provides implementation for abstract methods to return RSA certs.""" + + def __init__(self, private_key, public_cert): + self.private_key = private_key + self.public_cert = public_cert + + def _fetch_public_cert(self, unused_oauth_request): + return self.public_cert + + def _fetch_private_cert(self, unused_oauth_request): + return self.private_key + + +class OAuthInputParams(object): + """Stores OAuth input parameters. + + This class is a store for OAuth input parameters viz. consumer key and secret, + signature method and RSA key. + """ + + def __init__(self, signature_method, consumer_key, consumer_secret=None, + rsa_key=None): + """Initializes object with parameters required for using OAuth mechanism. + + NOTE: Though consumer_secret and rsa_key are optional, either of the two + is required depending on the value of the signature_method. + + Args: + signature_method: class which provides implementation for strategy class + oauth.oauth.OAuthSignatureMethod. Signature method to be used for + signing each request. Valid implementations are provided as the + constants defined by gdata.auth.OAuthSignatureMethod. Currently + they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and + gdata.auth.OAuthSignatureMethod.HMAC_SHA1 + consumer_key: string Domain identifying third_party web application. + consumer_secret: string (optional) Secret generated during registration. + Required only for HMAC_SHA1 signature method. + rsa_key: string (optional) Private key required for RSA_SHA1 signature + method. + """ + if signature_method == OAuthSignatureMethod.RSA_SHA1: + self._signature_method = signature_method(rsa_key, None) + else: + self._signature_method = signature_method() + self._consumer = oauth.OAuthConsumer(consumer_key, consumer_secret) + + def GetSignatureMethod(self): + """Gets the OAuth signature method. 
+ + Returns: + object of supertype + """ + return self._signature_method + + def GetConsumer(self): + """Gets the OAuth consumer. + + Returns: + object of type + """ + return self._consumer + + +class ClientLoginToken(atom.http_interface.GenericToken): + """Stores the Authorization header in auth_header and adds to requests. + + This token will add it's Authorization header to an HTTP request + as it is made. Ths token class is simple but + some Token classes must calculate portions of the Authorization header + based on the request being made, which is why the token is responsible + for making requests via an http_client parameter. + + Args: + auth_header: str The value for the Authorization header. + scopes: list of str or atom.url.Url specifying the beginnings of URLs + for which this token can be used. For example, if scopes contains + 'http://example.com/foo', then this token can be used for a request to + 'http://example.com/foo/bar' but it cannot be used for a request to + 'http://example.com/baz' + """ + def __init__(self, auth_header=None, scopes=None): + self.auth_header = auth_header + self.scopes = scopes or [] + + def __str__(self): + return self.auth_header + + def perform_request(self, http_client, operation, url, data=None, + headers=None): + """Sets the Authorization header and makes the HTTP request.""" + if headers is None: + headers = {'Authorization':self.auth_header} + else: + headers['Authorization'] = self.auth_header + return http_client.request(operation, url, data=data, headers=headers) + + def get_token_string(self): + """Removes PROGRAMMATIC_AUTH_LABEL to give just the token value.""" + return self.auth_header[len(PROGRAMMATIC_AUTH_LABEL):] + + def set_token_string(self, token_string): + self.auth_header = '%s%s' % (PROGRAMMATIC_AUTH_LABEL, token_string) + + def valid_for_scope(self, url): + """Tells the caller if the token authorizes access to the desired URL. 
+ """ + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + for scope in self.scopes: + if scope == atom.token_store.SCOPE_ALL: + return True + if isinstance(scope, (str, unicode)): + scope = atom.url.parse_url(scope) + if scope == url: + return True + # Check the host and the path, but ignore the port and protocol. + elif scope.host == url.host and not scope.path: + return True + elif scope.host == url.host and scope.path and not url.path: + continue + elif scope.host == url.host and url.path.startswith(scope.path): + return True + return False + + +class AuthSubToken(ClientLoginToken): + def get_token_string(self): + """Removes AUTHSUB_AUTH_LABEL to give just the token value.""" + return self.auth_header[len(AUTHSUB_AUTH_LABEL):] + + def set_token_string(self, token_string): + self.auth_header = '%s%s' % (AUTHSUB_AUTH_LABEL, token_string) + + +class OAuthToken(atom.http_interface.GenericToken): + """Stores the token key, token secret and scopes for which token is valid. + + This token adds the authorization header to each request made. It + re-calculates authorization header for every request since the OAuth + signature to be added to the authorization header is dependent on the + request parameters. + + Attributes: + key: str The value for the OAuth token i.e. token key. + secret: str The value for the OAuth token secret. + scopes: list of str or atom.url.Url specifying the beginnings of URLs + for which this token can be used. For example, if scopes contains + 'http://example.com/foo', then this token can be used for a request to + 'http://example.com/foo/bar' but it cannot be used for a request to + 'http://example.com/baz' + oauth_input_params: OAuthInputParams OAuth input parameters. 
+ """ + + def __init__(self, key=None, secret=None, scopes=None, + oauth_input_params=None): + self.key = key + self.secret = secret + self.scopes = scopes or [] + self.oauth_input_params = oauth_input_params + + def __str__(self): + return self.get_token_string() + + def get_token_string(self): + """Returns the token string. + + The token string returned is of format + oauth_token=[0]&oauth_token_secret=[1], where [0] and [1] are some strings. + + Returns: + A token string of format oauth_token=[0]&oauth_token_secret=[1], + where [0] and [1] are some strings. If self.secret is absent, it just + returns oauth_token=[0]. If self.key is absent, it just returns + oauth_token_secret=[1]. If both are absent, it returns None. + """ + if self.key and self.secret: + return urllib.urlencode({'oauth_token': self.key, + 'oauth_token_secret': self.secret}) + elif self.key: + return 'oauth_token=%s' % self.key + elif self.secret: + return 'oauth_token_secret=%s' % self.secret + else: + return None + + def set_token_string(self, token_string): + """Sets the token key and secret from the token string. + + Args: + token_string: str Token string of form + oauth_token=[0]&oauth_token_secret=[1]. If oauth_token is not present, + self.key will be None. If oauth_token_secret is not present, + self.secret will be None. + """ + token_params = cgi.parse_qs(token_string, keep_blank_values=False) + if 'oauth_token' in token_params: + self.key = token_params['oauth_token'][0] + if 'oauth_token_secret' in token_params: + self.secret = token_params['oauth_token_secret'][0] + + def GetAuthHeader(self, http_method, http_url, realm=''): + """Get the authentication header. + + Args: + http_method: string HTTP method i.e. operation e.g. GET, POST, PUT, etc. + http_url: string or atom.url.Url HTTP URL to which request is made. + realm: string (default='') realm parameter to be included in the + authorization header. 
+ + Returns: + dict Header to be sent with every subsequent request after + authentication. + """ + if isinstance(http_url, types.StringTypes): + http_url = atom.url.parse_url(http_url) + header = None + token = None + if self.key or self.secret: + token = oauth.OAuthToken(self.key, self.secret) + oauth_request = oauth.OAuthRequest.from_consumer_and_token( + self.oauth_input_params.GetConsumer(), token=token, + http_url=str(http_url), http_method=http_method, + parameters=http_url.params) + oauth_request.sign_request(self.oauth_input_params.GetSignatureMethod(), + self.oauth_input_params.GetConsumer(), token) + header = oauth_request.to_header(realm=realm) + header['Authorization'] = header['Authorization'].replace('+', '%2B') + return header + + def perform_request(self, http_client, operation, url, data=None, + headers=None): + """Sets the Authorization header and makes the HTTP request.""" + if not headers: + headers = {} + headers.update(self.GetAuthHeader(operation, url)) + return http_client.request(operation, url, data=data, headers=headers) + + def valid_for_scope(self, url): + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + for scope in self.scopes: + if scope == atom.token_store.SCOPE_ALL: + return True + if isinstance(scope, (str, unicode)): + scope = atom.url.parse_url(scope) + if scope == url: + return True + # Check the host and the path, but ignore the port and protocol. + elif scope.host == url.host and not scope.path: + return True + elif scope.host == url.host and scope.path and not url.path: + continue + elif scope.host == url.host and url.path.startswith(scope.path): + return True + return False + + +class SecureAuthSubToken(AuthSubToken): + """Stores the rsa private key, token, and scopes for the secure AuthSub token. + + This token adds the authorization header to each request made. 
It + re-calculates authorization header for every request since the secure AuthSub + signature to be added to the authorization header is dependent on the + request parameters. + + Attributes: + rsa_key: string The RSA private key in PEM format that the token will + use to sign requests + token_string: string (optional) The value for the AuthSub token. + scopes: list of str or atom.url.Url specifying the beginnings of URLs + for which this token can be used. For example, if scopes contains + 'http://example.com/foo', then this token can be used for a request to + 'http://example.com/foo/bar' but it cannot be used for a request to + 'http://example.com/baz' + """ + + def __init__(self, rsa_key, token_string=None, scopes=None): + self.rsa_key = keyfactory.parsePEMKey(rsa_key) + self.token_string = token_string or '' + self.scopes = scopes or [] + + def __str__(self): + return self.get_token_string() + + def get_token_string(self): + return str(self.token_string) + + def set_token_string(self, token_string): + self.token_string = token_string + + def GetAuthHeader(self, http_method, http_url): + """Generates the Authorization header. + + The form of the secure AuthSub Authorization header is + Authorization: AuthSub token="token" sigalg="sigalg" data="data" sig="sig" + and data represents a string in the form + data = http_method http_url timestamp nonce + + Args: + http_method: string HTTP method i.e. operation e.g. GET, POST, PUT, etc. + http_url: string or atom.url.Url HTTP URL to which request is made. + + Returns: + dict Header to be sent with every subsequent request after authentication. 
+ """ + timestamp = int(math.floor(time.time())) + nonce = '%lu' % random.randrange(1, 2**64) + data = '%s %s %d %s' % (http_method, str(http_url), timestamp, nonce) + sig = cryptomath.bytesToBase64(self.rsa_key.hashAndSign(data)) + header = {'Authorization': '%s"%s" data="%s" sig="%s" sigalg="rsa-sha1"' % + (AUTHSUB_AUTH_LABEL, self.token_string, data, sig)} + return header + + def perform_request(self, http_client, operation, url, data=None, + headers=None): + """Sets the Authorization header and makes the HTTP request.""" + if not headers: + headers = {} + headers.update(self.GetAuthHeader(operation, url)) + return http_client.request(operation, url, data=data, headers=headers) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/base/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/base/__init__.py new file mode 100644 index 0000000..c27114a --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/base/__init__.py @@ -0,0 +1,687 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Contains extensions to Atom objects used with Google Base.""" + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import atom +import gdata + + +# XML namespaces which are often used in Google Base entities. +GBASE_NAMESPACE = 'http://base.google.com/ns/1.0' +GBASE_TEMPLATE = '{http://base.google.com/ns/1.0}%s' +GMETA_NAMESPACE = 'http://base.google.com/ns-metadata/1.0' +GMETA_TEMPLATE = '{http://base.google.com/ns-metadata/1.0}%s' + + +class ItemAttributeContainer(object): + """Provides methods for finding Google Base Item attributes. + + Google Base item attributes are child nodes in the gbase namespace. Google + Base allows you to define your own item attributes and this class provides + methods to interact with the custom attributes. + """ + + def GetItemAttributes(self, name): + """Returns a list of all item attributes which have the desired name. + + Args: + name: str The tag of the desired base attributes. For example, calling + this method with 'rating' would return a list of ItemAttributes + represented by a 'g:rating' tag. + + Returns: + A list of matching ItemAttribute objects. + """ + result = [] + for attrib in self.item_attributes: + if attrib.name == name: + result.append(attrib) + return result + + def FindItemAttribute(self, name): + """Get the contents of the first Base item attribute which matches name. + + This method is deprecated, please use GetItemAttributes instead. + + Args: + name: str The tag of the desired base attribute. For example, calling + this method with name = 'rating' would search for a tag rating + in the GBase namespace in the item attributes. + + Returns: + The text contents of the item attribute, or none if the attribute was + not found. 
+ """ + + for attrib in self.item_attributes: + if attrib.name == name: + return attrib.text + return None + + def AddItemAttribute(self, name, value, value_type=None, access=None): + """Adds a new item attribute tag containing the value. + + Creates a new extension element in the GBase namespace to represent a + Google Base item attribute. + + Args: + name: str The tag name for the new attribute. This must be a valid xml + tag name. The tag will be placed in the GBase namespace. + value: str Contents for the item attribute + value_type: str (optional) The type of data in the vlaue, Examples: text + float + access: str (optional) Used to hide attributes. The attribute is not + exposed in the snippets feed if access is set to 'private'. + """ + + new_attribute = ItemAttribute(name, text=value, + text_type=value_type, access=access) + self.item_attributes.append(new_attribute) + + def SetItemAttribute(self, name, value): + """Changes an existing item attribute's value.""" + + for attrib in self.item_attributes: + if attrib.name == name: + attrib.text = value + return + + def RemoveItemAttribute(self, name): + """Deletes the first extension element which matches name. + + Deletes the first extension element which matches name. + """ + + for i in xrange(len(self.item_attributes)): + if self.item_attributes[i].name == name: + del self.item_attributes[i] + return + + # We need to overwrite _ConvertElementTreeToMember to add special logic to + # convert custom attributes to members + def _ConvertElementTreeToMember(self, child_tree): + # Find the element's tag in this class's list of child members + if self.__class__._children.has_key(child_tree.tag): + member_name = self.__class__._children[child_tree.tag][0] + member_class = self.__class__._children[child_tree.tag][1] + # If the class member is supposed to contain a list, make sure the + # matching member is set to a list, then append the new member + # instance to the list. 
+ if isinstance(member_class, list): + if getattr(self, member_name) is None: + setattr(self, member_name, []) + getattr(self, member_name).append(atom._CreateClassFromElementTree( + member_class[0], child_tree)) + else: + setattr(self, member_name, + atom._CreateClassFromElementTree(member_class, child_tree)) + elif child_tree.tag.find('{%s}' % GBASE_NAMESPACE) == 0: + # If this is in the gbase namespace, make it into an extension element. + name = child_tree.tag[child_tree.tag.index('}')+1:] + value = child_tree.text + if child_tree.attrib.has_key('type'): + value_type = child_tree.attrib['type'] + else: + value_type = None + self.AddItemAttribute(name, value, value_type) + else: + atom.ExtensionContainer._ConvertElementTreeToMember(self, child_tree) + + # We need to overwtite _AddMembersToElementTree to add special logic to + # convert custom members to XML nodes. + def _AddMembersToElementTree(self, tree): + # Convert the members of this class which are XML child nodes. + # This uses the class's _children dictionary to find the members which + # should become XML child nodes. + member_node_names = [values[0] for tag, values in + self.__class__._children.iteritems()] + for member_name in member_node_names: + member = getattr(self, member_name) + if member is None: + pass + elif isinstance(member, list): + for instance in member: + instance._BecomeChildElement(tree) + else: + member._BecomeChildElement(tree) + # Convert the members of this class which are XML attributes. + for xml_attribute, member_name in self.__class__._attributes.iteritems(): + member = getattr(self, member_name) + if member is not None: + tree.attrib[xml_attribute] = member + # Convert all special custom item attributes to nodes + for attribute in self.item_attributes: + attribute._BecomeChildElement(tree) + # Lastly, call the ExtensionContainers's _AddMembersToElementTree to + # convert any extension attributes. 
+ atom.ExtensionContainer._AddMembersToElementTree(self, tree) + + +class ItemAttribute(atom.Text): + """An optional or user defined attribute for a GBase item. + + Google Base allows items to have custom attribute child nodes. These nodes + have contents and a type attribute which tells Google Base whether the + contents are text, a float value with units, etc. The Atom text class has + the same structure, so this class inherits from Text. + """ + + _namespace = GBASE_NAMESPACE + _children = atom.Text._children.copy() + _attributes = atom.Text._attributes.copy() + _attributes['access'] = 'access' + + def __init__(self, name, text_type=None, access=None, text=None, + extension_elements=None, extension_attributes=None): + """Constructor for a GBase item attribute + + Args: + name: str The name of the attribute. Examples include + price, color, make, model, pages, salary, etc. + text_type: str (optional) The type associated with the text contents + access: str (optional) If the access attribute is set to 'private', the + attribute will not be included in the item's description in the + snippets feed + text: str (optional) The text data in the this element + extension_elements: list (optional) A list of ExtensionElement + instances + extension_attributes: dict (optional) A dictionary of attribute + value string pairs + """ + + self.name = name + self.type = text_type + self.access = access + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + def _BecomeChildElement(self, tree): + new_child = ElementTree.Element('') + tree.append(new_child) + new_child.tag = '{%s}%s' % (self.__class__._namespace, + self.name) + self._AddMembersToElementTree(new_child) + + def _ToElementTree(self): + new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace, + self.name)) + self._AddMembersToElementTree(new_tree) + return new_tree + + +def ItemAttributeFromString(xml_string): + element_tree = 
ElementTree.fromstring(xml_string) + return _ItemAttributeFromElementTree(element_tree) + + +def _ItemAttributeFromElementTree(element_tree): + if element_tree.tag.find(GBASE_TEMPLATE % '') == 0: + to_return = ItemAttribute('') + to_return._HarvestElementTree(element_tree) + to_return.name = element_tree.tag[element_tree.tag.index('}')+1:] + if to_return.name and to_return.name != '': + return to_return + return None + + +class Label(atom.AtomBase): + """The Google Base label element""" + + _tag = 'label' + _namespace = GBASE_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def LabelFromString(xml_string): + return atom.CreateClassFromXMLString(Label, xml_string) + + +class Thumbnail(atom.AtomBase): + """The Google Base thumbnail element""" + + _tag = 'thumbnail' + _namespace = GMETA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['width'] = 'width' + _attributes['height'] = 'height' + + def __init__(self, width=None, height=None, text=None, extension_elements=None, + extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.width = width + self.height = height + + +def ThumbnailFromString(xml_string): + return atom.CreateClassFromXMLString(Thumbnail, xml_string) + + +class ImageLink(atom.Text): + """The Google Base image_link element""" + + _tag = 'image_link' + _namespace = GBASE_NAMESPACE + _children = atom.Text._children.copy() + _attributes = atom.Text._attributes.copy() + _children['{%s}thumbnail' % GMETA_NAMESPACE] = ('thumbnail', [Thumbnail]) + + def __init__(self, thumbnail=None, text=None, 
extension_elements=None, + text_type=None, extension_attributes=None): + self.thumbnail = thumbnail or [] + self.text = text + self.type = text_type + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def ImageLinkFromString(xml_string): + return atom.CreateClassFromXMLString(ImageLink, xml_string) + + +class ItemType(atom.Text): + """The Google Base item_type element""" + + _tag = 'item_type' + _namespace = GBASE_NAMESPACE + _children = atom.Text._children.copy() + _attributes = atom.Text._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + text_type=None, extension_attributes=None): + self.text = text + self.type = text_type + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def ItemTypeFromString(xml_string): + return atom.CreateClassFromXMLString(ItemType, xml_string) + + +class MetaItemType(ItemType): + """The Google Base item_type element""" + + _tag = 'item_type' + _namespace = GMETA_NAMESPACE + _children = ItemType._children.copy() + _attributes = ItemType._attributes.copy() + + +def MetaItemTypeFromString(xml_string): + return atom.CreateClassFromXMLString(MetaItemType, xml_string) + + +class Value(atom.AtomBase): + """Metadata about common values for a given attribute + + A value is a child of an attribute which comes from the attributes feed. + The value's text is a commonly used value paired with an attribute name + and the value's count tells how often this value appears for the given + attribute in the search results. 
+ """ + + _tag = 'value' + _namespace = GMETA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['count'] = 'count' + + def __init__(self, count=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Attribute metadata element + + Args: + count: str (optional) The number of times the value in text is given + for the parent attribute. + text: str (optional) The value which appears in the search results. + extension_elements: list (optional) A list of ExtensionElement + instances + extension_attributes: dict (optional) A dictionary of attribute value + string pairs + """ + + self.count = count + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def ValueFromString(xml_string): + return atom.CreateClassFromXMLString(Value, xml_string) + + +class Attribute(atom.Text): + """Metadata about an attribute from the attributes feed + + An entry from the attributes feed contains a list of attributes. Each + attribute describes the attribute's type and count of the items which + use the attribute. + """ + + _tag = 'attribute' + _namespace = GMETA_NAMESPACE + _children = atom.Text._children.copy() + _attributes = atom.Text._attributes.copy() + _children['{%s}value' % GMETA_NAMESPACE] = ('value', [Value]) + _attributes['count'] = 'count' + _attributes['name'] = 'name' + + def __init__(self, name=None, attribute_type=None, count=None, value=None, + text=None, extension_elements=None, extension_attributes=None): + """Constructor for Attribute metadata element + + Args: + name: str (optional) The name of the attribute + attribute_type: str (optional) The type for the attribute. Examples: + test, float, etc. + count: str (optional) The number of times this attribute appears in + the query results. + value: list (optional) The values which are often used for this + attirbute. 
+ text: str (optional) The text contents of the XML for this attribute. + extension_elements: list (optional) A list of ExtensionElement + instances + extension_attributes: dict (optional) A dictionary of attribute value + string pairs + """ + + self.name = name + self.type = attribute_type + self.count = count + self.value = value or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def AttributeFromString(xml_string): + return atom.CreateClassFromXMLString(Attribute, xml_string) + + +class Attributes(atom.AtomBase): + """A collection of Google Base metadata attributes""" + + _tag = 'attributes' + _namespace = GMETA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute]) + + def __init__(self, attribute=None, extension_elements=None, + extension_attributes=None, text=None): + self.attribute = attribute or [] + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.text = text + + +class GBaseItem(ItemAttributeContainer, gdata.BatchEntry): + """An Google Base flavor of an Atom Entry. + + Google Base items have required attributes, recommended attributes, and user + defined attributes. The required attributes are stored in this class as + members, and other attributes are stored as extension elements. You can + access the recommended and user defined attributes by using + AddItemAttribute, SetItemAttribute, FindItemAttribute, and + RemoveItemAttribute. 
+ + The Base Item + """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.BatchEntry._children.copy() + _attributes = gdata.BatchEntry._attributes.copy() + _children['{%s}label' % GBASE_NAMESPACE] = ('label', [Label]) + _children['{%s}item_type' % GBASE_NAMESPACE] = ('item_type', ItemType) + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, updated=None, control=None, + label=None, item_type=None, item_attributes=None, + batch_operation=None, batch_id=None, batch_status=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.title = title + self.updated = updated + self.control = control + self.label = label or [] + self.item_type = item_type + self.item_attributes = item_attributes or [] + self.batch_operation = batch_operation + self.batch_id = batch_id + self.batch_status = batch_status + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def GBaseItemFromString(xml_string): + return atom.CreateClassFromXMLString(GBaseItem, xml_string) + + +class GBaseSnippet(GBaseItem): + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = GBaseItem._children.copy() + _attributes = GBaseItem._attributes.copy() + + +def GBaseSnippetFromString(xml_string): + return atom.CreateClassFromXMLString(GBaseSnippet, xml_string) + + +class GBaseAttributeEntry(gdata.GDataEntry): + """An Atom Entry from the attributes feed""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = 
gdata.GDataEntry._attributes.copy() + _children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute]) + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, updated=None, label=None, + attribute=None, control=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.control = control + self.title = title + self.updated = updated + self.label = label or [] + self.attribute = attribute or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def GBaseAttributeEntryFromString(xml_string): + return atom.CreateClassFromXMLString(GBaseAttributeEntry, xml_string) + + +class GBaseItemTypeEntry(gdata.GDataEntry): + """An Atom entry from the item types feed + + These entries contain a list of attributes which are stored in one + XML node called attributes. This class simplifies the data structure + by treating attributes as a list of attribute instances. + + Note that the item_type for an item type entry is in the Google Base meta + namespace as opposed to item_types encountered in other feeds. 
+ """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}attributes' % GMETA_NAMESPACE] = ('attributes', Attributes) + _children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute]) + _children['{%s}item_type' % GMETA_NAMESPACE] = ('item_type', MetaItemType) + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, updated=None, label=None, + item_type=None, control=None, attribute=None, attributes=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.title = title + self.updated = updated + self.control = control + self.label = label or [] + self.item_type = item_type + self.attributes = attributes + self.attribute = attribute or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def GBaseItemTypeEntryFromString(xml_string): + return atom.CreateClassFromXMLString(GBaseItemTypeEntry, xml_string) + + +class GBaseItemFeed(gdata.BatchFeed): + """A feed containing Google Base Items""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.BatchFeed._children.copy() + _attributes = gdata.BatchFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseItem]) + + +def GBaseItemFeedFromString(xml_string): + return atom.CreateClassFromXMLString(GBaseItemFeed, xml_string) + + +class GBaseSnippetFeed(gdata.GDataFeed): + """A feed containing Google Base Snippets""" + + _tag = 'feed' + _namespace = 
atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseSnippet]) + + +def GBaseSnippetFeedFromString(xml_string): + return atom.CreateClassFromXMLString(GBaseSnippetFeed, xml_string) + + +class GBaseAttributesFeed(gdata.GDataFeed): + """A feed containing Google Base Attributes + + A query sent to the attributes feed will return a feed of + attributes which are present in the items that match the + query. + """ + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [GBaseAttributeEntry]) + + +def GBaseAttributesFeedFromString(xml_string): + return atom.CreateClassFromXMLString(GBaseAttributesFeed, xml_string) + + +class GBaseLocalesFeed(gdata.GDataFeed): + """The locales feed from Google Base. + + This read-only feed defines the permitted locales for Google Base. The + locale value identifies the language, currency, and date formats used in a + feed. 
+ """ + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + + +def GBaseLocalesFeedFromString(xml_string): + return atom.CreateClassFromXMLString(GBaseLocalesFeed, xml_string) + + +class GBaseItemTypesFeed(gdata.GDataFeed): + """A feed from the Google Base item types feed""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseItemTypeEntry]) + + +def GBaseItemTypesFeedFromString(xml_string): + return atom.CreateClassFromXMLString(GBaseItemTypesFeed, xml_string) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/base/service.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/base/service.py new file mode 100644 index 0000000..dff3451 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/base/service.py @@ -0,0 +1,244 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""GBaseService extends the GDataService to streamline Google Base operations. + + GBaseService: Provides methods to query feeds and manipulate items. Extends + GDataService. + + DictionaryToParamList: Function which converts a dictionary into a list of + URL arguments (represented as strings). This is a + utility function used in CRUD operations. 
+""" + +__author__ = 'api.jscudder (Jeffrey Scudder)' + +import urllib +import gdata +import atom.service +import gdata.service +import gdata.base +import atom + + +# URL to which all batch requests are sent. +BASE_BATCH_URL = 'http://www.google.com/base/feeds/items/batch' + + +class Error(Exception): + pass + + +class RequestError(Error): + pass + + +class GBaseService(gdata.service.GDataService): + """Client for the Google Base service.""" + + def __init__(self, email=None, password=None, source=None, + server='base.google.com', api_key=None, + additional_headers=None, handler=None): + gdata.service.GDataService.__init__(self, email=email, password=password, + service='gbase', source=source, + server=server, + additional_headers=additional_headers, + handler=handler) + self.api_key = api_key + + def _SetAPIKey(self, api_key): + if not isinstance(self.additional_headers, dict): + self.additional_headers = {} + self.additional_headers['X-Google-Key'] = api_key + + def __SetAPIKey(self, api_key): + self._SetAPIKey(api_key) + + def _GetAPIKey(self): + if 'X-Google-Key' not in self.additional_headers: + return None + else: + return self.additional_headers['X-Google-Key'] + + def __GetAPIKey(self): + return self._GetAPIKey() + + api_key = property(__GetAPIKey, __SetAPIKey, + doc="""Get or set the API key to be included in all requests.""") + + def Query(self, uri, converter=None): + """Performs a style query and returns a resulting feed or entry. + + Args: + uri: string The full URI which be queried. Examples include + '/base/feeds/snippets?bq=digital+camera', + 'http://www.google.com/base/feeds/snippets?bq=digital+camera' + '/base/feeds/items' + I recommend creating a URI using a query class. + converter: func (optional) A function which will be executed on the + server's response. Examples include GBaseItemFromString, etc. + + Returns: + If converter was specified, returns the results of calling converter on + the server's response. 
If converter was not specified, and the result + was an Atom Entry, returns a GBaseItem, by default, the method returns + the result of calling gdata.service's Get method. + """ + + result = self.Get(uri, converter=converter) + if converter: + return result + elif isinstance(result, atom.Entry): + return gdata.base.GBaseItemFromString(result.ToString()) + return result + + def QuerySnippetsFeed(self, uri): + return self.Get(uri, converter=gdata.base.GBaseSnippetFeedFromString) + + def QueryItemsFeed(self, uri): + return self.Get(uri, converter=gdata.base.GBaseItemFeedFromString) + + def QueryAttributesFeed(self, uri): + return self.Get(uri, converter=gdata.base.GBaseAttributesFeedFromString) + + def QueryItemTypesFeed(self, uri): + return self.Get(uri, converter=gdata.base.GBaseItemTypesFeedFromString) + + def QueryLocalesFeed(self, uri): + return self.Get(uri, converter=gdata.base.GBaseLocalesFeedFromString) + + def GetItem(self, uri): + return self.Get(uri, converter=gdata.base.GBaseItemFromString) + + def GetSnippet(self, uri): + return self.Get(uri, converter=gdata.base.GBaseSnippetFromString) + + def GetAttribute(self, uri): + return self.Get(uri, converter=gdata.base.GBaseAttributeEntryFromString) + + def GetItemType(self, uri): + return self.Get(uri, converter=gdata.base.GBaseItemTypeEntryFromString) + + def GetLocale(self, uri): + return self.Get(uri, converter=gdata.base.GDataEntryFromString) + + def InsertItem(self, new_item, url_params=None, escape_params=True, + converter=None): + """Adds an item to Google Base. + + Args: + new_item: atom.Entry or subclass A new item which is to be added to + Google Base. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. 
Usually this is a function like + GBaseItemFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a GBaseItem. + """ + + response = self.Post(new_item, '/base/feeds/items', url_params=url_params, + escape_params=escape_params, converter=converter) + + if not converter and isinstance(response, atom.Entry): + return gdata.base.GBaseItemFromString(response.ToString()) + return response + + def DeleteItem(self, item_id, url_params=None, escape_params=True): + """Removes an item with the specified ID from Google Base. + + Args: + item_id: string The ID of the item to be deleted. Example: + 'http://www.google.com/base/feeds/items/13185446517496042648' + url_params: dict (optional) Additional URL parameters to be included + in the deletion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + True if the delete succeeded. + """ + + return self.Delete('%s' % (item_id[len('http://www.google.com'):],), + url_params=url_params, escape_params=escape_params) + + def UpdateItem(self, item_id, updated_item, url_params=None, + escape_params=True, + converter=gdata.base.GBaseItemFromString): + """Updates an existing item. + + Args: + item_id: string The ID of the item to be updated. Example: + 'http://www.google.com/base/feeds/items/13185446517496042648' + updated_item: atom.Entry, subclass, or string, containing + the Atom Entry which will replace the base item which is + stored at the item_id. + url_params: dict (optional) Additional URL parameters to be included + in the update request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. 
Usually this is a function like + GBaseItemFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a GBaseItem. + """ + + response = self.Put(updated_item, + item_id, url_params=url_params, escape_params=escape_params, + converter=converter) + if not converter and isinstance(response, atom.Entry): + return gdata.base.GBaseItemFromString(response.ToString()) + return response + + def ExecuteBatch(self, batch_feed, + converter=gdata.base.GBaseItemFeedFromString): + """Sends a batch request feed to the server. + + Args: + batch_feed: gdata.BatchFeed A feed containing BatchEntry elements which + contain the desired CRUD operation and any necessary entry data. + converter: Function (optional) Function to be executed on the server's + response. This function should take one string as a parameter. The + default value is GBaseItemFeedFromString which will turn the result + into a gdata.base.GBaseItem object. + + Returns: + A gdata.BatchFeed containing the results. + """ + + return self.Post(batch_feed, BASE_BATCH_URL, converter=converter) + + +class BaseQuery(gdata.service.Query): + + def _GetBaseQuery(self): + return self['bq'] + + def _SetBaseQuery(self, base_query): + self['bq'] = base_query + + bq = property(_GetBaseQuery, _SetBaseQuery, + doc="""The bq query parameter""") diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/blogger/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/blogger/__init__.py new file mode 100644 index 0000000..0e0417b --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/blogger/__init__.py @@ -0,0 +1,202 @@ +#!/usr/bin/python +# +# Copyright (C) 2007, 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Contains extensions to Atom objects used with Blogger.""" + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + + +import atom +import gdata +import re + + +LABEL_SCHEME = 'http://www.blogger.com/atom/ns#' +THR_NAMESPACE = 'http://purl.org/syndication/thread/1.0' + + +class BloggerEntry(gdata.GDataEntry): + """Adds convenience methods inherited by all Blogger entries.""" + + blog_name_pattern = re.compile('(http://)(\w*)') + blog_id_pattern = re.compile('(tag:blogger.com,1999:blog-)(\w*)') + blog_id2_pattern = re.compile('tag:blogger.com,1999:user-(\d+)\.blog-(\d+)') + + def GetBlogId(self): + """Extracts the Blogger id of this blog. + This method is useful when contructing URLs by hand. The blog id is + often used in blogger operation URLs. This should not be confused with + the id member of a BloggerBlog. The id element is the Atom id XML element. + The blog id which this method returns is a part of the Atom id. + + Returns: + The blog's unique id as a string. + """ + if self.id.text: + match = self.blog_id_pattern.match(self.id.text) + if match: + return match.group(2) + else: + return self.blog_id2_pattern.match(self.id.text).group(2) + return None + + def GetBlogName(self): + """Finds the name of this blog as used in the 'alternate' URL. + An alternate URL is in the form 'http://blogName.blogspot.com/'. For an + entry representing the above example, this method would return 'blogName'. + + Returns: + The blog's URL name component as a string. 
+ """ + for link in self.link: + if link.rel == 'alternate': + return self.blog_name_pattern.match(link.href).group(2) + return None + + +class BlogEntry(BloggerEntry): + """Describes a blog entry in the feed listing a user's blogs.""" + + +def BlogEntryFromString(xml_string): + return atom.CreateClassFromXMLString(BlogEntry, xml_string) + + +class BlogFeed(gdata.GDataFeed): + """Describes a feed of a user's blogs.""" + + _children = gdata.GDataFeed._children.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BlogEntry]) + + +def BlogFeedFromString(xml_string): + return atom.CreateClassFromXMLString(BlogFeed, xml_string) + + +class BlogPostEntry(BloggerEntry): + """Describes a blog post entry in the feed of a blog's posts.""" + + post_id_pattern = re.compile('(tag:blogger.com,1999:blog-)(\w*)(.post-)(\w*)') + + def AddLabel(self, label): + """Adds a label to the blog post. + + The label is represented by an Atom category element, so this method + is shorthand for appending a new atom.Category object. + + Args: + label: str + """ + self.category.append(atom.Category(scheme=LABEL_SCHEME, term=label)) + + def GetPostId(self): + """Extracts the postID string from the entry's Atom id. + + Returns: A string of digits which identify this post within the blog. 
+ """ + if self.id.text: + return self.post_id_pattern.match(self.id.text).group(4) + return None + + +def BlogPostEntryFromString(xml_string): + return atom.CreateClassFromXMLString(BlogPostEntry, xml_string) + + +class BlogPostFeed(gdata.GDataFeed): + """Describes a feed of a blog's posts.""" + + _children = gdata.GDataFeed._children.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BlogPostEntry]) + + +def BlogPostFeedFromString(xml_string): + return atom.CreateClassFromXMLString(BlogPostFeed, xml_string) + + +class InReplyTo(atom.AtomBase): + _tag = 'in-reply-to' + _namespace = THR_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['href'] = 'href' + _attributes['ref'] = 'ref' + _attributes['source'] = 'source' + _attributes['type'] = 'type' + + def __init__(self, href=None, ref=None, source=None, type=None, + extension_elements=None, extension_attributes=None, text=None): + self.href = href + self.ref = ref + self.source = source + self.type = type + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.text = text + + +def InReplyToFromString(xml_string): + return atom.CreateClassFromXMLString(InReplyTo, xml_string) + + +class CommentEntry(BloggerEntry): + """Describes a blog post comment entry in the feed of a blog post's + comments.""" + + _children = BloggerEntry._children.copy() + _children['{%s}in-reply-to' % THR_NAMESPACE] = ('in_reply_to', InReplyTo) + + comment_id_pattern = re.compile('.*-(\w*)$') + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, control=None, title=None, updated=None, + in_reply_to=None, extension_elements=None, extension_attributes=None, + text=None): + BloggerEntry.__init__(self, author=author, category=category, + content=content, contributor=contributor, atom_id=atom_id, link=link, + published=published, 
rights=rights, source=source, summary=summary, + control=control, title=title, updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + self.in_reply_to = in_reply_to + + def GetCommentId(self): + """Extracts the commentID string from the entry's Atom id. + + Returns: A string of digits which identify this post within the blog. + """ + if self.id.text: + return self.comment_id_pattern.match(self.id.text).group(1) + return None + + +def CommentEntryFromString(xml_string): + return atom.CreateClassFromXMLString(CommentEntry, xml_string) + + +class CommentFeed(gdata.GDataFeed): + """Describes a feed of a blog post's comments.""" + + _children = gdata.GDataFeed._children.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CommentEntry]) + + +def CommentFeedFromString(xml_string): + return atom.CreateClassFromXMLString(CommentFeed, xml_string) + + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/blogger/service.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/blogger/service.py new file mode 100644 index 0000000..50c27d2 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/blogger/service.py @@ -0,0 +1,138 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Classes to interact with the Blogger server.""" + +__author__ = 'api.jscudder (Jeffrey Scudder)' + +import gdata.service +import gdata.blogger + + +class BloggerService(gdata.service.GDataService): + + def __init__(self, email=None, password=None, source=None, + server=None, api_key=None, + additional_headers=None): + gdata.service.GDataService.__init__(self, email=email, password=password, + service='blogger', source=source, + server=server, + additional_headers=additional_headers) + self.accountType = 'GOOGLE' + + def GetBlogFeed(self, uri=None): + """Retrieve a list of the blogs to which the current user may manage.""" + if not uri: + uri = 'http://www.blogger.com/feeds/default/blogs' + return self.Get(uri, converter=gdata.blogger.BlogFeedFromString) + + def GetBlogCommentFeed(self, blog_id=None, uri=None): + """Retrieve a list of the comments for this blog.""" + if blog_id: + uri = 'http://www.blogger.com/feeds/%s/comments/default' % blog_id + return self.Get(uri, converter=gdata.blogger.CommentFeedFromString) + + def GetBlogPostFeed(self, blog_id=None, uri=None): + if blog_id: + uri = 'http://www.blogger.com/feeds/%s/posts/default' % blog_id + return self.Get(uri, converter=gdata.blogger.BlogPostFeedFromString) + + def GetPostCommentFeed(self, blog_id=None, post_id=None, uri=None): + """Retrieve a list of the comments for this particular blog post.""" + if blog_id and post_id: + uri = 'http://www.blogger.com/feeds/%s/%s/comments/default' % (blog_id, + post_id) + return self.Get(uri, converter=gdata.blogger.CommentFeedFromString) + + def AddPost(self, entry, blog_id=None, uri=None): + if blog_id: + uri = 'http://www.blogger.com/feeds/%s/posts/default' % blog_id + return self.Post(entry, uri, + converter=gdata.blogger.BlogPostEntryFromString) + + def UpdatePost(self, entry, uri=None): + if not uri: + uri = entry.GetEditLink().href + return self.Put(entry, uri, + converter=gdata.blogger.BlogPostEntryFromString) + + def DeletePost(self, entry=None, 
uri=None): + if not uri: + uri = entry.GetEditLink().href + return self.Delete(uri) + + def AddComment(self, comment_entry, blog_id=None, post_id=None, uri=None): + """Adds a new comment to the specified blog post.""" + if blog_id and post_id: + uri = 'http://www.blogger.com/feeds/%s/%s/comments/default' % ( + blog_id, post_id) + return self.Post(comment_entry, uri, + converter=gdata.blogger.CommentEntryFromString) + + def DeleteComment(self, entry=None, uri=None): + if not uri: + uri = entry.GetEditLink().href + return self.Delete(uri) + + +class BlogQuery(gdata.service.Query): + + def __init__(self, feed=None, params=None, categories=None, blog_id=None): + """Constructs a query object for the list of a user's Blogger blogs. + + Args: + feed: str (optional) The beginning of the URL to be queried. If the + feed is not set, and there is no blog_id passed in, the default + value is used ('http://www.blogger.com/feeds/default/blogs'). + params: dict (optional) + categories: list (optional) + blog_id: str (optional) + """ + if not feed and blog_id: + feed = 'http://www.blogger.com/feeds/default/blogs/%s' % blog_id + elif not feed: + feed = 'http://www.blogger.com/feeds/default/blogs' + gdata.service.Query.__init__(self, feed=feed, params=params, + categories=categories) + + +class BlogPostQuery(gdata.service.Query): + + def __init__(self, feed=None, params=None, categories=None, blog_id=None, + post_id=None): + if not feed and blog_id and post_id: + feed = 'http://www.blogger.com/feeds/%s/posts/default/%s' % (blog_id, + post_id) + elif not feed and blog_id: + feed = 'http://www.blogger.com/feeds/%s/posts/default' % blog_id + gdata.service.Query.__init__(self, feed=feed, params=params, + categories=categories) + + +class BlogCommentQuery(gdata.service.Query): + + def __init__(self, feed=None, params=None, categories=None, blog_id=None, + post_id=None, comment_id=None): + if not feed and blog_id and comment_id: + feed = 
'http://www.blogger.com/feeds/%s/comments/default/%s' % ( + blog_id, comment_id) + elif not feed and blog_id and post_id: + feed = 'http://www.blogger.com/feeds/%s/%s/comments/default' % ( + blog_id, post_id) + elif not feed and blog_id: + feed = 'http://www.blogger.com/feeds/%s/comments/default' % blog_id + gdata.service.Query.__init__(self, feed=feed, params=params, + categories=categories) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/calendar/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/calendar/__init__.py new file mode 100644 index 0000000..cea1a03 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/calendar/__init__.py @@ -0,0 +1,907 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Contains extensions to ElementWrapper objects used with Google Calendar.""" + + +__author__ = 'api.vli (Vivian Li), api.rboyd (Ryan Boyd)' + + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import atom +import gdata + + +# XML namespaces which are often used in Google Calendar entities. 
+GCAL_NAMESPACE = 'http://schemas.google.com/gCal/2005' +GCAL_TEMPLATE = '{http://schemas.google.com/gCal/2005}%s' +WEB_CONTENT_LINK_REL = '%s/%s' % (GCAL_NAMESPACE, 'webContent') +GACL_NAMESPACE = gdata.GACL_NAMESPACE +GACL_TEMPLATE = gdata.GACL_TEMPLATE + + + +class ValueAttributeContainer(atom.AtomBase): + """A parent class for all Calendar classes which have a value attribute. + + Children include Color, AccessLevel, Hidden + """ + + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +class Color(ValueAttributeContainer): + """The Google Calendar color element""" + + _tag = 'color' + _namespace = GCAL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + + +class AccessLevel(ValueAttributeContainer): + """The Google Calendar accesslevel element""" + + _tag = 'accesslevel' + _namespace = GCAL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + +class Hidden(ValueAttributeContainer): + """The Google Calendar hidden element""" + + _tag = 'hidden' + _namespace = GCAL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + +class Selected(ValueAttributeContainer): + """The Google Calendar selected element""" + + _tag = 'selected' + _namespace = GCAL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + +class Timezone(ValueAttributeContainer): + """The Google Calendar timezone element""" + + _tag = 'timezone' + _namespace = GCAL_NAMESPACE + _children = 
ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + +class Where(atom.AtomBase): + """The Google Calendar Where element""" + + _tag = 'where' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['valueString'] = 'value_string' + + def __init__(self, value_string=None, extension_elements=None, + extension_attributes=None, text=None): + self.value_string = value_string + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class CalendarListEntry(gdata.GDataEntry, gdata.LinkFinder): + """A Google Calendar meta Entry flavor of an Atom Entry """ + + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}color' % GCAL_NAMESPACE] = ('color', Color) + _children['{%s}accesslevel' % GCAL_NAMESPACE] = ('access_level', + AccessLevel) + _children['{%s}hidden' % GCAL_NAMESPACE] = ('hidden', Hidden) + _children['{%s}selected' % GCAL_NAMESPACE] = ('selected', Selected) + _children['{%s}timezone' % GCAL_NAMESPACE] = ('timezone', Timezone) + _children['{%s}where' % gdata.GDATA_NAMESPACE] = ('where', Where) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + color=None, access_level=None, hidden=None, timezone=None, + selected=None, + where=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, + updated=updated, text=None) + + self.color = color + self.access_level = access_level + self.hidden = hidden + self.selected = selected + self.timezone = timezone + self.where = where + + 
+class CalendarListFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Calendar meta feed flavor of an Atom Feed""" + + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CalendarListEntry]) + + +class Scope(atom.AtomBase): + """The Google ACL scope element""" + + _tag = 'scope' + _namespace = GACL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + _attributes['type'] = 'type' + + def __init__(self, extension_elements=None, value=None, scope_type=None, + extension_attributes=None, text=None): + self.value = value + self.type = scope_type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Role(ValueAttributeContainer): + """The Google Calendar timezone element""" + + _tag = 'role' + _namespace = GACL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + +class CalendarAclEntry(gdata.GDataEntry, gdata.LinkFinder): + """A Google Calendar ACL Entry flavor of an Atom Entry """ + + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}scope' % GACL_NAMESPACE] = ('scope', Scope) + _children['{%s}role' % GACL_NAMESPACE] = ('role', Role) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + scope=None, role=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, + 
updated=updated, text=None) + self.scope = scope + self.role = role + + +class CalendarAclFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Calendar ACL feed flavor of an Atom Feed""" + + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CalendarAclEntry]) + + +class CalendarEventCommentEntry(gdata.GDataEntry, gdata.LinkFinder): + """A Google Calendar event comments entry flavor of an Atom Entry""" + + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + +class CalendarEventCommentFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Calendar event comments feed flavor of an Atom Feed""" + + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [CalendarEventCommentEntry]) + + +class ExtendedProperty(gdata.ExtendedProperty): + """A transparent subclass of gdata.ExtendedProperty added to this module + for backwards compatibility.""" + + +class Reminder(atom.AtomBase): + """The Google Calendar reminder element""" + + _tag = 'reminder' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['absoluteTime'] = 'absolute_time' + _attributes['days'] = 'days' + _attributes['hours'] = 'hours' + _attributes['minutes'] = 'minutes' + + def __init__(self, absolute_time=None, + days=None, hours=None, minutes=None, + extension_elements=None, + extension_attributes=None, text=None): + self.absolute_time = absolute_time + if days is not None: + self.days = str(days) + else: + self.days = None + if hours is not None: + 
self.hours = str(hours) + else: + self.hours = None + if minutes is not None: + self.minutes = str(minutes) + else: + self.minutes = None + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class When(atom.AtomBase): + """The Google Calendar When element""" + + _tag = 'when' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}reminder' % gdata.GDATA_NAMESPACE] = ('reminder', [Reminder]) + _attributes['startTime'] = 'start_time' + _attributes['endTime'] = 'end_time' + + def __init__(self, start_time=None, end_time=None, reminder=None, + extension_elements=None, extension_attributes=None, text=None): + self.start_time = start_time + self.end_time = end_time + self.reminder = reminder or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Recurrence(atom.AtomBase): + """The Google Calendar Recurrence element""" + + _tag = 'recurrence' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + +class UriEnumElement(atom.AtomBase): + + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, tag, enum_map, attrib_name='value', + extension_elements=None, extension_attributes=None, text=None): + self.tag=tag + self.enum_map=enum_map + self.attrib_name=attrib_name + self.value=None + self.text=text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + def findKey(self, value): + res=[item[0] for item in self.enum_map.items() if item[1] == value] + if res is None or len(res) == 0: + return None + return res[0] + + def _ConvertElementAttributeToMember(self, attribute, value): + # Special logic to use the enum_map to set 
the value of the object's value member. + if attribute == self.attrib_name and value != '': + self.value = self.enum_map[value] + return + # Find the attribute in this class's list of attributes. + if self.__class__._attributes.has_key(attribute): + # Find the member of this class which corresponds to the XML attribute + # (lookup in current_class._attributes) and set this member to the + # desired value (using self.__dict__). + setattr(self, self.__class__._attributes[attribute], value) + else: + # The current class doesn't map this attribute, so try to parent class. + atom.ExtensionContainer._ConvertElementAttributeToMember(self, + attribute, + value) + + def _AddMembersToElementTree(self, tree): + # Convert the members of this class which are XML child nodes. + # This uses the class's _children dictionary to find the members which + # should become XML child nodes. + member_node_names = [values[0] for tag, values in + self.__class__._children.iteritems()] + for member_name in member_node_names: + member = getattr(self, member_name) + if member is None: + pass + elif isinstance(member, list): + for instance in member: + instance._BecomeChildElement(tree) + else: + member._BecomeChildElement(tree) + # Special logic to set the desired XML attribute. + key = self.findKey(self.value) + if key is not None: + tree.attrib[self.attrib_name]=key + # Convert the members of this class which are XML attributes. + for xml_attribute, member_name in self.__class__._attributes.iteritems(): + member = getattr(self, member_name) + if member is not None: + tree.attrib[xml_attribute] = member + # Lastly, call the parent's _AddMembersToElementTree to get any + # extension elements. 
+ atom.ExtensionContainer._AddMembersToElementTree(self, tree) + + + +class AttendeeStatus(UriEnumElement): + """The Google Calendar attendeeStatus element""" + + _tag = 'attendeeStatus' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + + attendee_enum = { + 'http://schemas.google.com/g/2005#event.accepted' : 'ACCEPTED', + 'http://schemas.google.com/g/2005#event.declined' : 'DECLINED', + 'http://schemas.google.com/g/2005#event.invited' : 'INVITED', + 'http://schemas.google.com/g/2005#event.tentative' : 'TENTATIVE'} + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, 'attendeeStatus', AttendeeStatus.attendee_enum, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +class AttendeeType(UriEnumElement): + """The Google Calendar attendeeType element""" + + _tag = 'attendeeType' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + + attendee_type_enum = { + 'http://schemas.google.com/g/2005#event.optional' : 'OPTIONAL', + 'http://schemas.google.com/g/2005#event.required' : 'REQUIRED' } + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, 'attendeeType', + AttendeeType.attendee_type_enum, + extension_elements=extension_elements, + extension_attributes=extension_attributes,text=text) + + +class Visibility(UriEnumElement): + """The Google Calendar Visibility element""" + + _tag = 'visibility' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + + visibility_enum = { + 'http://schemas.google.com/g/2005#event.confidential' : 'CONFIDENTIAL', + 'http://schemas.google.com/g/2005#event.default' : 'DEFAULT', + 
'http://schemas.google.com/g/2005#event.private' : 'PRIVATE', + 'http://schemas.google.com/g/2005#event.public' : 'PUBLIC' } + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, 'visibility', Visibility.visibility_enum, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +class Transparency(UriEnumElement): + """The Google Calendar Transparency element""" + + _tag = 'transparency' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + + transparency_enum = { + 'http://schemas.google.com/g/2005#event.opaque' : 'OPAQUE', + 'http://schemas.google.com/g/2005#event.transparent' : 'TRANSPARENT' } + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, tag='transparency', + enum_map=Transparency.transparency_enum, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +class Comments(atom.AtomBase): + """The Google Calendar comments element""" + + _tag = 'comments' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + gdata.FeedLink) + _attributes['rel'] = 'rel' + + def __init__(self, rel=None, feed_link=None, extension_elements=None, + extension_attributes=None, text=None): + self.rel = rel + self.feed_link = feed_link + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class EventStatus(UriEnumElement): + """The Google Calendar eventStatus element""" + + _tag = 'eventStatus' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + + status_enum = { 
'http://schemas.google.com/g/2005#event.canceled' : 'CANCELED', + 'http://schemas.google.com/g/2005#event.confirmed' : 'CONFIRMED', + 'http://schemas.google.com/g/2005#event.tentative' : 'TENTATIVE'} + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, tag='eventStatus', + enum_map=EventStatus.status_enum, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + +class Who(UriEnumElement): + """The Google Calendar Who element""" + + _tag = 'who' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + _children['{%s}attendeeStatus' % gdata.GDATA_NAMESPACE] = ( + 'attendee_status', AttendeeStatus) + _children['{%s}attendeeType' % gdata.GDATA_NAMESPACE] = ('attendee_type', + AttendeeType) + _attributes['valueString'] = 'name' + _attributes['email'] = 'email' + + relEnum = { 'http://schemas.google.com/g/2005#event.attendee' : 'ATTENDEE', + 'http://schemas.google.com/g/2005#event.organizer' : 'ORGANIZER', + 'http://schemas.google.com/g/2005#event.performer' : 'PERFORMER', + 'http://schemas.google.com/g/2005#event.speaker' : 'SPEAKER', + 'http://schemas.google.com/g/2005#message.bcc' : 'BCC', + 'http://schemas.google.com/g/2005#message.cc' : 'CC', + 'http://schemas.google.com/g/2005#message.from' : 'FROM', + 'http://schemas.google.com/g/2005#message.reply-to' : 'REPLY_TO', + 'http://schemas.google.com/g/2005#message.to' : 'TO' } + + def __init__(self, name=None, email=None, attendee_status=None, + attendee_type=None, rel=None, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, 'who', Who.relEnum, attrib_name='rel', + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.name = name + self.email = email + self.attendee_status = attendee_status + self.attendee_type = attendee_type + 
self.rel = rel + + +class OriginalEvent(atom.AtomBase): + """The Google Calendar OriginalEvent element""" + + _tag = 'originalEvent' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + # TODO: The when tag used to map to a EntryLink, make sure it should really be a When. + _children['{%s}when' % gdata.GDATA_NAMESPACE] = ('when', When) + _attributes['id'] = 'id' + _attributes['href'] = 'href' + + def __init__(self, id=None, href=None, when=None, + extension_elements=None, extension_attributes=None, text=None): + self.id = id + self.href = href + self.when = when + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def GetCalendarEventEntryClass(): + return CalendarEventEntry + +# This class is not completely defined here, because of a circular reference +# in which CalendarEventEntryLink and CalendarEventEntry refer to one another. +class CalendarEventEntryLink(gdata.EntryLink): + """An entryLink which contains a calendar event entry + + Within an event's recurranceExceptions, an entry link + points to a calendar event entry. This class exists + to capture the calendar specific extensions in the entry. + """ + + _tag = 'entryLink' + _namespace = gdata.GDATA_NAMESPACE + _children = gdata.EntryLink._children.copy() + _attributes = gdata.EntryLink._attributes.copy() + # The CalendarEventEntryLink should like CalendarEventEntry as a child but + # that class hasn't been defined yet, so we will wait until after defining + # CalendarEventEntry to list it in _children. 
+ + +class RecurrenceException(atom.AtomBase): + """The Google Calendar RecurrenceException element""" + + _tag = 'recurrenceException' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}entryLink' % gdata.GDATA_NAMESPACE] = ('entry_link', + CalendarEventEntryLink) + _children['{%s}originalEvent' % gdata.GDATA_NAMESPACE] = ('original_event', + OriginalEvent) + _attributes['specialized'] = 'specialized' + + def __init__(self, specialized=None, entry_link=None, + original_event=None, extension_elements=None, + extension_attributes=None, text=None): + self.specialized = specialized + self.entry_link = entry_link + self.original_event = original_event + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class SendEventNotifications(atom.AtomBase): + """The Google Calendar sendEventNotifications element""" + + _tag = 'sendEventNotifications' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, extension_elements=None, + value=None, extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +class QuickAdd(atom.AtomBase): + """The Google Calendar quickadd element""" + + _tag = 'quickadd' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, extension_elements=None, + value=None, extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + def _TransferToElementTree(self, element_tree): + 
if self.value: + element_tree.attrib['value'] = self.value + element_tree.tag = GCAL_TEMPLATE % 'quickadd' + atom.AtomBase._TransferToElementTree(self, element_tree) + return element_tree + + def _TakeAttributeFromElementTree(self, attribute, element_tree): + if attribute == 'value': + self.value = element_tree.attrib[attribute] + del element_tree.attrib[attribute] + else: + atom.AtomBase._TakeAttributeFromElementTree(self, attribute, + element_tree) + + +class WebContentGadgetPref(atom.AtomBase): + + _tag = 'webContentGadgetPref' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + _attributes['value'] = 'value' + + """The Google Calendar Web Content Gadget Preferences element""" + + def __init__(self, name=None, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class WebContent(atom.AtomBase): + + _tag = 'webContent' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}webContentGadgetPref' % GCAL_NAMESPACE] = ('gadget_pref', + [WebContentGadgetPref]) + _attributes['url'] = 'url' + _attributes['width'] = 'width' + _attributes['height'] = 'height' + + def __init__(self, url=None, width=None, height=None, text=None, + gadget_pref=None, extension_elements=None, extension_attributes=None): + self.url = url + self.width = width + self.height = height + self.text = text + self.gadget_pref = gadget_pref or [] + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class WebContentLink(atom.Link): + + _tag = 'link' + _namespace = atom.ATOM_NAMESPACE + _children = atom.Link._children.copy() + _attributes = 
atom.Link._attributes.copy() + _children['{%s}webContent' % GCAL_NAMESPACE] = ('web_content', WebContent) + + def __init__(self, title=None, href=None, link_type=None, + web_content=None): + atom.Link.__init__(self, rel=WEB_CONTENT_LINK_REL, title=title, href=href, + link_type=link_type) + self.web_content = web_content + + +class CalendarEventEntry(gdata.BatchEntry): + """A Google Calendar flavor of an Atom Entry """ + + _tag = gdata.BatchEntry._tag + _namespace = gdata.BatchEntry._namespace + _children = gdata.BatchEntry._children.copy() + _attributes = gdata.BatchEntry._attributes.copy() + # This class also contains WebContentLinks but converting those members + # is handled in a special version of _ConvertElementTreeToMember. + _children['{%s}where' % gdata.GDATA_NAMESPACE] = ('where', [Where]) + _children['{%s}when' % gdata.GDATA_NAMESPACE] = ('when', [When]) + _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', [Who]) + _children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = ( + 'extended_property', [ExtendedProperty]) + _children['{%s}visibility' % gdata.GDATA_NAMESPACE] = ('visibility', + Visibility) + _children['{%s}transparency' % gdata.GDATA_NAMESPACE] = ('transparency', + Transparency) + _children['{%s}eventStatus' % gdata.GDATA_NAMESPACE] = ('event_status', + EventStatus) + _children['{%s}recurrence' % gdata.GDATA_NAMESPACE] = ('recurrence', + Recurrence) + _children['{%s}recurrenceException' % gdata.GDATA_NAMESPACE] = ( + 'recurrence_exception', [RecurrenceException]) + _children['{%s}sendEventNotifications' % GCAL_NAMESPACE] = ( + 'send_event_notifications', SendEventNotifications) + _children['{%s}quickadd' % GCAL_NAMESPACE] = ('quick_add', QuickAdd) + _children['{%s}comments' % gdata.GDATA_NAMESPACE] = ('comments', Comments) + _children['{%s}originalEvent' % gdata.GDATA_NAMESPACE] = ('original_event', + OriginalEvent) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, 
updated=None, + transparency=None, comments=None, event_status=None, + send_event_notifications=None, visibility=None, + recurrence=None, recurrence_exception=None, + where=None, when=None, who=None, quick_add=None, + extended_property=None, original_event=None, + batch_operation=None, batch_id=None, batch_status=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.BatchEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + batch_operation=batch_operation, batch_id=batch_id, + batch_status=batch_status, + title=title, updated=updated) + + self.transparency = transparency + self.comments = comments + self.event_status = event_status + self.send_event_notifications = send_event_notifications + self.visibility = visibility + self.recurrence = recurrence + self.recurrence_exception = recurrence_exception or [] + self.where = where or [] + self.when = when or [] + self.who = who or [] + self.quick_add = quick_add + self.extended_property = extended_property or [] + self.original_event = original_event + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + # We needed to add special logic to _ConvertElementTreeToMember because we + # want to make links with a rel of WEB_CONTENT_LINK_REL into a + # WebContentLink + def _ConvertElementTreeToMember(self, child_tree): + # Special logic to handle Web Content links + if (child_tree.tag == '{%s}link' % atom.ATOM_NAMESPACE and + child_tree.attrib['rel'] == WEB_CONTENT_LINK_REL): + if self.link is None: + self.link = [] + self.link.append(atom._CreateClassFromElementTree(WebContentLink, + child_tree)) + return + # Find the element's tag in this class's list of child members + if self.__class__._children.has_key(child_tree.tag): + member_name = self.__class__._children[child_tree.tag][0] + member_class = self.__class__._children[child_tree.tag][1] + # If 
the class member is supposed to contain a list, make sure the + # matching member is set to a list, then append the new member + # instance to the list. + if isinstance(member_class, list): + if getattr(self, member_name) is None: + setattr(self, member_name, []) + getattr(self, member_name).append(atom._CreateClassFromElementTree( + member_class[0], child_tree)) + else: + setattr(self, member_name, + atom._CreateClassFromElementTree(member_class, child_tree)) + else: + atom.ExtensionContainer._ConvertElementTreeToMember(self, child_tree) + + + def GetWebContentLink(self): + """Finds the first link with rel set to WEB_CONTENT_REL + + Returns: + A gdata.calendar.WebContentLink or none if none of the links had rel + equal to WEB_CONTENT_REL + """ + + for a_link in self.link: + if a_link.rel == WEB_CONTENT_LINK_REL: + return a_link + return None + + +def CalendarEventEntryFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarEventEntry, xml_string) + + +def CalendarEventCommentEntryFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarEventCommentEntry, xml_string) + + +CalendarEventEntryLink._children = {'{%s}entry' % atom.ATOM_NAMESPACE: + ('entry', CalendarEventEntry)} + + +def CalendarEventEntryLinkFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarEventEntryLink, xml_string) + + +class CalendarEventFeed(gdata.BatchFeed, gdata.LinkFinder): + """A Google Calendar event feed flavor of an Atom Feed""" + + _tag = gdata.BatchFeed._tag + _namespace = gdata.BatchFeed._namespace + _children = gdata.BatchFeed._children.copy() + _attributes = gdata.BatchFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [CalendarEventEntry]) + _children['{%s}timezone' % GCAL_NAMESPACE] = ('timezone', Timezone) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, 
entry=None, + total_results=None, start_index=None, items_per_page=None, + interrupted=None, timezone=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.BatchFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + interrupted=interrupted, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.timezone = timezone + + +def CalendarListEntryFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarListEntry, xml_string) + + +def CalendarAclEntryFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarAclEntry, xml_string) + + +def CalendarListFeedFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarListFeed, xml_string) + + +def CalendarAclFeedFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarAclFeed, xml_string) + + +def CalendarEventFeedFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarEventFeed, xml_string) + + +def CalendarEventCommentFeedFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarEventCommentFeed, xml_string) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/calendar/service.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/calendar/service.py new file mode 100644 index 0000000..39745dd --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/calendar/service.py @@ -0,0 +1,585 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""CalendarService extends the GDataService to streamline Google Calendar operations. + + CalendarService: Provides methods to query feeds and manipulate items. Extends + GDataService. + + DictionaryToParamList: Function which converts a dictionary into a list of + URL arguments (represented as strings). This is a + utility function used in CRUD operations. +""" + + +__author__ = 'api.vli (Vivian Li)' + + +import urllib +import gdata +import atom.service +import gdata.service +import gdata.calendar +import atom + + +DEFAULT_BATCH_URL = ('http://www.google.com/calendar/feeds/default/private' + '/full/batch') + + +class Error(Exception): + pass + + +class RequestError(Error): + pass + + +class CalendarService(gdata.service.GDataService): + """Client for the Google Calendar service.""" + + def __init__(self, email=None, password=None, source=None, + server='www.google.com', + additional_headers=None): + gdata.service.GDataService.__init__(self, email=email, password=password, + service='cl', source=source, + server=server, + additional_headers=additional_headers) + + def GetCalendarEventFeed(self, uri='/calendar/feeds/default/private/full'): + return self.Get(uri, converter=gdata.calendar.CalendarEventFeedFromString) + + def GetCalendarEventEntry(self, uri): + return self.Get(uri, converter=gdata.calendar.CalendarEventEntryFromString) + + def GetCalendarListFeed(self, uri='/calendar/feeds/default/allcalendars/full'): + return self.Get(uri, converter=gdata.calendar.CalendarListFeedFromString) + + def GetAllCalendarsFeed(self, 
uri='/calendar/feeds/default/allcalendars/full'): + return self.Get(uri, converter=gdata.calendar.CalendarListFeedFromString) + + def GetOwnCalendarsFeed(self, uri='/calendar/feeds/default/owncalendars/full'): + return self.Get(uri, converter=gdata.calendar.CalendarListFeedFromString) + + def GetCalendarListEntry(self, uri): + return self.Get(uri, converter=gdata.calendar.CalendarListEntryFromString) + + def GetCalendarAclFeed(self, uri='/calendar/feeds/default/acl/full'): + return self.Get(uri, converter=gdata.calendar.CalendarAclFeedFromString) + + def GetCalendarAclEntry(self, uri): + return self.Get(uri, converter=gdata.calendar.CalendarAclEntryFromString) + + def GetCalendarEventCommentFeed(self, uri): + return self.Get(uri, converter=gdata.calendar.CalendarEventCommentFeedFromString) + + def GetCalendarEventCommentEntry(self, uri): + return self.Get(uri, converter=gdata.calendar.CalendarEventCommentEntryFromString) + + def Query(self, uri, converter=None): + """Performs a query and returns a resulting feed or entry. + + Args: + feed: string The feed which is to be queried + + Returns: + On success, a GDataFeed or Entry depending on which is sent from the + server. 
+ On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + if converter: + result = self.Get(uri, converter=converter) + else: + result = self.Get(uri) + return result + + def CalendarQuery(self, query): + if isinstance(query, CalendarEventQuery): + return self.Query(query.ToUri(), + converter=gdata.calendar.CalendarEventFeedFromString) + elif isinstance(query, CalendarListQuery): + return self.Query(query.ToUri(), + converter=gdata.calendar.CalendarListFeedFromString) + elif isinstance(query, CalendarEventCommentQuery): + return self.Query(query.ToUri(), + converter=gdata.calendar.CalendarEventCommentFeedFromString) + else: + return self.Query(query.ToUri()) + + def InsertEvent(self, new_event, insert_uri, url_params=None, + escape_params=True): + """Adds an event to Google Calendar. + + Args: + new_event: atom.Entry or subclass A new event which is to be added to + Google Calendar. + insert_uri: the URL to post new events to the feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the event created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + return self.Post(new_event, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarEventEntryFromString) + + def InsertCalendarSubscription(self, calendar, url_params=None, + escape_params=True): + """Subscribes the authenticated user to the provided calendar. + + Args: + calendar: The calendar to which the user should be subscribed. 
+ url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the subscription created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + insert_uri = '/calendar/feeds/default/allcalendars/full' + return self.Post(calendar, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarListEntryFromString) + + def InsertCalendar(self, new_calendar, url_params=None, + escape_params=True): + """Creates a new calendar. + + Args: + new_calendar: The calendar to be created + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the calendar created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + insert_uri = '/calendar/feeds/default/owncalendars/full' + response = self.Post(new_calendar, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarListEntryFromString) + return response + + def UpdateCalendar(self, calendar, url_params=None, + escape_params=True): + """Updates a calendar. + + Args: + calendar: The calendar which should be updated + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. 
+ + Returns: + On successful insert, an entry containing the calendar created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + update_uri = calendar.GetEditLink().href + response = self.Put(data=calendar, uri=update_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarListEntryFromString) + return response + + def InsertAclEntry(self, new_entry, insert_uri, url_params=None, + escape_params=True): + """Adds an ACL entry (rule) to Google Calendar. + + Args: + new_entry: atom.Entry or subclass A new ACL entry which is to be added to + Google Calendar. + insert_uri: the URL to post new entries to the ACL feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the ACL entry created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + return self.Post(new_entry, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarAclEntryFromString) + + def InsertEventComment(self, new_entry, insert_uri, url_params=None, + escape_params=True): + """Adds an entry to Google Calendar. + + Args: + new_entry: atom.Entry or subclass A new entry which is to be added to + Google Calendar. + insert_uri: the URL to post new entrys to the feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. 
+ + Returns: + On successful insert, an entry containing the comment created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + return self.Post(new_entry, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarEventCommentEntryFromString) + + def DeleteEvent(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + """Removes an event with the specified ID from Google Calendar. + + Args: + edit_uri: string The edit URL of the entry to be deleted. Example: + 'http://www.google.com/calendar/feeds/default/private/full/abx' + url_params: dict (optional) Additional URL parameters to be included + in the deletion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful delete, a httplib.HTTPResponse containing the server's + response to the DELETE request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + url_prefix = 'http://%s/' % self.server + if edit_uri.startswith(url_prefix): + edit_uri = edit_uri[len(url_prefix):] + return self.Delete('/%s' % edit_uri, + url_params=url_params, escape_params=escape_params) + + def DeleteAclEntry(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + """Removes an ACL entry at the given edit_uri from Google Calendar. + + Args: + edit_uri: string The edit URL of the entry to be deleted. Example: + 'http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/default' + url_params: dict (optional) Additional URL parameters to be included + in the deletion request. 
+ escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful delete, a httplib.HTTPResponse containing the server's + response to the DELETE request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + url_prefix = 'http://%s/' % self.server + if edit_uri.startswith(url_prefix): + edit_uri = edit_uri[len(url_prefix):] + return self.Delete('/%s' % edit_uri, + url_params=url_params, escape_params=escape_params) + + def DeleteCalendarEntry(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + """Removes a calendar entry at the given edit_uri from Google Calendar. + + Args: + edit_uri: string The edit URL of the entry to be deleted. Example: + 'http://www.google.com/calendar/feeds/default/allcalendars/abcdef@group.calendar.google.com' + url_params: dict (optional) Additional URL parameters to be included + in the deletion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful delete, True is returned + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + return self.Delete(edit_uri, url_params=url_params, + escape_params=escape_params) + + def UpdateEvent(self, edit_uri, updated_event, url_params=None, + escape_params=True): + """Updates an existing event. + + Args: + edit_uri: string The edit link URI for the element being updated + updated_event: string, atom.Entry, or subclass containing + the Atom Entry which will replace the event which is + stored at the edit_url + url_params: dict (optional) Additional URL parameters to be included + in the update request. 
+ escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful update, a httplib.HTTPResponse containing the server's + response to the PUT request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + url_prefix = 'http://%s/' % self.server + if edit_uri.startswith(url_prefix): + edit_uri = edit_uri[len(url_prefix):] + return self.Put(updated_event, '/%s' % edit_uri, + url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarEventEntryFromString) + + def UpdateAclEntry(self, edit_uri, updated_rule, url_params=None, + escape_params=True): + """Updates an existing ACL rule. + + Args: + edit_uri: string The edit link URI for the element being updated + updated_rule: string, atom.Entry, or subclass containing + the Atom Entry which will replace the event which is + stored at the edit_url + url_params: dict (optional) Additional URL parameters to be included + in the update request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful update, a httplib.HTTPResponse containing the server's + response to the PUT request. 
+ On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + url_prefix = 'http://%s/' % self.server + if edit_uri.startswith(url_prefix): + edit_uri = edit_uri[len(url_prefix):] + return self.Put(updated_rule, '/%s' % edit_uri, + url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarAclEntryFromString) + + def ExecuteBatch(self, batch_feed, url, + converter=gdata.calendar.CalendarEventFeedFromString): + """Sends a batch request feed to the server. + + The batch request needs to be sent to the batch URL for a particular + calendar. You can find the URL by calling GetBatchLink().href on the + CalendarEventFeed. + + Args: + batch_feed: gdata.calendar.CalendarEventFeed A feed containing batch + request entries. Each entry contains the operation to be performed + on the data contained in the entry. For example an entry with an + operation type of insert will be used as if the individual entry + had been inserted. + url: str The batch URL for the Calendar to which these operations should + be applied. + converter: Function (optional) The function used to convert the server's + response to an object. The default value is + CalendarEventFeedFromString. + + Returns: + The results of the batch request's execution on the server. If the + default converter is used, this is stored in a CalendarEventFeed. 
+ """ + return self.Post(batch_feed, url, converter=converter) + + +class CalendarEventQuery(gdata.service.Query): + + def __init__(self, user='default', visibility='private', projection='full', + text_query=None, params=None, categories=None): + gdata.service.Query.__init__(self, + feed='http://www.google.com/calendar/feeds/%s/%s/%s' % ( + urllib.quote(user), + urllib.quote(visibility), + urllib.quote(projection)), + text_query=text_query, params=params, categories=categories) + + def _GetStartMin(self): + if 'start-min' in self.keys(): + return self['start-min'] + else: + return None + + def _SetStartMin(self, val): + self['start-min'] = val + + start_min = property(_GetStartMin, _SetStartMin, + doc="""The start-min query parameter""") + + def _GetStartMax(self): + if 'start-max' in self.keys(): + return self['start-max'] + else: + return None + + def _SetStartMax(self, val): + self['start-max'] = val + + start_max = property(_GetStartMax, _SetStartMax, + doc="""The start-max query parameter""") + + def _GetOrderBy(self): + if 'orderby' in self.keys(): + return self['orderby'] + else: + return None + + def _SetOrderBy(self, val): + if val is not 'lastmodified' and val is not 'starttime': + raise Error, "Order By must be either 'lastmodified' or 'starttime'" + self['orderby'] = val + + orderby = property(_GetOrderBy, _SetOrderBy, + doc="""The orderby query parameter""") + + def _GetSortOrder(self): + if 'sortorder' in self.keys(): + return self['sortorder'] + else: + return None + + def _SetSortOrder(self, val): + if (val is not 'ascending' and val is not 'descending' + and val is not 'a' and val is not 'd' and val is not 'ascend' + and val is not 'descend'): + raise Error, "Sort order must be either ascending, ascend, " + ( + "a or descending, descend, or d") + self['sortorder'] = val + + sortorder = property(_GetSortOrder, _SetSortOrder, + doc="""The sortorder query parameter""") + + def _GetSingleEvents(self): + if 'singleevents' in self.keys(): + return 
self['singleevents'] + else: + return None + + def _SetSingleEvents(self, val): + self['singleevents'] = val + + singleevents = property(_GetSingleEvents, _SetSingleEvents, + doc="""The singleevents query parameter""") + + def _GetFutureEvents(self): + if 'futureevents' in self.keys(): + return self['futureevents'] + else: + return None + + def _SetFutureEvents(self, val): + self['futureevents'] = val + + futureevents = property(_GetFutureEvents, _SetFutureEvents, + doc="""The futureevents query parameter""") + + def _GetRecurrenceExpansionStart(self): + if 'recurrence-expansion-start' in self.keys(): + return self['recurrence-expansion-start'] + else: + return None + + def _SetRecurrenceExpansionStart(self, val): + self['recurrence-expansion-start'] = val + + recurrence_expansion_start = property(_GetRecurrenceExpansionStart, + _SetRecurrenceExpansionStart, + doc="""The recurrence-expansion-start query parameter""") + + def _GetRecurrenceExpansionEnd(self): + if 'recurrence-expansion-end' in self.keys(): + return self['recurrence-expansion-end'] + else: + return None + + def _SetRecurrenceExpansionEnd(self, val): + self['recurrence-expansion-end'] = val + + recurrence_expansion_end = property(_GetRecurrenceExpansionEnd, + _SetRecurrenceExpansionEnd, + doc="""The recurrence-expansion-end query parameter""") + + def _SetTimezone(self, val): + self['ctz'] = val + + def _GetTimezone(self): + if 'ctz' in self.keys(): + return self['ctz'] + else: + return None + + ctz = property(_GetTimezone, _SetTimezone, + doc="""The ctz query parameter which sets report time on the server.""") + + +class CalendarListQuery(gdata.service.Query): + """Queries the Google Calendar meta feed""" + + def __init__(self, userId=None, text_query=None, + params=None, categories=None): + if userId is None: + userId = 'default' + + gdata.service.Query.__init__(self, feed='http://www.google.com/calendar/feeds/' + +userId, + text_query=text_query, params=params, + categories=categories) + +class 
CalendarEventCommentQuery(gdata.service.Query): + """Queries the Google Calendar event comments feed""" + + def __init__(self, feed=None): + gdata.service.Query.__init__(self, feed=feed) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/client.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/client.py new file mode 100644 index 0000000..62db08e --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/client.py @@ -0,0 +1,93 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import urllib +import urlparse +import gdata.auth +import gdata.service +import atom.service + + +SCOPE_URL_PARAM_NAME = gdata.service.SCOPE_URL_PARAM_NAME +# Maps the service names used in ClientLogin to scope URLs. +CLIENT_LOGIN_SCOPES = gdata.service.CLIENT_LOGIN_SCOPES + + +class AuthorizationRequired(gdata.service.Error): + pass + + +class GDataClient(gdata.service.GDataService): + """This class is deprecated. + + All functionality has been migrated to gdata.service.GDataService. 
+ """ + def __init__(self, application_name=None, tokens=None): + gdata.service.GDataService.__init__(self, source=application_name, + tokens=tokens) + + def ClientLogin(self, username, password, service_name, source=None, + account_type=None, auth_url=None, login_token=None, login_captcha=None): + gdata.service.GDataService.ClientLogin(self, username=username, + password=password, account_type=account_type, service=service_name, + auth_service_url=auth_url, source=source, captcha_token=login_token, + captcha_response=login_captcha) + + def Get(self, url, parser): + """Simplified interface for Get. + + Requires a parser function which takes the server response's body as + the only argument. + + Args: + url: A string or something that can be converted to a string using str. + The URL of the requested resource. + parser: A function which takes the HTTP body from the server as it's + only result. Common values would include str, + gdata.GDataEntryFromString, and gdata.GDataFeedFromString. + + Returns: The result of calling parser(http_response_body). + """ + return gdata.service.GDataService.Get(self, uri=url, converter=parser) + + def Post(self, data, url, parser, media_source=None): + """Streamlined version of Post. + + Requires a parser function which takes the server response's body as + the only argument. + """ + return gdata.service.GDataService.Post(self, data=data, uri=url, + media_source=media_source, converter=parser) + + def Put(self, data, url, parser, media_source=None): + """Streamlined version of Put. + + Requires a parser function which takes the server response's body as + the only argument. 
+ """ + return gdata.service.GDataService.Put(self, data=data, uri=url, + media_source=media_source, converter=parser) + + def Delete(self, url): + return gdata.service.GDataService.Delete(self, uri=url) + + +ExtractToken = gdata.service.ExtractToken +GenerateAuthSubRequestUrl = gdata.service.GenerateAuthSubRequestUrl diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/codesearch/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/codesearch/__init__.py new file mode 100644 index 0000000..fa23ef0 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/codesearch/__init__.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2007 Benoit Chesneau +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ + +"""Contains extensions to Atom objects used by Google Codesearch""" + +__author__ = 'Benoit Chesneau' + + +import atom +import gdata + + +CODESEARCH_NAMESPACE='http://schemas.google.com/codesearch/2006' +CODESEARCH_TEMPLATE='{http://shema.google.com/codesearch/2006}%s' + + +class Match(atom.AtomBase): + """ The Google Codesearch match element """ + _tag = 'match' + _namespace = CODESEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['lineNumber'] = 'line_number' + _attributes['type'] = 'type' + + def __init__(self, line_number=None, type=None, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.type = type + self.line_number = line_number + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class File(atom.AtomBase): + """ The Google Codesearch file element""" + _tag = 'file' + _namespace = CODESEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.name = name + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Package(atom.AtomBase): + """ The Google Codesearch package element""" + _tag = 'package' + _namespace = CODESEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + _attributes['uri'] = 'uri' + + def __init__(self, name=None, uri=None, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.name = name + self.uri = uri + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class CodesearchEntry(gdata.GDataEntry): + """ 
Google codesearch atom entry""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + _children['{%s}file' % CODESEARCH_NAMESPACE] = ('file', File) + _children['{%s}package' % CODESEARCH_NAMESPACE] = ('package', Package) + _children['{%s}match' % CODESEARCH_NAMESPACE] = ('match', [Match]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + match=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, + updated=updated, text=None) + + self.match = match or [] + + +def CodesearchEntryFromString(xml_string): + """Converts an XML string into a CodesearchEntry object. + + Args: + xml_string: string The XML describing a Codesearch feed entry. + + Returns: + A CodesearchEntry object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(CodesearchEntry, xml_string) + + +class CodesearchFeed(gdata.GDataFeed): + """feed containing list of Google codesearch Items""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CodesearchEntry]) + + +def CodesearchFeedFromString(xml_string): + """Converts an XML string into a CodesearchFeed object. + Args: + xml_string: string The XML describing a Codesearch feed. + Returns: + A CodeseartchFeed object corresponding to the given XML. 
+ """ + return atom.CreateClassFromXMLString(CodesearchFeed, xml_string) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/codesearch/service.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/codesearch/service.py new file mode 100644 index 0000000..e4210c7 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/codesearch/service.py @@ -0,0 +1,114 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2007 Benoit Chesneau +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + +"""CodesearchService extends GDataService to streamline Google Codesearch +operations""" + + +__author__ = 'Benoit Chesneau' + + +import atom +import gdata.service +import gdata.codesearch + + +class CodesearchService(gdata.service.GDataService): + """Client extension for Google codesearch service""" + + def __init__(self, email=None, password=None, source=None, + server='www.google.com', additional_headers=None): + """Constructor for the CodesearchService. + + Args: + email: string (optional) The e-mail address of the account to use for + authentication. + password: string (optional) The password of the account to use for + authentication. + source: string (optional) The name of the user's application. + server: string (optional) The server the feed is hosted on. 
+ additional_headers: dict (optional) Any additional HTTP headers to be + transmitted to the service in the form of key-value + pairs. + Yields: + A CodesearchService object used to communicate with the Google Codesearch + service. + """ + + gdata.service.GDataService.__init__(self, + email=email, password=password, service='codesearch', + source=source,server=server, + additional_headers=additional_headers) + + def Query(self, uri, converter=gdata.codesearch.CodesearchFeedFromString): + """Queries the Codesearch feed and returns the resulting feed of + entries. + + Args: + uri: string The full URI to be queried. This can contain query + parameters, a hostname, or simply the relative path to a Document + List feed. The DocumentQuery object is useful when constructing + query parameters. + converter: func (optional) A function which will be executed on the + retrieved item, generally to render it into a Python object. + By default the CodesearchFeedFromString function is used to + return a CodesearchFeed object. This is because most feed + queries will result in a feed and not a single entry. + + Returns : + A CodesearchFeed objects representing the feed returned by the server + """ + return self.Get(uri, converter=converter) + + def GetSnippetsFeed(self, text_query=None): + """Retrieve Codesearch feed for a keyword + + Args: + text_query : string (optional) The contents of the q query parameter. This + string is URL escaped upon conversion to a URI. + Returns: + A CodesearchFeed objects representing the feed returned by the server + """ + + query=gdata.codesearch.service.CodesearchQuery(text_query=text_query) + feed = self.Query(query.ToUri()) + return feed + + +class CodesearchQuery(gdata.service.Query): + """Object used to construct the query to the Google Codesearch feed. here only as a shorcut""" + + def __init__(self, feed='/codesearch/feeds/search', text_query=None, + params=None, categories=None): + """Constructor for Codesearch Query. 
+ + Args: + feed: string (optional) The path for the feed. (e.g. '/codesearch/feeds/search') + text_query: string (optional) The contents of the q query parameter. This + string is URL escaped upon conversion to a URI. + params: dict (optional) Parameter value string pairs which become URL + params when translated to a URI. These parameters are added to + the query's items. + categories: list (optional) List of category strings which should be + included as query categories. See gdata.service.Query for + additional documentation. + + Yelds: + A CodesearchQuery object to construct a URI based on Codesearch feed + """ + + gdata.service.Query.__init__(self, feed, text_query, params, categories) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/contacts/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/contacts/__init__.py new file mode 100644 index 0000000..a03a730 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/contacts/__init__.py @@ -0,0 +1,355 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Contains extensions to ElementWrapper objects used with Google Contacts.""" + +__author__ = 'dbrattli (Dag Brattli)' + + +import atom +import gdata + + +## Constants from http://code.google.com/apis/gdata/elements.html ## +REL_HOME = 'http://schemas.google.com/g/2005#home' +REL_WORK = 'http://schemas.google.com/g/2005#work' +REL_OTHER = 'http://schemas.google.com/g/2005#other' + + +IM_AIM = 'http://schemas.google.com/g/2005#AIM' # AOL Instant Messenger protocol +IM_MSN = 'http://schemas.google.com/g/2005#MSN' # MSN Messenger protocol +IM_YAHOO = 'http://schemas.google.com/g/2005#YAHOO' # Yahoo Messenger protocol +IM_SKYPE = 'http://schemas.google.com/g/2005#SKYPE' # Skype protocol +IM_QQ = 'http://schemas.google.com/g/2005#QQ' # QQ protocol +# Google Talk protocol +IM_GOOGLE_TALK = 'http://schemas.google.com/g/2005#GOOGLE_TALK' +IM_ICQ = 'http://schemas.google.com/g/2005#ICQ' # ICQ protocol +IM_JABBER = 'http://schemas.google.com/g/2005#JABBER' # Jabber protocol + + +PHOTO_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#photo' +PHOTO_EDIT_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#edit-photo' + + +PHONE_CAR = 'http://schemas.google.com/g/2005#car' # Number of a car phone. +PHONE_FAX = 'http://schemas.google.com/g/2005#fax' +# Unknown or unspecified type, such as a business phone number that doesn't +# belong to a particular person. +PHONE_GENERAL = 'http://schemas.google.com/g/2005#general' +PHONE_HOME = REL_HOME +PHONE_HOME_FAX = 'http://schemas.google.com/g/2005#home_fax' +# Phone number that makes sense only in a context known to the user (such as +# an enterprise PBX). +PHONE_INTERNAL = 'http://schemas.google.com/g/2005#internal-extension' +PHONE_MOBILE = 'http://schemas.google.com/g/2005#mobile' +# A special type of number for which no other rel value makes sense. +# For example, a TTY device. label can be used to indicate the actual type. 
+PHONE_OTHER = REL_OTHER +PHONE_PAGER = 'http://schemas.google.com/g/2005#pager' +PHONE_SATELLITE = 'http://schemas.google.com/g/2005#satellite' +PHONE_VOIP = 'http://schemas.google.com/g/2005#voip' +PHONE_WORK = REL_WORK +PHONE_WORK_FAX = 'http://schemas.google.com/g/2005#work_fax' + + +CONTACTS_NAMESPACE = 'http://schemas.google.com/contact/2008' + + +class OrgName(atom.AtomBase): + _tag = 'orgName' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, text=None, + extension_elements=None, extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class OrgTitle(atom.AtomBase): + _tag = 'orgTitle' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, text=None, + extension_elements=None, extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Organization(atom.AtomBase): + _tag = 'organization' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + _attributes['rel'] = 'rel' + _attributes['label'] = 'label' + _attributes['primary'] = 'primary' + + _children['{%s}orgName' % gdata.GDATA_NAMESPACE] = ('org_name', OrgName) + _children['{%s}orgTitle' % gdata.GDATA_NAMESPACE] = ('org_title', OrgTitle) + + def __init__(self, rel=None, primary='false', org_name=None, org_title=None, + label=None, text=None, extension_elements=None, + extension_attributes=None): + self.rel = rel or REL_OTHER + self.primary = primary + self.org_name = org_name + self.org_title = org_title + self.label = label + self.text = text + self.extension_elements = extension_elements or [] + 
self.extension_attributes = extension_attributes or {} + + +class PostalAddress(atom.AtomBase): + _tag = 'postalAddress' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + _attributes['primary'] = 'primary' + _attributes['rel'] = 'rel' + + def __init__(self, primary=None, rel=None, text=None, + extension_elements=None, extension_attributes=None): + self.primary = primary + self.rel = rel or REL_OTHER + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class IM(atom.AtomBase): + _tag = 'im' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + _attributes['address'] = 'address' + _attributes['primary'] = 'primary' + _attributes['protocol'] = 'protocol' + _attributes['label'] = 'label' + _attributes['rel'] = 'rel' + + def __init__(self, primary=None, rel=None, address=None, protocol=None, + label=None, text=None, extension_elements=None, + extension_attributes=None): + self.protocol = protocol + self.address = address + self.primary = primary + self.rel = rel or REL_OTHER + self.label = label + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Email(atom.AtomBase): + _tag = 'email' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + _attributes['address'] = 'address' + _attributes['primary'] = 'primary' + _attributes['rel'] = 'rel' + _attributes['label'] = 'label' + + def __init__(self, primary=None, rel=None, address=None, text=None, + label=None, extension_elements=None, extension_attributes=None): + self.address = address + self.primary = primary + self.rel = rel or REL_OTHER + self.label = label + self.text = text + self.extension_elements = 
extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class PhoneNumber(atom.AtomBase): + _tag = 'phoneNumber' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + _attributes['primary'] = 'primary' + _attributes['rel'] = 'rel' + + def __init__(self, primary=None, rel=None, text=None, + extension_elements=None, extension_attributes=None): + self.primary = primary + self.rel = rel or REL_OTHER + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Deleted(atom.AtomBase): + _tag = 'deleted' + _namespace = gdata.GDATA_NAMESPACE + + def __init__(self, text=None, + extension_elements=None, extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class GroupMembershipInfo(atom.AtomBase): + _tag = 'groupMembershipInfo' + _namespace = CONTACTS_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + + _attributes['deleted'] = 'deleted' + _attributes['href'] = 'href' + + def __init__(self, deleted=None, href=None, text=None, + extension_elements=None, extension_attributes=None): + self.deleted = deleted + self.href = href + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class ContactEntry(gdata.BatchEntry): + """A Google Contact flavor of an Atom Entry """ + + _children = gdata.BatchEntry._children.copy() + + _children['{%s}postalAddress' % gdata.GDATA_NAMESPACE] = ('postal_address', + [PostalAddress]) + _children['{%s}phoneNumber' % gdata.GDATA_NAMESPACE] = ('phone_number', + [PhoneNumber]) + _children['{%s}organization' % gdata.GDATA_NAMESPACE] = ('organization', + Organization) + _children['{%s}email' % gdata.GDATA_NAMESPACE] = ('email', [Email]) + _children['{%s}im' % 
gdata.GDATA_NAMESPACE] = ('im', [IM]) + _children['{%s}deleted' % gdata.GDATA_NAMESPACE] = ('deleted', Deleted) + _children['{%s}groupMembershipInfo' % CONTACTS_NAMESPACE] = ( + 'group_membership_info', [GroupMembershipInfo]) + _children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = ( + 'extended_property', [gdata.ExtendedProperty]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, email=None, postal_address=None, + deleted=None, organization=None, phone_number=None, im=None, + extended_property=None, group_membership_info=None, + batch_operation=None, batch_id=None, batch_status=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.BatchEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, published=published, + batch_operation=batch_operation, batch_id=batch_id, + batch_status=batch_status, title=title, updated=updated) + self.organization = organization + self.deleted = deleted + self.phone_number = phone_number or [] + self.postal_address = postal_address or [] + self.im = im or [] + self.extended_property = extended_property or [] + self.email = email or [] + self.group_membership_info = group_membership_info or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + def GetPhotoLink(self): + for a_link in self.link: + if a_link.rel == PHOTO_LINK_REL: + return a_link + return None + + def GetPhotoEditLink(self): + for a_link in self.link: + if a_link.rel == PHOTO_EDIT_LINK_REL: + return a_link + return None + + +def ContactEntryFromString(xml_string): + return atom.CreateClassFromXMLString(ContactEntry, xml_string) + + +class ContactsFeed(gdata.BatchFeed, gdata.LinkFinder): + """A Google Contacts feed flavor of an Atom Feed""" + + _children = gdata.BatchFeed._children.copy() + + _children['{%s}entry' % 
atom.ATOM_NAMESPACE] = ('entry', [ContactEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.BatchFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def ContactsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(ContactsFeed, xml_string) + + +class GroupEntry(gdata.BatchEntry): + """Represents a contact group.""" + _children = gdata.BatchEntry._children.copy() + _children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = ( + 'extended_property', [gdata.ExtendedProperty]) + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, control=None, title=None, updated=None, + extended_property=None, batch_operation=None, batch_id=None, + batch_status=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.BatchEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + batch_operation=batch_operation, batch_id=batch_id, + batch_status=batch_status, + title=title, updated=updated) + self.extended_property = extended_property or [] + + +def GroupEntryFromString(xml_string): + return atom.CreateClassFromXMLString(GroupEntry, xml_string) + + +class GroupsFeed(gdata.BatchFeed): + """A Google contact 
groups feed flavor of an Atom Feed""" + _children = gdata.BatchFeed._children.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GroupEntry]) + + +def GroupsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(GroupsFeed, xml_string) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/contacts/service.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/contacts/service.py new file mode 100644 index 0000000..17a76b0 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/contacts/service.py @@ -0,0 +1,295 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""ContactsService extends the GDataService to streamline Google Contacts operations. + + ContactsService: Provides methods to query feeds and manipulate items. Extends + GDataService. + + DictionaryToParamList: Function which converts a dictionary into a list of + URL arguments (represented as strings). This is a + utility function used in CRUD operations. 
+""" + +__author__ = 'dbrattli (Dag Brattli)' + + +import gdata +import atom.service +import gdata.service +import gdata.calendar +import atom + +DEFAULT_BATCH_URL = ('http://www.google.com/m8/feeds/contacts/default/full' + '/batch') + +class Error(Exception): + pass + +class RequestError(Error): + pass + +class ContactsService(gdata.service.GDataService): + """Client for the Google Contats service.""" + + def __init__(self, email=None, password=None, source=None, + server='www.google.com', + additional_headers=None): + gdata.service.GDataService.__init__(self, email=email, password=password, + service='cp', source=source, + server=server, + additional_headers=additional_headers) + + def GetContactsFeed(self, + uri='http://www.google.com/m8/feeds/contacts/default/full'): + return self.Get(uri, converter=gdata.contacts.ContactsFeedFromString) + + def GetContact(self, uri): + return self.Get(uri, converter=gdata.contacts.ContactEntryFromString) + + def CreateContact(self, new_contact, + insert_uri='/m8/feeds/contacts/default/full', url_params=None, + escape_params=True): + """Adds an new contact to Google Contacts. + + Args: + new_contact: atom.Entry or subclass A new contact which is to be added to + Google Contacts. + insert_uri: the URL to post new contacts to the feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. 
+ + Returns: + On successful insert, an entry containing the contact created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + return self.Post(new_contact, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.contacts.ContactEntryFromString) + + + def UpdateContact(self, edit_uri, updated_contact, url_params=None, + escape_params=True): + """Updates an existing contact. + + Args: + edit_uri: string The edit link URI for the element being updated + updated_contact: string, atom.Entry or subclass containing + the Atom Entry which will replace the contact which is + stored at the edit_url + url_params: dict (optional) Additional URL parameters to be included + in the update request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful update, a httplib.HTTPResponse containing the server's + response to the PUT request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + url_prefix = 'http://%s/' % self.server + if edit_uri.startswith(url_prefix): + edit_uri = edit_uri[len(url_prefix):] + return self.Put(updated_contact, '/%s' % edit_uri, + url_params=url_params, + escape_params=escape_params, + converter=gdata.contacts.ContactEntryFromString) + + def DeleteContact(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + """Removes an contact with the specified ID from Google Contacts. + + Args: + edit_uri: string The edit URL of the entry to be deleted. Example: + 'http://www.google.com/m8/feeds/contacts/default/full/xxx/yyy' + url_params: dict (optional) Additional URL parameters to be included + in the deletion request. 
+ escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful delete, a httplib.HTTPResponse containing the server's + response to the DELETE request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + url_prefix = 'http://%s/' % self.server + if edit_uri.startswith(url_prefix): + edit_uri = edit_uri[len(url_prefix):] + return self.Delete('/%s' % edit_uri, + url_params=url_params, escape_params=escape_params) + + def GetGroupsFeed(self, + uri='http://www.google.com/m8/feeds/groups/default/full'): + return self.Get(uri, converter=gdata.contacts.GroupsFeedFromString) + + def CreateGroup(self, new_group, + insert_uri='/m8/feeds/groups/default/full', url_params=None, + escape_params=True): + return self.Post(new_group, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.contacts.GroupEntryFromString) + + def UpdateGroup(self, edit_uri, updated_group, url_params=None, + escape_params=True): + url_prefix = 'http://%s/' % self.server + if edit_uri.startswith(url_prefix): + edit_uri = edit_uri[len(url_prefix):] + return self.Put(updated_group, '/%s' % edit_uri, + url_params=url_params, + escape_params=escape_params, + converter=gdata.contacts.GroupEntryFromString) + + def DeleteGroup(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + url_prefix = 'http://%s/' % self.server + if edit_uri.startswith(url_prefix): + edit_uri = edit_uri[len(url_prefix):] + return self.Delete('/%s' % edit_uri, + url_params=url_params, escape_params=escape_params) + + def ChangePhoto(self, media, contact_entry_or_url, content_type=None, + content_length=None): + """Change the photo for the contact by uploading a new photo. + + Performs a PUT against the photo edit URL to send the binary data for the + photo. 
+ + Args: + media: filename, file-like-object, or a gdata.MediaSource object to send. + contact_entry_or_url: ContactEntry or str If it is a ContactEntry, this + method will search for an edit photo link URL and + perform a PUT to the URL. + content_type: str (optional) the mime type for the photo data. This is + necessary if media is a file or file name, but if media + is a MediaSource object then the media object can contain + the mime type. If media_type is set, it will override the + mime type in the media object. + content_length: int or str (optional) Specifying the content length is + only required if media is a file-like object. If media + is a filename, the length is determined using + os.path.getsize. If media is a MediaSource object, it is + assumed that it already contains the content length. + """ + if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry): + url = contact_entry_or_url.GetPhotoEditLink().href + else: + url = contact_entry_or_url + if isinstance(media, gdata.MediaSource): + payload = media + # If the media object is a file-like object, then use it as the file + # handle in the in the MediaSource. + elif hasattr(media, 'read'): + payload = gdata.MediaSource(file_handle=media, + content_type=content_type, content_length=content_length) + # Assume that the media object is a file name. + else: + payload = gdata.MediaSource(content_type=content_type, + content_length=content_length, file_path=media) + return self.Put(payload, url) + + def GetPhoto(self, contact_entry_or_url): + """Retrives the binary data for the contact's profile photo as a string. + + Args: + contact_entry_or_url: a gdata.contacts.ContactEntry objecr or a string + containing the photo link's URL. If the contact entry does not + contain a photo link, the image will not be fetched and this method + will return None. 
+ """ + # TODO: add the ability to write out the binary image data to a file, + # reading and writing a chunk at a time to avoid potentially using up + # large amounts of memory. + url = None + if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry): + photo_link = contact_entry_or_url.GetPhotoLink() + if photo_link: + url = photo_link.href + else: + url = contact_entry_or_url + if url: + return self.Get(url, converter=str) + else: + return None + + def DeletePhoto(self, contact_entry_or_url): + url = None + if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry): + url = contact_entry_or_url.GetPhotoEditLink().href + else: + url = contact_entry_or_url + if url: + self.Delete(url) + + def ExecuteBatch(self, batch_feed, url, + converter=gdata.contacts.ContactsFeedFromString): + """Sends a batch request feed to the server. + + Args: + batch_feed: gdata.contacts.ContactFeed A feed containing batch + request entries. Each entry contains the operation to be performed + on the data contained in the entry. For example an entry with an + operation type of insert will be used as if the individual entry + had been inserted. + url: str The batch URL to which these operations should be applied. + converter: Function (optional) The function used to convert the server's + response to an object. The default value is ContactsFeedFromString. + + Returns: + The results of the batch request's execution on the server. If the + default converter is used, this is stored in a ContactsFeed. 
+ """ + return self.Post(batch_feed, url, converter=converter) + +class ContactsQuery(gdata.service.Query): + + def __init__(self, feed=None, text_query=None, params=None, + categories=None, group=None): + self.feed = feed or '/m8/feeds/contacts/default/full' + if group: + self._SetGroup(group) + gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query, + params=params, categories=categories) + + def _GetGroup(self): + if 'group' in self: + return self['group'] + else: + return None + + def _SetGroup(self, group_id): + self['group'] = group_id + + group = property(_GetGroup, _SetGroup, + doc='The group query parameter to find only contacts in this group') + +class GroupsQuery(gdata.service.Query): + + def __init__(self, feed=None, text_query=None, params=None, + categories=None): + self.feed = feed or '/m8/feeds/groups/default/full' + gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query, + params=params, categories=categories) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/docs/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/docs/__init__.py new file mode 100644 index 0000000..76afab0 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/docs/__init__.py @@ -0,0 +1,66 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Contains extensions to Atom objects used with Google Documents.""" + +__author__ = 'api.jfisher (Jeff Fisher)' + +import atom +import gdata + + +class DocumentListEntry(gdata.GDataEntry): + """The Google Documents version of an Atom Entry""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + +def DocumentListEntryFromString(xml_string): + """Converts an XML string into a DocumentListEntry object. + + Args: + xml_string: string The XML describing a Document List feed entry. + + Returns: + A DocumentListEntry object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(DocumentListEntry, xml_string) + + +class DocumentListFeed(gdata.GDataFeed): + """A feed containing a list of Google Documents Items""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [DocumentListEntry]) + + +def DocumentListFeedFromString(xml_string): + """Converts an XML string into a DocumentListFeed object. + + Args: + xml_string: string The XML describing a DocumentList feed. + + Returns: + A DocumentListFeed object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(DocumentListFeed, xml_string) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/docs/service.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/docs/service.py new file mode 100644 index 0000000..2d35ee2 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/docs/service.py @@ -0,0 +1,292 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""DocsService extends the GDataService to streamline Google Documents + operations. + + DocsService: Provides methods to query feeds and manipulate items. + Extends GDataService. + + DocumentQuery: Queries a Google Document list feed. +""" + + +__author__ = 'api.jfisher (Jeff Fisher)' + + +import urllib +import atom +import gdata.service +import gdata.docs + + +# XML Namespaces used in Google Documents entities. +DATA_KIND_SCHEME = 'http://schemas.google.com/g/2005#kind' +DOCUMENT_KIND_TERM = 'http://schemas.google.com/docs/2007#document' +SPREADSHEET_KIND_TERM = 'http://schemas.google.com/docs/2007#spreadsheet' +PRESENTATION_KIND_TERM = 'http://schemas.google.com/docs/2007#presentation' +# File extensions of documents that are permitted to be uploaded. +SUPPORTED_FILETYPES = { + 'CSV': 'text/csv', + 'TSV': 'text/tab-separated-values', + 'TAB': 'text/tab-separated-values', + 'DOC': 'application/msword', + 'ODS': 'application/x-vnd.oasis.opendocument.spreadsheet', + 'ODT': 'application/vnd.oasis.opendocument.text', + 'RTF': 'application/rtf', + 'SXW': 'application/vnd.sun.xml.writer', + 'TXT': 'text/plain', + 'XLS': 'application/vnd.ms-excel', + 'PPT': 'application/vnd.ms-powerpoint', + 'PPS': 'application/vnd.ms-powerpoint', + 'HTM': 'text/html', + 'HTML' : 'text/html'} + + +class DocsService(gdata.service.GDataService): + + """Client extension for the Google Documents service Document List feed.""" + + def __init__(self, email=None, password=None, source=None, + server='docs.google.com', additional_headers=None): + """Constructor for the DocsService. 
+ + Args: + email: string (optional) The e-mail address of the account to use for + authentication. + password: string (optional) The password of the account to use for + authentication. + source: string (optional) The name of the user's application. + server: string (optional) The server the feed is hosted on. + additional_headers: dict (optional) Any additional HTTP headers to be + transmitted to the service in the form of key-value + pairs. + + Yields: + A DocsService object used to communicate with the Google Documents + service. + """ + gdata.service.GDataService.__init__(self, email=email, password=password, + service='writely', source=source, + server=server, + additional_headers=additional_headers) + + def Query(self, uri, converter=gdata.docs.DocumentListFeedFromString): + """Queries the Document List feed and returns the resulting feed of + entries. + + Args: + uri: string The full URI to be queried. This can contain query + parameters, a hostname, or simply the relative path to a Document + List feed. The DocumentQuery object is useful when constructing + query parameters. + converter: func (optional) A function which will be executed on the + retrieved item, generally to render it into a Python object. + By default the DocumentListFeedFromString function is used to + return a DocumentListFeed object. This is because most feed + queries will result in a feed and not a single entry. + """ + return self.Get(uri, converter=converter) + + def QueryDocumentListFeed(self, uri): + """Retrieves a DocumentListFeed by retrieving a URI based off the Document + List feed, including any query parameters. A DocumentQuery object can + be used to construct these parameters. + + Args: + uri: string The URI of the feed being retrieved possibly with query + parameters. + + Returns: + A DocumentListFeed object representing the feed returned by the server. 
+ """ + return self.Get(uri, converter=gdata.docs.DocumentListFeedFromString) + + def GetDocumentListEntry(self, uri): + """Retrieves a particular DocumentListEntry by its unique URI. + + Args: + uri: string The unique URI of an entry in a Document List feed. + + Returns: + A DocumentListEntry object representing the retrieved entry. + """ + return self.Get(uri, converter=gdata.docs.DocumentListEntryFromString) + + def GetDocumentListFeed(self): + """Retrieves a feed containing all of a user's documents.""" + q = gdata.docs.service.DocumentQuery(); + return self.QueryDocumentListFeed(q.ToUri()) + + def UploadPresentation(self, media_source, title): + """Uploads a presentation inside of a MediaSource object to the Document + List feed with the given title. + + Args: + media_source: MediaSource The MediaSource object containing a + presentation file to be uploaded. + title: string The title of the presentation on the server after being + uploaded. + + Returns: + A GDataEntry containing information about the presentation created on the + Google Documents service. + """ + category = atom.Category(scheme=DATA_KIND_SCHEME, + term=PRESENTATION_KIND_TERM) + return self._UploadFile(media_source, title, category) + + def UploadSpreadsheet(self, media_source, title): + """Uploads a spreadsheet inside of a MediaSource object to the Document + List feed with the given title. + + Args: + media_source: MediaSource The MediaSource object containing a spreadsheet + file to be uploaded. + title: string The title of the spreadsheet on the server after being + uploaded. + + Returns: + A GDataEntry containing information about the spreadsheet created on the + Google Documents service. + """ + category = atom.Category(scheme=DATA_KIND_SCHEME, + term=SPREADSHEET_KIND_TERM) + return self._UploadFile(media_source, title, category) + + def UploadDocument(self, media_source, title): + """Uploads a document inside of a MediaSource object to the Document List + feed with the given title. 
+ + Args: + media_source: MediaSource The gdata.MediaSource object containing a + document file to be uploaded. + title: string The title of the document on the server after being + uploaded. + + Returns: + A GDataEntry containing information about the document created on the + Google Documents service. + """ + category = atom.Category(scheme=DATA_KIND_SCHEME, + term=DOCUMENT_KIND_TERM) + return self._UploadFile(media_source, title, category) + + def _UploadFile(self, media_source, title, category): + """Uploads a file to the Document List feed. + + Args: + media_source: A gdata.MediaSource object containing the file to be + uploaded. + title: string The title of the document on the server after being + uploaded. + category: An atom.Category object specifying the appropriate document + type + Returns: + A GDataEntry containing information about the document created on + the Google Documents service. + """ + media_entry = gdata.GDataEntry() + media_entry.title = atom.Title(text=title) + media_entry.category.append(category) + media_entry = self.Post(media_entry, '/feeds/documents/private/full', + media_source = media_source, + extra_headers = {'Slug' : media_source.file_name }) + + return media_entry + + +class DocumentQuery(gdata.service.Query): + + """Object used to construct a URI to query the Google Document List feed""" + + def __init__(self, feed='/feeds/documents', visibility='private', + projection='full', text_query=None, params=None, + categories=None): + """Constructor for Document List Query + + Args: + feed: string (optional) The path for the feed. (e.g. '/feeds/documents') + visibility: string (optional) The visibility chosen for the current feed. + projection: string (optional) The projection chosen for the current feed. + text_query: string (optional) The contents of the q query parameter. This + string is URL escaped upon conversion to a URI. + params: dict (optional) Parameter value string pairs which become URL + params when translated to a URI. 
These parameters are added to + the query's items. + categories: list (optional) List of category strings which should be + included as query categories. See gdata.service.Query for + additional documentation. + + Yields: + A DocumentQuery object used to construct a URI based on the Document + List feed. + """ + self.visibility = visibility + self.projection = projection + gdata.service.Query.__init__(self, feed, text_query, params, categories) + + def ToUri(self): + """Generates a URI from the query parameters set in the object. + + Returns: + A string containing the URI used to retrieve entries from the Document + List feed. + """ + old_feed = self.feed + self.feed = '/'.join([old_feed, self.visibility, self.projection]) + new_feed = gdata.service.Query.ToUri(self) + self.feed = old_feed + return new_feed + + def AddNamedFolder(self, email, folder_name): + """Adds a named folder category, qualified by a schema. + + This function lets you query for documents that are contained inside a + named folder without fear of collision with other categories. + + Args: + email: string The email of the user who owns the folder. + folder_name: string The name of the folder. + + Returns: + The string of the category that was added to the object. + """ + + category = '{http://schemas.google.com/docs/2007/folders/' + category += email + '}' + folder_name + + self.categories.append(category) + + return category + + def RemoveNamedFolder(self, email, folder_name): + """Removes a named folder category, qualified by a schema. + + Args: + email: string The email of the user who owns the folder. + folder_name: string The name of the folder. + + Returns: + The string of the category that was removed to the object. 
+ """ + + category = '{http://schemas.google.com/docs/2007/folders/' + category += email + '}' + folder_name + + self.categories.remove(category) + + return category diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/exif/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/exif/__init__.py new file mode 100644 index 0000000..7f1f9c2 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/exif/__init__.py @@ -0,0 +1,217 @@ +# -*-*- encoding: utf-8 -*-*- +# +# This is gdata.photos.exif, implementing the exif namespace in gdata +# +# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $ +# +# Copyright 2007 Håvard Gulldahl +# Portions copyright 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This module maps elements from the {EXIF} namespace[1] to GData objects. +These elements describe image data, using exif attributes[2]. + +Picasa Web Albums uses the exif namespace to represent Exif data encoded +in a photo [3]. + +Picasa Web Albums uses the following exif elements: +exif:distance +exif:exposure +exif:flash +exif:focallength +exif:fstop +exif:imageUniqueID +exif:iso +exif:make +exif:model +exif:tags +exif:time + +[1]: http://schemas.google.com/photos/exif/2007. 
+[2]: http://en.wikipedia.org/wiki/Exif +[3]: http://code.google.com/apis/picasaweb/reference.html#exif_reference +""" + + +__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__ +__license__ = 'Apache License v2' + + +import atom +import gdata + +EXIF_NAMESPACE = 'http://schemas.google.com/photos/exif/2007' + +class ExifBaseElement(atom.AtomBase): + """Base class for elements in the EXIF_NAMESPACE (%s). To add new elements, you only need to add the element tag name to self._tag + """ % EXIF_NAMESPACE + + _tag = '' + _namespace = EXIF_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +class Distance(ExifBaseElement): + "(float) The distance to the subject, e.g. 0.0" + + _tag = 'distance' +def DistanceFromString(xml_string): + return atom.CreateClassFromXMLString(Distance, xml_string) + +class Exposure(ExifBaseElement): + "(float) The exposure time used, e.g. 0.025 or 8.0E4" + + _tag = 'exposure' +def ExposureFromString(xml_string): + return atom.CreateClassFromXMLString(Exposure, xml_string) + +class Flash(ExifBaseElement): + """(string) Boolean value indicating whether the flash was used. + The .text attribute will either be `true' or `false' + + As a convenience, this object's .bool method will return what you want, + so you can say: + + flash_used = bool(Flash) + + """ + + _tag = 'flash' + def __bool__(self): + if self.text.lower() in ('true','false'): + return self.text.lower() == 'true' +def FlashFromString(xml_string): + return atom.CreateClassFromXMLString(Flash, xml_string) + +class Focallength(ExifBaseElement): + "(float) The focal length used, e.g. 
23.7" + + _tag = 'focallength' +def FocallengthFromString(xml_string): + return atom.CreateClassFromXMLString(Focallength, xml_string) + +class Fstop(ExifBaseElement): + "(float) The fstop value used, e.g. 5.0" + + _tag = 'fstop' +def FstopFromString(xml_string): + return atom.CreateClassFromXMLString(Fstop, xml_string) + +class ImageUniqueID(ExifBaseElement): + "(string) The unique image ID for the photo. Generated by Google Photo servers" + + _tag = 'imageUniqueID' +def ImageUniqueIDFromString(xml_string): + return atom.CreateClassFromXMLString(ImageUniqueID, xml_string) + +class Iso(ExifBaseElement): + "(int) The iso equivalent value used, e.g. 200" + + _tag = 'iso' +def IsoFromString(xml_string): + return atom.CreateClassFromXMLString(Iso, xml_string) + +class Make(ExifBaseElement): + "(string) The make of the camera used, e.g. Fictitious Camera Company" + + _tag = 'make' +def MakeFromString(xml_string): + return atom.CreateClassFromXMLString(Make, xml_string) + +class Model(ExifBaseElement): + "(string) The model of the camera used,e.g AMAZING-100D" + + _tag = 'model' +def ModelFromString(xml_string): + return atom.CreateClassFromXMLString(Model, xml_string) + +class Time(ExifBaseElement): + """(int) The date/time the photo was taken, e.g. 1180294337000. + Represented as the number of milliseconds since January 1st, 1970. + + The value of this element will always be identical to the value + of the . + + Look at this object's .isoformat() for a human friendly datetime string: + + photo_epoch = Time.text # 1180294337000 + photo_isostring = Time.isoformat() # '2007-05-27T19:32:17.000Z' + + Alternatively: + photo_datetime = Time.datetime() # (requires python >= 2.3) + """ + + _tag = 'time' + def isoformat(self): + """(string) Return the timestamp as a ISO 8601 formatted string, + e.g. 
'2007-05-27T19:32:17.000Z' + """ + import time + epoch = float(self.text)/1000 + return time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime(epoch)) + + def datetime(self): + """(datetime.datetime) Return the timestamp as a datetime.datetime object + + Requires python 2.3 + """ + import datetime + epoch = float(self.text)/1000 + return datetime.datetime.fromtimestamp(epoch) + +def TimeFromString(xml_string): + return atom.CreateClassFromXMLString(Time, xml_string) + +class Tags(ExifBaseElement): + """The container for all exif elements. + The element can appear as a child of a photo entry. + """ + + _tag = 'tags' + _children = atom.AtomBase._children.copy() + _children['{%s}fstop' % EXIF_NAMESPACE] = ('fstop', Fstop) + _children['{%s}make' % EXIF_NAMESPACE] = ('make', Make) + _children['{%s}model' % EXIF_NAMESPACE] = ('model', Model) + _children['{%s}distance' % EXIF_NAMESPACE] = ('distance', Distance) + _children['{%s}exposure' % EXIF_NAMESPACE] = ('exposure', Exposure) + _children['{%s}flash' % EXIF_NAMESPACE] = ('flash', Flash) + _children['{%s}focallength' % EXIF_NAMESPACE] = ('focallength', Focallength) + _children['{%s}iso' % EXIF_NAMESPACE] = ('iso', Iso) + _children['{%s}time' % EXIF_NAMESPACE] = ('time', Time) + _children['{%s}imageUniqueID' % EXIF_NAMESPACE] = ('imageUniqueID', ImageUniqueID) + + def __init__(self, extension_elements=None, extension_attributes=None, text=None): + ExifBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.fstop=None + self.make=None + self.model=None + self.distance=None + self.exposure=None + self.flash=None + self.focallength=None + self.iso=None + self.time=None + self.imageUniqueID=None +def TagsFromString(xml_string): + return atom.CreateClassFromXMLString(Tags, xml_string) + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/geo/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/geo/__init__.py new file mode 100644 index 
0000000..1fcf604 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/geo/__init__.py @@ -0,0 +1,185 @@ +# -*-*- encoding: utf-8 -*-*- +# +# This is gdata.photos.geo, implementing geological positioning in gdata structures +# +# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $ +# +# Copyright 2007 Håvard Gulldahl +# Portions copyright 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Picasa Web Albums uses the georss and gml namespaces for +elements defined in the GeoRSS and Geography Markup Language specifications. + +Specifically, Picasa Web Albums uses the following elements: + +georss:where +gml:Point +gml:pos + +http://code.google.com/apis/picasaweb/reference.html#georss_reference + + +Picasa Web Albums also accepts geographic-location data in two other formats: +W3C format and plain-GeoRSS (without GML) format. +""" +# +#Over the wire, the Picasa Web Albums only accepts and sends the +#elements mentioned above, but this module will let you seamlessly convert +#between the different formats (TODO 2007-10-18 hg) + +__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: api chokes on non-ascii chars in __author__ +__license__ = 'Apache License v2' + + +import atom +import gdata + +GEO_NAMESPACE = 'http://www.w3.org/2003/01/geo/wgs84_pos#' +GML_NAMESPACE = 'http://www.opengis.net/gml' +GEORSS_NAMESPACE = 'http://www.georss.org/georss' + +class GeoBaseElement(atom.AtomBase): + """Base class for elements. 
+ + To add new elements, you only need to add the element tag name to self._tag + and the namespace to self._namespace + """ + + _tag = '' + _namespace = GML_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +class Pos(GeoBaseElement): + """(string) Specifies a latitude and longitude, separated by a space, + e.g. `35.669998 139.770004'""" + + _tag = 'pos' +def PosFromString(xml_string): + return atom.CreateClassFromXMLString(Pos, xml_string) + +class Point(GeoBaseElement): + """(container) Specifies a particular geographical point, by means of + a element.""" + + _tag = 'Point' + _children = atom.AtomBase._children.copy() + _children['{%s}pos' % GML_NAMESPACE] = ('pos', Pos) + def __init__(self, pos=None, extension_elements=None, extension_attributes=None, text=None): + GeoBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + if pos is None: + pos = Pos() + self.pos=pos +def PointFromString(xml_string): + return atom.CreateClassFromXMLString(Point, xml_string) + +class Where(GeoBaseElement): + """(container) Specifies a geographical location or region. + A container element, containing a single element. + (Not to be confused with .) + + Note that the (only) child attribute, .Point, is title-cased. + This reflects the names of elements in the xml stream + (principle of least surprise). + + As a convenience, you can get a tuple of (lat, lon) with Where.location(), + and set the same data with Where.setLocation( (lat, lon) ). + + Similarly, there are methods to set and get only latitude and longitude. 
+ """ + + _tag = 'where' + _namespace = GEORSS_NAMESPACE + _children = atom.AtomBase._children.copy() + _children['{%s}Point' % GML_NAMESPACE] = ('Point', Point) + def __init__(self, point=None, extension_elements=None, extension_attributes=None, text=None): + GeoBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + if point is None: + point = Point() + self.Point=point + def location(self): + "(float, float) Return Where.Point.pos.text as a (lat,lon) tuple" + try: + return tuple([float(z) for z in self.Point.pos.text.split(' ')]) + except AttributeError: + return tuple() + def set_location(self, latlon): + """(bool) Set Where.Point.pos.text from a (lat,lon) tuple. + + Arguments: + lat (float): The latitude in degrees, from -90.0 to 90.0 + lon (float): The longitude in degrees, from -180.0 to 180.0 + + Returns True on success. + + """ + + assert(isinstance(latlon[0], float)) + assert(isinstance(latlon[1], float)) + try: + self.Point.pos.text = "%s %s" % (latlon[0], latlon[1]) + return True + except AttributeError: + return False + def latitude(self): + "(float) Get the latitude value of the geo-tag. See also .location()" + lat, lon = self.location() + return lat + + def longitude(self): + "(float) Get the longtitude value of the geo-tag. See also .location()" + lat, lon = self.location() + return lon + + longtitude = longitude + + def set_latitude(self, lat): + """(bool) Set the latitude value of the geo-tag. + + Args: + lat (float): The new latitude value + + See also .set_location() + """ + _lat, lon = self.location() + return self.set_location(lat, lon) + + def set_longitude(self, lon): + """(bool) Set the longtitude value of the geo-tag. 
+ + Args: + lat (float): The new latitude value + + See also .set_location() + """ + lat, _lon = self.location() + return self.set_location(lat, lon) + + set_longtitude = set_longitude + +def WhereFromString(xml_string): + return atom.CreateClassFromXMLString(Where, xml_string) + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/media/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/media/__init__.py new file mode 100644 index 0000000..fd2abc7 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/media/__init__.py @@ -0,0 +1,331 @@ +# -*-*- encoding: utf-8 -*-*- +# +# This is gdata.photos.media, implementing parts of the MediaRSS spec in gdata structures +# +# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $ +# +# Copyright 2007 Håvard Gulldahl +# Portions copyright 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Essential attributes of photos in Google Photos/Picasa Web Albums are +expressed using elements from the `media' namespace, defined in the +MediaRSS specification[1]. + +Due to copyright issues, the elements herein are documented sparingly, please +consult with the Google Photos API Reference Guide[2], alternatively the +official MediaRSS specification[1] for details. +(If there is a version conflict between the two sources, stick to the +Google Photos API). 
+ +[1]: http://search.yahoo.com/mrss (version 1.1.1) +[2]: http://code.google.com/apis/picasaweb/reference.html#media_reference + +Keep in mind that Google Photos only uses a subset of the MediaRSS elements +(and some of the attributes are trimmed down, too): + +media:content +media:credit +media:description +media:group +media:keywords +media:thumbnail +media:title +""" + +__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: api chokes on non-ascii chars in __author__ +__license__ = 'Apache License v2' + + +import atom +import gdata + +MEDIA_NAMESPACE = 'http://search.yahoo.com/mrss/' +YOUTUBE_NAMESPACE = 'http://gdata.youtube.com/schemas/2007' + +class MediaBaseElement(atom.AtomBase): + """Base class for elements in the MEDIA_NAMESPACE. + To add new elements, you only need to add the element tag name to self._tag + """ + + _tag = '' + _namespace = MEDIA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +class Content(MediaBaseElement): + """(attribute container) This element describes the original content, + e.g. an image or a video. There may be multiple Content elements + in a media:Group. + + For example, a video may have a + element that specifies a JPEG + representation of the video, and a + element that specifies the URL of the video itself. + + Attributes: + url: non-ambigous reference to online object + width: width of the object frame, in pixels + height: width of the object frame, in pixels + medium: one of `image' or `video', allowing the api user to quickly + determine the object's type + type: Internet media Type[1] (a.k.a. 
mime type) of the object -- a more + verbose way of determining the media type + (optional) fileSize: the size of the object, in bytes + + [1]: http://en.wikipedia.org/wiki/Internet_media_type + """ + + _tag = 'content' + _attributes = atom.AtomBase._attributes.copy() + _attributes['url'] = 'url' + _attributes['width'] = 'width' + _attributes['height'] = 'height' + _attributes['medium'] = 'medium' + _attributes['type'] = 'type' + _attributes['fileSize'] = 'fileSize' + + def __init__(self, url=None, width=None, height=None, + medium=None, content_type=None, fileSize=None, format=None, + extension_elements=None, extension_attributes=None, text=None): + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.url = url + self.width = width + self.height = height + self.medium = medium + self.type = content_type + self.fileSize = fileSize + +def ContentFromString(xml_string): + return atom.CreateClassFromXMLString(Content, xml_string) + +class Credit(MediaBaseElement): + """(string) Contains the nickname of the user who created the content, + e.g. `Liz Bennet'. + + This is a user-specified value that should be used when referring to + the user by name. + + Note that none of the attributes from the MediaRSS spec are supported. + """ + + _tag = 'credit' +def CreditFromString(xml_string): + return atom.CreateClassFromXMLString(Credit, xml_string) + +class Description(MediaBaseElement): + """(string) A description of the media object. + Either plain unicode text, or entity-encoded html (look at the `type' + attribute). + + E.g `A set of photographs I took while vacationing in Italy.' + + For `api' projections, the description is in plain text; + for `base' projections, the description is in HTML. + + Attributes: + type: either `text' or `html'. 
+ """ + + _tag = 'description' + _attributes = atom.AtomBase._attributes.copy() + _attributes['type'] = 'type' + def __init__(self, description_type=None, + extension_elements=None, extension_attributes=None, text=None): + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + self.type = description_type +def DescriptionFromString(xml_string): + return atom.CreateClassFromXMLString(Description, xml_string) + +class Keywords(MediaBaseElement): + """(string) Lists the tags associated with the entry, + e.g `italy, vacation, sunset'. + + Contains a comma-separated list of tags that have been added to the photo, or + all tags that have been added to photos in the album. + """ + + _tag = 'keywords' +def KeywordsFromString(xml_string): + return atom.CreateClassFromXMLString(Keywords, xml_string) + +class Thumbnail(MediaBaseElement): + """(attributes) Contains the URL of a thumbnail of a photo or album cover. + + There can be multiple elements for a given ; + for example, a given item may have multiple thumbnails at different sizes. + Photos generally have two thumbnails at different sizes; + albums generally have one cropped thumbnail. + + If the thumbsize parameter is set to the initial query, this element points + to thumbnails of the requested sizes; otherwise the thumbnails are the + default thumbnail size. + + This element must not be confused with the element. + + Attributes: + url: The URL of the thumbnail image. + height: The height of the thumbnail image, in pixels. + width: The width of the thumbnail image, in pixels. 
+ """ + + _tag = 'thumbnail' + _attributes = atom.AtomBase._attributes.copy() + _attributes['url'] = 'url' + _attributes['width'] = 'width' + _attributes['height'] = 'height' + def __init__(self, url=None, width=None, height=None, + extension_attributes=None, text=None, extension_elements=None): + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.url = url + self.width = width + self.height = height + + +def ThumbnailFromString(xml_string): + return atom.CreateClassFromXMLString(Thumbnail, xml_string) + +class Title(MediaBaseElement): + """(string) Contains the title of the entry's media content, in plain text. + + Attributes: + type: Always set to plain + """ + + _tag = 'title' + _attributes = atom.AtomBase._attributes.copy() + _attributes['type'] = 'type' + def __init__(self, title_type=None, + extension_attributes=None, text=None, extension_elements=None): + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.type = title_type +def TitleFromString(xml_string): + return atom.CreateClassFromXMLString(Title, xml_string) + +class Player(MediaBaseElement): + """(string) Contains the embeddable player URL for the entry's media content + if the media is a video. 
+ + Attributes: + url: Always set to plain + """ + + _tag = 'player' + _attributes = atom.AtomBase._attributes.copy() + _attributes['url'] = 'url' + + def __init__(self, player_url=None, + extension_attributes=None, extension_elements=None): + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.url= player_url + +class Private(atom.AtomBase): + """The YouTube Private element""" + _tag = 'private' + _namespace = YOUTUBE_NAMESPACE + +class Duration(atom.AtomBase): + """The YouTube Duration element""" + _tag = 'duration' + _namespace = YOUTUBE_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['seconds'] = 'seconds' + +class Category(MediaBaseElement): + """The mediagroup:category element""" + + _tag = 'category' + _attributes = atom.AtomBase._attributes.copy() + _attributes['term'] = 'term' + _attributes['scheme'] = 'scheme' + _attributes['label'] = 'label' + + def __init__(self, term=None, scheme=None, label=None, text=None, + extension_elements=None, extension_attributes=None): + """Constructor for Category + + Args: + term: str + scheme: str + label: str + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.term = term + self.scheme = scheme + self.label = label + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Group(MediaBaseElement): + """Container element for all media elements. 
+ The element can appear as a child of an album, photo or + video entry.""" + + _tag = 'group' + _children = atom.AtomBase._children.copy() + _children['{%s}content' % MEDIA_NAMESPACE] = ('content', [Content,]) + _children['{%s}credit' % MEDIA_NAMESPACE] = ('credit', Credit) + _children['{%s}description' % MEDIA_NAMESPACE] = ('description', Description) + _children['{%s}keywords' % MEDIA_NAMESPACE] = ('keywords', Keywords) + _children['{%s}thumbnail' % MEDIA_NAMESPACE] = ('thumbnail', [Thumbnail,]) + _children['{%s}title' % MEDIA_NAMESPACE] = ('title', Title) + _children['{%s}category' % MEDIA_NAMESPACE] = ('category', [Category,]) + _children['{%s}duration' % YOUTUBE_NAMESPACE] = ('duration', Duration) + _children['{%s}private' % YOUTUBE_NAMESPACE] = ('private', Private) + _children['{%s}player' % MEDIA_NAMESPACE] = ('player', Player) + + def __init__(self, content=None, credit=None, description=None, keywords=None, + thumbnail=None, title=None, duration=None, private=None, + category=None, player=None, extension_elements=None, + extension_attributes=None, text=None): + + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.content=content + self.credit=credit + self.description=description + self.keywords=keywords + self.thumbnail=thumbnail or [] + self.title=title + self.duration=duration + self.private=private + self.category=category or [] + self.player=player + +def GroupFromString(xml_string): + return atom.CreateClassFromXMLString(Group, xml_string) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/oauth/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/oauth/__init__.py new file mode 100644 index 0000000..baf543e --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/oauth/__init__.py @@ -0,0 +1,524 @@ +import cgi +import urllib +import time +import random +import urlparse +import hmac +import binascii + +VERSION = '1.0' # Hi Blaine! 
+HTTP_METHOD = 'GET' +SIGNATURE_METHOD = 'PLAINTEXT' + +# Generic exception class +class OAuthError(RuntimeError): + def __init__(self, message='OAuth error occured.'): + self.message = message + +# optional WWW-Authenticate header (401 error) +def build_authenticate_header(realm=''): + return {'WWW-Authenticate': 'OAuth realm="%s"' % realm} + +# url escape +def escape(s): + # escape '/' too + return urllib.quote(s, safe='~') + +# util function: current timestamp +# seconds since epoch (UTC) +def generate_timestamp(): + return int(time.time()) + +# util function: nonce +# pseudorandom number +def generate_nonce(length=8): + return ''.join([str(random.randint(0, 9)) for i in range(length)]) + +# OAuthConsumer is a data type that represents the identity of the Consumer +# via its shared secret with the Service Provider. +class OAuthConsumer(object): + key = None + secret = None + + def __init__(self, key, secret): + self.key = key + self.secret = secret + +# OAuthToken is a data type that represents an End User via either an access +# or request token. 
class OAuthToken(object):
    """Represents an End User via either an access or request token."""
    # access tokens and request tokens
    key = None
    secret = None

    '''
    key = the token
    secret = the token secret
    '''
    def __init__(self, key, secret):
        self.key = key
        self.secret = secret

    def to_string(self):
        # serialize as a URL-encoded oauth_token/oauth_token_secret pair
        return urllib.urlencode({'oauth_token': self.key, 'oauth_token_secret': self.secret})

    # return a token from something like:
    # oauth_token_secret=digg&oauth_token=digg
    def from_string(s):
        params = cgi.parse_qs(s, keep_blank_values=False)
        key = params['oauth_token'][0]
        secret = params['oauth_token_secret'][0]
        return OAuthToken(key, secret)
    from_string = staticmethod(from_string)

    def __str__(self):
        return self.to_string()

# OAuthRequest represents the request and can be serialized
class OAuthRequest(object):
    '''
    OAuth parameters:
        - oauth_consumer_key
        - oauth_token
        - oauth_signature_method
        - oauth_signature
        - oauth_timestamp
        - oauth_nonce
        - oauth_version
    ... any additional parameters, as defined by the Service Provider.
    '''
    parameters = None # oauth parameters
    http_method = HTTP_METHOD
    http_url = None
    version = VERSION

    def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
        self.http_method = http_method
        self.http_url = http_url
        self.parameters = parameters or {}

    def set_parameter(self, parameter, value):
        self.parameters[parameter] = value

    def get_parameter(self, parameter):
        try:
            return self.parameters[parameter]
        except:
            raise OAuthError('Parameter not found: %s' % parameter)

    def _get_timestamp_nonce(self):
        # both parameters are required; get_parameter raises if missing
        return self.get_parameter('oauth_timestamp'), self.get_parameter('oauth_nonce')

    # get any non-oauth parameters
    def get_nonoauth_parameters(self):
        parameters = {}
        for k, v in self.parameters.iteritems():
            # ignore oauth parameters
            if k.find('oauth_') < 0:
                parameters[k] = v
        return parameters

    # serialize as a header for an HTTPAuth request
    def to_header(self, realm=''):
        auth_header = 'OAuth realm="%s"' % realm
        # add the oauth parameters
        if self.parameters:
            for k, v in self.parameters.iteritems():
                if k[:6] == 'oauth_':
                    auth_header += ', %s="%s"' % (k, escape(str(v)))
        return {'Authorization': auth_header}

    # serialize as post data for a POST request
    def to_postdata(self):
        return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in self.parameters.iteritems()])

    # serialize as a url for a GET request
    def to_url(self):
        return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())

    # return a string that consists of all the parameters that need to be signed
    def get_normalized_parameters(self):
        # NOTE(review): `params` aliases self.parameters, so the del below
        # removes 'oauth_signature' from the request in place (not a copy).
        params = self.parameters
        try:
            # exclude the signature if it exists
            del params['oauth_signature']
        except:
            pass
        key_values = params.items()
        # sort lexicographically, first after key, then after value
        key_values.sort()
        # combine key value pairs in string and escape
        return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in key_values])

    # just uppercases the http method
    def get_normalized_http_method(self):
        return self.http_method.upper()

    # parses the url and rebuilds it to be scheme://host/path
    def get_normalized_http_url(self):
        parts = urlparse.urlparse(self.http_url)
        url_string = '%s://%s%s' % (parts[0], parts[1], parts[2]) # scheme, netloc, path
        return url_string

    # set the signature parameter to the result of build_signature
    def sign_request(self, signature_method, consumer, token):
        # set the signature method
        self.set_parameter('oauth_signature_method', signature_method.get_name())
        # set the signature
        self.set_parameter('oauth_signature', self.build_signature(signature_method, consumer, token))

    def build_signature(self, signature_method, consumer, token):
        # call the build signature method within the signature method
        return signature_method.build_signature(self, consumer, token)

    def from_request(http_method, http_url, headers=None, parameters=None, query_string=None):
        """Build an OAuthRequest by merging header, query-string and URL params."""
        # combine multiple parameter sources
        if parameters is None:
            parameters = {}

        # headers
        if headers and 'Authorization' in headers:
            auth_header = headers['Authorization']
            # check that the authorization header is OAuth
            # NOTE(review): str.index raises ValueError when 'OAuth' is absent
            # (it never returns -1); str.find was probably intended here.
            if auth_header.index('OAuth') > -1:
                try:
                    # get the parameters from the header
                    header_params = OAuthRequest._split_header(auth_header)
                    parameters.update(header_params)
                except:
                    raise OAuthError('Unable to parse OAuth parameters from Authorization header.')

        # GET or POST query string
        if query_string:
            query_params = OAuthRequest._split_url_string(query_string)
            parameters.update(query_params)

        # URL parameters
        param_str = urlparse.urlparse(http_url)[4] # query
        url_params = OAuthRequest._split_url_string(param_str)
        parameters.update(url_params)

        if parameters:
            return OAuthRequest(http_method, http_url, parameters)

        return None
    from_request = staticmethod(from_request)

    def from_consumer_and_token(oauth_consumer, token=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
        """Build a signed-request skeleton with fresh timestamp and nonce."""
        if not parameters:
            parameters = {}

        defaults = {
            'oauth_consumer_key': oauth_consumer.key,
            'oauth_timestamp': generate_timestamp(),
            'oauth_nonce': generate_nonce(),
            'oauth_version': OAuthRequest.version,
        }

        # caller-supplied parameters win over the defaults
        defaults.update(parameters)
        parameters = defaults

        if token:
            parameters['oauth_token'] = token.key

        return OAuthRequest(http_method, http_url, parameters)
    from_consumer_and_token = staticmethod(from_consumer_and_token)

    def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD, http_url=None, parameters=None):
        """Build the request used to send a user to the authorization URL."""
        if not parameters:
            parameters = {}

        parameters['oauth_token'] = token.key

        if callback:
            parameters['oauth_callback'] = callback

        return OAuthRequest(http_method, http_url, parameters)
    from_token_and_callback = staticmethod(from_token_and_callback)

    # util function: turn Authorization: header into parameters, has to do some unescaping
    def _split_header(header):
        params = {}
        parts = header.split(',')
        for param in parts:
            # ignore realm parameter
            if param.find('OAuth realm') > -1:
                continue
            # remove whitespace
            param = param.strip()
            # split key-value
            param_parts = param.split('=', 1)
            # remove quotes and unescape the value
            params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
        return params
    _split_header = staticmethod(_split_header)

    # util function: turn url string into parameters, has to do some unescaping
    def _split_url_string(param_str):
        parameters = cgi.parse_qs(param_str, keep_blank_values=False)
        for k, v in parameters.iteritems():
            # parse_qs returns lists; keep only the first value, unescaped
            parameters[k] = urllib.unquote(v[0])
        return parameters
    _split_url_string = staticmethod(_split_url_string)

# OAuthServer is a worker to check a requests validity against a data store
class OAuthServer(object):
    timestamp_threshold = 300 # in seconds, five minutes
    version = VERSION
    signature_methods = None
    data_store = None

    def __init__(self, data_store=None, signature_methods=None):
        self.data_store = data_store
        self.signature_methods = signature_methods or {}

    def set_data_store(self, oauth_data_store):
        # FIXME: NameError at call time — 'data_store' is undefined in this
        # scope; this was presumably meant to read:
        #     self.data_store = oauth_data_store
        self.data_store = data_store

    def get_data_store(self):
        return self.data_store

    def add_signature_method(self, signature_method):
        # register by the method's declared name (e.g. 'HMAC-SHA1')
        self.signature_methods[signature_method.get_name()] = signature_method
        return self.signature_methods

    # process a request_token request
    # returns the request token on success
    def fetch_request_token(self, oauth_request):
        try:
            # get the request token for authorization
            token = self._get_token(oauth_request, 'request')
        except OAuthError:
            # no token required for the initial token request
            version = self._get_version(oauth_request)
            consumer = self._get_consumer(oauth_request)
            self._check_signature(oauth_request, consumer, None)
            # fetch a new token
            token = self.data_store.fetch_request_token(consumer)
        return token

    # process an access_token request
    # returns the access token on success
    def fetch_access_token(self, oauth_request):
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        # get the request token
        token = self._get_token(oauth_request, 'request')
        self._check_signature(oauth_request, consumer, token)
        new_token = self.data_store.fetch_access_token(consumer, token)
        return new_token

    # verify an api call, checks all the parameters
    def verify_request(self, oauth_request):
        # -> consumer and token
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        # get the access token
        token = self._get_token(oauth_request, 'access')
        self._check_signature(oauth_request, consumer, token)
        parameters = oauth_request.get_nonoauth_parameters()
        return consumer, token, parameters

    # authorize a request token
    def authorize_token(self, token, user):
        return self.data_store.authorize_request_token(token, user)

    # get the callback url
    def get_callback(self, oauth_request):
        return oauth_request.get_parameter('oauth_callback')

    # optional support for the authenticate header
    def build_authenticate_header(self, realm=''):
        return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}

    # verify the correct version request for this server
    def _get_version(self, oauth_request):
        try:
            version = oauth_request.get_parameter('oauth_version')
        except:
            # missing oauth_version defaults to the module version
            version = VERSION
        if version and version != self.version:
            raise OAuthError('OAuth version %s not supported.' % str(version))
        return version

    # figure out the signature with some defaults
    def _get_signature_method(self, oauth_request):
        try:
            signature_method = oauth_request.get_parameter('oauth_signature_method')
        except:
            signature_method = SIGNATURE_METHOD
        try:
            # get the signature method object
            signature_method = self.signature_methods[signature_method]
        except:
            signature_method_names = ', '.join(self.signature_methods.keys())
            raise OAuthError('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names))

        return signature_method

    def _get_consumer(self, oauth_request):
        consumer_key = oauth_request.get_parameter('oauth_consumer_key')
        if not consumer_key:
            raise OAuthError('Invalid consumer key.')
        consumer = self.data_store.lookup_consumer(consumer_key)
        if not consumer:
            raise OAuthError('Invalid consumer.')
        return consumer

    # try to find the token for the provided request token key
    def _get_token(self, oauth_request, token_type='access'):
        token_field = oauth_request.get_parameter('oauth_token')
        token = self.data_store.lookup_token(token_type, token_field)
        if not token:
            raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
        return token

    def _check_signature(self, oauth_request, consumer, token):
        # timestamp/nonce checks first, then the signature itself
        timestamp, nonce = oauth_request._get_timestamp_nonce()
        self._check_timestamp(timestamp)
        self._check_nonce(consumer, token, nonce)
        signature_method = self._get_signature_method(oauth_request)
        try:
            signature = oauth_request.get_parameter('oauth_signature')
        except:
            raise OAuthError('Missing signature.')
        # validate the signature
        valid_sig = signature_method.check_signature(oauth_request, consumer, token, signature)
        if not valid_sig:
            key, base = signature_method.build_signature_base_string(oauth_request, consumer, token)
            raise OAuthError('Invalid signature. Expected signature base string: %s' % base)
        # NOTE(review): 'built' is assigned but never used — dead code.
        built = signature_method.build_signature(oauth_request, consumer, token)

    def _check_timestamp(self, timestamp):
        # verify that timestamp is recentish
        timestamp = int(timestamp)
        now = int(time.time())
        lapsed = now - timestamp
        if lapsed > self.timestamp_threshold:
            raise OAuthError('Expired timestamp: given %d and now %s has a greater difference than threshold %d' % (timestamp, now, self.timestamp_threshold))

    def _check_nonce(self, consumer, token, nonce):
        # verify that the nonce is uniqueish
        nonce = self.data_store.lookup_nonce(consumer, token, nonce)
        if nonce:
            raise OAuthError('Nonce already used: %s' % str(nonce))

# OAuthClient is a worker to attempt to execute a request
class OAuthClient(object):
    consumer = None
    token = None

    def __init__(self, oauth_consumer, oauth_token):
        self.consumer = oauth_consumer
        self.token = oauth_token

    def get_consumer(self):
        return self.consumer

    def get_token(self):
        return self.token

    def fetch_request_token(self, oauth_request):
        # -> OAuthToken
        raise NotImplementedError

    def fetch_access_token(self, oauth_request):
        # -> OAuthToken
        raise NotImplementedError

    def access_resource(self, oauth_request):
        # -> some protected resource
        raise NotImplementedError

# OAuthDataStore is a database abstraction used to lookup consumers and tokens
class OAuthDataStore(object):

    def lookup_consumer(self, key):
        # -> OAuthConsumer
        raise NotImplementedError

    def lookup_token(self, oauth_consumer, token_type, token_token):
        # -> OAuthToken
        raise NotImplementedError

    def lookup_nonce(self, oauth_consumer, oauth_token, nonce, timestamp):
        # -> OAuthToken
        raise NotImplementedError

    def fetch_request_token(self, oauth_consumer):
        # -> OAuthToken
        raise NotImplementedError

    def fetch_access_token(self, oauth_consumer, oauth_token):
        # -> OAuthToken
        raise NotImplementedError

    def authorize_request_token(self, oauth_token, user):
        # -> OAuthToken
        raise NotImplementedError

# OAuthSignatureMethod is a strategy class that implements a signature method
class OAuthSignatureMethod(object):
    def get_name(self):
        # -> str
        raise NotImplementedError

    def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
        # -> str key, str raw
        raise NotImplementedError

    def build_signature(self, oauth_request, oauth_consumer, oauth_token):
        # -> str
        raise NotImplementedError

    def check_signature(self, oauth_request, consumer, token, signature):
        # default check: rebuild the signature and compare
        built = self.build_signature(oauth_request, consumer, token)
        return built == signature

class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
    """HMAC-SHA1 signature method (OAuth Core 1.0 section 9.2)."""

    def get_name(self):
        return 'HMAC-SHA1'

    def build_signature_base_string(self, oauth_request, consumer, token):
        # method & url & normalized-parameters, each percent-encoded
        sig = (
            escape(oauth_request.get_normalized_http_method()),
            escape(oauth_request.get_normalized_http_url()),
            escape(oauth_request.get_normalized_parameters()),
        )

        # signing key is consumer secret '&' token secret (empty if no token)
        key = '%s&' % escape(consumer.secret)
        if token:
            key += escape(token.secret)
        raw = '&'.join(sig)
        return key, raw

    def build_signature(self, oauth_request, consumer, token):
        # build the base signature string
        key, raw = self.build_signature_base_string(oauth_request, consumer, token)

        # hmac object
        try:
            import hashlib # 2.5
            hashed = hmac.new(key, raw, hashlib.sha1)
        except:
            import sha # deprecated
            hashed = hmac.new(key, raw, sha)

        # calculate the digest base 64
        return binascii.b2a_base64(hashed.digest())[:-1]

class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
    """PLAINTEXT signature method (OAuth Core 1.0 section 9.4)."""

    def get_name(self):
        return 'PLAINTEXT'

    def build_signature_base_string(self, oauth_request, consumer, token):
        # concatenate the consumer key and secret
        sig = escape(consumer.secret) + '&'
        if token:
            sig = sig + escape(token.secret)
        return sig

    def build_signature(self, oauth_request, consumer, token):
        # the PLAINTEXT signature is the base string itself
        return self.build_signature_base_string(oauth_request, consumer, token)
diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/oauth/rsa.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/oauth/rsa.py
new file mode 100644
index 0000000..f8d9b85
--- /dev/null
+++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/oauth/rsa.py
@@ -0,0 +1,120 @@
#!/usr/bin/python

"""
requires tlslite - http://trevp.net/tlslite/

"""

import binascii

from gdata.tlslite.utils import keyfactory
from gdata.tlslite.utils import cryptomath

# XXX andy: ugly local import due to module name, oauth.oauth
import gdata.oauth as oauth

class OAuthSignatureMethod_RSA_SHA1(oauth.OAuthSignatureMethod):
    """RSA-SHA1 signature method (OAuth Core 1.0 section 9.3)."""
    def get_name(self):
        return "RSA-SHA1"

    def _fetch_public_cert(self, oauth_request):
        # not implemented yet, ideas are:
        # (1) do a lookup in a table of trusted certs keyed off of consumer
        # (2) fetch via http using a url provided by the requester
        # (3) some sort of specific discovery code based on request
        #
        # either way should return a string representation of the certificate
        raise NotImplementedError

    def _fetch_private_cert(self, oauth_request):
        # not implemented yet, ideas are:
        # (1) do a lookup in a table of trusted certs keyed off of consumer
        #
        # either way should return a string representation of the certificate
        raise NotImplementedError

    def build_signature_base_string(self, oauth_request, consumer, token):
        sig = (
            oauth.escape(oauth_request.get_normalized_http_method()),
            oauth.escape(oauth_request.get_normalized_http_url()),
            oauth.escape(oauth_request.get_normalized_parameters()),
        )
        # RSA-SHA1 has no shared-secret key component
        key = ''
        raw = '&'.join(sig)
        return key, raw

    def build_signature(self, oauth_request, consumer, token):
        """Sign the base string with the Consumer's RSA private key."""
        key, base_string = self.build_signature_base_string(oauth_request,
                                                            consumer,
                                                            token)

        # Fetch the private key cert based on the request
        cert = self._fetch_private_cert(oauth_request)

        # Pull the private key from the certificate
        privatekey = keyfactory.parsePrivateKey(cert)

        # Convert base_string to bytes
        #base_string_bytes = cryptomath.createByteArraySequence(base_string)

        # Sign using the key
        signed = privatekey.hashAndSign(base_string)

        return binascii.b2a_base64(signed)[:-1]

    def check_signature(self, oauth_request, consumer, token, signature):
        # FIXME: 'base64' is used here but never imported in this module —
        # this raises NameError at call time; needs 'import base64'.
        decoded_sig = base64.b64decode(signature);

        key, base_string = self.build_signature_base_string(oauth_request,
                                                            consumer,
                                                            token)

        # Fetch the public key cert based on the request
        cert = self._fetch_public_cert(oauth_request)

        # Pull the public key from the certificate
        publickey = keyfactory.parsePEMKey(cert, public=True)

        # Check the signature
        ok = publickey.hashAndVerify(decoded_sig, base_string)

        return ok


class TestOAuthSignatureMethod_RSA_SHA1(OAuthSignatureMethod_RSA_SHA1):
    """Test double that returns fixed, well-known certificates."""
    def _fetch_public_cert(self, oauth_request):
        cert = """
-----BEGIN CERTIFICATE-----
MIIBpjCCAQ+gAwIBAgIBATANBgkqhkiG9w0BAQUFADAZMRcwFQYDVQQDDA5UZXN0
IFByaW5jaXBhbDAeFw03MDAxMDEwODAwMDBaFw0zODEyMzEwODAwMDBaMBkxFzAV
BgNVBAMMDlRlc3QgUHJpbmNpcGFsMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
gQC0YjCwIfYoprq/FQO6lb3asXrxLlJFuCvtinTF5p0GxvQGu5O3gYytUvtC2JlY
zypSRjVxwxrsuRcP3e641SdASwfrmzyvIgP08N4S0IFzEURkV1wp/IpH7kH41Etb
mUmrXSwfNZsnQRE5SYSOhh+LcK2wyQkdgcMv11l4KoBkcwIDAQABMA0GCSqGSIb3
DQEBBQUAA4GBAGZLPEuJ5SiJ2ryq+CmEGOXfvlTtEL2nuGtr9PewxkgnOjZpUy+d
4TvuXJbNQc8f4AMWL/tO9w0Fk80rWKp9ea8/df4qMq5qlFWlx6yOLQxumNOmECKb
WpkUQDIDJEoFUzKMVuJf4KO/FJ345+BNLGgbJ6WujreoM1X/gYfdnJ/J
-----END CERTIFICATE-----
"""
        return cert

    def _fetch_private_cert(self, oauth_request):
        cert = """
-----BEGIN PRIVATE KEY-----
MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALRiMLAh9iimur8V
A7qVvdqxevEuUkW4K+2KdMXmnQbG9Aa7k7eBjK1S+0LYmVjPKlJGNXHDGuy5Fw/d
7rjVJ0BLB+ubPK8iA/Tw3hLQgXMRRGRXXCn8ikfuQfjUS1uZSatdLB81mydBETlJ
hI6GH4twrbDJCR2Bwy/XWXgqgGRzAgMBAAECgYBYWVtleUzavkbrPjy0T5FMou8H
X9u2AC2ry8vD/l7cqedtwMPp9k7TubgNFo+NGvKsl2ynyprOZR1xjQ7WgrgVB+mm
uScOM/5HVceFuGRDhYTCObE+y1kxRloNYXnx3ei1zbeYLPCHdhxRYW7T0qcynNmw
rn05/KO2RLjgQNalsQJBANeA3Q4Nugqy4QBUCEC09SqylT2K9FrrItqL2QKc9v0Z
zO2uwllCbg0dwpVuYPYXYvikNHHg+aCWF+VXsb9rpPsCQQDWR9TT4ORdzoj+Nccn
qkMsDmzt0EfNaAOwHOmVJ2RVBspPcxt5iN4HI7HNeG6U5YsFBb+/GZbgfBT3kpNG
WPTpAkBI+gFhjfJvRw38n3g/+UeAkwMI2TJQS4n8+hid0uus3/zOjDySH3XHCUno
cn1xOJAyZODBo47E+67R4jV1/gzbAkEAklJaspRPXP877NssM5nAZMU0/O/NGCZ+
3jPgDUno6WbJn5cqm8MqWhW1xGkImgRk+fkDBquiq4gPiT898jusgQJAd5Zrr6Q8
AO/0isr/3aa6O6NLQxISLKcPDk2NOccAfS/xOtfOz4sJYM3+Bs4Io9+dZGSDCA54
Lw03eHTNQghS0A==
-----END PRIVATE KEY-----
"""
        return cert
diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/photos/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/photos/__init__.py
new file mode 100644
index 0000000..1952135
--- /dev/null
+++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/photos/__init__.py
@@ -0,0 +1,1112 @@
# -*-*- encoding: utf-8 -*-*-
#
# This is the base file for the PicasaWeb python client.
# It is used for lower level operations.
#
# $Id: __init__.py 148 2007-10-28 15:09:19Z havard.gulldahl $
#
# Copyright 2007 Håvard Gulldahl
# Portions (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This module provides a pythonic, gdata-centric interface to Google Photos
(a.k.a. Picasa Web Services.

It is modelled after the gdata/* interfaces from the gdata-python-client
project[1] by Google.

You'll find the user-friendly api in photos.service. Please see the
documentation or live help() system for available methods.

[1]: http://gdata-python-client.googlecode.com/

 """

__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__
__license__ = 'Apache License v2'
__version__ = '$Revision: 164 $'[11:-2]

import re
# try the fastest available ElementTree implementation, newest first
try:
  from xml.etree import cElementTree as ElementTree
except ImportError:
  try:
    import cElementTree as ElementTree
  except ImportError:
    try:
      from xml.etree import ElementTree
    except ImportError:
      from elementtree import ElementTree
import atom
import gdata

# importing google photo submodules
import gdata.media as Media, gdata.exif as Exif, gdata.geo as Geo

# XML namespaces which are often used in Google Photo elements
PHOTOS_NAMESPACE = 'http://schemas.google.com/photos/2007'
MEDIA_NAMESPACE = 'http://search.yahoo.com/mrss/'
EXIF_NAMESPACE = 'http://schemas.google.com/photos/exif/2007'
OPENSEARCH_NAMESPACE = 'http://a9.com/-/spec/opensearchrss/1.0/'
GEO_NAMESPACE = 'http://www.w3.org/2003/01/geo/wgs84_pos#'
GML_NAMESPACE = 'http://www.opengis.net/gml'
GEORSS_NAMESPACE = 'http://www.georss.org/georss'
PHEED_NAMESPACE = 'http://www.pheed.com/pheed/'
BATCH_NAMESPACE = 'http://schemas.google.com/gdata/batch'


class PhotosBaseElement(atom.AtomBase):
  """Base class for elements in the PHOTO_NAMESPACE. To add new elements,
  you only need to add the element tag name to self._tag
  """

  _tag = ''
  _namespace = PHOTOS_NAMESPACE
  _children = atom.AtomBase._children.copy()
  _attributes = atom.AtomBase._attributes.copy()

  def __init__(self, name=None, extension_elements=None,
               extension_attributes=None, text=None):
    # NOTE(review): does not call atom.AtomBase.__init__; attributes are
    # assigned directly instead.
    self.name = name
    self.text = text
    self.extension_elements = extension_elements or []
    self.extension_attributes = extension_attributes or {}
  #def __str__(self):
    #return str(self.text)
  #def __unicode__(self):
    #return unicode(self.text)
  def __int__(self):
    # many gphoto elements carry numeric text content
    return int(self.text)
  def bool(self):
    # 'true'/'false' text content -> python bool
    return self.text == 'true'

class GPhotosBaseFeed(gdata.GDataFeed, gdata.LinkFinder):
  "Base class for all Feeds in gdata.photos"
  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _attributes = gdata.GDataFeed._attributes.copy()
  _children = gdata.GDataFeed._children.copy()
  # We deal with Entry elements ourselves
  del _children['{%s}entry' % atom.ATOM_NAMESPACE]

  def __init__(self, author=None, category=None, contributor=None,
               generator=None, icon=None, atom_id=None, link=None, logo=None,
               rights=None, subtitle=None, title=None, updated=None,
               entry=None, total_results=None, start_index=None,
               items_per_page=None, extension_elements=None,
               extension_attributes=None, text=None):
    gdata.GDataFeed.__init__(self, author=author, category=category,
                             contributor=contributor, generator=generator,
                             icon=icon, atom_id=atom_id, link=link,
                             logo=logo, rights=rights, subtitle=subtitle,
                             title=title, updated=updated, entry=entry,
                             total_results=total_results,
                             start_index=start_index,
                             items_per_page=items_per_page,
                             extension_elements=extension_elements,
                             extension_attributes=extension_attributes,
                             text=text)

  def kind(self):
    "(string) Returns the kind"
    try:
      # category term looks like '<namespace>#<kind>'
      return self.category[0].term.split('#')[1]
    except IndexError:
      return None

  def _feedUri(self, kind):
    "Convenience method to return a uri to a feed of a special kind"
    assert(kind in ('album', 'tag', 'photo', 'comment', 'user'))
    here_href = self.GetSelfLink().href
    if 'kind=%s' % kind in here_href:
      # already points at the requested kind
      return here_href
    if not 'kind=' in here_href:
      # no kind parameter yet: append one with the right separator
      sep = '?'
      if '?' in here_href: sep = '&'
      return here_href + "%skind=%s" % (sep, kind)
    # replace the existing kind=... value in place
    rx = re.match('.*(kind=)(album|tag|photo|comment)', here_href)
    return here_href[:rx.end(1)] + kind + here_href[rx.end(2):]

  def _ConvertElementTreeToMember(self, child_tree):
    """Re-implementing the method from AtomBase, since we deal with
    Entry elements specially"""
    category = child_tree.find('{%s}category' % atom.ATOM_NAMESPACE)
    if category is None:
      return atom.AtomBase._ConvertElementTreeToMember(self, child_tree)
    namespace, kind = category.get('term').split('#')
    if namespace != PHOTOS_NAMESPACE:
      return atom.AtomBase._ConvertElementTreeToMember(self, child_tree)
    ## TODO: is it safe to use getattr on gdata.photos?
    entry_class = getattr(gdata.photos, '%sEntry' % kind.title())
    if not hasattr(self, 'entry') or self.entry is None:
      self.entry = []
    self.entry.append(atom._CreateClassFromElementTree(
        entry_class, child_tree))

class GPhotosBaseEntry(gdata.GDataEntry, gdata.LinkFinder):
  "Base class for all Entry elements in gdata.photos"
  _tag = 'entry'
  _kind = ''
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _attributes = gdata.GDataEntry._attributes.copy()

  def __init__(self, author=None, category=None, content=None,
               atom_id=None, link=None, published=None,
               title=None, updated=None,
               extended_property=None,
               extension_elements=None, extension_attributes=None, text=None):
    gdata.GDataEntry.__init__(self, author=author, category=category,
                              content=content, atom_id=atom_id, link=link,
                              published=published, title=title,
                              updated=updated, text=text,
                              extension_elements=extension_elements,
                              extension_attributes=extension_attributes)
    # tag the entry with its gphoto kind (set by subclasses via _kind)
    self.category.append(
        atom.Category(scheme='http://schemas.google.com/g/2005#kind',
                      term = 'http://schemas.google.com/photos/2007#%s' % self._kind))

  def kind(self):
    "(string) Returns the kind"
    try:
      return self.category[0].term.split('#')[1]
    except IndexError:
      return None

  def _feedUri(self, kind):
    "Convenience method to get the uri to this entry's feed of the some kind"
    try:
      href = self.GetFeedLink().href
    except AttributeError:
      return None
    sep = '?'
    if '?' in href: sep = '&'
    return '%s%skind=%s' % (href, sep, kind)


class PhotosBaseEntry(GPhotosBaseEntry):
  pass

class PhotosBaseFeed(GPhotosBaseFeed):
  pass

class GPhotosBaseData(object):
  pass

class Access(PhotosBaseElement):
  """The Google Photo `Access' element.

  The album's access level. Valid values are `public' or `private'.
+ In documentation, access level is also referred to as `visibility.'""" + + _tag = 'access' +def AccessFromString(xml_string): + return atom.CreateClassFromXMLString(Access, xml_string) + +class Albumid(PhotosBaseElement): + "The Google Photo `Albumid' element" + + _tag = 'albumid' +def AlbumidFromString(xml_string): + return atom.CreateClassFromXMLString(Albumid, xml_string) + +class BytesUsed(PhotosBaseElement): + "The Google Photo `BytesUsed' element" + + _tag = 'bytesUsed' +def BytesUsedFromString(xml_string): + return atom.CreateClassFromXMLString(BytesUsed, xml_string) + +class Client(PhotosBaseElement): + "The Google Photo `Client' element" + + _tag = 'client' +def ClientFromString(xml_string): + return atom.CreateClassFromXMLString(Client, xml_string) + +class Checksum(PhotosBaseElement): + "The Google Photo `Checksum' element" + + _tag = 'checksum' +def ChecksumFromString(xml_string): + return atom.CreateClassFromXMLString(Checksum, xml_string) + +class CommentCount(PhotosBaseElement): + "The Google Photo `CommentCount' element" + + _tag = 'commentCount' +def CommentCountFromString(xml_string): + return atom.CreateClassFromXMLString(CommentCount, xml_string) + +class CommentingEnabled(PhotosBaseElement): + "The Google Photo `CommentingEnabled' element" + + _tag = 'commentingEnabled' +def CommentingEnabledFromString(xml_string): + return atom.CreateClassFromXMLString(CommentingEnabled, xml_string) + +class Height(PhotosBaseElement): + "The Google Photo `Height' element" + + _tag = 'height' +def HeightFromString(xml_string): + return atom.CreateClassFromXMLString(Height, xml_string) + +class Id(PhotosBaseElement): + "The Google Photo `Id' element" + + _tag = 'id' +def IdFromString(xml_string): + return atom.CreateClassFromXMLString(Id, xml_string) + +class Location(PhotosBaseElement): + "The Google Photo `Location' element" + + _tag = 'location' +def LocationFromString(xml_string): + return atom.CreateClassFromXMLString(Location, xml_string) + +class 
MaxPhotosPerAlbum(PhotosBaseElement): + "The Google Photo `MaxPhotosPerAlbum' element" + + _tag = 'maxPhotosPerAlbum' +def MaxPhotosPerAlbumFromString(xml_string): + return atom.CreateClassFromXMLString(MaxPhotosPerAlbum, xml_string) + +class Name(PhotosBaseElement): + "The Google Photo `Name' element" + + _tag = 'name' +def NameFromString(xml_string): + return atom.CreateClassFromXMLString(Name, xml_string) + +class Nickname(PhotosBaseElement): + "The Google Photo `Nickname' element" + + _tag = 'nickname' +def NicknameFromString(xml_string): + return atom.CreateClassFromXMLString(Nickname, xml_string) + +class Numphotos(PhotosBaseElement): + "The Google Photo `Numphotos' element" + + _tag = 'numphotos' +def NumphotosFromString(xml_string): + return atom.CreateClassFromXMLString(Numphotos, xml_string) + +class Numphotosremaining(PhotosBaseElement): + "The Google Photo `Numphotosremaining' element" + + _tag = 'numphotosremaining' +def NumphotosremainingFromString(xml_string): + return atom.CreateClassFromXMLString(Numphotosremaining, xml_string) + +class Position(PhotosBaseElement): + "The Google Photo `Position' element" + + _tag = 'position' +def PositionFromString(xml_string): + return atom.CreateClassFromXMLString(Position, xml_string) + +class Photoid(PhotosBaseElement): + "The Google Photo `Photoid' element" + + _tag = 'photoid' +def PhotoidFromString(xml_string): + return atom.CreateClassFromXMLString(Photoid, xml_string) + +class Quotacurrent(PhotosBaseElement): + "The Google Photo `Quotacurrent' element" + + _tag = 'quotacurrent' +def QuotacurrentFromString(xml_string): + return atom.CreateClassFromXMLString(Quotacurrent, xml_string) + +class Quotalimit(PhotosBaseElement): + "The Google Photo `Quotalimit' element" + + _tag = 'quotalimit' +def QuotalimitFromString(xml_string): + return atom.CreateClassFromXMLString(Quotalimit, xml_string) + +class Rotation(PhotosBaseElement): + "The Google Photo `Rotation' element" + + _tag = 'rotation' +def 
RotationFromString(xml_string): + return atom.CreateClassFromXMLString(Rotation, xml_string) + +class Size(PhotosBaseElement): + "The Google Photo `Size' element" + + _tag = 'size' +def SizeFromString(xml_string): + return atom.CreateClassFromXMLString(Size, xml_string) + +class Snippet(PhotosBaseElement): + """The Google Photo `snippet' element. + + When searching, the snippet element will contain a + string with the word you're looking for, highlighted in html markup + E.g. when your query is `hafjell', this element may contain: + `... here at Hafjell.' + + You'll find this element in searches -- that is, feeds that combine the + `kind=photo' and `q=yoursearch' parameters in the request. + + See also gphoto:truncated and gphoto:snippettype. + + """ + + _tag = 'snippet' +def SnippetFromString(xml_string): + return atom.CreateClassFromXMLString(Snippet, xml_string) + +class Snippettype(PhotosBaseElement): + """The Google Photo `Snippettype' element + + When searching, this element will tell you the type of element that matches. + + You'll find this element in searches -- that is, feeds that combine the + `kind=photo' and `q=yoursearch' parameters in the request. + + See also gphoto:snippet and gphoto:truncated. + + Possible values and their interpretation: + o ALBUM_TITLE - The album title matches + o PHOTO_TAGS - The match is a tag/keyword + o PHOTO_DESCRIPTION - The match is in the photo's description + + If you discover a value not listed here, please submit a patch to update this docstring. + + """ + + _tag = 'snippettype' +def SnippettypeFromString(xml_string): + return atom.CreateClassFromXMLString(Snippettype, xml_string) + +class Thumbnail(PhotosBaseElement): + """The Google Photo `Thumbnail' element + + Used to display user's photo thumbnail (hackergotchi). 
+ + (Not to be confused with the media:thumbnail element, which gives you + small versions of the photo object.)""" + + _tag = 'thumbnail' +def ThumbnailFromString(xml_string): + return atom.CreateClassFromXMLString(Thumbnail, xml_string) + +class Timestamp(PhotosBaseElement): + """The Google Photo `Timestamp' element + Represented as the number of milliseconds since January 1st, 1970. + + + Take a look at the convenience methods .isoformat() and .datetime(): + + photo_epoch = Time.text # 1180294337000 + photo_isostring = Time.isoformat() # '2007-05-27T19:32:17.000Z' + + Alternatively: + photo_datetime = Time.datetime() # (requires python >= 2.3) + """ + + _tag = 'timestamp' + def isoformat(self): + """(string) Return the timestamp as a ISO 8601 formatted string, + e.g. '2007-05-27T19:32:17.000Z' + """ + import time + epoch = float(self.text)/1000 + return time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime(epoch)) + + def datetime(self): + """(datetime.datetime) Return the timestamp as a datetime.datetime object + + Requires python 2.3 + """ + import datetime + epoch = float(self.text)/1000 + return datetime.datetime.fromtimestamp(epoch) +def TimestampFromString(xml_string): + return atom.CreateClassFromXMLString(Timestamp, xml_string) + +class Truncated(PhotosBaseElement): + """The Google Photo `Truncated' element + + You'll find this element in searches -- that is, feeds that combine the + `kind=photo' and `q=yoursearch' parameters in the request. + + See also gphoto:snippet and gphoto:snippettype. 
+ + Possible values and their interpretation: + 0 -- unknown + """ + + _tag = 'Truncated' +def TruncatedFromString(xml_string): + return atom.CreateClassFromXMLString(Truncated, xml_string) + +class User(PhotosBaseElement): + "The Google Photo `User' element" + + _tag = 'user' +def UserFromString(xml_string): + return atom.CreateClassFromXMLString(User, xml_string) + +class Version(PhotosBaseElement): + "The Google Photo `Version' element" + + _tag = 'version' +def VersionFromString(xml_string): + return atom.CreateClassFromXMLString(Version, xml_string) + +class Width(PhotosBaseElement): + "The Google Photo `Width' element" + + _tag = 'width' +def WidthFromString(xml_string): + return atom.CreateClassFromXMLString(Width, xml_string) + +class Weight(PhotosBaseElement): + """The Google Photo `Weight' element. + + The weight of the tag is the number of times the tag + appears in the collection of tags currently being viewed. + The default weight is 1, in which case this tags is omitted.""" + _tag = 'weight' +def WeightFromString(xml_string): + return atom.CreateClassFromXMLString(Weight, xml_string) + +class CommentAuthor(atom.Author): + """The Atom `Author' element in CommentEntry entries is augmented to + contain elements from the PHOTOS_NAMESPACE + + http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38 + """ + _children = atom.Author._children.copy() + _children['{%s}user' % PHOTOS_NAMESPACE] = ('user', User) + _children['{%s}nickname' % PHOTOS_NAMESPACE] = ('nickname', Nickname) + _children['{%s}thumbnail' % PHOTOS_NAMESPACE] = ('thumbnail', Thumbnail) +def CommentAuthorFromString(xml_string): + return atom.CreateClassFromXMLString(CommentAuthor, xml_string) + +########################## ################################ + +class AlbumData(object): + _children = {} + _children['{%s}id' % PHOTOS_NAMESPACE] = ('gphoto_id', Id) + _children['{%s}name' % PHOTOS_NAMESPACE] = ('name', Name) + _children['{%s}location' % PHOTOS_NAMESPACE] = 
('location', Location) + _children['{%s}access' % PHOTOS_NAMESPACE] = ('access', Access) + _children['{%s}bytesUsed' % PHOTOS_NAMESPACE] = ('bytesUsed', BytesUsed) + _children['{%s}timestamp' % PHOTOS_NAMESPACE] = ('timestamp', Timestamp) + _children['{%s}numphotos' % PHOTOS_NAMESPACE] = ('numphotos', Numphotos) + _children['{%s}numphotosremaining' % PHOTOS_NAMESPACE] = \ + ('numphotosremaining', Numphotosremaining) + _children['{%s}user' % PHOTOS_NAMESPACE] = ('user', User) + _children['{%s}nickname' % PHOTOS_NAMESPACE] = ('nickname', Nickname) + _children['{%s}commentingEnabled' % PHOTOS_NAMESPACE] = \ + ('commentingEnabled', CommentingEnabled) + _children['{%s}commentCount' % PHOTOS_NAMESPACE] = \ + ('commentCount', CommentCount) + ## NOTE: storing media:group as self.media, to create a self-explaining api + gphoto_id = None + name = None + location = None + access = None + bytesUsed = None + timestamp = None + numphotos = None + numphotosremaining = None + user = None + nickname = None + commentingEnabled = None + commentCount = None + +class AlbumEntry(GPhotosBaseEntry, AlbumData): + """All metadata for a Google Photos Album + + Take a look at AlbumData for metadata accessible as attributes to this object. 
+ + Notes: + To avoid name clashes, and to create a more sensible api, some + objects have names that differ from the original elements: + + o media:group -> self.media, + o geo:where -> self.geo, + o photo:id -> self.gphoto_id + """ + + _kind = 'album' + _children = GPhotosBaseEntry._children.copy() + _children.update(AlbumData._children.copy()) + # child tags only for Album entries, not feeds + _children['{%s}where' % GEORSS_NAMESPACE] = ('geo', Geo.Where) + _children['{%s}group' % MEDIA_NAMESPACE] = ('media', Media.Group) + media = Media.Group() + geo = Geo.Where() + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + #GPHOTO NAMESPACE: + gphoto_id=None, name=None, location=None, access=None, + timestamp=None, numphotos=None, user=None, nickname=None, + commentingEnabled=None, commentCount=None, thumbnail=None, + # MEDIA NAMESPACE: + media=None, + # GEORSS NAMESPACE: + geo=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + GPhotosBaseEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, + updated=updated, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id + self.gphoto_id = gphoto_id + self.name = name + self.location = location + self.access = access + self.timestamp = timestamp + self.numphotos = numphotos + self.user = user + self.nickname = nickname + self.commentingEnabled = commentingEnabled + self.commentCount = commentCount + self.thumbnail = thumbnail + self.extended_property = extended_property or [] + self.text = text + ## NOTE: storing media:group as self.media, and geo:where as geo, + ## to create a self-explaining api + self.media = media or Media.Group() + self.geo = geo or Geo.Where() + + def 
GetAlbumId(self): + "Return the id of this album" + + return self.GetFeedLink().href.split('/')[-1] + + def GetPhotosUri(self): + "(string) Return the uri to this albums feed of the PhotoEntry kind" + return self._feedUri('photo') + + def GetCommentsUri(self): + "(string) Return the uri to this albums feed of the CommentEntry kind" + return self._feedUri('comment') + + def GetTagsUri(self): + "(string) Return the uri to this albums feed of the TagEntry kind" + return self._feedUri('tag') + +def AlbumEntryFromString(xml_string): + return atom.CreateClassFromXMLString(AlbumEntry, xml_string) + +class AlbumFeed(GPhotosBaseFeed, AlbumData): + """All metadata for a Google Photos Album, including its sub-elements + + This feed represents an album as the container for other objects. + + A Album feed contains entries of + PhotoEntry, CommentEntry or TagEntry, + depending on the `kind' parameter in the original query. + + Take a look at AlbumData for accessible attributes. + + """ + + _children = GPhotosBaseFeed._children.copy() + _children.update(AlbumData._children.copy()) + + def GetPhotosUri(self): + "(string) Return the uri to the same feed, but of the PhotoEntry kind" + + return self._feedUri('photo') + + def GetTagsUri(self): + "(string) Return the uri to the same feed, but of the TagEntry kind" + + return self._feedUri('tag') + + def GetCommentsUri(self): + "(string) Return the uri to the same feed, but of the CommentEntry kind" + + return self._feedUri('comment') + +def AlbumFeedFromString(xml_string): + return atom.CreateClassFromXMLString(AlbumFeed, xml_string) + + +class PhotoData(object): + _children = {} + ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id + _children['{%s}id' % PHOTOS_NAMESPACE] = ('gphoto_id', Id) + _children['{%s}albumid' % PHOTOS_NAMESPACE] = ('albumid', Albumid) + _children['{%s}checksum' % PHOTOS_NAMESPACE] = ('checksum', Checksum) + _children['{%s}client' % PHOTOS_NAMESPACE] = ('client', Client) + 
_children['{%s}height' % PHOTOS_NAMESPACE] = ('height', Height) + _children['{%s}position' % PHOTOS_NAMESPACE] = ('position', Position) + _children['{%s}rotation' % PHOTOS_NAMESPACE] = ('rotation', Rotation) + _children['{%s}size' % PHOTOS_NAMESPACE] = ('size', Size) + _children['{%s}timestamp' % PHOTOS_NAMESPACE] = ('timestamp', Timestamp) + _children['{%s}version' % PHOTOS_NAMESPACE] = ('version', Version) + _children['{%s}width' % PHOTOS_NAMESPACE] = ('width', Width) + _children['{%s}commentingEnabled' % PHOTOS_NAMESPACE] = \ + ('commentingEnabled', CommentingEnabled) + _children['{%s}commentCount' % PHOTOS_NAMESPACE] = \ + ('commentCount', CommentCount) + ## NOTE: storing media:group as self.media, exif:tags as self.exif, and + ## geo:where as self.geo, to create a self-explaining api + _children['{%s}tags' % EXIF_NAMESPACE] = ('exif', Exif.Tags) + _children['{%s}where' % GEORSS_NAMESPACE] = ('geo', Geo.Where) + _children['{%s}group' % MEDIA_NAMESPACE] = ('media', Media.Group) + # These elements show up in search feeds + _children['{%s}snippet' % PHOTOS_NAMESPACE] = ('snippet', Snippet) + _children['{%s}snippettype' % PHOTOS_NAMESPACE] = ('snippettype', Snippettype) + _children['{%s}truncated' % PHOTOS_NAMESPACE] = ('truncated', Truncated) + gphoto_id = None + albumid = None + checksum = None + client = None + height = None + position = None + rotation = None + size = None + timestamp = None + version = None + width = None + commentingEnabled = None + commentCount = None + snippet=None + snippettype=None + truncated=None + media = Media.Group() + geo = Geo.Where() + tags = Exif.Tags() + +class PhotoEntry(GPhotosBaseEntry, PhotoData): + """All metadata for a Google Photos Photo + + Take a look at PhotoData for metadata accessible as attributes to this object. 
+ + Notes: + To avoid name clashes, and to create a more sensible api, some + objects have names that differ from the original elements: + + o media:group -> self.media, + o exif:tags -> self.exif, + o geo:where -> self.geo, + o photo:id -> self.gphoto_id + """ + + _kind = 'photo' + _children = GPhotosBaseEntry._children.copy() + _children.update(PhotoData._children.copy()) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, text=None, + # GPHOTO NAMESPACE: + gphoto_id=None, albumid=None, checksum=None, client=None, height=None, + position=None, rotation=None, size=None, timestamp=None, version=None, + width=None, commentCount=None, commentingEnabled=None, + # MEDIARSS NAMESPACE: + media=None, + # EXIF_NAMESPACE: + exif=None, + # GEORSS NAMESPACE: + geo=None, + extension_elements=None, extension_attributes=None): + GPhotosBaseEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + + ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id + self.gphoto_id = gphoto_id + self.albumid = albumid + self.checksum = checksum + self.client = client + self.height = height + self.position = position + self.rotation = rotation + self.size = size + self.timestamp = timestamp + self.version = version + self.width = width + self.commentingEnabled = commentingEnabled + self.commentCount = commentCount + ## NOTE: storing media:group as self.media, to create a self-explaining api + self.media = media or Media.Group() + self.exif = exif or Exif.Tags() + self.geo = geo or Geo.Where() + + def GetPostLink(self): + "Return the uri to this photo's `POST' link (use it for updates of the object)" + + return self.GetFeedLink() + + def GetCommentsUri(self): + "Return the uri to this photo's 
feed of CommentEntry comments" + return self._feedUri('comment') + + def GetTagsUri(self): + "Return the uri to this photo's feed of TagEntry tags" + return self._feedUri('tag') + + def GetAlbumUri(self): + """Return the uri to the AlbumEntry containing this photo""" + + href = self.GetSelfLink().href + return href[:href.find('/photoid')] + +def PhotoEntryFromString(xml_string): + return atom.CreateClassFromXMLString(PhotoEntry, xml_string) + +class PhotoFeed(GPhotosBaseFeed, PhotoData): + """All metadata for a Google Photos Photo, including its sub-elements + + This feed represents a photo as the container for other objects. + + A Photo feed contains entries of + CommentEntry or TagEntry, + depending on the `kind' parameter in the original query. + + Take a look at PhotoData for metadata accessible as attributes to this object. + + """ + _children = GPhotosBaseFeed._children.copy() + _children.update(PhotoData._children.copy()) + + def GetTagsUri(self): + "(string) Return the uri to the same feed, but of the TagEntry kind" + + return self._feedUri('tag') + + def GetCommentsUri(self): + "(string) Return the uri to the same feed, but of the CommentEntry kind" + + return self._feedUri('comment') + +def PhotoFeedFromString(xml_string): + return atom.CreateClassFromXMLString(PhotoFeed, xml_string) + +class TagData(GPhotosBaseData): + _children = {} + _children['{%s}weight' % PHOTOS_NAMESPACE] = ('weight', Weight) + weight=None + +class TagEntry(GPhotosBaseEntry, TagData): + """All metadata for a Google Photos Tag + + The actual tag is stored in the .title.text attribute + + """ + + _kind = 'tag' + _children = GPhotosBaseEntry._children.copy() + _children.update(TagData._children.copy()) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + # GPHOTO NAMESPACE: + weight=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + 
GPhotosBaseEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + self.weight = weight + + def GetAlbumUri(self): + """Return the uri to the AlbumEntry containing this tag""" + + href = self.GetSelfLink().href + pos = href.find('/photoid') + if pos == -1: + return None + return href[:pos] + + def GetPhotoUri(self): + """Return the uri to the PhotoEntry containing this tag""" + + href = self.GetSelfLink().href + pos = href.find('/tag') + if pos == -1: + return None + return href[:pos] + +def TagEntryFromString(xml_string): + return atom.CreateClassFromXMLString(TagEntry, xml_string) + + +class TagFeed(GPhotosBaseFeed, TagData): + """All metadata for a Google Photos Tag, including its sub-elements""" + + _children = GPhotosBaseFeed._children.copy() + _children.update(TagData._children.copy()) + +def TagFeedFromString(xml_string): + return atom.CreateClassFromXMLString(TagFeed, xml_string) + +class CommentData(GPhotosBaseData): + _children = {} + ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id + _children['{%s}id' % PHOTOS_NAMESPACE] = ('gphoto_id', Id) + _children['{%s}albumid' % PHOTOS_NAMESPACE] = ('albumid', Albumid) + _children['{%s}photoid' % PHOTOS_NAMESPACE] = ('photoid', Photoid) + _children['{%s}author' % atom.ATOM_NAMESPACE] = ('author', [CommentAuthor,]) + gphoto_id=None + albumid=None + photoid=None + author=None + +class CommentEntry(GPhotosBaseEntry, CommentData): + """All metadata for a Google Photos Comment + + The comment is stored in the .content.text attribute, + with a content type in .content.type. 
+ + + """ + + _kind = 'comment' + _children = GPhotosBaseEntry._children.copy() + _children.update(CommentData._children.copy()) + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + # GPHOTO NAMESPACE: + gphoto_id=None, albumid=None, photoid=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + GPhotosBaseEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + self.gphoto_id = gphoto_id + self.albumid = albumid + self.photoid = photoid + + def GetCommentId(self): + """Return the globally unique id of this comment""" + return self.GetSelfLink().href.split('/')[-1] + + def GetAlbumUri(self): + """Return the uri to the AlbumEntry containing this comment""" + + href = self.GetSelfLink().href + return href[:href.find('/photoid')] + + def GetPhotoUri(self): + """Return the uri to the PhotoEntry containing this comment""" + + href = self.GetSelfLink().href + return href[:href.find('/commentid')] + +def CommentEntryFromString(xml_string): + return atom.CreateClassFromXMLString(CommentEntry, xml_string) + +class CommentFeed(GPhotosBaseFeed, CommentData): + """All metadata for a Google Photos Comment, including its sub-elements""" + + _children = GPhotosBaseFeed._children.copy() + _children.update(CommentData._children.copy()) + +def CommentFeedFromString(xml_string): + return atom.CreateClassFromXMLString(CommentFeed, xml_string) + +class UserData(GPhotosBaseData): + _children = {} + _children['{%s}maxPhotosPerAlbum' % PHOTOS_NAMESPACE] = ('maxPhotosPerAlbum', MaxPhotosPerAlbum) + _children['{%s}nickname' % PHOTOS_NAMESPACE] = ('nickname', Nickname) + _children['{%s}quotalimit' % PHOTOS_NAMESPACE] = ('quotalimit', Quotalimit) + 
_children['{%s}quotacurrent' % PHOTOS_NAMESPACE] = ('quotacurrent', Quotacurrent) + _children['{%s}thumbnail' % PHOTOS_NAMESPACE] = ('thumbnail', Thumbnail) + _children['{%s}user' % PHOTOS_NAMESPACE] = ('user', User) + _children['{%s}id' % PHOTOS_NAMESPACE] = ('gphoto_id', Id) + + maxPhotosPerAlbum=None + nickname=None + quotalimit=None + quotacurrent=None + thumbnail=None + user=None + gphoto_id=None + + +class UserEntry(GPhotosBaseEntry, UserData): + """All metadata for a Google Photos User + + This entry represents an album owner and all appropriate metadata. + + Take a look at at the attributes of the UserData for metadata available. + """ + _children = GPhotosBaseEntry._children.copy() + _children.update(UserData._children.copy()) + _kind = 'user' + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + # GPHOTO NAMESPACE: + gphoto_id=None, maxPhotosPerAlbum=None, nickname=None, quotalimit=None, + quotacurrent=None, thumbnail=None, user=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + GPhotosBaseEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + + self.gphoto_id=gphoto_id + self.maxPhotosPerAlbum=maxPhotosPerAlbum + self.nickname=nickname + self.quotalimit=quotalimit + self.quotacurrent=quotacurrent + self.thumbnail=thumbnail + self.user=user + + def GetAlbumsUri(self): + "(string) Return the uri to this user's feed of the AlbumEntry kind" + return self._feedUri('album') + + def GetPhotosUri(self): + "(string) Return the uri to this user's feed of the PhotoEntry kind" + return self._feedUri('photo') + + def GetCommentsUri(self): + "(string) Return the uri to this user's feed of the CommentEntry kind" + return 
self._feedUri('comment') + + def GetTagsUri(self): + "(string) Return the uri to this user's feed of the TagEntry kind" + return self._feedUri('tag') + +def UserEntryFromString(xml_string): + return atom.CreateClassFromXMLString(UserEntry, xml_string) + +class UserFeed(GPhotosBaseFeed, UserData): + """Feed for a User in the google photos api. + + This feed represents a user as the container for other objects. + + A User feed contains entries of + AlbumEntry, PhotoEntry, CommentEntry, UserEntry or TagEntry, + depending on the `kind' parameter in the original query. + + The user feed itself also contains all of the metadata available + as part of a UserData object.""" + _children = GPhotosBaseFeed._children.copy() + _children.update(UserData._children.copy()) + + def GetAlbumsUri(self): + """Get the uri to this feed, but with entries of the AlbumEntry kind.""" + return self._feedUri('album') + + def GetTagsUri(self): + """Get the uri to this feed, but with entries of the TagEntry kind.""" + return self._feedUri('tag') + + def GetPhotosUri(self): + """Get the uri to this feed, but with entries of the PhotosEntry kind.""" + return self._feedUri('photo') + + def GetCommentsUri(self): + """Get the uri to this feed, but with entries of the CommentsEntry kind.""" + return self._feedUri('comment') + +def UserFeedFromString(xml_string): + return atom.CreateClassFromXMLString(UserFeed, xml_string) + + + +def AnyFeedFromString(xml_string): + """Creates an instance of the appropriate feed class from the + xml string contents. + + Args: + xml_string: str A string which contains valid XML. The root element + of the XML string should match the tag and namespace of the desired + class. + + Returns: + An instance of the target class with members assigned according to the + contents of the XML - or a basic gdata.GDataFeed instance if it is + impossible to determine the appropriate class (look for extra elements + in GDataFeed's .FindExtensions() and extension_elements[] ). 
+ """ + tree = ElementTree.fromstring(xml_string) + category = tree.find('{%s}category' % atom.ATOM_NAMESPACE) + if category is None: + # TODO: is this the best way to handle this? + return atom._CreateClassFromElementTree(GPhotosBaseFeed, tree) + namespace, kind = category.get('term').split('#') + if namespace != PHOTOS_NAMESPACE: + # TODO: is this the best way to handle this? + return atom._CreateClassFromElementTree(GPhotosBaseFeed, tree) + ## TODO: is getattr safe this way? + feed_class = getattr(gdata.photos, '%sFeed' % kind.title()) + return atom._CreateClassFromElementTree(feed_class, tree) + +def AnyEntryFromString(xml_string): + """Creates an instance of the appropriate entry class from the + xml string contents. + + Args: + xml_string: str A string which contains valid XML. The root element + of the XML string should match the tag and namespace of the desired + class. + + Returns: + An instance of the target class with members assigned according to the + contents of the XML - or a basic gdata.GDataEntry instance if it is + impossible to determine the appropriate class (look for extra elements + in GDataEntry's .FindExtensions() and extension_elements[] ). + """ + tree = ElementTree.fromstring(xml_string) + category = tree.find('{%s}category' % atom.ATOM_NAMESPACE) + if category is None: + # TODO: is this the best way to handle this? + return atom._CreateClassFromElementTree(GPhotosBaseEntry, tree) + namespace, kind = category.get('term').split('#') + if namespace != PHOTOS_NAMESPACE: + # TODO: is this the best way to handle this? + return atom._CreateClassFromElementTree(GPhotosBaseEntry, tree) + ## TODO: is getattr safe this way? 
+ feed_class = getattr(gdata.photos, '%sEntry' % kind.title()) + return atom._CreateClassFromElementTree(feed_class, tree) + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/photos/service.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/photos/service.py new file mode 100644 index 0000000..fc8815b --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/photos/service.py @@ -0,0 +1,685 @@ +#!/usr/bin/env python +# -*-*- encoding: utf-8 -*-*- +# +# This is the service file for the Google Photo python client. +# It is used for higher level operations. +# +# $Id: service.py 144 2007-10-25 21:03:34Z havard.gulldahl $ +# +# Copyright 2007 Håvard Gulldahl +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google PhotoService provides a human-friendly interface to +Google Photo (a.k.a Picasa Web) services[1]. + +It extends gdata.service.GDataService and as such hides all the +nasty details about authenticating, parsing and communicating with +Google Photos. 
+ +[1]: http://code.google.com/apis/picasaweb/gdata.html + +Example: + import gdata.photos, gdata.photos.service + pws = gdata.photos.service.PhotosService() + pws.ClientLogin(username, password) + #Get all albums + albums = pws.GetUserFeed().entry + # Get all photos in second album + photos = pws.GetFeed(albums[1].GetPhotosUri()).entry + # Get all tags for photos in second album and print them + tags = pws.GetFeed(albums[1].GetTagsUri()).entry + print [ tag.summary.text for tag in tags ] + # Get all comments for the first photos in list and print them + comments = pws.GetCommentFeed(photos[0].GetCommentsUri()).entry + print [ c.summary.text for c in comments ] + + # Get a photo to work with + photo = photos[0] + # Update metadata + + # Attributes from the namespace + photo.summary.text = u'A nice view from my veranda' + photo.title.text = u'Verandaview.jpg' + + # Attributes from the namespace + photo.media.keywords.text = u'Home, Long-exposure, Sunset' # Comma-separated + + # Adding attributes to media object + + # Rotate 90 degrees clockwise + photo.rotation = gdata.photos.Rotation(text='90') + + # Submit modified photo object + photo = pws.UpdatePhotoMetadata(photo) + + # Make sure you only modify the newly returned object, else you'll get + # versioning errors. 
See Optimistic-concurrency + + # Add comment to a picture + comment = pws.InsertComment(photo, u'I wish the water always was this warm') + + # Remove comment because it was silly + print "*blush*" + pws.Delete(comment.GetEditLink().href) + +""" + +__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__ +__license__ = 'Apache License v2' +__version__ = '$Revision: 176 $'[11:-2] + + +import sys, os.path, StringIO +import time +import gdata.service +import gdata +import atom.service +import atom +import gdata.photos + +SUPPORTED_UPLOAD_TYPES = ('bmp', 'jpeg', 'jpg', 'gif', 'png') + +UNKOWN_ERROR=1000 +GPHOTOS_BAD_REQUEST=400 +GPHOTOS_CONFLICT=409 +GPHOTOS_INTERNAL_SERVER_ERROR=500 +GPHOTOS_INVALID_ARGUMENT=601 +GPHOTOS_INVALID_CONTENT_TYPE=602 +GPHOTOS_NOT_AN_IMAGE=603 +GPHOTOS_INVALID_KIND=604 + +class GooglePhotosException(Exception): + def __init__(self, response): + + self.error_code = response['status'] + self.reason = response['reason'].strip() + if '' in str(response['body']): #general html message, discard it + response['body'] = "" + self.body = response['body'].strip() + self.message = "(%(status)s) %(body)s -- %(reason)s" % response + + #return explicit error codes + error_map = { '(12) Not an image':GPHOTOS_NOT_AN_IMAGE, + 'kind: That is not one of the acceptable values': + GPHOTOS_INVALID_KIND, + + } + for msg, code in error_map.iteritems(): + if self.body == msg: + self.error_code = code + break + self.args = [self.error_code, self.reason, self.body] + +class PhotosService(gdata.service.GDataService): + userUri = '/data/feed/api/user/%s' + + def __init__(self, email=None, password=None, + source=None, server='picasaweb.google.com', additional_headers=None): + """ GooglePhotosService constructor. + + Arguments: + email: string (optional) The e-mail address of the account to use for + authentication. + password: string (optional) The password of the account to use for + authentication. 
+ source: string (optional) The name of the user's application. + server: string (optional) The server the feed is hosted on. + additional_headers: dict (optional) Any additional HTTP headers to be + transmitted to the service in the form of key-value + pairs. + + Returns: + A PhotosService object used to communicate with the Google Photos + service. + """ + self.email = email + self.client = source + gdata.service.GDataService.__init__(self, email=self.email, password=password, + service='lh2', source=source, + server=server, + additional_headers=additional_headers) + + def GetFeed(self, uri, limit=None, start_index=None): + """Get a feed. + + The results are ordered by the values of their `updated' elements, + with the most recently updated entry appearing first in the feed. + + Arguments: + uri: the uri to fetch + limit (optional): the maximum number of entries to return. Defaults to what + the server returns. + + Returns: + one of gdata.photos.AlbumFeed, + gdata.photos.UserFeed, + gdata.photos.PhotoFeed, + gdata.photos.CommentFeed, + gdata.photos.TagFeed, + depending on the results of the query. + Raises: + GooglePhotosException + + See: + http://code.google.com/apis/picasaweb/gdata.html#Get_Album_Feed_Manual + """ + if limit is not None: + uri += '&max-results=%s' % limit + if start_index is not None: + uri += '&start-index=%s' % start_index + try: + return self.Get(uri, converter=gdata.photos.AnyFeedFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def GetEntry(self, uri, limit=None, start_index=None): + """Get an Entry. + + Arguments: + uri: the uri to the entry + limit (optional): the maximum number of entries to return. Defaults to what + the server returns. + + Returns: + one of gdata.photos.AlbumEntry, + gdata.photos.UserEntry, + gdata.photos.PhotoEntry, + gdata.photos.CommentEntry, + gdata.photos.TagEntry, + depending on the results of the query. 
+ Raises: + GooglePhotosException + """ + if limit is not None: + uri += '&max-results=%s' % limit + if start_index is not None: + uri += '&start-index=%s' % start_index + try: + return self.Get(uri, converter=gdata.photos.AnyEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def GetUserFeed(self, kind='album', user='default', limit=None): + """Get user-based feed, containing albums, photos, comments or tags; + defaults to albums. + + The entries are ordered by the values of their `updated' elements, + with the most recently updated entry appearing first in the feed. + + Arguments: + kind: the kind of entries to get, either `album', `photo', + `comment' or `tag', or a python list of these. Defaults to `album'. + user (optional): whose albums we're querying. Defaults to current user. + limit (optional): the maximum number of entries to return. + Defaults to everything the server returns. + + + Returns: + gdata.photos.UserFeed, containing appropriate Entry elements + + See: + http://code.google.com/apis/picasaweb/gdata.html#Get_Album_Feed_Manual + http://googledataapis.blogspot.com/2007/07/picasa-web-albums-adds-new-api-features.html + """ + if isinstance(kind, (list, tuple) ): + kind = ",".join(kind) + + uri = '/data/feed/api/user/%s?kind=%s' % (user, kind) + return self.GetFeed(uri, limit=limit) + + def GetTaggedPhotos(self, tag, user='default', limit=None): + """Get all photos belonging to a specific user, tagged by the given keyword + + Arguments: + tag: The tag you're looking for, e.g. `dog' + user (optional): Whose images/videos you want to search, defaults + to current user + limit (optional): the maximum number of entries to return. + Defaults to everything the server returns. 
+ + Returns: + gdata.photos.UserFeed containing PhotoEntry elements + """ + # Lower-casing because of + # http://code.google.com/p/gdata-issues/issues/detail?id=194 + uri = '/data/feed/api/user/%s?kind=photo&tag=%s' % (user, tag.lower()) + return self.GetFeed(uri, limit) + + def SearchUserPhotos(self, query, user='default', limit=100): + """Search through all photos for a specific user and return a feed. + This will look for matches in file names and image tags (a.k.a. keywords) + + Arguments: + query: The string you're looking for, e.g. `vacation' + user (optional): The username of whose photos you want to search, defaults + to current user. + limit (optional): Don't return more than `limit' hits, defaults to 100 + + Only public photos are searched, unless you are authenticated and + searching through your own photos. + + Returns: + gdata.photos.UserFeed with PhotoEntry elements + """ + uri = '/data/feed/api/user/%s?kind=photo&q=%s' % (user, query) + return self.GetFeed(uri, limit=limit) + + def SearchCommunityPhotos(self, query, limit=100): + """Search through all public photos and return a feed. + This will look for matches in file names and image tags (a.k.a. keywords) + + Arguments: + query: The string you're looking for, e.g. 
`vacation' + limit (optional): Don't return more than `limit' hits, defaults to 100 + + Returns: + gdata.GDataFeed with PhotoEntry elements + """ + uri='/data/feed/api/all?q=%s' % query + return self.GetFeed(uri, limit=limit) + + def GetContacts(self, user='default', limit=None): + """Retrieve a feed that contains a list of your contacts + + Arguments: + user: Username of the user whose contacts you want + + Returns + gdata.photos.UserFeed, with UserEntry entries + + See: + http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38 + """ + uri = '/data/feed/api/user/%s/contacts?kind=user' % user + return self.GetFeed(uri, limit=limit) + + def SearchContactsPhotos(self, user='default', search=None, limit=None): + """Search over your contacts' photos and return a feed + + Arguments: + user: Username of the user whose contacts you want + search (optional): What to search for (photo title, description and keywords) + + Returns + gdata.photos.UserFeed, with PhotoEntry elements + + See: + http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38 + """ + + uri = '/data/feed/api/user/%s/contacts?kind=photo&q=%s' % (user, search) + return self.GetFeed(uri, limit=limit) + + def InsertAlbum(self, title, summary, location=None, access='public', + commenting_enabled='true', timestamp=None): + """Add an album. + + Needs authentication, see self.ClientLogin() + + Arguments: + title: Album title + summary: Album summary / description + access (optional): `private' or `public'. Public albums are searchable + by everyone on the internet. Defaults to `public' + commenting_enabled (optional): `true' or `false'. Defaults to `true'. + timestamp (optional): A date and time for the album, in milliseconds since + Unix epoch[1] UTC. Defaults to now. 
+ + Returns: + The newly created gdata.photos.AlbumEntry + + See: + http://code.google.com/apis/picasaweb/gdata.html#Add_Album_Manual_Installed + + [1]: http://en.wikipedia.org/wiki/Unix_epoch + """ + album = gdata.photos.AlbumEntry() + album.title = atom.Title(text=title, title_type='text') + album.summary = atom.Summary(text=summary, summary_type='text') + if location is not None: + album.location = gdata.photos.Location(text=location) + album.access = gdata.photos.Access(text=access) + if commenting_enabled in ('true', 'false'): + album.commentingEnabled = gdata.photos.CommentingEnabled(text=commenting_enabled) + if timestamp is None: + timestamp = '%i' % int(time.time() * 1000) + album.timestamp = gdata.photos.Timestamp(text=timestamp) + try: + return self.Post(album, uri=self.userUri % self.email, + converter=gdata.photos.AlbumEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def InsertPhoto(self, album_or_uri, photo, filename_or_handle, + content_type='image/jpeg'): + """Add a PhotoEntry + + Needs authentication, see self.ClientLogin() + + Arguments: + album_or_uri: AlbumFeed or uri of the album where the photo should go + photo: PhotoEntry to add + filename_or_handle: A file-like object or file name where the image/video + will be read from + content_type (optional): Internet media type (a.k.a. mime type) of + media object. Currently Google Photos supports these types: + o image/bmp + o image/gif + o image/jpeg + o image/png + + Images will be converted to jpeg on upload. 
Defaults to `image/jpeg' + + """ + + try: + assert(isinstance(photo, gdata.photos.PhotoEntry)) + except AssertionError: + raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT, + 'body':'`photo` must be a gdata.photos.PhotoEntry instance', + 'reason':'Found %s, not PhotoEntry' % type(photo) + }) + try: + majtype, mintype = content_type.split('/') + assert(mintype in SUPPORTED_UPLOAD_TYPES) + except (ValueError, AssertionError): + raise GooglePhotosException({'status':GPHOTOS_INVALID_CONTENT_TYPE, + 'body':'This is not a valid content type: %s' % content_type, + 'reason':'Accepted content types: %s' % \ + ['image/'+t for t in SUPPORTED_UPLOAD_TYPES] + }) + if isinstance(filename_or_handle, (str, unicode)) and \ + os.path.exists(filename_or_handle): # it's a file name + mediasource = gdata.MediaSource() + mediasource.setFile(filename_or_handle, content_type) + elif hasattr(filename_or_handle, 'read'):# it's a file-like resource + if hasattr(filename_or_handle, 'seek'): + filename_or_handle.seek(0) # rewind pointer to the start of the file + # gdata.MediaSource needs the content length, so read the whole image + file_handle = StringIO.StringIO(filename_or_handle.read()) + name = 'image' + if hasattr(filename_or_handle, 'name'): + name = filename_or_handle.name + mediasource = gdata.MediaSource(file_handle, content_type, + content_length=file_handle.len, file_name=name) + else: #filename_or_handle is not valid + raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT, + 'body':'`filename_or_handle` must be a path name or a file-like object', + 'reason':'Found %s, not path name or object with a .read() method' % \ + type(filename_or_handle) + }) + + if isinstance(album_or_uri, (str, unicode)): # it's a uri + feed_uri = album_or_uri + elif hasattr(album_or_uri, 'GetFeedLink'): # it's a AlbumFeed object + feed_uri = album_or_uri.GetFeedLink().href + + try: + return self.Post(photo, uri=feed_uri, media_source=mediasource, + 
converter=gdata.photos.PhotoEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def InsertPhotoSimple(self, album_or_uri, title, summary, filename_or_handle, + content_type='image/jpeg', keywords=None): + """Add a photo without constructing a PhotoEntry. + + Needs authentication, see self.ClientLogin() + + Arguments: + album_or_uri: AlbumFeed or uri of the album where the photo should go + title: Photo title + summary: Photo summary / description + filename_or_handle: A file-like object or file name where the image/video + will be read from + content_type (optional): Internet media type (a.k.a. mime type) of + media object. Currently Google Photos supports these types: + o image/bmp + o image/gif + o image/jpeg + o image/png + + Images will be converted to jpeg on upload. Defaults to `image/jpeg' + keywords (optional): a 1) comma separated string or 2) a python list() of + keywords (a.k.a. tags) to add to the image. + E.g. 1) `dog, vacation, happy' 2) ['dog', 'happy', 'vacation'] + + Returns: + The newly created gdata.photos.PhotoEntry or GooglePhotosException on errors + + See: + http://code.google.com/apis/picasaweb/gdata.html#Add_Album_Manual_Installed + [1]: http://en.wikipedia.org/wiki/Unix_epoch + """ + + metadata = gdata.photos.PhotoEntry() + metadata.title=atom.Title(text=title) + metadata.summary = atom.Summary(text=summary, summary_type='text') + if keywords is not None: + if isinstance(keywords, list): + keywords = ','.join(keywords) + metadata.media.keywords = gdata.media.Keywords(text=keywords) + return self.InsertPhoto(album_or_uri, metadata, filename_or_handle, + content_type) + + def UpdatePhotoMetadata(self, photo): + """Update a photo's metadata. 
+ + Needs authentication, see self.ClientLogin() + + You can update any or all of the following metadata properties: + * + * <media:description> + * <gphoto:checksum> + * <gphoto:client> + * <gphoto:rotation> + * <gphoto:timestamp> + * <gphoto:commentingEnabled> + + Arguments: + photo: a gdata.photos.PhotoEntry object with updated elements + + Returns: + The modified gdata.photos.PhotoEntry + + Example: + p = GetFeed(uri).entry[0] + p.title.text = u'My new text' + p.commentingEnabled.text = 'false' + p = UpdatePhotoMetadata(p) + + It is important that you don't keep the old object around, once + it has been updated. See + http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency + """ + try: + return self.Put(data=photo, uri=photo.GetEditLink().href, + converter=gdata.photos.PhotoEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + + def UpdatePhotoBlob(self, photo_or_uri, filename_or_handle, + content_type = 'image/jpeg'): + """Update a photo's binary data. + + Needs authentication, see self.ClientLogin() + + Arguments: + photo_or_uri: a gdata.photos.PhotoEntry that will be updated, or a + `edit-media' uri pointing to it + filename_or_handle: A file-like object or file name where the image/video + will be read from + content_type (optional): Internet media type (a.k.a. mime type) of + media object. Currently Google Photos supports these types: + o image/bmp + o image/gif + o image/jpeg + o image/png + Images will be converted to jpeg on upload. Defaults to `image/jpeg' + + Returns: + The modified gdata.photos.PhotoEntry + + Example: + p = GetFeed(PhotoUri) + p = UpdatePhotoBlob(p, '/tmp/newPic.jpg') + + It is important that you don't keep the old object around, once + it has been updated. 
See + http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency + """ + + try: + majtype, mintype = content_type.split('/') + assert(mintype in SUPPORTED_UPLOAD_TYPES) + except (ValueError, AssertionError): + raise GooglePhotosException({'status':GPHOTOS_INVALID_CONTENT_TYPE, + 'body':'This is not a valid content type: %s' % content_type, + 'reason':'Accepted content types: %s' % \ + ['image/'+t for t in SUPPORTED_UPLOAD_TYPES] + }) + + if isinstance(filename_or_handle, (str, unicode)) and \ + os.path.exists(filename_or_handle): # it's a file name + photoblob = gdata.MediaSource() + photoblob.setFile(filename_or_handle, content_type) + elif hasattr(filename_or_handle, 'read'):# it's a file-like resource + if hasattr(filename_or_handle, 'seek'): + filename_or_handle.seek(0) # rewind pointer to the start of the file + # gdata.MediaSource needs the content length, so read the whole image + file_handle = StringIO.StringIO(filename_or_handle.read()) + name = 'image' + if hasattr(filename_or_handle, 'name'): + name = filename_or_handle.name + mediasource = gdata.MediaSource(file_handle, content_type, + content_length=file_handle.len, file_name=name) + else: #filename_or_handle is not valid + raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT, + 'body':'`filename_or_handle` must be a path name or a file-like object', + 'reason':'Found %s, not path name or an object with .read() method' % \ + type(filename_or_handle) + }) + + if isinstance(photo_or_uri, (str, unicode)): + entry_uri = photo_or_uri # it's a uri + elif hasattr(photo_or_uri, 'GetEditMediaLink'): + entry_uri = photo_or_uri.GetEditMediaLink().href + try: + return self.Put(photoblob, entry_uri, + converter=gdata.photos.PhotoEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def InsertTag(self, photo_or_uri, tag): + """Add a tag (a.k.a. keyword) to a photo. 
+ + Needs authentication, see self.ClientLogin() + + Arguments: + photo_or_uri: a gdata.photos.PhotoEntry that will be tagged, or a + `post' uri pointing to it + (string) tag: The tag/keyword + + Returns: + The new gdata.photos.TagEntry + + Example: + p = GetFeed(PhotoUri) + tag = InsertTag(p, 'Beautiful sunsets') + + """ + tag = gdata.photos.TagEntry(title=atom.Title(text=tag)) + if isinstance(photo_or_uri, (str, unicode)): + post_uri = photo_or_uri # it's a uri + elif hasattr(photo_or_uri, 'GetEditMediaLink'): + post_uri = photo_or_uri.GetPostLink().href + try: + return self.Post(data=tag, uri=post_uri, + converter=gdata.photos.TagEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + + def InsertComment(self, photo_or_uri, comment): + """Add a comment to a photo. + + Needs authentication, see self.ClientLogin() + + Arguments: + photo_or_uri: a gdata.photos.PhotoEntry that is about to be commented + , or a `post' uri pointing to it + (string) comment: The actual comment + + Returns: + The new gdata.photos.CommentEntry + + Example: + p = GetFeed(PhotoUri) + tag = InsertComment(p, 'OOOH! I would have loved to be there. + Who's that in the back?') + + """ + comment = gdata.photos.CommentEntry(content=atom.Content(text=comment)) + if isinstance(photo_or_uri, (str, unicode)): + post_uri = photo_or_uri # it's a uri + elif hasattr(photo_or_uri, 'GetEditMediaLink'): + post_uri = photo_or_uri.GetPostLink().href + try: + return self.Post(data=comment, uri=post_uri, + converter=gdata.photos.CommentEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def Delete(self, object_or_uri, *args, **kwargs): + """Delete an object. + + Re-implementing the GDataService.Delete method, to add some + convenience. + + Arguments: + object_or_uri: Any object that has a GetEditLink() method that + returns a link, or a uri to that object. + + Returns: + ? 
or GooglePhotosException on errors + """ + try: + uri = object_or_uri.GetEditLink().href + except AttributeError: + uri = object_or_uri + try: + return gdata.service.GDataService.Delete(self, uri, *args, **kwargs) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + +def GetSmallestThumbnail(media_thumbnail_list): + """Helper function to get the smallest thumbnail of a list of + gdata.media.Thumbnail. + Returns gdata.media.Thumbnail """ + r = {} + for thumb in media_thumbnail_list: + r[int(thumb.width)*int(thumb.height)] = thumb + keys = r.keys() + keys.sort() + return r[keys[0]] + +def ConvertAtomTimestampToEpoch(timestamp): + """Helper function to convert a timestamp string, for instance + from atom:updated or atom:published, to milliseconds since Unix epoch + (a.k.a. POSIX time). + + `2007-07-22T00:45:10.000Z' -> """ + return time.mktime(time.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.000Z')) + ## TODO: Timezone aware diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/service.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/service.py new file mode 100644 index 0000000..60b18a2 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/service.py @@ -0,0 +1,1616 @@ +#!/usr/bin/python +# +# Copyright (C) 2006,2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""GDataService provides CRUD ops. and programmatic login for GData services. 
+ + Error: A base exception class for all exceptions in the gdata_client + module. + + CaptchaRequired: This exception is thrown when a login attempt results in a + captcha challenge from the ClientLogin service. When this + exception is thrown, the captcha_token and captcha_url are + set to the values provided in the server's response. + + BadAuthentication: Raised when a login attempt is made with an incorrect + username or password. + + NotAuthenticated: Raised if an operation requiring authentication is called + before a user has authenticated. + + NonAuthSubToken: Raised if a method to modify an AuthSub token is used when + the user is either not authenticated or is authenticated + through another authentication mechanism. + + NonOAuthToken: Raised if a method to modify an OAuth token is used when the + user is either not authenticated or is authenticated through + another authentication mechanism. + + RequestError: Raised if a CRUD request returned a non-success code. + + UnexpectedReturnType: Raised if the response from the server was not of the + desired type. For example, this would be raised if the + server sent a feed when the client requested an entry. + + GDataService: Encapsulates user credentials needed to perform insert, update + and delete operations with the GData API. An instance can + perform user authentication, query, insertion, deletion, and + update. + + Query: Eases query URI creation by allowing URI parameters to be set as + dictionary attributes. For example a query with a feed of + '/base/feeds/snippets' and ['bq'] set to 'digital camera' will + produce '/base/feeds/snippets?bq=digital+camera' when .ToUri() is + called on it. 
+""" + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + + +import re +import urllib +import urlparse +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import atom.service +import gdata +import atom +import atom.http_interface +import atom.token_store +import gdata.auth + + +AUTH_SERVER_HOST = 'https://www.google.com' + + +# When requesting an AuthSub token, it is often helpful to track the scope +# which is being requested. One way to accomplish this is to add a URL +# parameter to the 'next' URL which contains the requested scope. This +# constant is the default name (AKA key) for the URL parameter. +SCOPE_URL_PARAM_NAME = 'authsub_token_scope' +# When requesting an OAuth access token or authorization of an existing OAuth +# request token, it is often helpful to track the scope(s) which is/are being +# requested. One way to accomplish this is to add a URL parameter to the +# 'callback' URL which contains the requested scope. This constant is the +# default name (AKA key) for the URL parameter. +OAUTH_SCOPE_URL_PARAM_NAME = 'oauth_token_scope' +# Maps the service names used in ClientLogin to scope URLs. 
+CLIENT_LOGIN_SCOPES = { + 'cl': [ # Google Calendar + 'https://www.google.com/calendar/feeds/', + 'http://www.google.com/calendar/feeds/'], + 'gbase': [ # Google Base + 'http://base.google.com/base/feeds/', + 'http://www.google.com/base/feeds/'], + 'blogger': [ # Blogger + 'http://www.blogger.com/feeds/'], + 'codesearch': [ # Google Code Search + 'http://www.google.com/codesearch/feeds/'], + 'cp': [ # Contacts API + 'https://www.google.com/m8/feeds/', + 'http://www.google.com/m8/feeds/'], + 'finance': [ # Google Finance + 'http://finance.google.com/finance/feeds/'], + 'health': [ # Google Health + 'https://www.google.com/health/feeds/'], + 'writely': [ # Documents List API + 'https://docs.google.com/feeds/', + 'http://docs.google.com/feeds/'], + 'lh2': [ # Picasa Web Albums + 'http://picasaweb.google.com/data/'], + 'apps': [ # Google Apps Provisioning API + 'http://www.google.com/a/feeds/', + 'https://www.google.com/a/feeds/', + 'http://apps-apis.google.com/a/feeds/', + 'https://apps-apis.google.com/a/feeds/'], + 'weaver': [ # Health H9 Sandbox + 'https://www.google.com/h9/'], + 'wise': [ # Spreadsheets Data API + 'https://spreadsheets.google.com/feeds/', + 'http://spreadsheets.google.com/feeds/'], + 'sitemaps': [ # Google Webmaster Tools + 'https://www.google.com/webmasters/tools/feeds/'], + 'youtube': [ # YouTube + 'http://gdata.youtube.com/feeds/api/', + 'http://uploads.gdata.youtube.com/feeds/api', + 'http://gdata.youtube.com/action/GetUploadToken']} + + +def lookup_scopes(service_name): + """Finds the scope URLs for the desired service. + + In some cases, an unknown service may be used, and in those cases this + function will return None. + """ + if service_name in CLIENT_LOGIN_SCOPES: + return CLIENT_LOGIN_SCOPES[service_name] + return None + + +# Module level variable specifies which module should be used by GDataService +# objects to make HttpRequests. This setting can be overridden on each +# instance of GDataService. 
+# This module level variable is deprecated. Reassign the http_client member +# of a GDataService object instead. +http_request_handler = atom.service + + +class Error(Exception): + pass + + +class CaptchaRequired(Error): + pass + + +class BadAuthentication(Error): + pass + + +class NotAuthenticated(Error): + pass + + +class NonAuthSubToken(Error): + pass + + +class NonOAuthToken(Error): + pass + + +class RequestError(Error): + pass + + +class UnexpectedReturnType(Error): + pass + + +class BadAuthenticationServiceURL(Error): + pass + + +class FetchingOAuthRequestTokenFailed(RequestError): + pass + + +class TokenUpgradeFailed(RequestError): + pass + + +class RevokingOAuthTokenFailed(RequestError): + pass + + +class AuthorizationRequired(Error): + pass + + +class TokenHadNoScope(Error): + pass + + +class GDataService(atom.service.AtomService): + """Contains elements needed for GData login and CRUD request headers. + + Maintains additional headers (tokens for example) needed for the GData + services to allow a user to perform inserts, updates, and deletes. + """ + # The hander member is deprecated, use http_client instead. + handler = None + # The auth_token member is deprecated, use the token_store instead. + auth_token = None + # The tokens dict is deprecated in favor of the token_store. + tokens = None + + def __init__(self, email=None, password=None, account_type='HOSTED_OR_GOOGLE', + service=None, auth_service_url=None, source=None, server=None, + additional_headers=None, handler=None, tokens=None, + http_client=None, token_store=None): + """Creates an object of type GDataService. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + account_type: string (optional) The type of account to use. 
Use + 'GOOGLE' for regular Google accounts or 'HOSTED' for Google + Apps accounts, or 'HOSTED_OR_GOOGLE' to try finding a HOSTED + account first and, if it doesn't exist, try finding a regular + GOOGLE account. Default value: 'HOSTED_OR_GOOGLE'. + service: string (optional) The desired service for which credentials + will be obtained. + auth_service_url: string (optional) User-defined auth token request URL + allows users to explicitly specify where to send auth token requests. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'base.google.com'. + additional_headers: dictionary (optional) Any additional headers which + should be included with CRUD operations. + handler: module (optional) This parameter is deprecated and has been + replaced by http_client. + tokens: This parameter is deprecated, calls should be made to + token_store instead. + http_client: An object responsible for making HTTP requests using a + request method. If none is provided, a new instance of + atom.http.ProxiedHttpClient will be used. + token_store: Keeps a collection of authorization tokens which can be + applied to requests for a specific URLs. Critical methods are + find_token based on a URL (atom.url.Url or a string), add_token, + and remove_token. 
+ """ + atom.service.AtomService.__init__(self, http_client=http_client, + token_store=token_store) + self.email = email + self.password = password + self.account_type = account_type + self.service = service + self.auth_service_url = auth_service_url + self.server = server + self.additional_headers = additional_headers or {} + self._oauth_input_params = None + self.__SetSource(source) + self.__captcha_token = None + self.__captcha_url = None + self.__gsessionid = None + + if http_request_handler.__name__ == 'gdata.urlfetch': + import gdata.alt.appengine + self.http_client = gdata.alt.appengine.AppEngineHttpClient() + + # Define properties for GDataService + def _SetAuthSubToken(self, auth_token, scopes=None): + """Deprecated, use SetAuthSubToken instead.""" + self.SetAuthSubToken(auth_token, scopes=scopes) + + def __SetAuthSubToken(self, auth_token, scopes=None): + """Deprecated, use SetAuthSubToken instead.""" + self._SetAuthSubToken(auth_token, scopes=scopes) + + def _GetAuthToken(self): + """Returns the auth token used for authenticating requests. + + Returns: + string + """ + current_scopes = lookup_scopes(self.service) + if current_scopes: + token = self.token_store.find_token(current_scopes[0]) + if hasattr(token, 'auth_header'): + return token.auth_header + return None + + def _GetCaptchaToken(self): + """Returns a captcha token if the most recent login attempt generated one. + + The captcha token is only set if the Programmatic Login attempt failed + because the Google service issued a captcha challenge. + + Returns: + string + """ + return self.__captcha_token + + def __GetCaptchaToken(self): + return self._GetCaptchaToken() + + captcha_token = property(__GetCaptchaToken, + doc="""Get the captcha token for a login request.""") + + def _GetCaptchaURL(self): + """Returns the URL of the captcha image if a login attempt generated one. 
+ + The captcha URL is only set if the Programmatic Login attempt failed + because the Google service issued a captcha challenge. + + Returns: + string + """ + return self.__captcha_url + + def __GetCaptchaURL(self): + return self._GetCaptchaURL() + + captcha_url = property(__GetCaptchaURL, + doc="""Get the captcha URL for a login request.""") + + def SetOAuthInputParameters(self, signature_method, consumer_key, + consumer_secret=None, rsa_key=None, + two_legged_oauth=False): + """Sets parameters required for using OAuth authentication mechanism. + + NOTE: Though consumer_secret and rsa_key are optional, either of the two + is required depending on the value of the signature_method. + + Args: + signature_method: class which provides implementation for strategy class + oauth.oauth.OAuthSignatureMethod. Signature method to be used for + signing each request. Valid implementations are provided as the + constants defined by gdata.auth.OAuthSignatureMethod. Currently + they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and + gdata.auth.OAuthSignatureMethod.HMAC_SHA1 + consumer_key: string Domain identifying third_party web application. + consumer_secret: string (optional) Secret generated during registration. + Required only for HMAC_SHA1 signature method. + rsa_key: string (optional) Private key required for RSA_SHA1 signature + method. + two_legged_oauth: string (default=False) Enables two-legged OAuth process. + """ + self._oauth_input_params = gdata.auth.OAuthInputParams( + signature_method, consumer_key, consumer_secret=consumer_secret, + rsa_key=rsa_key) + if two_legged_oauth: + oauth_token = gdata.auth.OAuthToken( + oauth_input_params=self._oauth_input_params) + self.SetOAuthToken(oauth_token) + + def FetchOAuthRequestToken(self, scopes=None, extra_parameters=None): + """Fetches OAuth request token and returns it. + + Args: + scopes: string or list of string base URL(s) of the service(s) to be + accessed. 
If None, then this method tries to determine the + scope(s) from the current service. + extra_parameters: dict (optional) key-value pairs as any additional + parameters to be included in the URL and signature while making a + request for fetching an OAuth request token. All the OAuth parameters + are added by default. But if provided through this argument, any + default parameters will be overwritten. For e.g. a default parameter + oauth_version 1.0 can be overwritten if + extra_parameters = {'oauth_version': '2.0'} + + Returns: + The fetched request token as a gdata.auth.OAuthToken object. + + Raises: + FetchingOAuthRequestTokenFailed if the server responded to the request + with an error. + """ + if scopes is None: + scopes = lookup_scopes(self.service) + if not isinstance(scopes, (list, tuple)): + scopes = [scopes,] + request_token_url = gdata.auth.GenerateOAuthRequestTokenUrl( + self._oauth_input_params, scopes, + request_token_url='%s/accounts/OAuthGetRequestToken' % AUTH_SERVER_HOST, + extra_parameters=extra_parameters) + response = self.http_client.request('GET', str(request_token_url)) + if response.status == 200: + token = gdata.auth.OAuthToken() + token.set_token_string(response.read()) + token.scopes = scopes + token.oauth_input_params = self._oauth_input_params + return token + error = { + 'status': response.status, + 'reason': 'Non 200 response on upgrade', + 'body': response.read() + } + raise FetchingOAuthRequestTokenFailed(error) + + def SetOAuthToken(self, oauth_token): + """Attempts to set the current token and add it to the token store. + + The oauth_token can be any OAuth token i.e. unauthorized request token, + authorized request token or access token. + This method also attempts to add the token to the token store. + Use this method any time you want the current token to point to the + oauth_token passed. For e.g. call this method with the request token + you receive from FetchOAuthRequestToken. 
+ + Args: + request_token: gdata.auth.OAuthToken OAuth request token. + """ + if self.auto_set_current_token: + self.current_token = oauth_token + if self.auto_store_tokens: + self.token_store.add_token(oauth_token) + + def GenerateOAuthAuthorizationURL( + self, request_token=None, callback_url=None, extra_params=None, + include_scopes_in_callback=False, + scopes_param_prefix=OAUTH_SCOPE_URL_PARAM_NAME): + """Generates URL at which user will login to authorize the request token. + + Args: + request_token: gdata.auth.OAuthToken (optional) OAuth request token. + If not specified, then the current token will be used if it is of + type <gdata.auth.OAuthToken>, else it is found by looking in the + token_store by looking for a token for the current scope. + callback_url: string (optional) The URL user will be sent to after + logging in and granting access. + extra_params: dict (optional) Additional parameters to be sent. + include_scopes_in_callback: Boolean (default=False) if set to True, and + if 'callback_url' is present, the 'callback_url' will be modified to + include the scope(s) from the request token as a URL parameter. The + key for the 'callback' URL's scope parameter will be + OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as + a parameter to the 'callback' URL, is that the page which receives + the OAuth token will be able to tell which URLs the token grants + access to. + scopes_param_prefix: string (default='oauth_token_scope') The URL + parameter key which maps to the list of valid scopes for the token. + This URL parameter will be included in the callback URL along with + the scopes of the token as value if include_scopes_in_callback=True. + + Returns: + A string URL at which the user is required to login. + + Raises: + NonOAuthToken if the user's request token is not an OAuth token or if a + request token was not available. 
+ """ + if request_token and not isinstance(request_token, gdata.auth.OAuthToken): + raise NonOAuthToken + if not request_token: + if isinstance(self.current_token, gdata.auth.OAuthToken): + request_token = self.current_token + else: + current_scopes = lookup_scopes(self.service) + if current_scopes: + token = self.token_store.find_token(current_scopes[0]) + if isinstance(token, gdata.auth.OAuthToken): + request_token = token + if not request_token: + raise NonOAuthToken + return str(gdata.auth.GenerateOAuthAuthorizationUrl( + request_token, + authorization_url='%s/accounts/OAuthAuthorizeToken' % AUTH_SERVER_HOST, + callback_url=callback_url, extra_params=extra_params, + include_scopes_in_callback=include_scopes_in_callback, + scopes_param_prefix=scopes_param_prefix)) + + def UpgradeToOAuthAccessToken(self, authorized_request_token=None, + oauth_version='1.0'): + """Upgrades the authorized request token to an access token. + + Args: + authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request + token. If not specified, then the current token will be used if it is + of type <gdata.auth.OAuthToken>, else it is found by looking in the + token_store by looking for a token for the current scope. + oauth_version: str (default='1.0') oauth_version parameter. All other + 'oauth_' parameters are added by default. This parameter too, is + added by default but here you can override it's value. + + Raises: + NonOAuthToken if the user's authorized request token is not an OAuth + token or if an authorized request token was not available. + TokenUpgradeFailed if the server responded to the request with an + error. 
+ """ + if (authorized_request_token and + not isinstance(authorized_request_token, gdata.auth.OAuthToken)): + raise NonOAuthToken + if not authorized_request_token: + if isinstance(self.current_token, gdata.auth.OAuthToken): + authorized_request_token = self.current_token + else: + current_scopes = lookup_scopes(self.service) + if current_scopes: + token = self.token_store.find_token(current_scopes[0]) + if isinstance(token, gdata.auth.OAuthToken): + authorized_request_token = token + if not authorized_request_token: + raise NonOAuthToken + access_token_url = gdata.auth.GenerateOAuthAccessTokenUrl( + authorized_request_token, + self._oauth_input_params, + access_token_url='%s/accounts/OAuthGetAccessToken' % AUTH_SERVER_HOST, + oauth_version=oauth_version) + response = self.http_client.request('GET', str(access_token_url)) + if response.status == 200: + token = gdata.auth.OAuthTokenFromHttpBody(response.read()) + token.scopes = authorized_request_token.scopes + token.oauth_input_params = authorized_request_token.oauth_input_params + self.SetOAuthToken(token) + else: + raise TokenUpgradeFailed({'status': response.status, + 'reason': 'Non 200 response on upgrade', + 'body': response.read()}) + + def RevokeOAuthToken(self): + """Revokes an existing OAuth token. + + Raises: + NonOAuthToken if the user's auth token is not an OAuth token. + RevokingOAuthTokenFailed if request for revoking an OAuth token failed. + """ + scopes = lookup_scopes(self.service) + token = self.token_store.find_token(scopes[0]) + if not isinstance(token, gdata.auth.OAuthToken): + raise NonOAuthToken + + response = token.perform_request(self.http_client, 'GET', + AUTH_SERVER_HOST + '/accounts/AuthSubRevokeToken', + headers={'Content-Type':'application/x-www-form-urlencoded'}) + if response.status == 200: + self.token_store.remove_token(token) + else: + raise RevokingOAuthTokenFailed + + def GetAuthSubToken(self): + """Returns the AuthSub token as a string. 
+
+
+ If the token is a gdata.auth.AuthSubToken, the Authorization Label
+ ("AuthSub token") is removed.
+
+ This method examines the current_token to see if it is an AuthSubToken
+ or SecureAuthSubToken. If not, it searches the token_store for a token
+ which matches the current scope.
+
+ The current scope is determined by the service name string member.
+
+ Returns:
+ If the current_token is set to an AuthSubToken/SecureAuthSubToken,
+ return the token string. If there is no current_token, a token string
+ for a token which matches the service object's default scope is returned.
+ If there are no tokens valid for the scope, returns None.
+ """
+ if isinstance(self.current_token, gdata.auth.AuthSubToken):
+ return self.current_token.get_token_string()
+ current_scopes = lookup_scopes(self.service)
+ if current_scopes:
+ token = self.token_store.find_token(current_scopes[0])
+ if isinstance(token, gdata.auth.AuthSubToken):
+ return token.get_token_string()
+ else:
+ token = self.token_store.find_token(atom.token_store.SCOPE_ALL)
+ if isinstance(token, gdata.auth.ClientLoginToken):
+ return token.get_token_string()
+ return None
+
+ def SetAuthSubToken(self, token, scopes=None, rsa_key=None):
+ """Sets the token sent in requests to an AuthSub token.
+
+ Sets the current_token and attempts to add the token to the token_store.
+
+ Only use this method if you have received a token from the AuthSub
+ service. The auth token is set automatically when UpgradeToSessionToken()
+ is used. See documentation for Google AuthSub here:
+ http://code.google.com/apis/accounts/AuthForWebApps.html
+
+ Args:
+ token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string
+ The token returned by the AuthSub service. If the token is an
+ AuthSubToken or SecureAuthSubToken, the scope information stored in
+ the token is used. If the token is a string, the scopes parameter is
+ used to determine the valid scopes.
+ scopes: list of URLs for which the token is valid.
This is only used + if the token parameter is a string. + rsa_key: string (optional) Private key required for RSA_SHA1 signature + method. This parameter is necessary if the token is a string + representing a secure token. + """ + if not isinstance(token, gdata.auth.AuthSubToken): + token_string = token + if rsa_key: + token = gdata.auth.SecureAuthSubToken(rsa_key) + else: + token = gdata.auth.AuthSubToken() + + token.set_token_string(token_string) + + # If no scopes were set for the token, use the scopes passed in, or + # try to determine the scopes based on the current service name. If + # all else fails, set the token to match all requests. + if not token.scopes: + if scopes is None: + scopes = lookup_scopes(self.service) + if scopes is None: + scopes = [atom.token_store.SCOPE_ALL] + token.scopes = scopes + if self.auto_set_current_token: + self.current_token = token + if self.auto_store_tokens: + self.token_store.add_token(token) + + def GetClientLoginToken(self): + """Returns the token string for the current token or a token matching the + service scope. + + If the current_token is a ClientLoginToken, the token string for + the current token is returned. If the current_token is not set, this method + searches for a token in the token_store which is valid for the service + object's current scope. + + The current scope is determined by the service name string member. + The token string is the end of the Authorization header, it doesn not + include the ClientLogin label. 
+ """ + if isinstance(self.current_token, gdata.auth.ClientLoginToken): + return self.current_token.get_token_string() + current_scopes = lookup_scopes(self.service) + if current_scopes: + token = self.token_store.find_token(current_scopes[0]) + if isinstance(token, gdata.auth.ClientLoginToken): + return token.get_token_string() + else: + token = self.token_store.find_token(atom.token_store.SCOPE_ALL) + if isinstance(token, gdata.auth.ClientLoginToken): + return token.get_token_string() + return None + + def SetClientLoginToken(self, token, scopes=None): + """Sets the token sent in requests to a ClientLogin token. + + This method sets the current_token to a new ClientLoginToken and it + also attempts to add the ClientLoginToken to the token_store. + + Only use this method if you have received a token from the ClientLogin + service. The auth_token is set automatically when ProgrammaticLogin() + is used. See documentation for Google ClientLogin here: + http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html + + Args: + token: string or instance of a ClientLoginToken. + """ + if not isinstance(token, gdata.auth.ClientLoginToken): + token_string = token + token = gdata.auth.ClientLoginToken() + token.set_token_string(token_string) + + if not token.scopes: + if scopes is None: + scopes = lookup_scopes(self.service) + if scopes is None: + scopes = [atom.token_store.SCOPE_ALL] + token.scopes = scopes + if self.auto_set_current_token: + self.current_token = token + if self.auto_store_tokens: + self.token_store.add_token(token) + + # Private methods to create the source property. + def __GetSource(self): + return self.__source + + def __SetSource(self, new_source): + self.__source = new_source + # Update the UserAgent header to include the new application name. 
+
+ self.additional_headers['User-Agent'] = atom.http_interface.USER_AGENT % (
+ self.__source,)
+
+ source = property(__GetSource, __SetSource,
+ doc="""The source is the name of the application making the request.
+ It should be in the form company_id-app_name-app_version""")
+
+ # Authentication operations
+
+ def ProgrammaticLogin(self, captcha_token=None, captcha_response=None):
+ """Authenticates the user and sets the GData Auth token.
+
+ Login retrieves a temporary auth token which must be used with all
+ requests to GData services. The auth token is stored in the GData client
+ object.
+
+ Login is also used to respond to a captcha challenge. If the user's login
+ attempt failed with a CaptchaRequired error, the user can respond by
+ calling Login with the captcha token and the answer to the challenge.
+
+ Args:
+ captcha_token: string (optional) The identifier for the captcha challenge
+ which was presented to the user.
+ captcha_response: string (optional) The user's answer to the captcha
+ challenge.
+
+ Raises:
+ CaptchaRequired if the login service will require a captcha response
+ BadAuthentication if the login service rejected the username or password
+ Error if the login service responded with a 403 different from the above
+ """
+ request_body = gdata.auth.generate_client_login_request_body(self.email,
+ self.password, self.service, self.source, self.account_type,
+ captcha_token, captcha_response)
+
+ # If the user has defined their own authentication service URL,
+ # send the ClientLogin requests to this URL:
+ if not self.auth_service_url:
+ auth_request_url = AUTH_SERVER_HOST + '/accounts/ClientLogin'
+ else:
+ auth_request_url = self.auth_service_url
+
+ auth_response = self.http_client.request('POST', auth_request_url,
+ data=request_body,
+ headers={'Content-Type':'application/x-www-form-urlencoded'})
+ response_body = auth_response.read()
+
+ if auth_response.status == 200:
+ # TODO: insert the token into the token_store directly.
+ self.SetClientLoginToken( + gdata.auth.get_client_login_token(response_body)) + self.__captcha_token = None + self.__captcha_url = None + + elif auth_response.status == 403: + # Examine each line to find the error type and the captcha token and + # captch URL if they are present. + captcha_parameters = gdata.auth.get_captcha_challenge(response_body, + captcha_base_url='%s/accounts/' % AUTH_SERVER_HOST) + if captcha_parameters: + self.__captcha_token = captcha_parameters['token'] + self.__captcha_url = captcha_parameters['url'] + raise CaptchaRequired, 'Captcha Required' + elif response_body.splitlines()[0] == 'Error=BadAuthentication': + self.__captcha_token = None + self.__captcha_url = None + raise BadAuthentication, 'Incorrect username or password' + else: + self.__captcha_token = None + self.__captcha_url = None + raise Error, 'Server responded with a 403 code' + elif auth_response.status == 302: + self.__captcha_token = None + self.__captcha_url = None + # Google tries to redirect all bad URLs back to + # http://www.google.<locale>. If a redirect + # attempt is made, assume the user has supplied an incorrect authentication URL + raise BadAuthenticationServiceURL, 'Server responded with a 302 code.' + + def ClientLogin(self, username, password, account_type=None, service=None, + auth_service_url=None, source=None, captcha_token=None, + captcha_response=None): + """Convenience method for authenticating using ProgrammaticLogin. + + Sets values for email, password, and other optional members. 
+ + Args: + username: + password: + account_type: string (optional) + service: string (optional) + auth_service_url: string (optional) + captcha_token: string (optional) + captcha_response: string (optional) + """ + self.email = username + self.password = password + + if account_type: + self.account_type = account_type + if service: + self.service = service + if source: + self.source = source + if auth_service_url: + self.auth_service_url = auth_service_url + + self.ProgrammaticLogin(captcha_token, captcha_response) + + def GenerateAuthSubURL(self, next, scope, secure=False, session=True, + domain='default'): + """Generate a URL at which the user will login and be redirected back. + + Users enter their credentials on a Google login page and a token is sent + to the URL specified in next. See documentation for AuthSub login at: + http://code.google.com/apis/accounts/docs/AuthSub.html + + Args: + next: string The URL user will be sent to after logging in. + scope: string or list of strings. The URLs of the services to be + accessed. + secure: boolean (optional) Determines whether or not the issued token + is a secure token. + session: boolean (optional) Determines whether or not the issued token + can be upgraded to a session token. + """ + if not isinstance(scope, (list, tuple)): + scope = (scope,) + return gdata.auth.generate_auth_sub_url(next, scope, secure=secure, + session=session, + request_url='%s/accounts/AuthSubRequest' % AUTH_SERVER_HOST, + domain=domain) + + def UpgradeToSessionToken(self, token=None): + """Upgrades a single use AuthSub token to a session token. + + Args: + token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken + (optional) which is good for a single use but can be upgraded + to a session token. If no token is passed in, the token + is found by looking in the token_store by looking for a token + for the current scope. 
+ + Raises: + NonAuthSubToken if the user's auth token is not an AuthSub token + TokenUpgradeFailed if the server responded to the request with an + error. + """ + if token is None: + scopes = lookup_scopes(self.service) + if scopes: + token = self.token_store.find_token(scopes[0]) + else: + token = self.token_store.find_token(atom.token_store.SCOPE_ALL) + if not isinstance(token, gdata.auth.AuthSubToken): + raise NonAuthSubToken + + self.SetAuthSubToken(self.upgrade_to_session_token(token)) + + def upgrade_to_session_token(self, token): + """Upgrades a single use AuthSub token to a session token. + + Args: + token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken + which is good for a single use but can be upgraded to a + session token. + + Returns: + The upgraded token as a gdata.auth.AuthSubToken object. + + Raises: + TokenUpgradeFailed if the server responded to the request with an + error. + """ + response = token.perform_request(self.http_client, 'GET', + AUTH_SERVER_HOST + '/accounts/AuthSubSessionToken', + headers={'Content-Type':'application/x-www-form-urlencoded'}) + response_body = response.read() + if response.status == 200: + token.set_token_string( + gdata.auth.token_from_http_body(response_body)) + return token + else: + raise TokenUpgradeFailed({'status': response.status, + 'reason': 'Non 200 response on upgrade', + 'body': response_body}) + + def RevokeAuthSubToken(self): + """Revokes an existing AuthSub token. 
+ + Raises: + NonAuthSubToken if the user's auth token is not an AuthSub token + """ + scopes = lookup_scopes(self.service) + token = self.token_store.find_token(scopes[0]) + if not isinstance(token, gdata.auth.AuthSubToken): + raise NonAuthSubToken + + response = token.perform_request(self.http_client, 'GET', + AUTH_SERVER_HOST + '/accounts/AuthSubRevokeToken', + headers={'Content-Type':'application/x-www-form-urlencoded'}) + if response.status == 200: + self.token_store.remove_token(token) + + def AuthSubTokenInfo(self): + """Fetches the AuthSub token's metadata from the server. + + Raises: + NonAuthSubToken if the user's auth token is not an AuthSub token + """ + scopes = lookup_scopes(self.service) + token = self.token_store.find_token(scopes[0]) + if not isinstance(token, gdata.auth.AuthSubToken): + raise NonAuthSubToken + + response = token.perform_request(self.http_client, 'GET', + AUTH_SERVER_HOST + '/accounts/AuthSubTokenInfo', + headers={'Content-Type':'application/x-www-form-urlencoded'}) + result_body = response.read() + if response.status == 200: + return result_body + else: + raise RequestError, {'status': response.status, + 'body': result_body} + + # CRUD operations + def Get(self, uri, extra_headers=None, redirects_remaining=4, + encoding='UTF-8', converter=None): + """Query the GData API with the given URI + + The uri is the portion of the URI after the server value + (ex: www.google.com). + + To perform a query against Google Base, set the server to + 'base.google.com' and set the uri to '/base/feeds/...', where ... is + your query. For example, to find snippets for all digital cameras uri + should be set to: '/base/feeds/snippets?bq=digital+camera' + + Args: + uri: string The query in the form of a URI. Example: + '/base/feeds/snippets?bq=digital+camera'. + extra_headers: dictionary (optional) Extra HTTP headers to be included + in the GET request. These headers are in addition to + those stored in the client's additional_headers property. 
+
+ The client automatically sets the Content-Type and
+ Authorization headers.
+ redirects_remaining: int (optional) Tracks the number of additional
+ redirects this method will allow. If the service object receives
+ a redirect and remaining is 0, it will not follow the redirect.
+ This was added to avoid infinite redirect loops.
+ encoding: string (optional) The character encoding for the server's
+ response. Default is UTF-8
+ converter: func (optional) A function which will transform
+ the server's results before it is returned. Example: use
+ GDataFeedFromString to parse the server response as if it
+ were a GDataFeed.
+
+ Returns:
+ If there is no ResultsTransformer specified in the call, a GDataFeed
+ or GDataEntry depending on which is sent from the server. If the
+ response is neither a feed nor an entry and there is no ResultsTransformer,
+ return a string. If there is a ResultsTransformer, the returned value
+ will be that of the ResultsTransformer function.
+ """
+
+ if extra_headers is None:
+ extra_headers = {}
+
+ if self.__gsessionid is not None:
+ if uri.find('gsessionid=') < 0:
+ if uri.find('?') > -1:
+ uri += '&gsessionid=%s' % (self.__gsessionid,)
+ else:
+ uri += '?gsessionid=%s' % (self.__gsessionid,)
+
+ server_response = self.request('GET', uri,
+ headers=extra_headers)
+ result_body = server_response.read()
+
+ if server_response.status == 200:
+ if converter:
+ return converter(result_body)
+ # There was no ResultsTransformer specified, so try to convert the
+ # server's response into a GDataFeed.
+ feed = gdata.GDataFeedFromString(result_body)
+ if not feed:
+ # If conversion to a GDataFeed failed, try to convert the server's
+ # response to a GDataEntry.
+ entry = gdata.GDataEntryFromString(result_body)
+ if not entry:
+ # The server's response wasn't a feed, or an entry, so return the
+ # response body as a string.
+ return result_body + return entry + return feed + elif server_response.status == 302: + if redirects_remaining > 0: + location = server_response.getheader('Location') + if location is not None: + m = re.compile('[\?\&]gsessionid=(\w*)').search(location) + if m is not None: + self.__gsessionid = m.group(1) + return GDataService.Get(self, location, extra_headers, redirects_remaining - 1, + encoding=encoding, converter=converter) + else: + raise RequestError, {'status': server_response.status, + 'reason': '302 received without Location header', + 'body': result_body} + else: + raise RequestError, {'status': server_response.status, + 'reason': 'Redirect received, but redirects_remaining <= 0', + 'body': result_body} + else: + raise RequestError, {'status': server_response.status, + 'reason': server_response.reason, 'body': result_body} + + def GetMedia(self, uri, extra_headers=None): + """Returns a MediaSource containing media and its metadata from the given + URI string. + """ + response_handle = self.request('GET', uri, + headers=extra_headers) + return gdata.MediaSource(response_handle, response_handle.getheader( + 'Content-Type'), + response_handle.getheader('Content-Length')) + + def GetEntry(self, uri, extra_headers=None): + """Query the GData API with the given URI and receive an Entry. + + See also documentation for gdata.service.Get + + Args: + uri: string The query in the form of a URI. Example: + '/base/feeds/snippets?bq=digital+camera'. + extra_headers: dictionary (optional) Extra HTTP headers to be included + in the GET request. These headers are in addition to + those stored in the client's additional_headers property. + The client automatically sets the Content-Type and + Authorization headers. + + Returns: + A GDataEntry built from the XML in the server's response. 
+ """ + + result = GDataService.Get(self, uri, extra_headers, + converter=atom.EntryFromString) + if isinstance(result, atom.Entry): + return result + else: + raise UnexpectedReturnType, 'Server did not send an entry' + + def GetFeed(self, uri, extra_headers=None, + converter=gdata.GDataFeedFromString): + """Query the GData API with the given URI and receive a Feed. + + See also documentation for gdata.service.Get + + Args: + uri: string The query in the form of a URI. Example: + '/base/feeds/snippets?bq=digital+camera'. + extra_headers: dictionary (optional) Extra HTTP headers to be included + in the GET request. These headers are in addition to + those stored in the client's additional_headers property. + The client automatically sets the Content-Type and + Authorization headers. + + Returns: + A GDataFeed built from the XML in the server's response. + """ + + result = GDataService.Get(self, uri, extra_headers, converter=converter) + if isinstance(result, atom.Feed): + return result + else: + raise UnexpectedReturnType, 'Server did not send a feed' + + def GetNext(self, feed): + """Requests the next 'page' of results in the feed. + + This method uses the feed's next link to request an additional feed + and uses the class of the feed to convert the results of the GET request. + + Args: + feed: atom.Feed or a subclass. The feed should contain a next link and + the type of the feed will be applied to the results from the + server. The new feed which is returned will be of the same class + as this feed which was passed in. + + Returns: + A new feed representing the next set of results in the server's feed. + The type of this feed will match that of the feed argument. + """ + next_link = feed.GetNextLink() + # Create a closure which will convert an XML string to the class of + # the feed object passed in. 
+ def ConvertToFeedClass(xml_string): + return atom.CreateClassFromXMLString(feed.__class__, xml_string) + # Make a GET request on the next link and use the above closure for the + # converted which processes the XML string from the server. + if next_link and next_link.href: + return GDataService.Get(self, next_link.href, + converter=ConvertToFeedClass) + else: + return None + + def Post(self, data, uri, extra_headers=None, url_params=None, + escape_params=True, redirects_remaining=4, media_source=None, + converter=None): + """Insert or update data into a GData service at the given URI. + + Args: + data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The + XML to be sent to the uri. + uri: string The location (feed) to which the data should be inserted. + Example: '/base/feeds/items'. + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type, + Authorization, and Content-Length headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + media_source: MediaSource (optional) Container for the media to be sent + along with the entry, if provided. + converter: func (optional) A function which will be executed on the + server's response. Often this is a function like + GDataEntryFromString which will parse the body of the server's + response and return a GDataEntry. + + Returns: + If the post succeeded, this method will return a GDataFeed, GDataEntry, + or the results of running converter on the server's result body (if + converter was specified). 
+ """ + return GDataService.PostOrPut(self, 'POST', data, uri, + extra_headers=extra_headers, url_params=url_params, + escape_params=escape_params, redirects_remaining=redirects_remaining, + media_source=media_source, converter=converter) + + def PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None, + escape_params=True, redirects_remaining=4, media_source=None, + converter=None): + """Insert data into a GData service at the given URI. + + Args: + verb: string, either 'POST' or 'PUT' + data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The + XML to be sent to the uri. + uri: string The location (feed) to which the data should be inserted. + Example: '/base/feeds/items'. + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type, + Authorization, and Content-Length headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + media_source: MediaSource (optional) Container for the media to be sent + along with the entry, if provided. + converter: func (optional) A function which will be executed on the + server's response. Often this is a function like + GDataEntryFromString which will parse the body of the server's + response and return a GDataEntry. + + Returns: + If the post succeeded, this method will return a GDataFeed, GDataEntry, + or the results of running converter on the server's result body (if + converter was specified). 
+ """ + if extra_headers is None: + extra_headers = {} + + if self.__gsessionid is not None: + if uri.find('gsessionid=') < 0: + if uri.find('?') > -1: + uri += '&gsessionid=%s' % (self.__gsessionid,) + else: + uri += '?gsessionid=%s' % (self.__gsessionid,) + + if data and media_source: + if ElementTree.iselement(data): + data_str = ElementTree.tostring(data) + else: + data_str = str(data) + + multipart = [] + multipart.append('Media multipart posting\r\n--END_OF_PART\r\n' + \ + 'Content-Type: application/atom+xml\r\n\r\n') + multipart.append('\r\n--END_OF_PART\r\nContent-Type: ' + \ + media_source.content_type+'\r\n\r\n') + multipart.append('\r\n--END_OF_PART--\r\n') + + extra_headers['MIME-version'] = '1.0' + extra_headers['Content-Length'] = str(len(multipart[0]) + + len(multipart[1]) + len(multipart[2]) + + len(data_str) + media_source.content_length) + + extra_headers['Content-Type'] = 'multipart/related; boundary=END_OF_PART' + server_response = self.request(verb, uri, + data=[multipart[0], data_str, multipart[1], media_source.file_handle, + multipart[2]], headers=extra_headers) + result_body = server_response.read() + + elif media_source or isinstance(data, gdata.MediaSource): + if isinstance(data, gdata.MediaSource): + media_source = data + extra_headers['Content-Length'] = str(media_source.content_length) + extra_headers['Content-Type'] = media_source.content_type + server_response = self.request(verb, uri, + data=media_source.file_handle, headers=extra_headers) + result_body = server_response.read() + + else: + http_data = data + content_type = 'application/atom+xml' + extra_headers['Content-Type'] = content_type + server_response = self.request(verb, uri, data=http_data, + headers=extra_headers) + result_body = server_response.read() + + # Server returns 201 for most post requests, but when performing a batch + # request the server responds with a 200 on success. 
+ if server_response.status == 201 or server_response.status == 200: + if converter: + return converter(result_body) + feed = gdata.GDataFeedFromString(result_body) + if not feed: + entry = gdata.GDataEntryFromString(result_body) + if not entry: + return result_body + return entry + return feed + elif server_response.status == 302: + if redirects_remaining > 0: + location = server_response.getheader('Location') + if location is not None: + m = re.compile('[\?\&]gsessionid=(\w*)').search(location) + if m is not None: + self.__gsessionid = m.group(1) + return GDataService.PostOrPut(self, verb, data, location, + extra_headers, url_params, escape_params, + redirects_remaining - 1, media_source, converter=converter) + else: + raise RequestError, {'status': server_response.status, + 'reason': '302 received without Location header', + 'body': result_body} + else: + raise RequestError, {'status': server_response.status, + 'reason': 'Redirect received, but redirects_remaining <= 0', + 'body': result_body} + else: + raise RequestError, {'status': server_response.status, + 'reason': server_response.reason, 'body': result_body} + + def Put(self, data, uri, extra_headers=None, url_params=None, + escape_params=True, redirects_remaining=3, media_source=None, + converter=None): + """Updates an entry at the given URI. + + Args: + data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The + XML containing the updated data. + uri: string A URI indicating entry to which the update will be applied. + Example: '/base/feeds/items/ITEM-ID' + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type, + Authorization, and Content-Length headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. 
+ Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + converter: func (optional) A function which will be executed on the + server's response. Often this is a function like + GDataEntryFromString which will parse the body of the server's + response and return a GDataEntry. + + Returns: + If the put succeeded, this method will return a GDataFeed, GDataEntry, + or the results of running converter on the server's result body (if + converter was specified). + """ + return GDataService.PostOrPut(self, 'PUT', data, uri, + extra_headers=extra_headers, url_params=url_params, + escape_params=escape_params, redirects_remaining=redirects_remaining, + media_source=media_source, converter=converter) + + def Delete(self, uri, extra_headers=None, url_params=None, + escape_params=True, redirects_remaining=4): + """Deletes the entry at the given URI. + + Args: + uri: string The URI of the entry to be deleted. Example: + '/base/feeds/items/ITEM-ID' + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type and + Authorization headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + + Returns: + True if the entry was deleted. 
+ """ + if extra_headers is None: + extra_headers = {} + + if self.__gsessionid is not None: + if uri.find('gsessionid=') < 0: + if uri.find('?') > -1: + uri += '&gsessionid=%s' % (self.__gsessionid,) + else: + uri += '?gsessionid=%s' % (self.__gsessionid,) + + server_response = self.request('DELETE', uri, + headers=extra_headers) + result_body = server_response.read() + + if server_response.status == 200: + return True + elif server_response.status == 302: + if redirects_remaining > 0: + location = server_response.getheader('Location') + if location is not None: + m = re.compile('[\?\&]gsessionid=(\w*)').search(location) + if m is not None: + self.__gsessionid = m.group(1) + return GDataService.Delete(self, location, extra_headers, + url_params, escape_params, redirects_remaining - 1) + else: + raise RequestError, {'status': server_response.status, + 'reason': '302 received without Location header', + 'body': result_body} + else: + raise RequestError, {'status': server_response.status, + 'reason': 'Redirect received, but redirects_remaining <= 0', + 'body': result_body} + else: + raise RequestError, {'status': server_response.status, + 'reason': server_response.reason, 'body': result_body} + + +def ExtractToken(url, scopes_included_in_next=True): + """Gets the AuthSub token from the current page's URL. + + Designed to be used on the URL that the browser is sent to after the user + authorizes this application at the page given by GenerateAuthSubRequestUrl. + + Args: + url: The current page's URL. It should contain the token as a URL + parameter. Example: 'http://example.com/?...&token=abcd435' + scopes_included_in_next: If True, this function looks for a scope value + associated with the token. The scope is a URL parameter with the + key set to SCOPE_URL_PARAM_NAME. This parameter should be present + if the AuthSub request URL was generated using + GenerateAuthSubRequestUrl with include_scope_in_next set to True. 
+ + Returns: + A tuple containing the token string and a list of scope strings for which + this token should be valid. If the scope was not included in the URL, the + tuple will contain (token, None). + """ + parsed = urlparse.urlparse(url) + token = gdata.auth.AuthSubTokenFromUrl(parsed[4]) + scopes = '' + if scopes_included_in_next: + for pair in parsed[4].split('&'): + if pair.startswith('%s=' % SCOPE_URL_PARAM_NAME): + scopes = urllib.unquote_plus(pair.split('=')[1]) + return (token, scopes.split(' ')) + + +def GenerateAuthSubRequestUrl(next, scopes, hd='default', secure=False, + session=True, request_url='http://www.google.com/accounts/AuthSubRequest', + include_scopes_in_next=True): + """Creates a URL to request an AuthSub token to access Google services. + + For more details on AuthSub, see the documentation here: + http://code.google.com/apis/accounts/docs/AuthSub.html + + Args: + next: The URL where the browser should be sent after the user authorizes + the application. This page is responsible for receiving the token + which is embeded in the URL as a parameter. + scopes: The base URL to which access will be granted. Example: + 'http://www.google.com/calendar/feeds' will grant access to all + URLs in the Google Calendar data API. If you would like a token for + multiple scopes, pass in a list of URL strings. + hd: The domain to which the user's account belongs. This is set to the + domain name if you are using Google Apps. Example: 'example.org' + Defaults to 'default' + secure: If set to True, all requests should be signed. The default is + False. + session: If set to True, the token received by the 'next' URL can be + upgraded to a multiuse session token. If session is set to False, the + token may only be used once and cannot be upgraded. Default is True. + request_url: The base of the URL to which the user will be sent to + authorize this application to access their data. The default is + 'http://www.google.com/accounts/AuthSubRequest'. 
+ include_scopes_in_next: Boolean if set to true, the 'next' parameter will + be modified to include the requested scope as a URL parameter. The + key for the next's scope parameter will be SCOPE_URL_PARAM_NAME. The + benefit of including the scope URL as a parameter to the next URL, is + that the page which receives the AuthSub token will be able to tell + which URLs the token grants access to. + + Returns: + A URL string to which the browser should be sent. + """ + if isinstance(scopes, list): + scope = ' '.join(scopes) + else: + scope = scopes + if include_scopes_in_next: + if next.find('?') > -1: + next += '&%s' % urllib.urlencode({SCOPE_URL_PARAM_NAME:scope}) + else: + next += '?%s' % urllib.urlencode({SCOPE_URL_PARAM_NAME:scope}) + return gdata.auth.GenerateAuthSubUrl(next=next, scope=scope, secure=secure, + session=session, request_url=request_url, domain=hd) + + +class Query(dict): + """Constructs a query URL to be used in GET requests + + Url parameters are created by adding key-value pairs to this object as a + dict. For example, to add &max-results=25 to the URL do + my_query['max-results'] = 25 + + Category queries are created by adding category strings to the categories + member. All items in the categories list will be concatenated with the / + symbol (symbolizing a category x AND y restriction). If you would like to OR + 2 categories, append them as one string with a | between the categories. + For example, do query.categories.append('Fritz|Laurie') to create a query + like this feed/-/Fritz%7CLaurie . This query will look for results in both + categories. + """ + + def __init__(self, feed=None, text_query=None, params=None, + categories=None): + """Constructor for Query + + Args: + feed: str (optional) The path for the feed (Examples: + '/base/feeds/snippets' or 'calendar/feeds/jo@gmail.com/private/full' + text_query: str (optional) The contents of the q query parameter. The + contents of the text_query are URL escaped upon conversion to a URI. 
+ params: dict (optional) Parameter value string pairs which become URL + params when translated to a URI. These parameters are added to the + query's items (key-value pairs). + categories: list (optional) List of category strings which should be + included as query categories. See + http://code.google.com/apis/gdata/reference.html#Queries for + details. If you want to get results from category A or B (both + categories), specify a single list item 'A|B'. + """ + + self.feed = feed + self.categories = [] + if text_query: + self.text_query = text_query + if isinstance(params, dict): + for param in params: + self[param] = params[param] + if isinstance(categories, list): + for category in categories: + self.categories.append(category) + + def _GetTextQuery(self): + if 'q' in self.keys(): + return self['q'] + else: + return None + + def _SetTextQuery(self, query): + self['q'] = query + + text_query = property(_GetTextQuery, _SetTextQuery, + doc="""The feed query's q parameter""") + + def _GetAuthor(self): + if 'author' in self.keys(): + return self['author'] + else: + return None + + def _SetAuthor(self, query): + self['author'] = query + + author = property(_GetAuthor, _SetAuthor, + doc="""The feed query's author parameter""") + + def _GetAlt(self): + if 'alt' in self.keys(): + return self['alt'] + else: + return None + + def _SetAlt(self, query): + self['alt'] = query + + alt = property(_GetAlt, _SetAlt, + doc="""The feed query's alt parameter""") + + def _GetUpdatedMin(self): + if 'updated-min' in self.keys(): + return self['updated-min'] + else: + return None + + def _SetUpdatedMin(self, query): + self['updated-min'] = query + + updated_min = property(_GetUpdatedMin, _SetUpdatedMin, + doc="""The feed query's updated-min parameter""") + + def _GetUpdatedMax(self): + if 'updated-max' in self.keys(): + return self['updated-max'] + else: + return None + + def _SetUpdatedMax(self, query): + self['updated-max'] = query + + updated_max = property(_GetUpdatedMax, 
_SetUpdatedMax, + doc="""The feed query's updated-max parameter""") + + def _GetPublishedMin(self): + if 'published-min' in self.keys(): + return self['published-min'] + else: + return None + + def _SetPublishedMin(self, query): + self['published-min'] = query + + published_min = property(_GetPublishedMin, _SetPublishedMin, + doc="""The feed query's published-min parameter""") + + def _GetPublishedMax(self): + if 'published-max' in self.keys(): + return self['published-max'] + else: + return None + + def _SetPublishedMax(self, query): + self['published-max'] = query + + published_max = property(_GetPublishedMax, _SetPublishedMax, + doc="""The feed query's published-max parameter""") + + def _GetStartIndex(self): + if 'start-index' in self.keys(): + return self['start-index'] + else: + return None + + def _SetStartIndex(self, query): + if not isinstance(query, str): + query = str(query) + self['start-index'] = query + + start_index = property(_GetStartIndex, _SetStartIndex, + doc="""The feed query's start-index parameter""") + + def _GetMaxResults(self): + if 'max-results' in self.keys(): + return self['max-results'] + else: + return None + + def _SetMaxResults(self, query): + if not isinstance(query, str): + query = str(query) + self['max-results'] = query + + max_results = property(_GetMaxResults, _SetMaxResults, + doc="""The feed query's max-results parameter""") + + def _GetOrderBy(self): + if 'orderby' in self.keys(): + return self['orderby'] + else: + return None + + def _SetOrderBy(self, query): + self['orderby'] = query + + orderby = property(_GetOrderBy, _SetOrderBy, + doc="""The feed query's orderby parameter""") + + def ToUri(self): + q_feed = self.feed or '' + category_string = '/'.join( + [urllib.quote_plus(c) for c in self.categories]) + # Add categories to the feed if there are any. 
+ if len(self.categories) > 0: + q_feed = q_feed + '/-/' + category_string + return atom.service.BuildUri(q_feed, self) + + def __str__(self): + return self.ToUri() diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/spreadsheet/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/spreadsheet/__init__.py new file mode 100644 index 0000000..25ec13a --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/spreadsheet/__init__.py @@ -0,0 +1,474 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains extensions to Atom objects used with Google Spreadsheets. +""" + +__author__ = 'api.laurabeth@gmail.com (Laura Beth Lincoln)' + + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import atom +import gdata +import re +import string + + +# XML namespaces which are often used in Google Spreadsheets entities. 
+GSPREADSHEETS_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006' +GSPREADSHEETS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s' + +GSPREADSHEETS_EXTENDED_NAMESPACE = ('http://schemas.google.com/spreadsheets' + '/2006/extended') +GSPREADSHEETS_EXTENDED_TEMPLATE = ('{http://schemas.google.com/spreadsheets' + '/2006/extended}%s') + + +class ColCount(atom.AtomBase): + """The Google Spreadsheets colCount element """ + + _tag = 'colCount' + _namespace = GSPREADSHEETS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def ColCountFromString(xml_string): + return atom.CreateClassFromXMLString(ColCount, xml_string) + + +class RowCount(atom.AtomBase): + """The Google Spreadsheets rowCount element """ + + _tag = 'rowCount' + _namespace = GSPREADSHEETS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +def RowCountFromString(xml_string): + return atom.CreateClassFromXMLString(RowCount, xml_string) + + +class Cell(atom.AtomBase): + """The Google Spreadsheets cell element """ + + _tag = 'cell' + _namespace = GSPREADSHEETS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['row'] = 'row' + _attributes['col'] = 'col' + _attributes['inputValue'] = 'inputValue' + _attributes['numericValue'] = 'numericValue' + + def __init__(self, text=None, row=None, col=None, inputValue=None, + numericValue=None, extension_elements=None, extension_attributes=None): + 
self.text = text + self.row = row + self.col = col + self.inputValue = inputValue + self.numericValue = numericValue + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def CellFromString(xml_string): + return atom.CreateClassFromXMLString(Cell, xml_string) + + +class Custom(atom.AtomBase): + """The Google Spreadsheets custom element""" + + _namespace = GSPREADSHEETS_EXTENDED_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, column=None, text=None, extension_elements=None, + extension_attributes=None): + self.column = column # The name of the column + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + def _BecomeChildElement(self, tree): + new_child = ElementTree.Element('') + tree.append(new_child) + new_child.tag = '{%s}%s' % (self.__class__._namespace, + self.column) + self._AddMembersToElementTree(new_child) + + def _ToElementTree(self): + new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace, + self.column)) + self._AddMembersToElementTree(new_tree) + return new_tree + + def _HarvestElementTree(self, tree): + namespace_uri, local_tag = string.split(tree.tag[1:], "}", 1) + self.column = local_tag + # Fill in the instance members from the contents of the XML tree. 
+ for child in tree: + self._ConvertElementTreeToMember(child) + for attribute, value in tree.attrib.iteritems(): + self._ConvertElementAttributeToMember(attribute, value) + self.text = tree.text + + +def CustomFromString(xml_string): + element_tree = ElementTree.fromstring(xml_string) + return _CustomFromElementTree(element_tree) + + +def _CustomFromElementTree(element_tree): + namespace_uri, local_tag = string.split(element_tree.tag[1:], "}", 1) + if namespace_uri == GSPREADSHEETS_EXTENDED_NAMESPACE: + new_custom = Custom() + new_custom._HarvestElementTree(element_tree) + new_custom.column = local_tag + return new_custom + return None + + + + + +class SpreadsheetsSpreadsheet(gdata.GDataEntry): + """A Google Spreadsheets flavor of a Spreadsheet Atom Entry """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, control=None, updated=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.control = control + self.title = title + self.updated = updated + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SpreadsheetsSpreadsheetFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsSpreadsheet, + xml_string) + + +class SpreadsheetsWorksheet(gdata.GDataEntry): + """A Google Spreadsheets flavor of a Worksheet Atom Entry """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = 
gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}rowCount' % GSPREADSHEETS_NAMESPACE] = ('row_count', + RowCount) + _children['{%s}colCount' % GSPREADSHEETS_NAMESPACE] = ('col_count', + ColCount) + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, control=None, updated=None, + row_count=None, col_count=None, text=None, extension_elements=None, + extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.control = control + self.title = title + self.updated = updated + self.row_count = row_count + self.col_count = col_count + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SpreadsheetsWorksheetFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsWorksheet, + xml_string) + + +class SpreadsheetsCell(gdata.BatchEntry): + """A Google Spreadsheets flavor of a Cell Atom Entry """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.BatchEntry._children.copy() + _attributes = gdata.BatchEntry._attributes.copy() + _children['{%s}cell' % GSPREADSHEETS_NAMESPACE] = ('cell', Cell) + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, control=None, updated=None, + cell=None, batch_operation=None, batch_id=None, batch_status=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = 
contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.control = control + self.title = title + self.batch_operation = batch_operation + self.batch_id = batch_id + self.batch_status = batch_status + self.updated = updated + self.cell = cell + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SpreadsheetsCellFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsCell, + xml_string) + + +class SpreadsheetsList(gdata.GDataEntry): + """A Google Spreadsheets flavor of a List Atom Entry """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, control=None, updated=None, + custom=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.control = control + self.title = title + self.updated = updated + self.custom = custom or {} + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + # We need to overwrite _ConvertElementTreeToMember to add special logic to + # convert custom attributes to members + def _ConvertElementTreeToMember(self, child_tree): + # Find the element's tag in this class's list of child members + if self.__class__._children.has_key(child_tree.tag): + member_name = 
self.__class__._children[child_tree.tag][0] + member_class = self.__class__._children[child_tree.tag][1] + # If the class member is supposed to contain a list, make sure the + # matching member is set to a list, then append the new member + # instance to the list. + if isinstance(member_class, list): + if getattr(self, member_name) is None: + setattr(self, member_name, []) + getattr(self, member_name).append(atom._CreateClassFromElementTree( + member_class[0], child_tree)) + else: + setattr(self, member_name, + atom._CreateClassFromElementTree(member_class, child_tree)) + elif child_tree.tag.find('{%s}' % GSPREADSHEETS_EXTENDED_NAMESPACE) == 0: + # If this is in the custom namespace, make add it to the custom dict. + name = child_tree.tag[child_tree.tag.index('}')+1:] + custom = _CustomFromElementTree(child_tree) + if custom: + self.custom[name] = custom + else: + ExtensionContainer._ConvertElementTreeToMember(self, child_tree) + + # We need to overwtite _AddMembersToElementTree to add special logic to + # convert custom members to XML nodes. + def _AddMembersToElementTree(self, tree): + # Convert the members of this class which are XML child nodes. + # This uses the class's _children dictionary to find the members which + # should become XML child nodes. + member_node_names = [values[0] for tag, values in + self.__class__._children.iteritems()] + for member_name in member_node_names: + member = getattr(self, member_name) + if member is None: + pass + elif isinstance(member, list): + for instance in member: + instance._BecomeChildElement(tree) + else: + member._BecomeChildElement(tree) + # Convert the members of this class which are XML attributes. 
+ for xml_attribute, member_name in self.__class__._attributes.iteritems(): + member = getattr(self, member_name) + if member is not None: + tree.attrib[xml_attribute] = member + # Convert all special custom item attributes to nodes + for name, custom in self.custom.iteritems(): + custom._BecomeChildElement(tree) + # Lastly, call the ExtensionContainers's _AddMembersToElementTree to + # convert any extension attributes. + atom.ExtensionContainer._AddMembersToElementTree(self, tree) + + +def SpreadsheetsListFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsList, + xml_string) + element_tree = ElementTree.fromstring(xml_string) + return _SpreadsheetsListFromElementTree(element_tree) + + +class SpreadsheetsSpreadsheetsFeed(gdata.GDataFeed): + """A feed containing Google Spreadsheets Spreadsheets""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [SpreadsheetsSpreadsheet]) + + +def SpreadsheetsSpreadsheetsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsSpreadsheetsFeed, + xml_string) + + +class SpreadsheetsWorksheetsFeed(gdata.GDataFeed): + """A feed containing Google Spreadsheets Spreadsheets""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [SpreadsheetsWorksheet]) + + +def SpreadsheetsWorksheetsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsWorksheetsFeed, + xml_string) + + +class SpreadsheetsCellsFeed(gdata.BatchFeed): + """A feed containing Google Spreadsheets Cells""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.BatchFeed._children.copy() + _attributes = gdata.BatchFeed._attributes.copy() + _children['{%s}entry' % 
atom.ATOM_NAMESPACE] = ('entry', + [SpreadsheetsCell]) + _children['{%s}rowCount' % GSPREADSHEETS_NAMESPACE] = ('row_count', + RowCount) + _children['{%s}colCount' % GSPREADSHEETS_NAMESPACE] = ('col_count', + ColCount) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None, row_count=None, + col_count=None, interrupted=None): + gdata.BatchFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text, interrupted=interrupted) + self.row_count = row_count + self.col_count = col_count + + def GetBatchLink(self): + for link in self.link: + if link.rel == 'http://schemas.google.com/g/2005#batch': + return link + return None + + +def SpreadsheetsCellsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsCellsFeed, + xml_string) + + +class SpreadsheetsListFeed(gdata.GDataFeed): + """A feed containing Google Spreadsheets Spreadsheets""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [SpreadsheetsList]) + + +def SpreadsheetsListFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsListFeed, + xml_string) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/spreadsheet/service.py 
b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/spreadsheet/service.py new file mode 100644 index 0000000..3109a1b --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/spreadsheet/service.py @@ -0,0 +1,467 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""SpreadsheetsService extends the GDataService to streamline Google +Spreadsheets operations. + + GBaseService: Provides methods to query feeds and manipulate items. Extends + GDataService. + + DictionaryToParamList: Function which converts a dictionary into a list of + URL arguments (represented as strings). This is a + utility function used in CRUD operations. 
+""" + +__author__ = 'api.laurabeth@gmail.com (Laura Beth Lincoln)' + + +import gdata +import atom.service +import gdata.service +import gdata.spreadsheet +import atom + + +class Error(Exception): + """Base class for exceptions in this module.""" + pass + + +class RequestError(Error): + pass + + +class SpreadsheetsService(gdata.service.GDataService): + """Client for the Google Spreadsheets service.""" + + def __init__(self, email=None, password=None, source=None, + server='spreadsheets.google.com', + additional_headers=None): + gdata.service.GDataService.__init__(self, email=email, password=password, + service='wise', source=source, + server=server, + additional_headers=additional_headers) + + def GetSpreadsheetsFeed(self, key=None, query=None, visibility='private', + projection='full'): + """Gets a spreadsheets feed or a specific entry if a key is defined + Args: + key: string (optional) The spreadsheet key defined in /ccc?key= + query: DocumentQuery (optional) Query parameters + + Returns: + If there is no key, then a SpreadsheetsSpreadsheetsFeed. + If there is a key, then a SpreadsheetsSpreadsheet. + """ + + uri = ('http://%s/feeds/spreadsheets/%s/%s' + % (self.server, visibility, projection)) + + if key is not None: + uri = '%s/%s' % (uri, key) + + if query != None: + query.feed = uri + uri = query.ToUri() + + if key: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsSpreadsheetFromString) + else: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsSpreadsheetsFeedFromString) + + def GetWorksheetsFeed(self, key, wksht_id=None, query=None, + visibility='private', projection='full'): + """Gets a worksheets feed or a specific entry if a wksht is defined + Args: + key: string The spreadsheet key defined in /ccc?key= + wksht_id: string (optional) The id for a specific worksheet entry + query: DocumentQuery (optional) Query parameters + + Returns: + If there is no wksht_id, then a SpreadsheetsWorksheetsFeed. 
+ If there is a wksht_id, then a SpreadsheetsWorksheet. + """ + + uri = ('http://%s/feeds/worksheets/%s/%s/%s' + % (self.server, key, visibility, projection)) + + if wksht_id != None: + uri = '%s/%s' % (uri, wksht_id) + + if query != None: + query.feed = uri + uri = query.ToUri() + + if wksht_id: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString) + else: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsWorksheetsFeedFromString) + + def AddWorksheet(self, title, row_count, col_count, key): + """Creates a new worksheet in the desired spreadsheet. + + The new worksheet is appended to the end of the list of worksheets. The + new worksheet will only have the available number of columns and cells + specified. + + Args: + title: str The title which will be displayed in the list of worksheets. + row_count: int or str The number of rows in the new worksheet. + col_count: int or str The number of columns in the new worksheet. + key: str The spreadsheet key to the spreadsheet to which the new + worksheet should be added. + + Returns: + A SpreadsheetsWorksheet if the new worksheet was created succesfully. + """ + new_worksheet = gdata.spreadsheet.SpreadsheetsWorksheet( + title=atom.Title(text=title), + row_count=gdata.spreadsheet.RowCount(text=str(row_count)), + col_count=gdata.spreadsheet.ColCount(text=str(col_count))) + return self.Post(new_worksheet, + 'http://%s/feeds/worksheets/%s/private/full' % (self.server, key), + converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString) + + def UpdateWorksheet(self, worksheet_entry, url=None): + """Changes the size and/or title of the desired worksheet. + + Args: + worksheet_entry: SpreadsheetWorksheet The new contents of the + worksheet. + url: str (optional) The URL to which the edited worksheet entry should + be sent. If the url is None, the edit URL from the worksheet will + be used. + + Returns: + A SpreadsheetsWorksheet with the new information about the worksheet. 
+ """ + target_url = url or worksheet_entry.GetEditLink().href + return self.Put(worksheet_entry, target_url, + converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString) + + def DeleteWorksheet(self, worksheet_entry=None, url=None): + """Removes the desired worksheet from the spreadsheet + + Args: + worksheet_entry: SpreadsheetWorksheet (optional) The worksheet to + be deleted. If this is none, then the DELETE reqest is sent to + the url specified in the url parameter. + url: str (optaional) The URL to which the DELETE request should be + sent. If left as None, the worksheet's edit URL is used. + + Returns: + True if the worksheet was deleted successfully. + """ + if url: + target_url = url + else: + target_url = worksheet_entry.GetEditLink().href + return self.Delete(target_url) + + def GetCellsFeed(self, key, wksht_id='default', cell=None, query=None, + visibility='private', projection='full'): + """Gets a cells feed or a specific entry if a cell is defined + Args: + key: string The spreadsheet key defined in /ccc?key= + wksht_id: string The id for a specific worksheet entry + cell: string (optional) The R1C1 address of the cell + query: DocumentQuery (optional) Query parameters + + Returns: + If there is no cell, then a SpreadsheetsCellsFeed. + If there is a cell, then a SpreadsheetsCell. 
+ """ + + uri = ('http://%s/feeds/cells/%s/%s/%s/%s' + % (self.server, key, wksht_id, visibility, projection)) + + if cell != None: + uri = '%s/%s' % (uri, cell) + + if query != None: + query.feed = uri + uri = query.ToUri() + + if cell: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsCellFromString) + else: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString) + + def GetListFeed(self, key, wksht_id='default', row_id=None, query=None, + visibility='private', projection='full'): + """Gets a list feed or a specific entry if a row_id is defined + Args: + key: string The spreadsheet key defined in /ccc?key= + wksht_id: string The id for a specific worksheet entry + row_id: string (optional) The row_id of a row in the list + query: DocumentQuery (optional) Query parameters + + Returns: + If there is no row_id, then a SpreadsheetsListFeed. + If there is a row_id, then a SpreadsheetsList. + """ + + uri = ('http://%s/feeds/list/%s/%s/%s/%s' + % (self.server, key, wksht_id, visibility, projection)) + + if row_id is not None: + uri = '%s/%s' % (uri, row_id) + + if query is not None: + query.feed = uri + uri = query.ToUri() + + if row_id: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsListFromString) + else: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsListFeedFromString) + + def UpdateCell(self, row, col, inputValue, key, wksht_id='default'): + """Updates an existing cell. + + Args: + row: int The row the cell to be editted is in + col: int The column the cell to be editted is in + inputValue: str the new value of the cell + key: str The key of the spreadsheet in which this cell resides. + wksht_id: str The ID of the worksheet which holds this cell. 
+ + Returns: + The updated cell entry + """ + row = str(row) + col = str(col) + # make the new cell + new_cell = gdata.spreadsheet.Cell(row=row, col=col, inputValue=inputValue) + # get the edit uri and PUT + cell = 'R%sC%s' % (row, col) + entry = self.GetCellsFeed(key, wksht_id, cell) + for a_link in entry.link: + if a_link.rel == 'edit': + entry.cell = new_cell + return self.Put(entry, a_link.href, + converter=gdata.spreadsheet.SpreadsheetsCellFromString) + + def _GenerateCellsBatchUrl(self, spreadsheet_key, worksheet_id): + return ('http://spreadsheets.google.com/feeds/cells/%s/%s/' + 'private/full/batch' % (spreadsheet_key, worksheet_id)) + + def ExecuteBatch(self, batch_feed, url=None, spreadsheet_key=None, + worksheet_id=None, + converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString): + """Sends a batch request feed to the server. + + The batch request needs to be sent to the batch URL for a particular + worksheet. You can specify the worksheet by providing the spreadsheet_key + and worksheet_id, or by sending the URL from the cells feed's batch link. + + Args: + batch_feed: gdata.spreadsheet.SpreadsheetsCellFeed A feed containing + BatchEntry elements which contain the desired CRUD operation and + any necessary data to modify a cell. + url: str (optional) The batch URL for the cells feed to which these + changes should be applied. This can be found by calling + cells_feed.GetBatchLink().href. + spreadsheet_key: str (optional) Used to generate the batch request URL + if the url argument is None. If using the spreadsheet key to + generate the URL, the worksheet id is also required. + worksheet_id: str (optional) Used if the url is not provided, it is + oart of the batch feed target URL. This is used with the spreadsheet + key. + converter: Function (optional) Function to be executed on the server's + response. This function should take one string as a parameter. 
The + default value is SpreadsheetsCellsFeedFromString which will turn the result + into a gdata.base.GBaseItem object. + + Returns: + A gdata.BatchFeed containing the results. + """ + + if url is None: + url = self._GenerateCellsBatchUrl(spreadsheet_key, worksheet_id) + return self.Post(batch_feed, url, converter=converter) + + def InsertRow(self, row_data, key, wksht_id='default'): + """Inserts a new row with the provided data + + Args: + uri: string The post uri of the list feed + row_data: dict A dictionary of column header to row data + + Returns: + The inserted row + """ + new_entry = gdata.spreadsheet.SpreadsheetsList() + for k, v in row_data.iteritems(): + new_custom = gdata.spreadsheet.Custom() + new_custom.column = k + new_custom.text = v + new_entry.custom[new_custom.column] = new_custom + # Generate the post URL for the worksheet which will receive the new entry. + post_url = 'http://spreadsheets.google.com/feeds/list/%s/%s/private/full'%( + key, wksht_id) + return self.Post(new_entry, post_url, + converter=gdata.spreadsheet.SpreadsheetsListFromString) + + def UpdateRow(self, entry, new_row_data): + """Updates a row with the provided data + + Args: + entry: gdata.spreadsheet.SpreadsheetsList The entry to be updated + new_row_data: dict A dictionary of column header to row data + + Returns: + The updated row + """ + entry.custom = {} + for k, v in new_row_data.iteritems(): + new_custom = gdata.spreadsheet.Custom() + new_custom.column = k + new_custom.text = v + entry.custom[k] = new_custom + for a_link in entry.link: + if a_link.rel == 'edit': + return self.Put(entry, a_link.href, + converter=gdata.spreadsheet.SpreadsheetsListFromString) + + def DeleteRow(self, entry): + """Deletes a row, the provided entry + + Args: + entry: gdata.spreadsheet.SpreadsheetsList The row to be deleted + + Returns: + The delete response + """ + for a_link in entry.link: + if a_link.rel == 'edit': + return self.Delete(a_link.href) + + +class 
def _dict_query_property(param, doc):
  """Build a property that proxies one query parameter stored on the Query.

  gdata.service.Query behaves like a dict of URL parameters; each generated
  property simply reads and writes self[param].
  """
  def getter(self):
    return self[param]

  def setter(self, value):
    self[param] = value

  return property(getter, setter, doc=doc)


class DocumentQuery(gdata.service.Query):
  """Query against the spreadsheets (documents) feed."""

  title = _dict_query_property('title', """The title query parameter""")
  title_exact = _dict_query_property('title-exact',
                                     """The title-exact query parameter""")


class CellQuery(gdata.service.Query):
  """Query against a worksheet's cells feed."""

  min_row = _dict_query_property('min-row',
                                 """The min-row query parameter""")
  max_row = _dict_query_property('max-row',
                                 """The max-row query parameter""")
  min_col = _dict_query_property('min-col',
                                 """The min-col query parameter""")
  max_col = _dict_query_property('max-col',
                                 """The max-col query parameter""")
  range = _dict_query_property('range', """The range query parameter""")
  return_empty = _dict_query_property('return-empty',
                                      """The return-empty query parameter""")
class ListQuery(gdata.service.Query):
  """Query against a worksheet's list (rows) feed.

  Exposes the sq (structured query), orderby and reverse parameters.
  """

  def _GetSpreadsheetQuery(self):
    return self['sq']

  def _SetSpreadsheetQuery(self, list_query):
    self['sq'] = list_query

  sq = property(_GetSpreadsheetQuery, _SetSpreadsheetQuery,
                doc="""The sq query parameter""")

  def _GetOrderByQuery(self):
    return self['orderby']

  def _SetOrderByQuery(self, list_query):
    self['orderby'] = list_query

  orderby = property(_GetOrderByQuery, _SetOrderByQuery,
                     doc="""The orderby query parameter""")

  def _GetReverseQuery(self):
    return self['reverse']

  def _SetReverseQuery(self, list_query):
    self['reverse'] = list_query

  reverse = property(_GetReverseQuery, _SetReverseQuery,
                     doc="""The reverse query parameter""")


# ===== Next file in this changeset: gdata/spreadsheet/text_db.py =====
#!/usr/bin/python
#
# Copyright Google 2007-2008, all rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE(review): the module docstring originally appeared *after* the import
# block, where it is just a no-op string literal; placed first so it actually
# becomes the module's __doc__.
"""Make the Google Documents API feel more like using a database.

This module contains a client and other classes which make working with the
Google Documents List Data API and the Google Spreadsheets Data API look a
bit more like working with a hierarchical database. Using the DatabaseClient,
you can create or find spreadsheets and use them like a database, with
worksheets representing tables and rows representing records.

Example Usage:
# Create a new database, a new table, and add records.
client = gdata.spreadsheet.text_db.DatabaseClient(username='jo@example.com',
    password='12345')
database = client.CreateDatabase('My Text Database')
table = database.CreateTable('addresses', ['name','email',
    'phonenumber', 'mailingaddress'])
record = table.AddRecord({'name':'Bob', 'email':'bob@example.com',
    'phonenumber':'555-555-1234', 'mailingaddress':'900 Imaginary St.'})

# Edit a record
record.content['email'] = 'bob2@example.com'
record.Push()

# Delete a table
table.Delete()

Warnings:
Care should be exercised when using this module on spreadsheets
which contain formulas. This module treats all rows as containing text and
updating a row will overwrite any formula with the output of the formula.
The intended use case is to allow easy storage of text data in a spreadsheet.

Classes:
  Error: Domain specific extension of Exception.
  BadCredentials: Error raised if username or password was incorrect.
  CaptchaRequired: Raised if a login attempt failed and a CAPTCHA challenge
      was issued.
  DatabaseClient: Communicates with Google Docs APIs servers.
  Database: Represents a spreadsheet and interacts with tables.
  Table: Represents a worksheet and interacts with records.
  RecordResultSet: A list of records in a table.
  Record: Represents a row in a worksheet, allows manipulation of text data.
"""

import StringIO
import gdata
import gdata.service
import gdata.spreadsheet
import gdata.spreadsheet.service
import gdata.docs
import gdata.docs.service


__author__ = 'api.jscudder (Jeffrey Scudder)'


class Error(Exception):
  """Base exception for this module."""


class BadCredentials(Error):
  """Raised when the username or password was incorrect."""


class CaptchaRequired(Error):
  """Raised when a login attempt failed and a CAPTCHA challenge was issued."""


class DatabaseClient(object):
  """Allows creation and finding of Google Spreadsheets databases.

  The DatabaseClient simplifies the process of creating and finding Google
  Spreadsheets and will talk to both the Google Spreadsheets API and the
  Google Documents List API.
  """

  def __init__(self, username=None, password=None):
    """Constructor for a Database Client.

    If the username and password are present, the constructor will contact
    the Google servers to authenticate.

    Args:
      username: str (optional) Example: jo@example.com
      password: str (optional)
    """
    self.__docs_client = gdata.docs.service.DocsService()
    self.__spreadsheets_client = (
        gdata.spreadsheet.service.SpreadsheetsService())
    self.SetCredentials(username, password)

  def SetCredentials(self, username, password):
    """Attempts to log in to Google APIs using the provided credentials.

    If the username or password are None, the client will not request auth
    tokens.

    Args:
      username: str (optional) Example: jo@example.com
      password: str (optional)

    Raises:
      CaptchaRequired: if Google issued a CAPTCHA challenge.
      BadCredentials: if the username or password was incorrect.
    """
    self.__docs_client.email = username
    self.__docs_client.password = password
    self.__spreadsheets_client.email = username
    self.__spreadsheets_client.password = password
    if username and password:
      try:
        self.__docs_client.ProgrammaticLogin()
        self.__spreadsheets_client.ProgrammaticLogin()
      except gdata.service.CaptchaRequired:
        raise CaptchaRequired('Please visit https://www.google.com/accounts/'
                              'DisplayUnlockCaptcha to unlock your account.')
      except gdata.service.BadAuthentication:
        raise BadCredentials('Username or password incorrect.')
+ + Returns: + A Database instance representing the new spreadsheet. + """ + # Create a Google Spreadsheet to form the foundation of this database. + # Spreadsheet is created by uploading a file to the Google Documents + # List API. + virtual_csv_file = StringIO.StringIO(',,,') + virtual_media_source = gdata.MediaSource(file_handle=virtual_csv_file, content_type='text/csv', content_length=3) + db_entry = self.__docs_client.UploadSpreadsheet(virtual_media_source, name) + return Database(spreadsheet_entry=db_entry, database_client=self) + + def GetDatabases(self, spreadsheet_key=None, name=None): + """Finds spreadsheets which have the unique key or title. + + If querying on the spreadsheet_key there will be at most one result, but + searching by name could yield multiple results. + + Args: + spreadsheet_key: str The unique key for the spreadsheet, this + usually in the the form 'pk23...We' or 'o23...423.12,,,3'. + name: str The title of the spreadsheets. + + Returns: + A list of Database objects representing the desired spreadsheets. + """ + if spreadsheet_key: + db_entry = self.__docs_client.GetDocumentListEntry( + r'/feeds/documents/private/full/spreadsheet%3A' + spreadsheet_key) + return [Database(spreadsheet_entry=db_entry, database_client=self)] + else: + title_query = gdata.docs.service.DocumentQuery() + title_query['title'] = name + db_feed = self.__docs_client.QueryDocumentListFeed(title_query.ToUri()) + matching_databases = [] + for entry in db_feed.entry: + matching_databases.append(Database(spreadsheet_entry=entry, + database_client=self)) + return matching_databases + + def _GetDocsClient(self): + return self.__docs_client + + def _GetSpreadsheetsClient(self): + return self.__spreadsheets_client + + +class Database(object): + """Provides interface to find and create tables. + + The database represents a Google Spreadsheet. + """ + + def __init__(self, spreadsheet_entry=None, database_client=None): + """Constructor for a database object. 
+ + Args: + spreadsheet_entry: gdata.docs.DocumentListEntry The + Atom entry which represents the Google Spreadsheet. The + spreadsheet's key is extracted from the entry and stored as a + member. + database_client: DatabaseClient A client which can talk to the + Google Spreadsheets servers to perform operations on worksheets + within this spreadsheet. + """ + self.entry = spreadsheet_entry + if self.entry: + id_parts = spreadsheet_entry.id.text.split('/') + self.spreadsheet_key = id_parts[-1].replace('spreadsheet%3A', '') + self.client = database_client + + def CreateTable(self, name, fields=None): + """Add a new worksheet to this spreadsheet and fill in column names. + + Args: + name: str The title of the new worksheet. + fields: list of strings The column names which are placed in the + first row of this worksheet. These names are converted into XML + tags by the server. To avoid changes during the translation + process I recommend using all lowercase alphabetic names. For + example ['somelongname', 'theothername'] + + Returns: + Table representing the newly created worksheet. + """ + worksheet = self.client._GetSpreadsheetsClient().AddWorksheet(title=name, + row_count=1, col_count=len(fields), key=self.spreadsheet_key) + return Table(name=name, worksheet_entry=worksheet, + database_client=self.client, + spreadsheet_key=self.spreadsheet_key, fields=fields) + + def GetTables(self, worksheet_id=None, name=None): + """Searches for a worksheet with the specified ID or name. + + The list of results should have one table at most, or no results + if the id or name were not found. + + Args: + worksheet_id: str The ID of the worksheet, example: 'od6' + name: str The title of the worksheet. + + Returns: + A list of length 0 or 1 containing the desired Table. A list is returned + to make this method feel like GetDatabases and GetRecords. 
# NOTE(review): reconstructed from a whitespace-mangled diff.  GetTables and
# Delete are the remaining methods of the Database class, written at module
# level with an explicit `self` (the Database instance).

def GetTables(self, worksheet_id=None, name=None):
  """Searches for a worksheet with the specified ID or name.

  The list of results should have one table at most, or no results
  if the id or name were not found.

  Args:
    worksheet_id: str The ID of the worksheet, example: 'od6'
    name: str The title of the worksheet.

  Returns:
    A list of length 0 or 1 containing the desired Table. A list is
    returned to make this method feel like GetDatabases and GetRecords.
  """
  if worksheet_id:
    worksheet_entry = self.client._GetSpreadsheetsClient().GetWorksheetsFeed(
        self.spreadsheet_key, wksht_id=worksheet_id)
    return [Table(name=worksheet_entry.title.text,
                  worksheet_entry=worksheet_entry,
                  database_client=self.client,
                  spreadsheet_key=self.spreadsheet_key)]
  query = None
  if name:
    query = gdata.spreadsheet.service.DocumentQuery()
    query.title = name
  worksheet_feed = self.client._GetSpreadsheetsClient().GetWorksheetsFeed(
      self.spreadsheet_key, query=query)
  return [Table(name=entry.title.text, worksheet_entry=entry,
                database_client=self.client,
                spreadsheet_key=self.spreadsheet_key)
          for entry in worksheet_feed.entry]

def Delete(self):
  """Deletes the entire database spreadsheet from Google Spreadsheets."""
  entry = self.client._GetDocsClient().Get(
      r'http://docs.google.com/feeds/documents/private/full/spreadsheet%3A'
      + self.spreadsheet_key)
  self.client._GetDocsClient().Delete(entry.GetEditLink().href)


class Table(object):
  """Represents one worksheet: a 'table' of records inside a spreadsheet."""

  def __init__(self, name=None, worksheet_entry=None, database_client=None,
               spreadsheet_key=None, fields=None):
    """Constructor for a table (worksheet) object.

    Args:
      name: str The title of the worksheet.
      worksheet_entry: The Atom entry for the worksheet; the worksheet id
          is extracted from its Atom id.
      database_client: DatabaseClient Used to talk to the servers.
      spreadsheet_key: str Key of the spreadsheet containing the worksheet.
      fields: list of strings (optional) If given, written as the column
          headers via SetFields.
    """
    self.name = name
    self.entry = worksheet_entry
    # The worksheet id is the last segment of the entry's Atom id.
    id_parts = worksheet_entry.id.text.split('/')
    self.worksheet_id = id_parts[-1]
    self.spreadsheet_key = spreadsheet_key
    self.client = database_client
    self.fields = fields or []
    if fields:
      self.SetFields(fields)

  def LookupFields(self):
    """Queries to find the column names in the first row of the worksheet.

    Useful when you have retrieved the table from the server and you don't
    know the column names.
    """
    if self.entry:
      first_row_contents = []
      query = gdata.spreadsheet.service.CellQuery()
      query.max_row = '1'
      query.min_row = '1'
      feed = self.client._GetSpreadsheetsClient().GetCellsFeed(
          self.spreadsheet_key, wksht_id=self.worksheet_id, query=query)
      for entry in feed.entry:
        first_row_contents.append(entry.content.text)
      # Follow 'next' links so wide worksheets are fully covered.
      next_link = feed.GetNextLink()
      while next_link:
        feed = self.client._GetSpreadsheetsClient().Get(
            next_link.href,
            converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString)
        for entry in feed.entry:
          first_row_contents.append(entry.content.text)
        next_link = feed.GetNextLink()
      # Convert the contents of the cells to valid headers.
      self.fields = ConvertStringsToColumnHeaders(first_row_contents)

  def SetFields(self, fields):
    """Changes the contents of the cells in the first row of this worksheet.

    Args:
      fields: list of strings The names in the list comprise the first row
          of the worksheet. These names are converted into XML tags by the
          server. To avoid changes during the translation process, use all
          lowercase alphabetic names, for example
          ['somelongname', 'theothername'].
    """
    # TODO: if the table already had fields, we might want to clear out the
    # current column headers.
    self.fields = fields
    # TODO: speed this up by using a batch request to update cells.
    for position, column_name in enumerate(fields):
      self.client._GetSpreadsheetsClient().UpdateCell(
          1, position + 1, column_name, self.spreadsheet_key,
          self.worksheet_id)

  def Delete(self):
    """Deletes this worksheet from the spreadsheet."""
    worksheet = self.client._GetSpreadsheetsClient().GetWorksheetsFeed(
        self.spreadsheet_key, wksht_id=self.worksheet_id)
    self.client._GetSpreadsheetsClient().DeleteWorksheet(
        worksheet_entry=worksheet)

  def AddRecord(self, data):
    """Adds a new row to this worksheet.

    Args:
      data: dict of strings Mapping of string values to column names.

    Returns:
      Record which represents this row of the spreadsheet.
    """
    new_row = self.client._GetSpreadsheetsClient().InsertRow(
        data, self.spreadsheet_key, wksht_id=self.worksheet_id)
    return Record(content=data, row_entry=new_row,
                  spreadsheet_key=self.spreadsheet_key,
                  worksheet_id=self.worksheet_id,
                  database_client=self.client)

  def GetRecord(self, row_id=None, row_number=None):
    """Gets a single record from the worksheet based on row ID or number.

    Args:
      row_id: The ID for the individual row.
      row_number: str or int The position of the desired row. Numbering
          begins at 1, which refers to the second row in the worksheet
          since the first row is used for column names.

    Returns:
      Record for the desired row, or None if not found.
    """
    if row_id:
      row_entry = self.client._GetSpreadsheetsClient().GetListFeed(
          self.spreadsheet_key, wksht_id=self.worksheet_id, row_id=row_id)
      return Record(content=None, row_entry=row_entry,
                    spreadsheet_key=self.spreadsheet_key,
                    worksheet_id=self.worksheet_id,
                    database_client=self.client)
    row_query = gdata.spreadsheet.service.ListQuery()
    row_query.start_index = str(row_number)
    row_query.max_results = '1'
    row_feed = self.client._GetSpreadsheetsClient().GetListFeed(
        self.spreadsheet_key, wksht_id=self.worksheet_id, query=row_query)
    if len(row_feed.entry) >= 1:
      return Record(content=None, row_entry=row_feed.entry[0],
                    spreadsheet_key=self.spreadsheet_key,
                    worksheet_id=self.worksheet_id,
                    database_client=self.client)
    return None

  def GetRecords(self, start_row, end_row):
    """Gets all rows between the start and end row numbers inclusive.

    Args:
      start_row: str or int
      end_row: str or int

    Returns:
      RecordResultSet for the desired rows.
    """
    start_row = int(start_row)
    end_row = int(end_row)
    max_rows = end_row - start_row + 1
    row_query = gdata.spreadsheet.service.ListQuery()
    row_query.start_index = str(start_row)
    row_query.max_results = str(max_rows)
    rows_feed = self.client._GetSpreadsheetsClient().GetListFeed(
        self.spreadsheet_key, wksht_id=self.worksheet_id, query=row_query)
    return RecordResultSet(rows_feed, self.client, self.spreadsheet_key,
                           self.worksheet_id)

  def FindRecords(self, query_string):
    """Performs a query against the worksheet to find rows which match.

    For details on query string syntax see the section on sq under
    http://code.google.com/apis/spreadsheets/reference.html#list_Parameters

    Args:
      query_string: str Examples: 'name == john' to find all rows with john
          in the name column, '(cost < 19.50 and name != toy) or cost > 500'

    Returns:
      RecordResultSet with the first group of matches.
    """
    row_query = gdata.spreadsheet.service.ListQuery()
    row_query.sq = query_string
    matching_feed = self.client._GetSpreadsheetsClient().GetListFeed(
        self.spreadsheet_key, wksht_id=self.worksheet_id, query=row_query)
    return RecordResultSet(matching_feed, self.client,
                           self.spreadsheet_key, self.worksheet_id)
class RecordResultSet(list):
  """A collection of rows which allows fetching of the next set of results.

  The server may not send all rows in the requested range because there are
  too many. Using this result set you can access the first set of results
  as if it is a list, then get the next batch (if there are more results) by
  calling GetNext().
  """

  def __init__(self, feed, client, spreadsheet_key, worksheet_id):
    """Wraps a list feed's entries as Record objects.

    Args:
      feed: A list feed whose .entry items become Records.
      client: DatabaseClient used for fetching further batches.
      spreadsheet_key: str Key of the containing spreadsheet.
      worksheet_id: str ID of the containing worksheet.
    """
    self.client = client
    self.spreadsheet_key = spreadsheet_key
    self.worksheet_id = worksheet_id
    self.feed = feed
    # (The original code contained a no-op `list(self)` here; removed.)
    for entry in self.feed.entry:
      self.append(Record(content=None, row_entry=entry,
                         spreadsheet_key=spreadsheet_key,
                         worksheet_id=worksheet_id,
                         database_client=client))

  def GetNext(self):
    """Fetches the next batch of rows in the result set.

    Returns:
      A new RecordResultSet, or None when there is no 'next' link.
    """
    next_link = self.feed.GetNextLink()
    if next_link and next_link.href:
      new_feed = self.client._GetSpreadsheetsClient().Get(
          next_link.href,
          converter=gdata.spreadsheet.SpreadsheetsListFeedFromString)
      return RecordResultSet(new_feed, self.client, self.spreadsheet_key,
                             self.worksheet_id)


class Record(object):
  """Represents one row in a worksheet and provides a dictionary of values.

  Attributes:
    content: dict Represents the contents of the row with cell values
        mapped to column headers.
  """

  def __init__(self, content=None, row_entry=None, spreadsheet_key=None,
               worksheet_id=None, database_client=None):
    """Constructor for a record.

    Args:
      content: dict of strings Mapping of string values to column names.
      row_entry: gdata.spreadsheet.SpreadsheetsList The Atom entry
          representing this row in the worksheet.
      spreadsheet_key: str The ID of the spreadsheet in which this row
          belongs.
      worksheet_id: str The ID of the worksheet in which this row belongs.
      database_client: DatabaseClient The client which can be used to talk
          to the Google Spreadsheets server to edit this row.
    """
    self.entry = row_entry
    self.spreadsheet_key = spreadsheet_key
    self.worksheet_id = worksheet_id
    if row_entry:
      self.row_id = row_entry.id.text.split('/')[-1]
    else:
      self.row_id = None
    self.client = database_client
    self.content = content or {}
    if not content:
      self.ExtractContentFromEntry(row_entry)

  def ExtractContentFromEntry(self, entry):
    """Populates the content and row_id based on content of the entry.

    This method is used in the Record's constructor.

    Args:
      entry: gdata.spreadsheet.SpreadsheetsList The Atom entry
          representing this row in the worksheet.
    """
    self.content = {}
    if entry:
      self.row_id = entry.id.text.split('/')[-1]
      # items() works identically under Python 2 and 3 (iteritems does not).
      for label, custom in entry.custom.items():
        self.content[label] = custom.text

  def Push(self):
    """Sends the content of the record to spreadsheets to edit the row.

    All items in the content dictionary will be sent. Items which have been
    removed from the content may remain in the row. The content member
    of the record will not be modified, so additional fields in the row
    might be absent from this local copy.
    """
    self.entry = self.client._GetSpreadsheetsClient().UpdateRow(
        self.entry, self.content)

  def Pull(self):
    """Queries Google Spreadsheets to get the latest data from the server.

    Fetches the entry for this row and repopulates the content dictionary
    with the data found in the row.
    """
    if self.row_id:
      self.entry = self.client._GetSpreadsheetsClient().GetListFeed(
          self.spreadsheet_key, wksht_id=self.worksheet_id,
          row_id=self.row_id)
    self.ExtractContentFromEntry(self.entry)

  def Delete(self):
    """Deletes this row from the worksheet."""
    self.client._GetSpreadsheetsClient().DeleteRow(self.entry)


def ConvertStringsToColumnHeaders(proposed_headers):
  """Converts a list of strings to column names which spreadsheets accepts.

  When setting values in a record, the keys which represent column names
  must fit certain rules. They are all lower case, contain no spaces or
  special characters. If two columns have the same name after being
  sanitized, the columns further to the right have _2, _3, _4, etc.
  appended to them.

  This method does not handle blank column names or column names with
  only special characters.

  Args:
    proposed_headers: list of str Raw first-row cell contents.

  Returns:
    list of str Sanitized, de-duplicated column headers.
  """
  headers = []
  # Bug fix: the original counted exact matches in the output list, so a
  # third duplicate collided with the second ('name, name_2, name_2').
  # Tracking occurrence counts per sanitized name yields the documented
  # 'name, name_2, name_3' sequence.
  occurrences = {}
  for input_string in proposed_headers:
    # TODO: probably a more efficient way to do this. Perhaps regex.
    sanitized = input_string.lower().replace('_', '').replace(
        ':', '').replace(' ', '')
    count = occurrences.get(sanitized, 0) + 1
    occurrences[sanitized] = count
    if count > 1:
      headers.append('%s_%i' % (sanitized, count))
    else:
      headers.append(sanitized)
  return headers
+ + + +XML_ENTRY_1 = """<?xml version='1.0'?> +<entry xmlns='http://www.w3.org/2005/Atom' + xmlns:g='http://base.google.com/ns/1.0'> + <category scheme="http://base.google.com/categories/itemtypes" + term="products"/> + <id> http://www.google.com/test/id/url </id> + <title type='text'>Testing 2000 series laptop + +
A Testing Laptop
+
+ + + Computer + Laptop + testing laptop + products +""" + + +TEST_BASE_ENTRY = """ + + + Testing 2000 series laptop + +
A Testing Laptop
+
+ + yes + + + + Computer + Laptop + testing laptop + products +
""" + + +BIG_FEED = """ + + dive into mark + + A <em>lot</em> of effort + went into making this effortless + + 2005-07-31T12:29:29Z + tag:example.org,2003:3 + + + Copyright (c) 2003, Mark Pilgrim + + Example Toolkit + + + Atom draft-07 snapshot + + + tag:example.org,2003:3.2397 + 2005-07-31T12:29:29Z + 2003-12-13T08:29:29-04:00 + + Mark Pilgrim + http://example.org/ + f8dy@example.com + + + Sam Ruby + + + Joe Gregorio + + +
+

[Update: The Atom draft is finished.]

+
+
+
+
+""" + +SMALL_FEED = """ + + Example Feed + + 2003-12-13T18:30:02Z + + John Doe + + urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6 + + Atom-Powered Robots Run Amok + + urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a + 2003-12-13T18:30:02Z + Some text. + + +""" + +GBASE_FEED = """ + +http://www.google.com/base/feeds/snippets +2007-02-08T23:18:21.935Z +Items matching query: digital camera + + + + + + + + +GoogleBase +2171885 +1 +25 + +http://www.google.com/base/feeds/snippets/13246453826751927533 +2007-02-08T13:23:27.000Z +2007-02-08T16:40:57.000Z + + +Digital Camera Battery Notebook Computer 12v DC Power Cable - 5.5mm x 2.5mm (Center +) Camera Connecting Cables +Notebook Computer 12v DC Power Cable - 5.5mm x 2.1mm (Center +) This connection cable will allow any Digital Pursuits battery pack to power portable computers that operate with 12v power and have a 2.1mm power connector (center +) Digital ... + + + + + +B&H Photo-Video +anon-szot0wdsq0at@base.google.com + +PayPal & Bill Me Later credit available online only. +new +420 9th Ave. 10001 +305668-REG +Products +Digital Camera Battery +2007-03-10T13:23:27.000Z +1172711 +34.95 usd +Digital Photography>Camera Connecting Cables +EN +DCB5092 +US +1.0 +http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305668.jpg&dhm=ffffffff84c9a95e&size=6 + + +http://www.google.com/base/feeds/snippets/10145771037331858608 +2007-02-08T13:23:27.000Z +2007-02-08T16:40:57.000Z + + +Digital Camera Battery Electronic Device 5v DC Power Cable - 5.5mm x 2.5mm (Center +) Camera Connecting Cables +Electronic Device 5v DC Power Cable - 5.5mm x 2.5mm (Center +) This connection cable will allow any Digital Pursuits battery pack to power any electronic device that operates with 5v power and has a 2.5mm power connector (center +) Digital ... + + + + + +B&H Photo-Video +anon-szot0wdsq0at@base.google.com + +420 9th Ave. 
10001 +new +0.18 +US +Digital Photography>Camera Connecting Cables +PayPal & Bill Me Later credit available online only. +305656-REG +http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305656.jpg&dhm=7315bdc8&size=6 +DCB5108 +838098005108 +34.95 usd +EN +Digital Camera Battery +1172711 +Products +2007-03-10T13:23:27.000Z + + +http://www.google.com/base/feeds/snippets/3128608193804768644 +2007-02-08T02:21:27.000Z +2007-02-08T15:40:13.000Z + + +Digital Camera Battery Power Cable for Kodak 645 Pro-Back ProBack & DCS-300 Series Camera Connecting Cables +Camera Connection Cable - to Power Kodak 645 Pro-Back DCS-300 Series Digital Cameras This connection cable will allow any Digital Pursuits battery pack to power the following digital cameras: Kodak DCS Pro Back 645 DCS-300 series Digital Photography ... + + + + + +B&H Photo-Video +anon-szot0wdsq0at@base.google.com + +0.3 +DCB6006 +http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305685.jpg&dhm=72f0ca0a&size=6 +420 9th Ave. 10001 +PayPal & Bill Me Later credit available online only. 
+Products +US +digital kodak camera +Digital Camera Battery +2007-03-10T02:21:27.000Z +EN +new +34.95 usd +1172711 +Digital Photography>Camera Connecting Cables +305685-REG + +""" + +EXTENSION_TREE = """ + + + John Doe + Bar + + + +""" + +TEST_AUTHOR = """ + + John Doe + johndoes@someemailadress.com + http://www.google.com + +""" + +TEST_LINK = """ + +""" + +TEST_GBASE_ATTRIBUTE = """ + Digital Camera Battery +""" + + +CALENDAR_FEED = """ + + http://www.google.com/calendar/feeds/default + 2007-03-20T22:48:57.833Z + GData Ops Demo's Calendar List + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + Google Calendar + 1 + + + http://www.google.com/calendar/feeds/default/gdata.ops.demo%40gmail.com + 2007-03-20T22:48:57.837Z + 2007-03-20T22:48:52.000Z + GData Ops Demo + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + http://www.google.com/calendar/feeds/default/jnh21ovnjgfph21h32gvms2758%40group.calendar.google.com + 2007-03-20T22:48:57.837Z + 2007-03-20T22:48:53.000Z + GData Ops Demo Secondary Calendar + + + + + + + GData Ops Demo Secondary Calendar + + + + + + + + +""" + +CALENDAR_FULL_EVENT_FEED = """ + + + http://www.google.com/calendar/feeds/default/private/full + 2007-03-20T21:29:57.000Z + + GData Ops Demo + GData Ops Demo + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + Google Calendar + 10 + 1 + 25 + + + + http://www.google.com/calendar/feeds/default/private/full/o99flmgmkfkfrr8u745ghr3100 + 2007-03-20T21:29:52.000Z + 2007-03-20T21:29:57.000Z + + test deleted + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/2qt3ao5hbaq7m9igr5ak9esjo0 + 2007-03-20T21:26:04.000Z + 2007-03-20T21:28:46.000Z + + Afternoon at Dolores Park with Kim + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
http://www.google.com/calendar/feeds/default/private/full/uvsqhg7klnae40v50vihr1pvos + 2007-03-20T21:28:37.000Z + 2007-03-20T21:28:37.000Z + + Team meeting + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + DTSTART;TZID=America/Los_Angeles:20070323T090000 + DTEND;TZID=America/Los_Angeles:20070323T100000 + RRULE:FREQ=WEEKLY;BYDAY=FR;UNTIL=20070817T160000Z;WKST=SU + BEGIN:VTIMEZONE TZID:America/Los_Angeles + X-LIC-LOCATION:America/Los_Angeles BEGIN:STANDARD + TZOFFSETFROM:-0700 TZOFFSETTO:-0800 TZNAME:PST + DTSTART:19701025T020000 RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU + END:STANDARD BEGIN:DAYLIGHT TZOFFSETFROM:-0800 TZOFFSETTO:-0700 + TZNAME:PDT DTSTART:19700405T020000 + RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=1SU END:DAYLIGHT + END:VTIMEZONE + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/st4vk9kiffs6rasrl32e4a7alo + 2007-03-20T21:25:46.000Z + 2007-03-20T21:25:46.000Z + + Movie with Kim and danah + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/ofl1e45ubtsoh6gtu127cls2oo + 2007-03-20T21:24:43.000Z + 2007-03-20T21:25:08.000Z + + Dinner with Kim and Sarah + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/b69s2avfi2joigsclecvjlc91g + 2007-03-20T21:24:19.000Z + 2007-03-20T21:25:05.000Z + + Dinner with Jane and John + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/u9p66kkiotn8bqh9k7j4rcnjjc + 2007-03-20T21:24:33.000Z + 2007-03-20T21:24:33.000Z + + Tennis with Elizabeth + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/76oj2kceidob3s708tvfnuaq3c + 2007-03-20T21:24:00.000Z + 2007-03-20T21:24:00.000Z + + 
Lunch with Jenn + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/5np9ec8m7uoauk1vedh5mhodco + 2007-03-20T07:50:02.000Z + 2007-03-20T20:39:26.000Z + + test entry + test desc + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/fu6sl0rqakf3o0a13oo1i1a1mg + 2007-02-14T23:23:37.000Z + 2007-02-14T23:25:30.000Z + + test + + + + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/h7a0haa4da8sil3rr19ia6luvc + 2007-07-16T22:13:28.000Z + 2007-07-16T22:13:29.000Z + + + + + + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + +""" + +CALENDAR_BATCH_REQUEST = """ + + + + 1 + + + Event inserted via batch + + + 2 + + http://www.google.com/calendar/feeds/default/private/full/glcs0kv2qqa0gf52qi1jo018gc + + Event queried via batch + + + 3 + + http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs + + Event updated via batch + + + + + + 4 + + http://www.google.com/calendar/feeds/default/private/full/d8qbg9egk1n6lhsgq1sjbqffqc + + Event deleted via batch + + + + + +""" + +CALENDAR_BATCH_RESPONSE = """ + + http://www.google.com/calendar/feeds/default/private/full + 2007-09-21T23:01:00.380Z + + Batch Feed + + + + + 1 + + + http://www.google.com/calendar/feeds/default/private/full/n9ug78gd9tv53ppn4hdjvk68ek + + Event inserted via batch + + + + + + 2 + + + http://www.google.com/calendar/feeds/default/private/full/glsc0kv2aqa0ff52qi1jo018gc + + Event queried via batch + + + + + + 3 + + + http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs + + Event updated via batch + + + + 3 + + + + + 4 + + + 
http://www.google.com/calendar/feeds/default/private/full/d8qbg9egk1n6lhsgq1sjbqffqc + + Event deleted via batch + Deleted + + +""" + +GBASE_ATTRIBUTE_FEED = """ + + http://www.google.com/base/feeds/attributes + 2006-11-01T20:35:59.578Z + + + Attribute histogram for query: [item type:jobs] + + + + GoogleBase + 16 + 1 + 16 + + http://www.google.com/base/feeds/attributes/job+industry%28text%29N%5Bitem+type%3Ajobs%5D + 2006-11-01T20:36:00.100Z + job industry(text) + Attribute"job industry" of type text. + + + + it internet + healthcare + information technology + accounting + clerical and administrative + other + sales and sales management + information systems + engineering and architecture + sales + + + +""" + + +GBASE_ATTRIBUTE_ENTRY = """ + + http://www.google.com/base/feeds/attributes/job+industry%28text%29N%5Bitem+type%3Ajobs%5D + 2006-11-01T20:36:00.100Z + job industry(text) + Attribute"job industry" of type text. + + + + it internet + healthcare + information technology + accounting + clerical and administrative + other + sales and sales management + information systems + engineering and architecture + sales + + +""" + +GBASE_LOCALES_FEED = """ + + http://www.google.com/base/feeds/locales/ + 2006-06-13T18:11:40.120Z + Locales + + + + + Google Inc. 
+ base@google.com + + GoogleBase + 3 + 25 + + + http://www.google.com/base/feeds/locales/en_US + 2006-03-27T22:27:36.658Z + + + en_US + en_US + + + + + + http://www.google.com/base/feeds/locales/en_GB + 2006-06-13T18:14:18.601Z + + en_GB + en_GB + + + + + http://www.google.com/base/feeds/locales/de_DE + 2006-06-13T18:14:18.601Z + + de_DE + de_DE + + + +""" + +GBASE_STRING_ENCODING_ENTRY = """ + + http://www.google.com/base/feeds/snippets/17495780256183230088 + 2007-12-09T03:13:07.000Z + 2008-01-07T03:26:46.000Z + + Digital Camera Cord Fits SONY Cybershot DSC-R1 S40 + SONY \xC2\xB7 Cybershot Digital Camera Usb Cable DESCRIPTION + This is a 2.5 USB 2.0 A to Mini B (5 Pin) high quality digital camera + cable used for connecting your Sony Digital Cameras and Camcoders. Backward + Compatible with USB 2.0, 1.0 and 1.1. Fully ... + + + + eBay + + Products + EN + US + 0.99 usd + http://thumbs.ebaystatic.com/pict/270195049057_1.jpg + Cameras & Photo>Digital Camera Accessories>Cables + Cords & Connectors>USB Cables>For Other Brands + 11729 + 270195049057 + 2008-02-06T03:26:46Z +""" + + +RECURRENCE_EXCEPTION_ENTRY = """ + + http://www.google.com/calendar/feeds/default/private/composite/i7lgfj69mjqjgnodklif3vbm7g + 2007-04-05T21:51:49.000Z + 2007-04-05T21:51:49.000Z + + testDavid + + + + + + gdata ops + gdata.ops.test@gmail.com + + + + + + + + + + DTSTART;TZID=America/Anchorage:20070403T100000 + DTEND;TZID=America/Anchorage:20070403T110000 + RRULE:FREQ=DAILY;UNTIL=20070408T180000Z;WKST=SU + EXDATE;TZID=America/Anchorage:20070407T100000 + EXDATE;TZID=America/Anchorage:20070405T100000 + EXDATE;TZID=America/Anchorage:20070404T100000 BEGIN:VTIMEZONE + TZID:America/Anchorage X-LIC-LOCATION:America/Anchorage + BEGIN:STANDARD TZOFFSETFROM:-0800 TZOFFSETTO:-0900 TZNAME:AKST + DTSTART:19701025T020000 RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU + END:STANDARD BEGIN:DAYLIGHT TZOFFSETFROM:-0900 TZOFFSETTO:-0800 + TZNAME:AKDT DTSTART:19700405T020000 + RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=1SU 
END:DAYLIGHT + END:VTIMEZONE + + + + + + i7lgfj69mjqjgnodklif3vbm7g_20070407T180000Z + 2007-04-05T21:51:49.000Z + 2007-04-05T21:52:58.000Z + + testDavid + + + + gdata ops + gdata.ops.test@gmail.com + + + + + + + + + + + + + + + + + + + 2007-04-05T21:54:09.285Z + + + Comments for: testDavid + + + + + + + + + + + + """ + +NICK_ENTRY = """ + + https://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo + 1970-01-01T00:00:00.000Z + + Foo + + + + +""" + +NICK_FEED = """ + + + http://apps-apis.google.com/a/feeds/example.com/nickname/2.0 + + 1970-01-01T00:00:00.000Z + + Nicknames for user SusanJones + + + + 1 + 2 + + + http://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo + + + Foo + + + + + + + + http://apps-apis.google.com/a/feeds/example.com/nickname/2.0/suse + + + suse + + + + + +""" + +USER_ENTRY = """ + + https://apps-apis.google.com/a/feeds/example.com/user/2.0/TestUser + 1970-01-01T00:00:00.000Z + + TestUser + + + + + + + +""" + +USER_FEED = """ + + + http://apps-apis.google.com/a/feeds/example.com/user/2.0 + + 1970-01-01T00:00:00.000Z + + Users + """ + +EMAIL_LIST_ENTRY = """ + + + https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/testlist + + 1970-01-01T00:00:00.000Z + + testlist + + + + +""" + +EMAIL_LIST_FEED = """ + + + http://apps-apis.google.com/a/feeds/example.com/emailList/2.0 + + 1970-01-01T00:00:00.000Z + + EmailLists + """ + +EMAIL_LIST_RECIPIENT_ENTRY = """ + + https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/TestUser%40example.com + 1970-01-01T00:00:00.000Z + + TestUser + + + +""" + +EMAIL_LIST_RECIPIENT_FEED = """ + + + http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient + + 1970-01-01T00:00:00.000Z + + Recipients for email list us-sales + """ + +ACL_FEED = """ + + http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full + 2007-04-21T00:52:04.000Z + Elizabeth Bennet's access control list + + + + + + + + + Google Calendar + 2 + 1 + + 
http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com + 2007-04-21T00:52:04.000Z + + + owner + + + + + + + Elizabeth Bennet + liz@gmail.com + + + + + + + http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/default + 2007-04-21T00:52:04.000Z + + + read + + + + + + + Elizabeth Bennet + liz@gmail.com + + + + + + """ + +ACL_ENTRY = """ + + http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com + 2007-04-21T00:52:04.000Z + + + owner + + + + + + + Elizabeth Bennet + liz@gmail.com + + + + + """ + +DOCUMENT_LIST_FEED = """ +21test.usertest.user@gmail.comhttp://docs.google.com/feeds/documents/private/full/spreadsheet%3AsupercalifragilisticexpeadociousTest Spreadsheet2007-07-03T18:03:32.045Ztest.usertest.user@gmail.comhttp://docs.google.com/feeds/documents/private/full/document%3Agr00vyTest Document2007-07-03T18:02:50.338Zhttp://docs.google.com/feeds/documents/private/fullAvailable +Documents - +test.user@gmail.com2007-07-09T23:07:21.898Z +""" + +DOCUMENT_LIST_ENTRY = """ +test.usertest.user@gmail.comhttp://docs.google.com/feeds/documents/private/full/spreadsheet%3AsupercalifragilisticexpealidociousTest Spreadsheet2007-07-03T18:03:32.045Z +""" + +BATCH_ENTRY = """ + + http://www.google.com/base/feeds/items/2173859253842813008 + 2006-07-11T14:51:43.560Z + 2006-07-11T14:51: 43.560Z + title + content + + + recipes + + itemB + +""" + +BATCH_FEED_REQUEST = """ + + My Batch Feed + + http://www.google.com/base/feeds/items/13308004346459454600 + + + + http://www.google.com/base/feeds/items/17437536661927313949 + + + + ... + ... + itemA + + recipes + + + ... + ... + itemB + + recipes + +""" + +BATCH_FEED_RESULT = """ + + http://www.google.com/base/feeds/items + 2006-07-11T14:51:42.894Z + My Batch + + + + + http://www.google.com/base/feeds/items/2173859253842813008 + 2006-07-11T14:51:43.560Z + 2006-07-11T14:51: 43.560Z + ... + ... 
+ + + recipes + + itemB + + + + http://www.google.com/base/feeds/items/11974645606383737963 + 2006-07-11T14:51:43.247Z + 2006-07-11T14:51: 43.247Z + ... + ... + + + recipes + + itemA + + + + http://www.google.com/base/feeds/items/13308004346459454600 + 2006-07-11T14:51:42.894Z + Error + Bad request + + + + + + + + http://www.google.com/base/feeds/items/17437536661927313949 + 2006-07-11T14:51:43.246Z + Deleted + + + +""" + +ALBUM_FEED = """ + + http://picasaweb.google.com/data/feed/api/user/sample.user/albumid/1 + 2007-09-21T18:23:05.000Z + + Test + + public + http://lh6.google.com/sample.user/Rt8WNoDZEJE/AAAAAAAAABk/HQGlDhpIgWo/s160-c/Test.jpg + + + + + + sample + http://picasaweb.google.com/sample.user + + Picasaweb 4 + 1 + 500 + 1 + Test + + public 1188975600000 + 2 + sample.user + sample + true + 0 + http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/photoid/2 + 2007-09-05T20:49:23.000Z + 2007-09-21T18:23:05.000Z + + Aqua Blue.jpg + Blue + + + + 2 + 1190398985145172 + 0.0 + 1 2560 + 1600 + 883405 + + + 1189025362000 + true + c041ce17aaa637eb656c81d9cf526c24 + + true + 1 + + Aqua Blue.jpg Blue + tag, test + + + + + sample + + + + http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/photoid/3 + 2007-09-05T20:49:24.000Z + 2007-09-21T18:19:38.000Z + + Aqua Graphite.jpg + Gray + + + + + 3 + 1190398778006402 + 1.0 + 1 + 2560 + 1600 + 798334 + + + 1189025363000 + + true + a5ce2e36b9df7d3cb081511c72e73926 + + true + 0 + + Aqua Graphite.jpg + Gray + + + + + + sample + + + + http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/tag/tag + 2007-09-05T20:49:24.000Z + + tag + tag + + + + sample + http://picasaweb.google.com/sample.user + + + + http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/tag/test + 2007-09-05T20:49:24.000Z + + test + test + + + + sample + http://picasaweb.google.com/sample.user + + +""" + +CODE_SEARCH_FEED = """ + +http://www.google.com/codesearch/feeds/search?q=malloc 
+2007-12-19T16:08:04Z +Google Code Search +Google Code Search +2530000 +1 + +Google Code Search + +http://www.google.com/codesearch + + + + + +http://www.google.com/codesearch?hl=en&q=+malloc+show:LDjwp-Iqc7U:84hEYaYsZk8:xDGReDhvNi0&sa=N&ct=rx&cd=1&cs_p=http://www.gnu.org&cs_f=software/autoconf/manual/autoconf-2.60/autoconf.html-002&cs_p=http://www.gnu.org&cs_f=software/autoconf/manual/autoconf-2.60/autoconf.html-002#first2007-12-19T16:08:04ZCode owned by external author.software/autoconf/manual/autoconf-2.60/autoconf.html<pre> 8: void *<b>malloc</b> (); + + +</pre><pre> #undef <b>malloc</b> +</pre><pre> void *<b>malloc</b> (); + +</pre><pre> rpl_<b>malloc</b> (size_t n) +</pre><pre> return <b>malloc</b> (n); + +</pre> +http://www.google.com/codesearch?hl=en&q=+malloc+show:h4hfh-fV-jI:niBq_bwWZNs:H0OhClf0HWQ&sa=N&ct=rx&cd=2&cs_p=ftp://ftp.gnu.org/gnu/guile/guile-1.6.8.tar.gz&cs_f=guile-1.6.8/libguile/mallocs.c&cs_p=ftp://ftp.gnu.org/gnu/guile/guile-1.6.8.tar.gz&cs_f=guile-1.6.8/libguile/mallocs.c#first2007-12-19T16:08:04ZCode owned by external author.guile-1.6.8/libguile/mallocs.c<pre> 86: { + scm_t_bits mem = n ? (scm_t_bits) <b>malloc</b> (n) : 0; + if (n &amp;&amp; !mem) + +</pre><pre>#include &lt;<b>malloc</b>.h&gt; +</pre><pre>scm_t_bits scm_tc16_<b>malloc</b>; + +</pre><pre><b>malloc</b>_free (SCM ptr) +</pre><pre><b>malloc</b>_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED) + +</pre><pre> scm_puts(&quot;#&lt;<b>malloc</b> &quot;, port); +</pre><pre> scm_t_bits mem = n ? 
(scm_t_bits) <b>malloc</b> (n) : 0; + +</pre><pre> SCM_RETURN_NEWSMOB (scm_tc16_<b>malloc</b>, mem); +</pre><pre> scm_tc16_<b>malloc</b> = scm_make_smob_type (&quot;<b>malloc</b>&quot;, 0); + +</pre><pre> scm_set_smob_free (scm_tc16_<b>malloc</b>, <b>malloc</b>_free); +</pre>GPL + +http://www.google.com/codesearch?hl=en&q=+malloc+show:9wyZUG-N_30:7_dFxoC1ZrY:C0_iYbFj90M&sa=N&ct=rx&cd=3&cs_p=http://ftp.gnu.org/gnu/bash/bash-3.0.tar.gz&cs_f=bash-3.0/lib/malloc/alloca.c&cs_p=http://ftp.gnu.org/gnu/bash/bash-3.0.tar.gz&cs_f=bash-3.0/lib/malloc/alloca.c#first2007-12-19T16:08:04ZCode owned by external author.bash-3.0/lib/malloc/alloca.c<pre> 78: #ifndef emacs + #define <b>malloc</b> x<b>malloc</b> + extern pointer x<b>malloc</b> (); + +</pre><pre> <b>malloc</b>. The Emacs executable needs alloca to call x<b>malloc</b>, because +</pre><pre> ordinary <b>malloc</b> isn&#39;t protected from input signals. On the other + +</pre><pre> hand, the utilities in lib-src need alloca to call <b>malloc</b>; some of +</pre><pre> them are very simple, and don&#39;t have an x<b>malloc</b> routine. + +</pre><pre> Callers below should use <b>malloc</b>. 
*/ +</pre><pre>#define <b>malloc</b> x<b>malloc</b> + +</pre><pre>extern pointer x<b>malloc</b> (); +</pre><pre> It is very important that sizeof(header) agree with <b>malloc</b> + +</pre><pre> register pointer new = <b>malloc</b> (sizeof (header) + size); +</pre>GPL +http://www.google.com/codesearch?hl=en&q=+malloc+show:uhVCKyPcT6k:8juMxxzmUJw:H7_IDsTB2L4&sa=N&ct=rx&cd=4&cs_p=http://ftp.mozilla.org/pub/mozilla.org/mozilla/releases/mozilla1.7b/src/mozilla-source-1.7b-source.tar.bz2&cs_f=mozilla/xpcom/build/malloc.c&cs_p=http://ftp.mozilla.org/pub/mozilla.org/mozilla/releases/mozilla1.7b/src/mozilla-source-1.7b-source.tar.bz2&cs_f=mozilla/xpcom/build/malloc.c#first2007-12-19T16:08:04ZCode owned by external author.mozilla/xpcom/build/malloc.c<pre> 54: http://gee.cs.oswego.edu/dl/html/<b>malloc</b>.html + + You may already by default be using a c library containing a <b>malloc</b> + +</pre><pre>/* ---------- To make a <b>malloc</b>.h, start cutting here ------------ */ +</pre><pre> Note: There may be an updated version of this <b>malloc</b> obtainable at + +</pre><pre> ftp://gee.cs.oswego.edu/pub/misc/<b>malloc</b>.c +</pre><pre>* Why use this <b>malloc</b>? + +</pre><pre> most tunable <b>malloc</b> ever written. However it is among the fastest +</pre><pre> allocator for <b>malloc</b>-intensive programs. 
+ +</pre><pre> http://gee.cs.oswego.edu/dl/html/<b>malloc</b>.html +</pre><pre> You may already by default be using a c library containing a <b>malloc</b> + +</pre><pre> that is somehow based on some version of this <b>malloc</b> (for example in +</pre>Mozilla +http://www.google.com/codesearch?hl=en&q=+malloc+show:4n1P2HVOISs:Ybbpph0wR2M:OhIN_sDrG0U&sa=N&ct=rx&cd=5&cs_p=http://regexps.srparish.net/src/hackerlab/hackerlab-1.0pre2.tar.gz&cs_f=hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh&cs_p=http://regexps.srparish.net/src/hackerlab/hackerlab-1.0pre2.tar.gz&cs_f=hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh#first2007-12-19T16:08:04ZCode owned by external author.hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh<pre> 11: echo ================ unit-must-<b>malloc</b> tests ================ + ./unit-must-<b>malloc</b> + echo ...passed + +</pre><pre># tag: Tom Lord Tue Dec 4 14:54:29 2001 (mem-tests/unit-must-<b>malloc</b>.sh) +</pre><pre>echo ================ unit-must-<b>malloc</b> tests ================ + +</pre><pre>./unit-must-<b>malloc</b> +</pre>GPL +http://www.google.com/codesearch?hl=en&q=+malloc+show:GzkwiWG266M:ykuz3bG00ws:2sTvVSif08g&sa=N&ct=rx&cd=6&cs_p=http://ftp.gnu.org/gnu/tar/tar-1.14.tar.bz2&cs_f=tar-1.14/lib/malloc.c&cs_p=http://ftp.gnu.org/gnu/tar/tar-1.14.tar.bz2&cs_f=tar-1.14/lib/malloc.c#first2007-12-19T16:08:04ZCode owned by external author.tar-1.14/lib/malloc.c<pre> 22: #endif + #undef <b>malloc</b> + + +</pre><pre>/* Work around bug on some systems where <b>malloc</b> (0) fails. 
+</pre><pre>#undef <b>malloc</b> + +</pre><pre>rpl_<b>malloc</b> (size_t n) +</pre><pre> return <b>malloc</b> (n); + +</pre>GPL +http://www.google.com/codesearch?hl=en&q=+malloc+show:o_TFIeBY6dY:ktI_dt8wPao:AI03BD1Dz0Y&sa=N&ct=rx&cd=7&cs_p=http://ftp.gnu.org/gnu/tar/tar-1.16.1.tar.gz&cs_f=tar-1.16.1/lib/malloc.c&cs_p=http://ftp.gnu.org/gnu/tar/tar-1.16.1.tar.gz&cs_f=tar-1.16.1/lib/malloc.c#first2007-12-19T16:08:04ZCode owned by external author.tar-1.16.1/lib/malloc.c<pre> 21: #include &lt;config.h&gt; + #undef <b>malloc</b> + + +</pre><pre>/* <b>malloc</b>() function that is glibc compatible. +</pre><pre>#undef <b>malloc</b> + +</pre><pre>rpl_<b>malloc</b> (size_t n) +</pre><pre> return <b>malloc</b> (n); + +</pre>GPL +http://www.google.com/codesearch?hl=en&q=+malloc+show:_ibw-VLkMoI:jBOtIJSmFd4:-0NUEVeCwfY&sa=N&ct=rx&cd=8&cs_p=http://freshmeat.net/redir/uclibc/20616/url_bz2/uClibc-0.9.28.1.tar.bz2&cs_f=uClibc-0.9.29/include/malloc.h&cs_p=http://freshmeat.net/redir/uclibc/20616/url_bz2/uClibc-0.9.28.1.tar.bz2&cs_f=uClibc-0.9.29/include/malloc.h#first2007-12-19T16:08:04ZCode owned by external author.uClibc-0.9.29/include/malloc.h<pre> 1: /* Prototypes and definition for <b>malloc</b> implementation. + Copyright (C) 1996, 1997, 1999, 2000 Free Software Foundation, Inc. + +</pre><pre>/* Prototypes and definition for <b>malloc</b> implementation. +</pre><pre> `pt<b>malloc</b>&#39;, a <b>malloc</b> implementation for multiple threads without + +</pre><pre> See the files `pt<b>malloc</b>.c&#39; or `COPYRIGHT&#39; for copying conditions. +</pre><pre> This work is mainly derived from <b>malloc</b>-2.6.4 by Doug Lea + +</pre><pre> ftp://g.oswego.edu/pub/misc/<b>malloc</b>.c +</pre><pre> `pt<b>malloc</b>.c&#39;. 
+ +</pre><pre># define __<b>malloc</b>_ptr_t void * +</pre><pre># define __<b>malloc</b>_ptr_t char * + +</pre><pre># define __<b>malloc</b>_size_t size_t +</pre>LGPL +http://www.google.com/codesearch?hl=en&q=+malloc+show:F6qHcZ9vefo:bTX7o9gKfks:hECF4r_eKC0&sa=N&ct=rx&cd=9&cs_p=http://ftp.gnu.org/gnu/glibc/glibc-2.0.1.tar.gz&cs_f=glibc-2.0.1/hurd/hurdmalloc.h&cs_p=http://ftp.gnu.org/gnu/glibc/glibc-2.0.1.tar.gz&cs_f=glibc-2.0.1/hurd/hurdmalloc.h#first2007-12-19T16:08:04ZCode owned by external author.glibc-2.0.1/hurd/hurdmalloc.h<pre> 15: #define <b>malloc</b> _hurd_<b>malloc</b> + #define realloc _hurd_realloc + +</pre><pre> All hurd-internal code which uses <b>malloc</b> et al includes this file so it +</pre><pre> will use the internal <b>malloc</b> routines _hurd_{<b>malloc</b>,realloc,free} + +</pre><pre> of <b>malloc</b> et al is the unixoid one using sbrk. +</pre><pre>extern void *_hurd_<b>malloc</b> (size_t); + +</pre><pre>#define <b>malloc</b> _hurd_<b>malloc</b> +</pre>GPL + +http://www.google.com/codesearch?hl=en&q=+malloc+show:CHUvHYzyLc8:pdcAfzDA6lY:wjofHuNLTHg&sa=N&ct=rx&cd=10&cs_p=ftp://apache.mirrors.pair.com/httpd/httpd-2.2.4.tar.bz2&cs_f=httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h&cs_p=ftp://apache.mirrors.pair.com/httpd/httpd-2.2.4.tar.bz2&cs_f=httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h#first2007-12-19T16:08:04ZCode owned by external author.httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h<pre> 173: #undef <b>malloc</b> + #define <b>malloc</b>(x) library_<b>malloc</b>(gLibHandle,x) + +</pre><pre>/* Redefine <b>malloc</b> to use the library <b>malloc</b> call so +</pre><pre>#undef <b>malloc</b> + +</pre><pre>#define <b>malloc</b>(x) library_<b>malloc</b>(gLibHandle,x) +</pre>Apache + +""" + +YOUTUBE_VIDEO_FEED = """http://gdata.youtube.com/feeds/api/standardfeeds/top_rated2008-05-14T02:24:07.000-07:00Top Ratedhttp://www.youtube.com/img/pic_youtubelogo_123x63.gifYouTubehttp://www.youtube.com/YouTube data API100125 
+http://gdata.youtube.com/feeds/api/videos/C71ypXYGho82008-03-20T10:17:27.000-07:002008-05-14T04:26:37.000-07:00Me odeio por te amar - KARYN GARCIAhttp://www.karyngarcia.com.brTvKarynGarciahttp://gdata.youtube.com/feeds/api/users/tvkaryngarciaMe odeio por te amar - KARYN GARCIAhttp://www.karyngarcia.com.bramar, boyfriend, garcia, karyn, me, odeio, por, teMusictest111test222 +http://gdata.youtube.com/feeds/api/videos/gsVaTyb1tBw2008-02-15T04:31:45.000-08:002008-05-14T05:09:42.000-07:00extreme helmet cam Kani, Keil and Patotrimmedperaltamagichttp://gdata.youtube.com/feeds/api/users/peraltamagicextreme helmet cam Kani, Keil and Patotrimmedalcala, cam, campillo, dirt, extreme, helmet, kani, patoSports +""" + +YOUTUBE_ENTRY_PRIVATE = """ + + http://gdata.youtube.com/feeds/videos/UMFI1hdm96E + 2007-01-07T01:50:15.000Z + 2007-01-07T01:50:15.000Z + + + + + + + + + "Crazy (Gnarles Barkley)" - Acoustic Cover + <div style="color: #000000;font-family: + Arial, Helvetica, sans-serif; font-size:12px; font-size: 12px; + width: 555px;"><table cellspacing="0" cellpadding="0" + border="0"><tbody><tr><td width="140" + valign="top" rowspan="2"><div style="border: 1px solid + #999999; margin: 0px 10px 5px 0px;"><a + href="http://www.youtube.com/watch?v=UMFI1hdm96E"><img + alt="" + src="http://img.youtube.com/vi/UMFI1hdm96E/2.jpg"></a></div></td> + <td width="256" valign="top"><div style="font-size: + 12px; font-weight: bold;"><a style="font-size: 15px; + font-weight: bold; font-decoration: none;" + href="http://www.youtube.com/watch?v=UMFI1hdm96E">&quot;Crazy + (Gnarles Barkley)&quot; - Acoustic Cover</a> + <br></div> <div style="font-size: 12px; margin: + 3px 0px;"><span>Gnarles Barkley acoustic cover + http://www.myspace.com/davidchoimusic</span></div></td> + <td style="font-size: 11px; line-height: 1.4em; padding-left: + 20px; padding-top: 1px;" width="146" + valign="top"><div><span style="color: #666666; + font-size: 11px;">From:</span> <a + 
href="http://www.youtube.com/profile?user=davidchoimusic">davidchoimusic</a></div> + <div><span style="color: #666666; font-size: + 11px;">Views:</span> 113321</div> <div + style="white-space: nowrap;text-align: left"><img + style="border: 0px none; margin: 0px; padding: 0px; + vertical-align: middle; font-size: 11px;" align="top" alt="" + src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"> + <img style="border: 0px none; margin: 0px; padding: 0px; + vertical-align: middle; font-size: 11px;" align="top" alt="" + src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"> + <img style="border: 0px none; margin: 0px; padding: 0px; + vertical-align: middle; font-size: 11px;" align="top" alt="" + src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"> + <img style="border: 0px none; margin: 0px; padding: 0px; + vertical-align: middle; font-size: 11px;" align="top" alt="" + src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"> + <img style="border: 0px none; margin: 0px; padding: 0px; + vertical-align: middle; font-size: 11px;" align="top" alt="" + src="http://gdata.youtube.com/static/images/icn_star_half_11x11.gif"></div> + <div style="font-size: 11px;">1005 <span style="color: + #666666; font-size: + 11px;">ratings</span></div></td></tr> + <tr><td><span style="color: #666666; font-size: + 11px;">Time:</span> <span style="color: #000000; + font-size: 11px; font-weight: + bold;">04:15</span></td> <td style="font-size: + 11px; padding-left: 20px;"><span style="color: #666666; + font-size: 11px;">More in</span> <a + href="http://www.youtube.com/categories_portal?c=10">Music</a></td></tr></tbody></table></div> + + + + + + davidchoimusic + http://gdata.youtube.com/feeds/users/davidchoimusic + + + "Crazy (Gnarles Barkley)" - Acoustic Cover + Gnarles Barkley acoustic cover http://www.myspace.com/davidchoimusic + music, singing, gnarls, barkley, acoustic, cover + + + Music + + DeveloperTag1 + + + + + + + + + + + + + 
37.398529052734375 -122.0635986328125 + + + + + + + + yes + + The content of this video may violate the terms of use. + +""" + +YOUTUBE_COMMENT_FEED = """ +http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments2008-05-19T21:45:45.261ZCommentshttp://www.youtube.com/img/pic_youtubelogo_123x63.gifYouTubehttp://www.youtube.com/YouTube data API0125 + + http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/91F809A3DE2EB81B + 2008-02-22T15:27:15.000-08:002008-02-22T15:27:15.000-08:00 + + test66 + test66 + + + + apitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmann + + + http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/A261AEEFD23674AA + 2008-02-22T15:27:01.000-08:002008-02-22T15:27:01.000-08:00 + + test333 + test333 + + + + apitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmann + + + http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/0DCF1E3531B3FF85 + 2008-02-22T15:11:06.000-08:002008-02-22T15:11:06.000-08:00 + + test2 + test2 + + + + apitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmann + +""" + +YOUTUBE_PLAYLIST_FEED = """ + + http://gdata.youtube.com/feeds/users/andyland74/playlists?start-index=1&max-results=25 + 2008-02-26T00:26:15.635Z + + andyland74's Playlists + http://www.youtube.com/img/pic_youtubelogo_123x63.gif + + + + + + andyland74 + http://gdata.youtube.com/feeds/users/andyland74 + + YouTube data API + 1 + 1 + 25 + + My new playlist Description + + http://gdata.youtube.com/feeds/users/andyland74/playlists/8BCDD04DE8F771B2 + 2007-11-04T17:30:27.000-08:00 + 2008-02-22T09:55:14.000-08:00 + + My New Playlist Title + My new playlist Description + + + + + andyland74 + http://gdata.youtube.com/feeds/users/andyland74 + + +""" + +YOUTUBE_PLAYLIST_VIDEO_FEED = """http://gdata.youtube.com/feeds/api/playlists/BCB3BB96DF51B5052008-05-16T12:03:17.000-07:00Test PlaylistTest playlist 1http://www.youtube.com/img/pic_youtubelogo_123x63.gifgdpythonhttp://gdata.youtube.com/feeds/api/users/gdpythonYouTube 
data API1125Test PlaylistTest playlist 1http://gdata.youtube.com/feeds/api/playlists/BCB3BB96DF51B505/B0F29389E537F8882008-05-16T20:54:08.520ZUploading YouTube Videos with the PHP Client LibraryJochen Hartmann demonstrates the basics of how to use the PHP Client Library with the YouTube Data API. + +PHP Developer's Guide: +http://code.google.com/apis/youtube/developers_guide_php.html + +Other documentation: +http://code.google.com/apis/youtube/GoogleDevelopershttp://gdata.youtube.com/feeds/api/users/googledevelopersUploading YouTube Videos with the PHP Client LibraryJochen Hartmann demonstrates the basics of how to use the PHP Client Library with the YouTube Data API. + +PHP Developer's Guide: +http://code.google.com/apis/youtube/developers_guide_php.html + +Other documentation: +http://code.google.com/apis/youtube/api, data, demo, php, screencast, tutorial, uploading, walkthrough, youtubeEducationundefined1""" + +YOUTUBE_SUBSCRIPTION_FEED = """ + + http://gdata.youtube.com/feeds/users/andyland74/subscriptions?start-index=1&max-results=25 + 2008-02-26T00:26:15.635Z + + andyland74's Subscriptions + http://www.youtube.com/img/pic_youtubelogo_123x63.gif + + + + + + andyland74 + http://gdata.youtube.com/feeds/users/andyland74 + + YouTube data API + 1 + 1 + 25 + + http://gdata.youtube.com/feeds/users/andyland74/subscriptions/d411759045e2ad8c + 2007-11-04T17:30:27.000-08:00 + 2008-02-22T09:55:14.000-08:00 + + + Videos published by : NBC + + + + + andyland74 + http://gdata.youtube.com/feeds/users/andyland74 + + NBC + + +""" + +YOUTUBE_VIDEO_RESPONSE_FEED = """ + + http://gdata.youtube.com/feeds/videos/2c3q9K4cHzY/responses2008-05-19T22:37:34.076ZVideos responses to 'Giant NES controller coffee table'http://www.youtube.com/img/pic_youtubelogo_123x63.gifYouTubehttp://www.youtube.com/YouTube data API8125 + + http://gdata.youtube.com/feeds/videos/7b9EnRI9VbY2008-03-11T19:08:53.000-07:002008-05-18T21:33:10.000-07:00 + + + + + + + + + + + + Catnip Partysnipped + + + + + 
PismoBeachhttp://gdata.youtube.com/feeds/users/pismobeach + + Catnip Party + Uncle, Hillary, Hankette, and B4 all but overdose on the patioBrattman, cat, catmint, catnip, cats, chat, drug, gato, gatto, kat, kato, katt, Katze, kedi, kissa, OD, overdose, party, sex, Uncle + + Animals + + + + + + + + + + + + + + + + +""" + + +YOUTUBE_PROFILE = """ + + http://gdata.youtube.com/feeds/users/andyland74 + 2006-10-16T00:09:45.000-07:00 + 2008-02-26T11:48:21.000-08:00 + + + andyland74 Channel + + + + andyland74 + http://gdata.youtube.com/feeds/users/andyland74 + + 33 + andyland74 + andy + example + Catch-22 + m + Google + Testing YouTube APIs + Somewhere + US + Aqua Teen Hungerforce + Elliott Smith + Technical Writer + University of North Carolina + + + + + + + + +""" + +YOUTUBE_CONTACTS_FEED = """ + http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts2008-05-16T19:24:34.916Zapitestjhartmann's Contactshttp://www.youtube.com/img/pic_youtubelogo_123x63.gifapitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmannYouTube data API2125 + + http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts/test898990902008-02-04T11:27:54.000-08:002008-05-16T19:24:34.916Ztest89899090apitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmanntest89899090requested + + http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts/testjfisher2008-02-26T14:13:03.000-08:002008-05-16T19:24:34.916Ztestjfisherapitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmanntestjfisherpending +""" + +NEW_CONTACT = """ + + http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base/8411573 + 2008-02-28T18:47:02.303Z + + Fitzgerald + Notes + + + + + (206)555-1212 + 456-123-2133 + (206)555-1213 + + + + + + + 1600 Amphitheatre Pkwy Mountain View +""" + +CONTACTS_FEED = """ + + http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base + 2008-03-05T12:36:38.836Z + + Contacts + + + + + + Elizabeth Bennet + liz@gmail.com + + + Contacts + + 1 + 1 + 25 + + + 
http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base/c9012de + + 2008-03-05T12:36:38.835Z + + Fitzgerald + + + + + + 456 + + + + +""" + + +CONTACT_GROUPS_FEED = """ + + jo@gmail.com + 2008-05-21T21:11:25.237Z + + Jo's Contact Groups + + + + + + + Jo Brown + jo@gmail.com + + Contacts + 3 + 1 + 25 + + http://google.com/m8/feeds/groups/jo%40gmail.com/base/270f + 2008-05-14T13:10:19.070Z + + joggers + joggers + + + +""" + +CONTACT_GROUP_ENTRY = """ + + + http://www.google.com/feeds/groups/jo%40gmail.com/base/1234 + 2005-01-18T21:00:00Z + 2006-01-01T00:00:00Z + Salsa group + Salsa group + + + + Very nice people. + +""" + +BLOG_ENTRY = """ + tag:blogger.com,1999:blog-blogID.post-postID + 2006-08-02T18:44:43.089-07:00 + 2006-11-08T18:10:23.020-08:00 + Lizzy's Diary + Being the journal of Elizabeth Bennet + + + + + + + + + + + + Elizabeth Bennet + liz@gmail.com + +""" + +BLOG_POST = """ + Marriage! + +
+

Mr. Darcy has proposed marriage to me!

+

He is the last man on earth I would ever desire to marry.

+

Whatever shall I do?

+
+
+ + Elizabeth Bennet + liz@gmail.com + +
""" + +BLOG_POSTS_FEED = """ + tag:blogger.com,1999:blog-blogID + 2006-11-08T18:10:23.020-08:00 + Lizzy's Diary + + + + + + + + Elizabeth Bennet + liz@gmail.com + + Blogger + + tag:blogger.com,1999:blog-blogID.post-postID + 2006-11-08T18:10:00.000-08:00 + 2006-11-08T18:10:14.954-08:00 + Quite disagreeable + <p>I met Mr. Bingley's friend Mr. Darcy + this evening. I found him quite disagreeable.</p> + + + + + + + + Elizabeth Bennet + liz@gmail.com + + +""" + +BLOG_COMMENTS_FEED = """ + tag:blogger.com,1999:blog-blogID.postpostID..comments + 2007-04-04T21:56:29.803-07:00 + My Blog : Time to relax + + + + + Blog Author name + + Blogger + 1 + 1 + + tag:blogger.com,1999:blog-blogID.post-commentID + 2007-04-04T21:56:00.000-07:00 + 2007-04-04T21:56:29.803-07:00 + This is my first comment + This is my first comment + + + + + Blog Author name + + + +""" + + +SITES_FEED = """ + https://www.google.com/webmasters/tools/feeds/sites + Sites + 1 + + + + + 2008-10-02T07:26:51.833Z + + http://www.example.com + http://www.example.com + + + + 2007-11-17T18:27:32.543Z + + + + true + 2008-09-14T08:59:28.000 + US + none + normal + true + false + + + 456456-google.html + +""" + + +SITEMAPS_FEED = """ + http://www.example.com + http://www.example.com/ + 2006-11-17T18:27:32.543Z + + + + HTML + WAP + + + Value1 + Value2 + Value3 + + + http://www.example.com/sitemap-index.xml + http://www.example.com/sitemap-index.xml + + 2006-11-17T18:27:32.543Z + WEB + StatusValue + 2006-11-18T19:27:32.543Z + 102 + + + http://www.example.com/mobile/sitemap-index.xml + http://www.example.com/mobile/sitemap-index.xml + + 2006-11-17T18:27:32.543Z + StatusValue + 2006-11-18T19:27:32.543Z + 102 + HTML + + + http://www.example.com/news/sitemap-index.xml + http://www.example.com/news/sitemap-index.xml + + 2006-11-17T18:27:32.543Z + StatusValue + 2006-11-18T19:27:32.543Z + 102 + LabelValue + +""" diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/BaseDB.py 
b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/BaseDB.py new file mode 100644 index 0000000..ca8dff6 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/BaseDB.py @@ -0,0 +1,120 @@ +"""Base class for SharedKeyDB and VerifierDB.""" + +import anydbm +import thread + +class BaseDB: + def __init__(self, filename, type): + self.type = type + self.filename = filename + if self.filename: + self.db = None + else: + self.db = {} + self.lock = thread.allocate_lock() + + def create(self): + """Create a new on-disk database. + + @raise anydbm.error: If there's a problem creating the database. + """ + if self.filename: + self.db = anydbm.open(self.filename, "n") #raises anydbm.error + self.db["--Reserved--type"] = self.type + self.db.sync() + else: + self.db = {} + + def open(self): + """Open a pre-existing on-disk database. + + @raise anydbm.error: If there's a problem opening the database. + @raise ValueError: If the database is not of the right type. + """ + if not self.filename: + raise ValueError("Can only open on-disk databases") + self.db = anydbm.open(self.filename, "w") #raises anydbm.error + try: + if self.db["--Reserved--type"] != self.type: + raise ValueError("Not a %s database" % self.type) + except KeyError: + raise ValueError("Not a recognized database") + + def __getitem__(self, username): + if self.db == None: + raise AssertionError("DB not open") + + self.lock.acquire() + try: + valueStr = self.db[username] + finally: + self.lock.release() + + return self._getItem(username, valueStr) + + def __setitem__(self, username, value): + if self.db == None: + raise AssertionError("DB not open") + + valueStr = self._setItem(username, value) + + self.lock.acquire() + try: + self.db[username] = valueStr + if self.filename: + self.db.sync() + finally: + self.lock.release() + + def __delitem__(self, username): + if self.db == None: + raise AssertionError("DB not open") + + self.lock.acquire() + try: + del(self.db[username]) + if self.filename: 
+ self.db.sync() + finally: + self.lock.release() + + def __contains__(self, username): + """Check if the database contains the specified username. + + @type username: str + @param username: The username to check for. + + @rtype: bool + @return: True if the database contains the username, False + otherwise. + + """ + if self.db == None: + raise AssertionError("DB not open") + + self.lock.acquire() + try: + return self.db.has_key(username) + finally: + self.lock.release() + + def check(self, username, param): + value = self.__getitem__(username) + return self._checkItem(value, username, param) + + def keys(self): + """Return a list of usernames in the database. + + @rtype: list + @return: The usernames in the database. + """ + if self.db == None: + raise AssertionError("DB not open") + + self.lock.acquire() + try: + usernames = self.db.keys() + finally: + self.lock.release() + usernames = [u for u in usernames if not u.startswith("--Reserved--")] + return usernames \ No newline at end of file diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/Checker.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/Checker.py new file mode 100644 index 0000000..f978697 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/Checker.py @@ -0,0 +1,146 @@ +"""Class for post-handshake certificate checking.""" + +from utils.cryptomath import hashAndBase64 +from X509 import X509 +from X509CertChain import X509CertChain +from errors import * + + +class Checker: + """This class is passed to a handshake function to check the other + party's certificate chain. + + If a handshake function completes successfully, but the Checker + judges the other party's certificate chain to be missing or + inadequate, a subclass of + L{tlslite.errors.TLSAuthenticationError} will be raised. + + Currently, the Checker can check either an X.509 or a cryptoID + chain (for the latter, cryptoIDlib must be installed). 
+ """ + + def __init__(self, cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + checkResumedSession=False): + """Create a new Checker instance. + + You must pass in one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + @type cryptoID: str + @param cryptoID: A cryptoID which the other party's certificate + chain must match. The cryptoIDlib module must be installed. + Mutually exclusive with all of the 'x509...' arguments. + + @type protocol: str + @param protocol: A cryptoID protocol URI which the other + party's certificate chain must match. Requires the 'cryptoID' + argument. + + @type x509Fingerprint: str + @param x509Fingerprint: A hex-encoded X.509 end-entity + fingerprint which the other party's end-entity certificate must + match. Mutually exclusive with the 'cryptoID' and + 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed. Mutually exclusive with the 'cryptoID' and + 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type checkResumedSession: bool + @param checkResumedSession: If resumed sessions should be + checked. This defaults to False, on the theory that if the + session was checked once, we don't need to bother + re-checking it. 
+ """ + + if cryptoID and (x509Fingerprint or x509TrustList): + raise ValueError() + if x509Fingerprint and x509TrustList: + raise ValueError() + if x509CommonName and not x509TrustList: + raise ValueError() + if protocol and not cryptoID: + raise ValueError() + if cryptoID: + import cryptoIDlib #So we raise an error here + if x509TrustList: + import cryptlib_py #So we raise an error here + self.cryptoID = cryptoID + self.protocol = protocol + self.x509Fingerprint = x509Fingerprint + self.x509TrustList = x509TrustList + self.x509CommonName = x509CommonName + self.checkResumedSession = checkResumedSession + + def __call__(self, connection): + """Check a TLSConnection. + + When a Checker is passed to a handshake function, this will + be called at the end of the function. + + @type connection: L{tlslite.TLSConnection.TLSConnection} + @param connection: The TLSConnection to examine. + + @raise tlslite.errors.TLSAuthenticationError: If the other + party's certificate chain is missing or bad. 
+ """ + if not self.checkResumedSession and connection.resumed: + return + + if self.cryptoID or self.x509Fingerprint or self.x509TrustList: + if connection._client: + chain = connection.session.serverCertChain + else: + chain = connection.session.clientCertChain + + if self.x509Fingerprint or self.x509TrustList: + if isinstance(chain, X509CertChain): + if self.x509Fingerprint: + if chain.getFingerprint() != self.x509Fingerprint: + raise TLSFingerprintError(\ + "X.509 fingerprint mismatch: %s, %s" % \ + (chain.getFingerprint(), self.x509Fingerprint)) + else: #self.x509TrustList + if not chain.validate(self.x509TrustList): + raise TLSValidationError("X.509 validation failure") + if self.x509CommonName and \ + (chain.getCommonName() != self.x509CommonName): + raise TLSAuthorizationError(\ + "X.509 Common Name mismatch: %s, %s" % \ + (chain.getCommonName(), self.x509CommonName)) + elif chain: + raise TLSAuthenticationTypeError() + else: + raise TLSNoAuthenticationError() + elif self.cryptoID: + import cryptoIDlib.CertChain + if isinstance(chain, cryptoIDlib.CertChain.CertChain): + if chain.cryptoID != self.cryptoID: + raise TLSFingerprintError(\ + "cryptoID mismatch: %s, %s" % \ + (chain.cryptoID, self.cryptoID)) + if self.protocol: + if not chain.checkProtocol(self.protocol): + raise TLSAuthorizationError(\ + "cryptoID protocol mismatch") + if not chain.validate(): + raise TLSValidationError("cryptoID validation failure") + elif chain: + raise TLSAuthenticationTypeError() + else: + raise TLSNoAuthenticationError() + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/FileObject.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/FileObject.py new file mode 100644 index 0000000..6ee02b2 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/FileObject.py @@ -0,0 +1,220 @@ +"""Class returned by TLSConnection.makefile().""" + +class FileObject: + """This class provides a file object interface to a + 
L{tlslite.TLSConnection.TLSConnection}. + + Call makefile() on a TLSConnection to create a FileObject instance. + + This class was copied, with minor modifications, from the + _fileobject class in socket.py. Note that fileno() is not + implemented.""" + + default_bufsize = 16384 #TREV: changed from 8192 + + def __init__(self, sock, mode='rb', bufsize=-1): + self._sock = sock + self.mode = mode # Not actually used in this version + if bufsize < 0: + bufsize = self.default_bufsize + self.bufsize = bufsize + self.softspace = False + if bufsize == 0: + self._rbufsize = 1 + elif bufsize == 1: + self._rbufsize = self.default_bufsize + else: + self._rbufsize = bufsize + self._wbufsize = bufsize + self._rbuf = "" # A string + self._wbuf = [] # A list of strings + + def _getclosed(self): + return self._sock is not None + closed = property(_getclosed, doc="True if the file is closed") + + def close(self): + try: + if self._sock: + for result in self._sock._decrefAsync(): #TREV + pass + finally: + self._sock = None + + def __del__(self): + try: + self.close() + except: + # close() may fail if __init__ didn't complete + pass + + def flush(self): + if self._wbuf: + buffer = "".join(self._wbuf) + self._wbuf = [] + self._sock.sendall(buffer) + + #def fileno(self): + # raise NotImplementedError() #TREV + + def write(self, data): + data = str(data) # XXX Should really reject non-string non-buffers + if not data: + return + self._wbuf.append(data) + if (self._wbufsize == 0 or + self._wbufsize == 1 and '\n' in data or + self._get_wbuf_len() >= self._wbufsize): + self.flush() + + def writelines(self, list): + # XXX We could do better here for very long lists + # XXX Should really reject non-string non-buffers + self._wbuf.extend(filter(None, map(str, list))) + if (self._wbufsize <= 1 or + self._get_wbuf_len() >= self._wbufsize): + self.flush() + + def _get_wbuf_len(self): + buf_len = 0 + for x in self._wbuf: + buf_len += len(x) + return buf_len + + def read(self, size=-1): + data = 
self._rbuf + if size < 0: + # Read until EOF + buffers = [] + if data: + buffers.append(data) + self._rbuf = "" + if self._rbufsize <= 1: + recv_size = self.default_bufsize + else: + recv_size = self._rbufsize + while True: + data = self._sock.recv(recv_size) + if not data: + break + buffers.append(data) + return "".join(buffers) + else: + # Read until size bytes or EOF seen, whichever comes first + buf_len = len(data) + if buf_len >= size: + self._rbuf = data[size:] + return data[:size] + buffers = [] + if data: + buffers.append(data) + self._rbuf = "" + while True: + left = size - buf_len + recv_size = max(self._rbufsize, left) + data = self._sock.recv(recv_size) + if not data: + break + buffers.append(data) + n = len(data) + if n >= left: + self._rbuf = data[left:] + buffers[-1] = data[:left] + break + buf_len += n + return "".join(buffers) + + def readline(self, size=-1): + data = self._rbuf + if size < 0: + # Read until \n or EOF, whichever comes first + if self._rbufsize <= 1: + # Speed up unbuffered case + assert data == "" + buffers = [] + recv = self._sock.recv + while data != "\n": + data = recv(1) + if not data: + break + buffers.append(data) + return "".join(buffers) + nl = data.find('\n') + if nl >= 0: + nl += 1 + self._rbuf = data[nl:] + return data[:nl] + buffers = [] + if data: + buffers.append(data) + self._rbuf = "" + while True: + data = self._sock.recv(self._rbufsize) + if not data: + break + buffers.append(data) + nl = data.find('\n') + if nl >= 0: + nl += 1 + self._rbuf = data[nl:] + buffers[-1] = data[:nl] + break + return "".join(buffers) + else: + # Read until size bytes or \n or EOF seen, whichever comes first + nl = data.find('\n', 0, size) + if nl >= 0: + nl += 1 + self._rbuf = data[nl:] + return data[:nl] + buf_len = len(data) + if buf_len >= size: + self._rbuf = data[size:] + return data[:size] + buffers = [] + if data: + buffers.append(data) + self._rbuf = "" + while True: + data = self._sock.recv(self._rbufsize) + if not data: + 
break + buffers.append(data) + left = size - buf_len + nl = data.find('\n', 0, left) + if nl >= 0: + nl += 1 + self._rbuf = data[nl:] + buffers[-1] = data[:nl] + break + n = len(data) + if n >= left: + self._rbuf = data[left:] + buffers[-1] = data[:left] + break + buf_len += n + return "".join(buffers) + + def readlines(self, sizehint=0): + total = 0 + list = [] + while True: + line = self.readline() + if not line: + break + list.append(line) + total += len(line) + if sizehint and total >= sizehint: + break + return list + + # Iterator protocols + + def __iter__(self): + return self + + def next(self): + line = self.readline() + if not line: + raise StopIteration + return line diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/HandshakeSettings.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/HandshakeSettings.py new file mode 100644 index 0000000..c7c3223 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/HandshakeSettings.py @@ -0,0 +1,159 @@ +"""Class for setting handshake parameters.""" + +from constants import CertificateType +from utils import cryptomath +from utils import cipherfactory + +class HandshakeSettings: + """This class encapsulates various parameters that can be used with + a TLS handshake. + @sort: minKeySize, maxKeySize, cipherNames, certificateTypes, + minVersion, maxVersion + + @type minKeySize: int + @ivar minKeySize: The minimum bit length for asymmetric keys. + + If the other party tries to use SRP, RSA, or Diffie-Hellman + parameters smaller than this length, an alert will be + signalled. The default is 1023. + + @type maxKeySize: int + @ivar maxKeySize: The maximum bit length for asymmetric keys. + + If the other party tries to use SRP, RSA, or Diffie-Hellman + parameters larger than this length, an alert will be signalled. + The default is 8193. + + @type cipherNames: list + @ivar cipherNames: The allowed ciphers, in order of preference. 
+ + The allowed values in this list are 'aes256', 'aes128', '3des', and + 'rc4'. If these settings are used with a client handshake, they + determine the order of the ciphersuites offered in the ClientHello + message. + + If these settings are used with a server handshake, the server will + choose whichever ciphersuite matches the earliest entry in this + list. + + NOTE: If '3des' is used in this list, but TLS Lite can't find an + add-on library that supports 3DES, then '3des' will be silently + removed. + + The default value is ['aes256', 'aes128', '3des', 'rc4']. + + @type certificateTypes: list + @ivar certificateTypes: The allowed certificate types, in order of + preference. + + The allowed values in this list are 'x509' and 'cryptoID'. This + list is only used with a client handshake. The client will + advertise to the server which certificate types are supported, and + will check that the server uses one of the appropriate types. + + NOTE: If 'cryptoID' is used in this list, but cryptoIDlib is not + installed, then 'cryptoID' will be silently removed. + + @type minVersion: tuple + @ivar minVersion: The minimum allowed SSL/TLS version. + + This variable can be set to (3,0) for SSL 3.0, (3,1) for + TLS 1.0, or (3,2) for TLS 1.1. If the other party wishes to + use a lower version, a protocol_version alert will be signalled. + The default is (3,0). + + @type maxVersion: tuple + @ivar maxVersion: The maximum allowed SSL/TLS version. + + This variable can be set to (3,0) for SSL 3.0, (3,1) for + TLS 1.0, or (3,2) for TLS 1.1. If the other party wishes to + use a higher version, a protocol_version alert will be signalled. + The default is (3,2). (WARNING: Some servers may (improperly) + reject clients which offer support for TLS 1.1. In this case, + try lowering maxVersion to (3,1)). 
+ """ + def __init__(self): + self.minKeySize = 1023 + self.maxKeySize = 8193 + self.cipherNames = ["aes256", "aes128", "3des", "rc4"] + self.cipherImplementations = ["cryptlib", "openssl", "pycrypto", + "python"] + self.certificateTypes = ["x509", "cryptoID"] + self.minVersion = (3,0) + self.maxVersion = (3,2) + + #Filters out options that are not supported + def _filter(self): + other = HandshakeSettings() + other.minKeySize = self.minKeySize + other.maxKeySize = self.maxKeySize + other.cipherNames = self.cipherNames + other.cipherImplementations = self.cipherImplementations + other.certificateTypes = self.certificateTypes + other.minVersion = self.minVersion + other.maxVersion = self.maxVersion + + if not cipherfactory.tripleDESPresent: + other.cipherNames = [e for e in self.cipherNames if e != "3des"] + if len(other.cipherNames)==0: + raise ValueError("No supported ciphers") + + try: + import cryptoIDlib + except ImportError: + other.certificateTypes = [e for e in self.certificateTypes \ + if e != "cryptoID"] + if len(other.certificateTypes)==0: + raise ValueError("No supported certificate types") + + if not cryptomath.cryptlibpyLoaded: + other.cipherImplementations = [e for e in \ + self.cipherImplementations if e != "cryptlib"] + if not cryptomath.m2cryptoLoaded: + other.cipherImplementations = [e for e in \ + other.cipherImplementations if e != "openssl"] + if not cryptomath.pycryptoLoaded: + other.cipherImplementations = [e for e in \ + other.cipherImplementations if e != "pycrypto"] + if len(other.cipherImplementations)==0: + raise ValueError("No supported cipher implementations") + + if other.minKeySize<512: + raise ValueError("minKeySize too small") + if other.minKeySize>16384: + raise ValueError("minKeySize too large") + if other.maxKeySize<512: + raise ValueError("maxKeySize too small") + if other.maxKeySize>16384: + raise ValueError("maxKeySize too large") + for s in other.cipherNames: + if s not in ("aes256", "aes128", "rc4", "3des"): + raise 
ValueError("Unknown cipher name: '%s'" % s) + for s in other.cipherImplementations: + if s not in ("cryptlib", "openssl", "python", "pycrypto"): + raise ValueError("Unknown cipher implementation: '%s'" % s) + for s in other.certificateTypes: + if s not in ("x509", "cryptoID"): + raise ValueError("Unknown certificate type: '%s'" % s) + + if other.minVersion > other.maxVersion: + raise ValueError("Versions set incorrectly") + + if not other.minVersion in ((3,0), (3,1), (3,2)): + raise ValueError("minVersion set incorrectly") + + if not other.maxVersion in ((3,0), (3,1), (3,2)): + raise ValueError("maxVersion set incorrectly") + + return other + + def _getCertificateTypes(self): + l = [] + for ct in self.certificateTypes: + if ct == "x509": + l.append(CertificateType.x509) + elif ct == "cryptoID": + l.append(CertificateType.cryptoID) + else: + raise AssertionError() + return l diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/Session.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/Session.py new file mode 100644 index 0000000..a951f45 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/Session.py @@ -0,0 +1,131 @@ +"""Class representing a TLS session.""" + +from utils.compat import * +from mathtls import * +from constants import * + +class Session: + """ + This class represents a TLS session. + + TLS distinguishes between connections and sessions. A new + handshake creates both a connection and a session. Data is + transmitted over the connection. + + The session contains a more permanent record of the handshake. The + session can be inspected to determine handshake results. The + session can also be used to create a new connection through + "session resumption". If the client and server both support this, + they can create a new connection based on an old session without + the overhead of a full handshake. 
+ + The session for a L{tlslite.TLSConnection.TLSConnection} can be + retrieved from the connection's 'session' attribute. + + @type srpUsername: str + @ivar srpUsername: The client's SRP username (or None). + + @type sharedKeyUsername: str + @ivar sharedKeyUsername: The client's shared-key username (or + None). + + @type clientCertChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @ivar clientCertChain: The client's certificate chain (or None). + + @type serverCertChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @ivar serverCertChain: The server's certificate chain (or None). + """ + + def __init__(self): + self.masterSecret = createByteArraySequence([]) + self.sessionID = createByteArraySequence([]) + self.cipherSuite = 0 + self.srpUsername = None + self.sharedKeyUsername = None + self.clientCertChain = None + self.serverCertChain = None + self.resumable = False + self.sharedKey = False + + def _clone(self): + other = Session() + other.masterSecret = self.masterSecret + other.sessionID = self.sessionID + other.cipherSuite = self.cipherSuite + other.srpUsername = self.srpUsername + other.sharedKeyUsername = self.sharedKeyUsername + other.clientCertChain = self.clientCertChain + other.serverCertChain = self.serverCertChain + other.resumable = self.resumable + other.sharedKey = self.sharedKey + return other + + def _calcMasterSecret(self, version, premasterSecret, clientRandom, + serverRandom): + if version == (3,0): + self.masterSecret = PRF_SSL(premasterSecret, + concatArrays(clientRandom, serverRandom), 48) + elif version in ((3,1), (3,2)): + self.masterSecret = PRF(premasterSecret, "master secret", + concatArrays(clientRandom, serverRandom), 48) + else: + raise AssertionError() + + def valid(self): + """If this session can be used for session resumption. + + @rtype: bool + @return: If this session can be used for session resumption. 
+ """ + return self.resumable or self.sharedKey + + def _setResumable(self, boolean): + #Only let it be set if this isn't a shared key + if not self.sharedKey: + #Only let it be set to True if the sessionID is non-null + if (not boolean) or (boolean and self.sessionID): + self.resumable = boolean + + def getCipherName(self): + """Get the name of the cipher used with this connection. + + @rtype: str + @return: The name of the cipher used with this connection. + Either 'aes128', 'aes256', 'rc4', or '3des'. + """ + if self.cipherSuite in CipherSuite.aes128Suites: + return "aes128" + elif self.cipherSuite in CipherSuite.aes256Suites: + return "aes256" + elif self.cipherSuite in CipherSuite.rc4Suites: + return "rc4" + elif self.cipherSuite in CipherSuite.tripleDESSuites: + return "3des" + else: + return None + + def _createSharedKey(self, sharedKeyUsername, sharedKey): + if len(sharedKeyUsername)>16: + raise ValueError() + if len(sharedKey)>47: + raise ValueError() + + self.sharedKeyUsername = sharedKeyUsername + + self.sessionID = createByteArrayZeros(16) + for x in range(len(sharedKeyUsername)): + self.sessionID[x] = ord(sharedKeyUsername[x]) + + premasterSecret = createByteArrayZeros(48) + sharedKey = chr(len(sharedKey)) + sharedKey + for x in range(48): + premasterSecret[x] = ord(sharedKey[x % len(sharedKey)]) + + self.masterSecret = PRF(premasterSecret, "shared secret", + createByteArraySequence([]), 48) + self.sharedKey = True + return self + + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/SessionCache.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/SessionCache.py new file mode 100644 index 0000000..34cf0b0 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/SessionCache.py @@ -0,0 +1,103 @@ +"""Class for caching TLS sessions.""" + +import thread +import time + +class SessionCache: + """This class is used by the server to cache TLS sessions. 
+ + Caching sessions allows the client to use TLS session resumption + and avoid the expense of a full handshake. To use this class, + simply pass a SessionCache instance into the server handshake + function. + + This class is thread-safe. + """ + + #References to these instances + #are also held by the caller, who may change the 'resumable' + #flag, so the SessionCache must return the same instances + #it was passed in. + + def __init__(self, maxEntries=10000, maxAge=14400): + """Create a new SessionCache. + + @type maxEntries: int + @param maxEntries: The maximum size of the cache. When this + limit is reached, the oldest sessions will be deleted as + necessary to make room for new ones. The default is 10000. + + @type maxAge: int + @param maxAge: The number of seconds before a session expires + from the cache. The default is 14400 (i.e. 4 hours).""" + + self.lock = thread.allocate_lock() + + # Maps sessionIDs to sessions + self.entriesDict = {} + + #Circular list of (sessionID, timestamp) pairs + self.entriesList = [(None,None)] * maxEntries + + self.firstIndex = 0 + self.lastIndex = 0 + self.maxAge = maxAge + + def __getitem__(self, sessionID): + self.lock.acquire() + try: + self._purge() #Delete old items, so we're assured of a new one + session = self.entriesDict[sessionID] + + #When we add sessions they're resumable, but it's possible + #for the session to be invalidated later on (if a fatal alert + #is returned), so we have to check for resumability before + #returning the session. 
+ + if session.valid(): + return session + else: + raise KeyError() + finally: + self.lock.release() + + + def __setitem__(self, sessionID, session): + self.lock.acquire() + try: + #Add the new element + self.entriesDict[sessionID] = session + self.entriesList[self.lastIndex] = (sessionID, time.time()) + self.lastIndex = (self.lastIndex+1) % len(self.entriesList) + + #If the cache is full, we delete the oldest element to make an + #empty space + if self.lastIndex == self.firstIndex: + del(self.entriesDict[self.entriesList[self.firstIndex][0]]) + self.firstIndex = (self.firstIndex+1) % len(self.entriesList) + finally: + self.lock.release() + + #Delete expired items + def _purge(self): + currentTime = time.time() + + #Search through the circular list, deleting expired elements until + #we reach a non-expired element. Since elements in list are + #ordered in time, we can break once we reach the first non-expired + #element + index = self.firstIndex + while index != self.lastIndex: + if currentTime - self.entriesList[index][1] > self.maxAge: + del(self.entriesDict[self.entriesList[index][0]]) + index = (index+1) % len(self.entriesList) + else: + break + self.firstIndex = index + +def _test(): + import doctest, SessionCache + return doctest.testmod(SessionCache) + +if __name__ == "__main__": + _test() diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/SharedKeyDB.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/SharedKeyDB.py new file mode 100644 index 0000000..3246ec7 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/SharedKeyDB.py @@ -0,0 +1,58 @@ +"""Class for storing shared keys.""" + +from utils.cryptomath import * +from utils.compat import * +from mathtls import * +from Session import Session +from BaseDB import BaseDB + +class SharedKeyDB(BaseDB): + """This class represent an in-memory or on-disk database of shared + keys. 
+ + A SharedKeyDB can be passed to a server handshake function to + authenticate a client based on one of the shared keys. + + This class is thread-safe. + """ + + def __init__(self, filename=None): + """Create a new SharedKeyDB. + + @type filename: str + @param filename: Filename for an on-disk database, or None for + an in-memory database. If the filename already exists, follow + this with a call to open(). To create a new on-disk database, + follow this with a call to create(). + """ + BaseDB.__init__(self, filename, "shared key") + + def _getItem(self, username, valueStr): + session = Session() + session._createSharedKey(username, valueStr) + return session + + def __setitem__(self, username, sharedKey): + """Add a shared key to the database. + + @type username: str + @param username: The username to associate the shared key with. + Must be less than or equal to 16 characters in length, and must + not already be in the database. + + @type sharedKey: str + @param sharedKey: The shared key to add. Must be less than 48 + characters in length. + """ + BaseDB.__setitem__(self, username, sharedKey) + + def _setItem(self, username, value): + if len(username)>16: + raise ValueError("username too long") + if len(value)>=48: + raise ValueError("shared key too long") + return value + + def _checkItem(self, value, username, param): + newSession = self._getItem(username, param) + return value.masterSecret == newSession.masterSecret \ No newline at end of file diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/TLSConnection.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/TLSConnection.py new file mode 100644 index 0000000..d125f8f --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/TLSConnection.py @@ -0,0 +1,1600 @@ +""" +MAIN CLASS FOR TLS LITE (START HERE!). 
+""" +from __future__ import generators + +import socket +from utils.compat import formatExceptionTrace +from TLSRecordLayer import TLSRecordLayer +from Session import Session +from constants import * +from utils.cryptomath import getRandomBytes +from errors import * +from messages import * +from mathtls import * +from HandshakeSettings import HandshakeSettings + + +class TLSConnection(TLSRecordLayer): + """ + This class wraps a socket and provides TLS handshaking and data + transfer. + + To use this class, create a new instance, passing a connected + socket into the constructor. Then call some handshake function. + If the handshake completes without raising an exception, then a TLS + connection has been negotiated. You can transfer data over this + connection as if it were a socket. + + This class provides both synchronous and asynchronous versions of + its key functions. The synchronous versions should be used when + writing single-or multi-threaded code using blocking sockets. The + asynchronous versions should be used when performing asynchronous, + event-based I/O with non-blocking sockets. + + Asynchronous I/O is a complicated subject; typically, you should + not use the asynchronous functions directly, but should use some + framework like asyncore or Twisted which TLS Lite integrates with + (see + L{tlslite.integration.TLSAsyncDispatcherMixIn.TLSAsyncDispatcherMixIn} or + L{tlslite.integration.TLSTwistedProtocolWrapper.TLSTwistedProtocolWrapper}). + """ + + + def __init__(self, sock): + """Create a new TLSConnection instance. + + @param sock: The socket data will be transmitted on. The + socket should already be connected. It may be in blocking or + non-blocking mode. + + @type sock: L{socket.socket} + """ + TLSRecordLayer.__init__(self, sock) + + def handshakeClientSRP(self, username, password, session=None, + settings=None, checker=None, async=False): + """Perform an SRP handshake in the role of client. + + This function performs a TLS/SRP handshake. 
SRP mutually + authenticates both parties to each other using only a + username and password. This function may also perform a + combined SRP and server-certificate handshake, if the server + chooses to authenticate itself with a certificate chain in + addition to doing SRP. + + TLS/SRP is non-standard. Most TLS implementations don't + support it. See + U{http://www.ietf.org/html.charters/tls-charter.html} or + U{http://trevp.net/tlssrp/} for the latest information on + TLS/SRP. + + Like any handshake function, this can be called on a closed + TLS connection, or on a TLS connection that is already open. + If called on an open connection it performs a re-handshake. + + If the function completes without raising an exception, the + TLS connection will be open and available for data transfer. + + If an exception is raised, the connection will have been + automatically closed (if it was ever open). + + @type username: str + @param username: The SRP username. + + @type password: str + @param password: The SRP password. + + @type session: L{tlslite.Session.Session} + @param session: A TLS session to attempt to resume. This + session must be an SRP session performed with the same username + and password as were passed in. If the resumption does not + succeed, a full SRP handshake will be performed. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + + @type checker: L{tlslite.Checker.Checker} + @param checker: A Checker instance. This instance will be + invoked to examine the other party's authentication + credentials, if the handshake completes succesfully. + + @type async: bool + @param async: If False, this function will block until the + handshake is completed. If True, this function will return a + generator. 
Successive invocations of the generator will + return 0 if it is waiting to read from the socket, 1 if it is + waiting to write to the socket, or will raise StopIteration if + the handshake operation is completed. + + @rtype: None or an iterable + @return: If 'async' is True, a generator object will be + returned. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + @raise tlslite.errors.TLSAuthenticationError: If the checker + doesn't like the other party's authentication credentials. + """ + handshaker = self._handshakeClientAsync(srpParams=(username, password), + session=session, settings=settings, checker=checker) + if async: + return handshaker + for result in handshaker: + pass + + def handshakeClientCert(self, certChain=None, privateKey=None, + session=None, settings=None, checker=None, + async=False): + """Perform a certificate-based handshake in the role of client. + + This function performs an SSL or TLS handshake. The server + will authenticate itself using an X.509 or cryptoID certificate + chain. If the handshake succeeds, the server's certificate + chain will be stored in the session's serverCertChain attribute. + Unless a checker object is passed in, this function does no + validation or checking of the server's certificate chain. + + If the server requests client authentication, the + client will send the passed-in certificate chain, and use the + passed-in private key to authenticate itself. If no + certificate chain and private key were passed in, the client + will attempt to proceed without client authentication. The + server may or may not allow this. + + Like any handshake function, this can be called on a closed + TLS connection, or on a TLS connection that is already open. + If called on an open connection it performs a re-handshake. 
+ + If the function completes without raising an exception, the + TLS connection will be open and available for data transfer. + + If an exception is raised, the connection will have been + automatically closed (if it was ever open). + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: The certificate chain to be used if the + server requests client authentication. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: The private key to be used if the server + requests client authentication. + + @type session: L{tlslite.Session.Session} + @param session: A TLS session to attempt to resume. If the + resumption does not succeed, a full handshake will be + performed. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + + @type checker: L{tlslite.Checker.Checker} + @param checker: A Checker instance. This instance will be + invoked to examine the other party's authentication + credentials, if the handshake completes succesfully. + + @type async: bool + @param async: If False, this function will block until the + handshake is completed. If True, this function will return a + generator. Successive invocations of the generator will + return 0 if it is waiting to read from the socket, 1 if it is + waiting to write to the socket, or will raise StopIteration if + the handshake operation is completed. + + @rtype: None or an iterable + @return: If 'async' is True, a generator object will be + returned. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. 
+ @raise tlslite.errors.TLSAuthenticationError: If the checker + doesn't like the other party's authentication credentials. + """ + handshaker = self._handshakeClientAsync(certParams=(certChain, + privateKey), session=session, settings=settings, + checker=checker) + if async: + return handshaker + for result in handshaker: + pass + + def handshakeClientUnknown(self, srpCallback=None, certCallback=None, + session=None, settings=None, checker=None, + async=False): + """Perform a to-be-determined type of handshake in the role of client. + + This function performs an SSL or TLS handshake. If the server + requests client certificate authentication, the + certCallback will be invoked and should return a (certChain, + privateKey) pair. If the callback returns None, the library + will attempt to proceed without client authentication. The + server may or may not allow this. + + If the server requests SRP authentication, the srpCallback + will be invoked and should return a (username, password) pair. + If the callback returns None, the local implementation will + signal a user_canceled error alert. + + After the handshake completes, the client can inspect the + connection's session attribute to determine what type of + authentication was performed. + + Like any handshake function, this can be called on a closed + TLS connection, or on a TLS connection that is already open. + If called on an open connection it performs a re-handshake. + + If the function completes without raising an exception, the + TLS connection will be open and available for data transfer. + + If an exception is raised, the connection will have been + automatically closed (if it was ever open). + + @type srpCallback: callable + @param srpCallback: The callback to be used if the server + requests SRP authentication. If None, the client will not + offer support for SRP ciphersuites. 
+ + @type certCallback: callable + @param certCallback: The callback to be used if the server + requests client certificate authentication. + + @type session: L{tlslite.Session.Session} + @param session: A TLS session to attempt to resume. If the + resumption does not succeed, a full handshake will be + performed. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + + @type checker: L{tlslite.Checker.Checker} + @param checker: A Checker instance. This instance will be + invoked to examine the other party's authentication + credentials, if the handshake completes succesfully. + + @type async: bool + @param async: If False, this function will block until the + handshake is completed. If True, this function will return a + generator. Successive invocations of the generator will + return 0 if it is waiting to read from the socket, 1 if it is + waiting to write to the socket, or will raise StopIteration if + the handshake operation is completed. + + @rtype: None or an iterable + @return: If 'async' is True, a generator object will be + returned. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + @raise tlslite.errors.TLSAuthenticationError: If the checker + doesn't like the other party's authentication credentials. + """ + handshaker = self._handshakeClientAsync(unknownParams=(srpCallback, + certCallback), session=session, settings=settings, + checker=checker) + if async: + return handshaker + for result in handshaker: + pass + + def handshakeClientSharedKey(self, username, sharedKey, settings=None, + checker=None, async=False): + """Perform a shared-key handshake in the role of client. + + This function performs a shared-key handshake. 
Using shared + symmetric keys of high entropy (128 bits or greater) mutually + authenticates both parties to each other. + + TLS with shared-keys is non-standard. Most TLS + implementations don't support it. See + U{http://www.ietf.org/html.charters/tls-charter.html} for the + latest information on TLS with shared-keys. If the shared-keys + Internet-Draft changes or is superceded, TLS Lite will track + those changes, so the shared-key support in later versions of + TLS Lite may become incompatible with this version. + + Like any handshake function, this can be called on a closed + TLS connection, or on a TLS connection that is already open. + If called on an open connection it performs a re-handshake. + + If the function completes without raising an exception, the + TLS connection will be open and available for data transfer. + + If an exception is raised, the connection will have been + automatically closed (if it was ever open). + + @type username: str + @param username: The shared-key username. + + @type sharedKey: str + @param sharedKey: The shared key. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + + @type checker: L{tlslite.Checker.Checker} + @param checker: A Checker instance. This instance will be + invoked to examine the other party's authentication + credentials, if the handshake completes succesfully. + + @type async: bool + @param async: If False, this function will block until the + handshake is completed. If True, this function will return a + generator. Successive invocations of the generator will + return 0 if it is waiting to read from the socket, 1 if it is + waiting to write to the socket, or will raise StopIteration if + the handshake operation is completed. + + @rtype: None or an iterable + @return: If 'async' is True, a generator object will be + returned. 
+ + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + @raise tlslite.errors.TLSAuthenticationError: If the checker + doesn't like the other party's authentication credentials. + """ + handshaker = self._handshakeClientAsync(sharedKeyParams=(username, + sharedKey), settings=settings, checker=checker) + if async: + return handshaker + for result in handshaker: + pass + + def _handshakeClientAsync(self, srpParams=(), certParams=(), + unknownParams=(), sharedKeyParams=(), + session=None, settings=None, checker=None, + recursive=False): + + handshaker = self._handshakeClientAsyncHelper(srpParams=srpParams, + certParams=certParams, unknownParams=unknownParams, + sharedKeyParams=sharedKeyParams, session=session, + settings=settings, recursive=recursive) + for result in self._handshakeWrapperAsync(handshaker, checker): + yield result + + + def _handshakeClientAsyncHelper(self, srpParams, certParams, unknownParams, + sharedKeyParams, session, settings, recursive): + if not recursive: + self._handshakeStart(client=True) + + #Unpack parameters + srpUsername = None # srpParams + password = None # srpParams + clientCertChain = None # certParams + privateKey = None # certParams + srpCallback = None # unknownParams + certCallback = None # unknownParams + #session # sharedKeyParams (or session) + #settings # settings + + if srpParams: + srpUsername, password = srpParams + elif certParams: + clientCertChain, privateKey = certParams + elif unknownParams: + srpCallback, certCallback = unknownParams + elif sharedKeyParams: + session = Session()._createSharedKey(*sharedKeyParams) + + if not settings: + settings = HandshakeSettings() + settings = settings._filter() + + #Validate parameters + if srpUsername and not password: + raise ValueError("Caller passed a username but no password") + if password and not srpUsername: + raise 
ValueError("Caller passed a password but no username") + + if clientCertChain and not privateKey: + raise ValueError("Caller passed a certChain but no privateKey") + if privateKey and not clientCertChain: + raise ValueError("Caller passed a privateKey but no certChain") + + if clientCertChain: + foundType = False + try: + import cryptoIDlib.CertChain + if isinstance(clientCertChain, cryptoIDlib.CertChain.CertChain): + if "cryptoID" not in settings.certificateTypes: + raise ValueError("Client certificate doesn't "\ + "match Handshake Settings") + settings.certificateTypes = ["cryptoID"] + foundType = True + except ImportError: + pass + if not foundType and isinstance(clientCertChain, + X509CertChain): + if "x509" not in settings.certificateTypes: + raise ValueError("Client certificate doesn't match "\ + "Handshake Settings") + settings.certificateTypes = ["x509"] + foundType = True + if not foundType: + raise ValueError("Unrecognized certificate type") + + + if session: + if not session.valid(): + session = None #ignore non-resumable sessions... 
+ elif session.resumable and \ + (session.srpUsername != srpUsername): + raise ValueError("Session username doesn't match") + + #Add Faults to parameters + if srpUsername and self.fault == Fault.badUsername: + srpUsername += "GARBAGE" + if password and self.fault == Fault.badPassword: + password += "GARBAGE" + if sharedKeyParams: + identifier = sharedKeyParams[0] + sharedKey = sharedKeyParams[1] + if self.fault == Fault.badIdentifier: + identifier += "GARBAGE" + session = Session()._createSharedKey(identifier, sharedKey) + elif self.fault == Fault.badSharedKey: + sharedKey += "GARBAGE" + session = Session()._createSharedKey(identifier, sharedKey) + + + #Initialize locals + serverCertChain = None + cipherSuite = 0 + certificateType = CertificateType.x509 + premasterSecret = None + + #Get client nonce + clientRandom = getRandomBytes(32) + + #Initialize acceptable ciphersuites + cipherSuites = [] + if srpParams: + cipherSuites += CipherSuite.getSrpRsaSuites(settings.cipherNames) + cipherSuites += CipherSuite.getSrpSuites(settings.cipherNames) + elif certParams: + cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) + elif unknownParams: + if srpCallback: + cipherSuites += \ + CipherSuite.getSrpRsaSuites(settings.cipherNames) + cipherSuites += \ + CipherSuite.getSrpSuites(settings.cipherNames) + cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) + elif sharedKeyParams: + cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) + else: + cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) + + #Initialize acceptable certificate types + certificateTypes = settings._getCertificateTypes() + + #Tentatively set the version to the client's minimum version. + #We'll use this for the ClientHello, and if an error occurs + #parsing the Server Hello, we'll use this version for the response + self.version = settings.maxVersion + + #Either send ClientHello (with a resumable session)... + if session: + #If it's a resumable (i.e. 
not a shared-key session), then its + #ciphersuite must be one of the acceptable ciphersuites + if (not sharedKeyParams) and \ + session.cipherSuite not in cipherSuites: + raise ValueError("Session's cipher suite not consistent "\ + "with parameters") + else: + clientHello = ClientHello() + clientHello.create(settings.maxVersion, clientRandom, + session.sessionID, cipherSuites, + certificateTypes, session.srpUsername) + + #Or send ClientHello (without) + else: + clientHello = ClientHello() + clientHello.create(settings.maxVersion, clientRandom, + createByteArraySequence([]), cipherSuites, + certificateTypes, srpUsername) + for result in self._sendMsg(clientHello): + yield result + + #Get ServerHello (or missing_srp_username) + for result in self._getMsg((ContentType.handshake, + ContentType.alert), + HandshakeType.server_hello): + if result in (0,1): + yield result + else: + break + msg = result + + if isinstance(msg, ServerHello): + serverHello = msg + elif isinstance(msg, Alert): + alert = msg + + #If it's not a missing_srp_username, re-raise + if alert.description != AlertDescription.missing_srp_username: + self._shutdown(False) + raise TLSRemoteAlert(alert) + + #If we're not in SRP callback mode, we won't have offered SRP + #without a username, so we shouldn't get this alert + if not srpCallback: + for result in self._sendError(\ + AlertDescription.unexpected_message): + yield result + srpParams = srpCallback() + #If the callback returns None, cancel the handshake + if srpParams == None: + for result in self._sendError(AlertDescription.user_canceled): + yield result + + #Recursively perform handshake + for result in self._handshakeClientAsyncHelper(srpParams, + None, None, None, None, settings, True): + yield result + return + + #Get the server version. 
Do this before anything else, so any + #error alerts will use the server's version + self.version = serverHello.server_version + + #Future responses from server must use this version + self._versionCheck = True + + #Check ServerHello + if serverHello.server_version < settings.minVersion: + for result in self._sendError(\ + AlertDescription.protocol_version, + "Too old version: %s" % str(serverHello.server_version)): + yield result + if serverHello.server_version > settings.maxVersion: + for result in self._sendError(\ + AlertDescription.protocol_version, + "Too new version: %s" % str(serverHello.server_version)): + yield result + if serverHello.cipher_suite not in cipherSuites: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Server responded with incorrect ciphersuite"): + yield result + if serverHello.certificate_type not in certificateTypes: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Server responded with incorrect certificate type"): + yield result + if serverHello.compression_method != 0: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Server responded with incorrect compression method"): + yield result + + #Get the server nonce + serverRandom = serverHello.random + + #If the server agrees to resume + if session and session.sessionID and \ + serverHello.session_id == session.sessionID: + + #If a shared-key, we're flexible about suites; otherwise the + #server-chosen suite has to match the session's suite + if sharedKeyParams: + session.cipherSuite = serverHello.cipher_suite + elif serverHello.cipher_suite != session.cipherSuite: + for result in self._sendError(\ + AlertDescription.illegal_parameter,\ + "Server's ciphersuite doesn't match session"): + yield result + + #Set the session for this connection + self.session = session + + #Calculate pending connection states + self._calcPendingStates(clientRandom, serverRandom, + settings.cipherImplementations) + + #Exchange 
ChangeCipherSpec and Finished messages + for result in self._getFinished(): + yield result + for result in self._sendFinished(): + yield result + + #Mark the connection as open + self._handshakeDone(resumed=True) + + #If server DOES NOT agree to resume + else: + + if sharedKeyParams: + for result in self._sendError(\ + AlertDescription.user_canceled, + "Was expecting a shared-key resumption"): + yield result + + #We've already validated these + cipherSuite = serverHello.cipher_suite + certificateType = serverHello.certificate_type + + #If the server chose an SRP suite... + if cipherSuite in CipherSuite.srpSuites: + #Get ServerKeyExchange, ServerHelloDone + for result in self._getMsg(ContentType.handshake, + HandshakeType.server_key_exchange, cipherSuite): + if result in (0,1): + yield result + else: + break + serverKeyExchange = result + + for result in self._getMsg(ContentType.handshake, + HandshakeType.server_hello_done): + if result in (0,1): + yield result + else: + break + serverHelloDone = result + + #If the server chose an SRP+RSA suite... + elif cipherSuite in CipherSuite.srpRsaSuites: + #Get Certificate, ServerKeyExchange, ServerHelloDone + for result in self._getMsg(ContentType.handshake, + HandshakeType.certificate, certificateType): + if result in (0,1): + yield result + else: + break + serverCertificate = result + + for result in self._getMsg(ContentType.handshake, + HandshakeType.server_key_exchange, cipherSuite): + if result in (0,1): + yield result + else: + break + serverKeyExchange = result + + for result in self._getMsg(ContentType.handshake, + HandshakeType.server_hello_done): + if result in (0,1): + yield result + else: + break + serverHelloDone = result + + #If the server chose an RSA suite... 
+ elif cipherSuite in CipherSuite.rsaSuites: + #Get Certificate[, CertificateRequest], ServerHelloDone + for result in self._getMsg(ContentType.handshake, + HandshakeType.certificate, certificateType): + if result in (0,1): + yield result + else: + break + serverCertificate = result + + for result in self._getMsg(ContentType.handshake, + (HandshakeType.server_hello_done, + HandshakeType.certificate_request)): + if result in (0,1): + yield result + else: + break + msg = result + + certificateRequest = None + if isinstance(msg, CertificateRequest): + certificateRequest = msg + for result in self._getMsg(ContentType.handshake, + HandshakeType.server_hello_done): + if result in (0,1): + yield result + else: + break + serverHelloDone = result + elif isinstance(msg, ServerHelloDone): + serverHelloDone = msg + else: + raise AssertionError() + + + #Calculate SRP premaster secret, if server chose an SRP or + #SRP+RSA suite + if cipherSuite in CipherSuite.srpSuites + \ + CipherSuite.srpRsaSuites: + #Get and check the server's group parameters and B value + N = serverKeyExchange.srp_N + g = serverKeyExchange.srp_g + s = serverKeyExchange.srp_s + B = serverKeyExchange.srp_B + + if (g,N) not in goodGroupParameters: + for result in self._sendError(\ + AlertDescription.untrusted_srp_parameters, + "Unknown group parameters"): + yield result + if numBits(N) < settings.minKeySize: + for result in self._sendError(\ + AlertDescription.untrusted_srp_parameters, + "N value is too small: %d" % numBits(N)): + yield result + if numBits(N) > settings.maxKeySize: + for result in self._sendError(\ + AlertDescription.untrusted_srp_parameters, + "N value is too large: %d" % numBits(N)): + yield result + if B % N == 0: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Suspicious B value"): + yield result + + #Check the server's signature, if server chose an + #SRP+RSA suite + if cipherSuite in CipherSuite.srpRsaSuites: + #Hash ServerKeyExchange/ServerSRPParams + 
hashBytes = serverKeyExchange.hash(clientRandom, + serverRandom) + + #Extract signature bytes from ServerKeyExchange + sigBytes = serverKeyExchange.signature + if len(sigBytes) == 0: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Server sent an SRP ServerKeyExchange "\ + "message without a signature"): + yield result + + #Get server's public key from the Certificate message + for result in self._getKeyFromChain(serverCertificate, + settings): + if result in (0,1): + yield result + else: + break + publicKey, serverCertChain = result + + #Verify signature + if not publicKey.verify(sigBytes, hashBytes): + for result in self._sendError(\ + AlertDescription.decrypt_error, + "Signature failed to verify"): + yield result + + + #Calculate client's ephemeral DH values (a, A) + a = bytesToNumber(getRandomBytes(32)) + A = powMod(g, a, N) + + #Calculate client's static DH values (x, v) + x = makeX(bytesToString(s), srpUsername, password) + v = powMod(g, x, N) + + #Calculate u + u = makeU(N, A, B) + + #Calculate premaster secret + k = makeK(N, g) + S = powMod((B - (k*v)) % N, a+(u*x), N) + + if self.fault == Fault.badA: + A = N + S = 0 + premasterSecret = numberToBytes(S) + + #Send ClientKeyExchange + for result in self._sendMsg(\ + ClientKeyExchange(cipherSuite).createSRP(A)): + yield result + + + #Calculate RSA premaster secret, if server chose an RSA suite + elif cipherSuite in CipherSuite.rsaSuites: + + #Handle the presence of a CertificateRequest + if certificateRequest: + if unknownParams and certCallback: + certParamsNew = certCallback() + if certParamsNew: + clientCertChain, privateKey = certParamsNew + + #Get server's public key from the Certificate message + for result in self._getKeyFromChain(serverCertificate, + settings): + if result in (0,1): + yield result + else: + break + publicKey, serverCertChain = result + + + #Calculate premaster secret + premasterSecret = getRandomBytes(48) + premasterSecret[0] = settings.maxVersion[0] + 
premasterSecret[1] = settings.maxVersion[1] + + if self.fault == Fault.badPremasterPadding: + premasterSecret[0] = 5 + if self.fault == Fault.shortPremasterSecret: + premasterSecret = premasterSecret[:-1] + + #Encrypt premaster secret to server's public key + encryptedPreMasterSecret = publicKey.encrypt(premasterSecret) + + #If client authentication was requested, send Certificate + #message, either with certificates or empty + if certificateRequest: + clientCertificate = Certificate(certificateType) + + if clientCertChain: + #Check to make sure we have the same type of + #certificates the server requested + wrongType = False + if certificateType == CertificateType.x509: + if not isinstance(clientCertChain, X509CertChain): + wrongType = True + elif certificateType == CertificateType.cryptoID: + if not isinstance(clientCertChain, + cryptoIDlib.CertChain.CertChain): + wrongType = True + if wrongType: + for result in self._sendError(\ + AlertDescription.handshake_failure, + "Client certificate is of wrong type"): + yield result + + clientCertificate.create(clientCertChain) + + for result in self._sendMsg(clientCertificate): + yield result + else: + #The server didn't request client auth, so we + #zeroize these so the clientCertChain won't be + #stored in the session. 
+ privateKey = None + clientCertChain = None + + #Send ClientKeyExchange + clientKeyExchange = ClientKeyExchange(cipherSuite, + self.version) + clientKeyExchange.createRSA(encryptedPreMasterSecret) + for result in self._sendMsg(clientKeyExchange): + yield result + + #If client authentication was requested and we have a + #private key, send CertificateVerify + if certificateRequest and privateKey: + if self.version == (3,0): + #Create a temporary session object, just for the + #purpose of creating the CertificateVerify + session = Session() + session._calcMasterSecret(self.version, + premasterSecret, + clientRandom, + serverRandom) + verifyBytes = self._calcSSLHandshakeHash(\ + session.masterSecret, "") + elif self.version in ((3,1), (3,2)): + verifyBytes = stringToBytes(\ + self._handshake_md5.digest() + \ + self._handshake_sha.digest()) + if self.fault == Fault.badVerifyMessage: + verifyBytes[0] = ((verifyBytes[0]+1) % 256) + signedBytes = privateKey.sign(verifyBytes) + certificateVerify = CertificateVerify() + certificateVerify.create(signedBytes) + for result in self._sendMsg(certificateVerify): + yield result + + + #Create the session object + self.session = Session() + self.session._calcMasterSecret(self.version, premasterSecret, + clientRandom, serverRandom) + self.session.sessionID = serverHello.session_id + self.session.cipherSuite = cipherSuite + self.session.srpUsername = srpUsername + self.session.clientCertChain = clientCertChain + self.session.serverCertChain = serverCertChain + + #Calculate pending connection states + self._calcPendingStates(clientRandom, serverRandom, + settings.cipherImplementations) + + #Exchange ChangeCipherSpec and Finished messages + for result in self._sendFinished(): + yield result + for result in self._getFinished(): + yield result + + #Mark the connection as open + self.session._setResumable(True) + self._handshakeDone(resumed=False) + + + + def handshakeServer(self, sharedKeyDB=None, verifierDB=None, + certChain=None, 
privateKey=None, reqCert=False, + sessionCache=None, settings=None, checker=None): + """Perform a handshake in the role of server. + + This function performs an SSL or TLS handshake. Depending on + the arguments and the behavior of the client, this function can + perform a shared-key, SRP, or certificate-based handshake. It + can also perform a combined SRP and server-certificate + handshake. + + Like any handshake function, this can be called on a closed + TLS connection, or on a TLS connection that is already open. + If called on an open connection it performs a re-handshake. + This function does not send a Hello Request message before + performing the handshake, so if re-handshaking is required, + the server must signal the client to begin the re-handshake + through some other means. + + If the function completes without raising an exception, the + TLS connection will be open and available for data transfer. + + If an exception is raised, the connection will have been + automatically closed (if it was ever open). + + @type sharedKeyDB: L{tlslite.SharedKeyDB.SharedKeyDB} + @param sharedKeyDB: A database of shared symmetric keys + associated with usernames. If the client performs a + shared-key handshake, the session's sharedKeyUsername + attribute will be set. + + @type verifierDB: L{tlslite.VerifierDB.VerifierDB} + @param verifierDB: A database of SRP password verifiers + associated with usernames. If the client performs an SRP + handshake, the session's srpUsername attribute will be set. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: The certificate chain to be used if the + client requests server certificate authentication. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: The private key to be used if the client + requests server certificate authentication. + + @type reqCert: bool + @param reqCert: Whether to request client certificate + authentication. 
This only applies if the client chooses server + certificate authentication; if the client chooses SRP or + shared-key authentication, this will be ignored. If the client + performs a client certificate authentication, the session's + clientCertChain attribute will be set. + + @type sessionCache: L{tlslite.SessionCache.SessionCache} + @param sessionCache: An in-memory cache of resumable sessions. + The client can resume sessions from this cache. Alternatively, + if the client performs a full handshake, a new session will be + added to the cache. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites and SSL/TLS version chosen by the server. + + @type checker: L{tlslite.Checker.Checker} + @param checker: A Checker instance. This instance will be + invoked to examine the other party's authentication + credentials, if the handshake completes successfully. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + @raise tlslite.errors.TLSAuthenticationError: If the checker + doesn't like the other party's authentication credentials. + """ + for result in self.handshakeServerAsync(sharedKeyDB, verifierDB, + certChain, privateKey, reqCert, sessionCache, settings, + checker): + pass + + + def handshakeServerAsync(self, sharedKeyDB=None, verifierDB=None, + certChain=None, privateKey=None, reqCert=False, + sessionCache=None, settings=None, checker=None): + """Start a server handshake operation on the TLS connection. + + This function returns a generator which behaves similarly to + handshakeServer(). Successive invocations of the generator + will return 0 if it is waiting to read from the socket, 1 if it is + waiting to write to the socket, or it will raise StopIteration + if the handshake operation is complete. 
+ + @rtype: iterable + @return: A generator; see above for details. + """ + handshaker = self._handshakeServerAsyncHelper(\ + sharedKeyDB=sharedKeyDB, + verifierDB=verifierDB, certChain=certChain, + privateKey=privateKey, reqCert=reqCert, + sessionCache=sessionCache, settings=settings) + for result in self._handshakeWrapperAsync(handshaker, checker): + yield result + + + def _handshakeServerAsyncHelper(self, sharedKeyDB, verifierDB, + certChain, privateKey, reqCert, sessionCache, + settings): + + self._handshakeStart(client=False) + + if (not sharedKeyDB) and (not verifierDB) and (not certChain): + raise ValueError("Caller passed no authentication credentials") + if certChain and not privateKey: + raise ValueError("Caller passed a certChain but no privateKey") + if privateKey and not certChain: + raise ValueError("Caller passed a privateKey but no certChain") + + if not settings: + settings = HandshakeSettings() + settings = settings._filter() + + #Initialize acceptable cipher suites + cipherSuites = [] + if verifierDB: + if certChain: + cipherSuites += \ + CipherSuite.getSrpRsaSuites(settings.cipherNames) + cipherSuites += CipherSuite.getSrpSuites(settings.cipherNames) + if sharedKeyDB or certChain: + cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) + + #Initialize acceptable certificate type + certificateType = None + if certChain: + try: + import cryptoIDlib.CertChain + if isinstance(certChain, cryptoIDlib.CertChain.CertChain): + certificateType = CertificateType.cryptoID + except ImportError: + pass + if isinstance(certChain, X509CertChain): + certificateType = CertificateType.x509 + if certificateType == None: + raise ValueError("Unrecognized certificate type") + + #Initialize locals + clientCertChain = None + serverCertChain = None #We may set certChain to this later + postFinishedError = None + + #Tentatively set version to most-desirable version, so if an error + #occurs parsing the ClientHello, this is what we'll use for the + #error alert + 
self.version = settings.maxVersion + + #Get ClientHello + for result in self._getMsg(ContentType.handshake, + HandshakeType.client_hello): + if result in (0,1): + yield result + else: + break + clientHello = result + + #If client's version is too low, reject it + if clientHello.client_version < settings.minVersion: + self.version = settings.minVersion + for result in self._sendError(\ + AlertDescription.protocol_version, + "Too old version: %s" % str(clientHello.client_version)): + yield result + + #If client's version is too high, propose my highest version + elif clientHello.client_version > settings.maxVersion: + self.version = settings.maxVersion + + else: + #Set the version to the client's version + self.version = clientHello.client_version + + #Get the client nonce; create server nonce + clientRandom = clientHello.random + serverRandom = getRandomBytes(32) + + #Calculate the first cipher suite intersection. + #This is the 'privileged' ciphersuite. We'll use it if we're + #doing a shared-key resumption or a new negotiation. In fact, + #the only time we won't use it is if we're resuming a non-sharedkey + #session, in which case we use the ciphersuite from the session. + # + #Given the current ciphersuite ordering, this means we prefer SRP + #over non-SRP. + for cipherSuite in cipherSuites: + if cipherSuite in clientHello.cipher_suites: + break + else: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + + #If resumption was requested... 
+ if clientHello.session_id and (sharedKeyDB or sessionCache): + session = None + + #Check in the sharedKeys container + if sharedKeyDB and len(clientHello.session_id)==16: + try: + #Trim off zero padding, if any + for x in range(16): + if clientHello.session_id[x]==0: + break + self.allegedSharedKeyUsername = bytesToString(\ + clientHello.session_id[:x]) + session = sharedKeyDB[self.allegedSharedKeyUsername] + if not session.sharedKey: + raise AssertionError() + #use privileged ciphersuite + session.cipherSuite = cipherSuite + except KeyError: + pass + + #Then check in the session cache + if sessionCache and not session: + try: + session = sessionCache[bytesToString(\ + clientHello.session_id)] + if session.sharedKey: + raise AssertionError() + if not session.resumable: + raise AssertionError() + #Check for consistency with ClientHello + if session.cipherSuite not in cipherSuites: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + if session.cipherSuite not in clientHello.cipher_suites: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + if clientHello.srp_username: + if clientHello.srp_username != session.srpUsername: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + except KeyError: + pass + + #If a session is found.. 
+ if session: + #Set the session + self.session = session + + #Send ServerHello + serverHello = ServerHello() + serverHello.create(self.version, serverRandom, + session.sessionID, session.cipherSuite, + certificateType) + for result in self._sendMsg(serverHello): + yield result + + #From here on, the client's messages must have the right version + self._versionCheck = True + + #Calculate pending connection states + self._calcPendingStates(clientRandom, serverRandom, + settings.cipherImplementations) + + #Exchange ChangeCipherSpec and Finished messages + for result in self._sendFinished(): + yield result + for result in self._getFinished(): + yield result + + #Mark the connection as open + self._handshakeDone(resumed=True) + return + + + #If not a resumption... + + #TRICKY: we might have chosen an RSA suite that was only deemed + #acceptable because of the shared-key resumption. If the shared- + #key resumption failed, because the identifier wasn't recognized, + #we might fall through to here, where we have an RSA suite + #chosen, but no certificate. 
+ if cipherSuite in CipherSuite.rsaSuites and not certChain: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + + #If an RSA suite is chosen, check for certificate type intersection + #(We do this check down here because if the mismatch occurs but the + # client is using a shared-key session, it's okay) + if cipherSuite in CipherSuite.rsaSuites + \ + CipherSuite.srpRsaSuites: + if certificateType not in clientHello.certificate_types: + for result in self._sendError(\ + AlertDescription.handshake_failure, + "the client doesn't support my certificate type"): + yield result + + #Move certChain -> serverCertChain, now that we're using it + serverCertChain = certChain + + + #Create sessionID + if sessionCache: + sessionID = getRandomBytes(32) + else: + sessionID = createByteArraySequence([]) + + #If we've selected an SRP suite, exchange keys and calculate + #premaster secret: + if cipherSuite in CipherSuite.srpSuites + CipherSuite.srpRsaSuites: + + #If there's no SRP username... + if not clientHello.srp_username: + + #Ask the client to re-send ClientHello with one + for result in self._sendMsg(Alert().create(\ + AlertDescription.missing_srp_username, + AlertLevel.warning)): + yield result + + #Get ClientHello + for result in self._getMsg(ContentType.handshake, + HandshakeType.client_hello): + if result in (0,1): + yield result + else: + break + clientHello = result + + #Check ClientHello + #If client's version is too low, reject it (COPIED CODE; BAD!) 
+ if clientHello.client_version < settings.minVersion: + self.version = settings.minVersion + for result in self._sendError(\ + AlertDescription.protocol_version, + "Too old version: %s" % str(clientHello.client_version)): + yield result + + #If client's version is too high, propose my highest version + elif clientHello.client_version > settings.maxVersion: + self.version = settings.maxVersion + + else: + #Set the version to the client's version + self.version = clientHello.client_version + + #Recalculate the privileged cipher suite, making sure to + #pick an SRP suite + cipherSuites = [c for c in cipherSuites if c in \ + CipherSuite.srpSuites + \ + CipherSuite.srpRsaSuites] + for cipherSuite in cipherSuites: + if cipherSuite in clientHello.cipher_suites: + break + else: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + + #Get the client nonce; create server nonce + clientRandom = clientHello.random + serverRandom = getRandomBytes(32) + + #The username better be there, this time + if not clientHello.srp_username: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Client resent a hello, but without the SRP"\ + " username"): + yield result + + + #Get username + self.allegedSrpUsername = clientHello.srp_username + + #Get parameters from username + try: + entry = verifierDB[self.allegedSrpUsername] + except KeyError: + for result in self._sendError(\ + AlertDescription.unknown_srp_username): + yield result + (N, g, s, v) = entry + + #Calculate server's ephemeral DH values (b, B) + b = bytesToNumber(getRandomBytes(32)) + k = makeK(N, g) + B = (powMod(g, b, N) + (k*v)) % N + + #Create ServerKeyExchange, signing it if necessary + serverKeyExchange = ServerKeyExchange(cipherSuite) + serverKeyExchange.createSRP(N, g, stringToBytes(s), B) + if cipherSuite in CipherSuite.srpRsaSuites: + hashBytes = serverKeyExchange.hash(clientRandom, + serverRandom) + serverKeyExchange.signature = privateKey.sign(hashBytes) + + 
#Send ServerHello[, Certificate], ServerKeyExchange, + #ServerHelloDone + msgs = [] + serverHello = ServerHello() + serverHello.create(self.version, serverRandom, sessionID, + cipherSuite, certificateType) + msgs.append(serverHello) + if cipherSuite in CipherSuite.srpRsaSuites: + certificateMsg = Certificate(certificateType) + certificateMsg.create(serverCertChain) + msgs.append(certificateMsg) + msgs.append(serverKeyExchange) + msgs.append(ServerHelloDone()) + for result in self._sendMsgs(msgs): + yield result + + #From here on, the client's messages must have the right version + self._versionCheck = True + + #Get and check ClientKeyExchange + for result in self._getMsg(ContentType.handshake, + HandshakeType.client_key_exchange, + cipherSuite): + if result in (0,1): + yield result + else: + break + clientKeyExchange = result + A = clientKeyExchange.srp_A + if A % N == 0: + postFinishedError = (AlertDescription.illegal_parameter, + "Suspicious A value") + #Calculate u + u = makeU(N, A, B) + + #Calculate premaster secret + S = powMod((A * powMod(v,u,N)) % N, b, N) + premasterSecret = numberToBytes(S) + + + #If we've selected an RSA suite, exchange keys and calculate + #premaster secret: + elif cipherSuite in CipherSuite.rsaSuites: + + #Send ServerHello, Certificate[, CertificateRequest], + #ServerHelloDone + msgs = [] + msgs.append(ServerHello().create(self.version, serverRandom, + sessionID, cipherSuite, certificateType)) + msgs.append(Certificate(certificateType).create(serverCertChain)) + if reqCert: + msgs.append(CertificateRequest()) + msgs.append(ServerHelloDone()) + for result in self._sendMsgs(msgs): + yield result + + #From here on, the client's messages must have the right version + self._versionCheck = True + + #Get [Certificate,] (if was requested) + if reqCert: + if self.version == (3,0): + for result in self._getMsg((ContentType.handshake, + ContentType.alert), + HandshakeType.certificate, + certificateType): + if result in (0,1): + yield result + 
else: + break + msg = result + + if isinstance(msg, Alert): + #If it's not a no_certificate alert, re-raise + alert = msg + if alert.description != \ + AlertDescription.no_certificate: + self._shutdown(False) + raise TLSRemoteAlert(alert) + elif isinstance(msg, Certificate): + clientCertificate = msg + if clientCertificate.certChain and \ + clientCertificate.certChain.getNumCerts()!=0: + clientCertChain = clientCertificate.certChain + else: + raise AssertionError() + elif self.version in ((3,1), (3,2)): + for result in self._getMsg(ContentType.handshake, + HandshakeType.certificate, + certificateType): + if result in (0,1): + yield result + else: + break + clientCertificate = result + if clientCertificate.certChain and \ + clientCertificate.certChain.getNumCerts()!=0: + clientCertChain = clientCertificate.certChain + else: + raise AssertionError() + + #Get ClientKeyExchange + for result in self._getMsg(ContentType.handshake, + HandshakeType.client_key_exchange, + cipherSuite): + if result in (0,1): + yield result + else: + break + clientKeyExchange = result + + #Decrypt ClientKeyExchange + premasterSecret = privateKey.decrypt(\ + clientKeyExchange.encryptedPreMasterSecret) + + randomPreMasterSecret = getRandomBytes(48) + versionCheck = (premasterSecret[0], premasterSecret[1]) + if not premasterSecret: + premasterSecret = randomPreMasterSecret + elif len(premasterSecret)!=48: + premasterSecret = randomPreMasterSecret + elif versionCheck != clientHello.client_version: + if versionCheck != self.version: #Tolerate buggy IE clients + premasterSecret = randomPreMasterSecret + + #Get and check CertificateVerify, if relevant + if clientCertChain: + if self.version == (3,0): + #Create a temporary session object, just for the purpose + #of checking the CertificateVerify + session = Session() + session._calcMasterSecret(self.version, premasterSecret, + clientRandom, serverRandom) + verifyBytes = self._calcSSLHandshakeHash(\ + session.masterSecret, "") + elif self.version in 
((3,1), (3,2)): + verifyBytes = stringToBytes(self._handshake_md5.digest() +\ + self._handshake_sha.digest()) + for result in self._getMsg(ContentType.handshake, + HandshakeType.certificate_verify): + if result in (0,1): + yield result + else: + break + certificateVerify = result + publicKey = clientCertChain.getEndEntityPublicKey() + if len(publicKey) < settings.minKeySize: + postFinishedError = (AlertDescription.handshake_failure, + "Client's public key too small: %d" % len(publicKey)) + if len(publicKey) > settings.maxKeySize: + postFinishedError = (AlertDescription.handshake_failure, + "Client's public key too large: %d" % len(publicKey)) + + if not publicKey.verify(certificateVerify.signature, + verifyBytes): + postFinishedError = (AlertDescription.decrypt_error, + "Signature failed to verify") + + + #Create the session object + self.session = Session() + self.session._calcMasterSecret(self.version, premasterSecret, + clientRandom, serverRandom) + self.session.sessionID = sessionID + self.session.cipherSuite = cipherSuite + self.session.srpUsername = self.allegedSrpUsername + self.session.clientCertChain = clientCertChain + self.session.serverCertChain = serverCertChain + + #Calculate pending connection states + self._calcPendingStates(clientRandom, serverRandom, + settings.cipherImplementations) + + #Exchange ChangeCipherSpec and Finished messages + for result in self._getFinished(): + yield result + + #If we were holding a post-finished error until receiving the client + #finished message, send it now. We delay the call until this point + #because calling sendError() throws an exception, and our caller might + #shut down the socket upon receiving the exception. If he did, and the + #client was still sending its ChangeCipherSpec or Finished messages, it + #would cause a socket error on the client side. This is a lot of + #consideration to show to misbehaving clients, but this would also + #cause problems with fault-testing. 
+ if postFinishedError: + for result in self._sendError(*postFinishedError): + yield result + + for result in self._sendFinished(): + yield result + + #Add the session object to the session cache + if sessionCache and sessionID: + sessionCache[bytesToString(sessionID)] = self.session + + #Mark the connection as open + self.session._setResumable(True) + self._handshakeDone(resumed=False) + + + def _handshakeWrapperAsync(self, handshaker, checker): + if not self.fault: + try: + for result in handshaker: + yield result + if checker: + try: + checker(self) + except TLSAuthenticationError: + alert = Alert().create(AlertDescription.close_notify, + AlertLevel.fatal) + for result in self._sendMsg(alert): + yield result + raise + except: + self._shutdown(False) + raise + else: + try: + for result in handshaker: + yield result + if checker: + try: + checker(self) + except TLSAuthenticationError: + alert = Alert().create(AlertDescription.close_notify, + AlertLevel.fatal) + for result in self._sendMsg(alert): + yield result + raise + except socket.error, e: + raise TLSFaultError("socket error!") + except TLSAbruptCloseError, e: + raise TLSFaultError("abrupt close error!") + except TLSAlert, alert: + if alert.description not in Fault.faultAlerts[self.fault]: + raise TLSFaultError(str(alert)) + else: + pass + except: + self._shutdown(False) + raise + else: + raise TLSFaultError("No error!") + + + def _getKeyFromChain(self, certificate, settings): + #Get and check cert chain from the Certificate message + certChain = certificate.certChain + if not certChain or certChain.getNumCerts() == 0: + for result in self._sendError(AlertDescription.illegal_parameter, + "Other party sent a Certificate message without "\ + "certificates"): + yield result + + #Get and check public key from the cert chain + publicKey = certChain.getEndEntityPublicKey() + if len(publicKey) < settings.minKeySize: + for result in self._sendError(AlertDescription.handshake_failure, + "Other party's public key too 
small: %d" % len(publicKey)): + yield result + if len(publicKey) > settings.maxKeySize: + for result in self._sendError(AlertDescription.handshake_failure, + "Other party's public key too large: %d" % len(publicKey)): + yield result + + yield publicKey, certChain diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/TLSRecordLayer.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/TLSRecordLayer.py new file mode 100644 index 0000000..875ce80 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/TLSRecordLayer.py @@ -0,0 +1,1123 @@ +"""Helper class for TLSConnection.""" +from __future__ import generators + +from utils.compat import * +from utils.cryptomath import * +from utils.cipherfactory import createAES, createRC4, createTripleDES +from utils.codec import * +from errors import * +from messages import * +from mathtls import * +from constants import * +from utils.cryptomath import getRandomBytes +from utils import hmac +from FileObject import FileObject +import sha +import md5 +import socket +import errno +import traceback + +class _ConnectionState: + def __init__(self): + self.macContext = None + self.encContext = None + self.seqnum = 0 + + def getSeqNumStr(self): + w = Writer(8) + w.add(self.seqnum, 8) + seqnumStr = bytesToString(w.bytes) + self.seqnum += 1 + return seqnumStr + + +class TLSRecordLayer: + """ + This class handles data transmission for a TLS connection. + + Its only subclass is L{tlslite.TLSConnection.TLSConnection}. We've + separated the code in this class from TLSConnection to make things + more readable. + + + @type sock: socket.socket + @ivar sock: The underlying socket object. + + @type session: L{tlslite.Session.Session} + @ivar session: The session corresponding to this connection. + + Due to TLS session resumption, multiple connections can correspond + to the same underlying session. + + @type version: tuple + @ivar version: The TLS version being used for this connection. 
+ + (3,0) means SSL 3.0, and (3,1) means TLS 1.0. + + @type closed: bool + @ivar closed: If this connection is closed. + + @type resumed: bool + @ivar resumed: If this connection is based on a resumed session. + + @type allegedSharedKeyUsername: str or None + @ivar allegedSharedKeyUsername: This is set to the shared-key + username asserted by the client, whether the handshake succeeded or + not. If the handshake fails, this can be inspected to + determine if a guessing attack is in progress against a particular + user account. + + @type allegedSrpUsername: str or None + @ivar allegedSrpUsername: This is set to the SRP username + asserted by the client, whether the handshake succeeded or not. + If the handshake fails, this can be inspected to determine + if a guessing attack is in progress against a particular user + account. + + @type closeSocket: bool + @ivar closeSocket: If the socket should be closed when the + connection is closed (writable). + + If you set this to True, TLS Lite will assume the responsibility of + closing the socket when the TLS Connection is shutdown (either + through an error or through the user calling close()). The default + is False. + + @type ignoreAbruptClose: bool + @ivar ignoreAbruptClose: If an abrupt close of the socket should + raise an error (writable). + + If you set this to True, TLS Lite will not raise a + L{tlslite.errors.TLSAbruptCloseError} exception if the underlying + socket is unexpectedly closed. Such an unexpected closure could be + caused by an attacker. However, it also occurs with some incorrect + TLS implementations. + + You should set this to True only if you're not worried about an + attacker truncating the connection, and only if necessary to avoid + spurious errors. The default is False. 
+ + @sort: __init__, read, readAsync, write, writeAsync, close, closeAsync, + getCipherImplementation, getCipherName + """ + + def __init__(self, sock): + self.sock = sock + + #My session object (Session instance; read-only) + self.session = None + + #Am I a client or server? + self._client = None + + #Buffers for processing messages + self._handshakeBuffer = [] + self._readBuffer = "" + + #Handshake digests + self._handshake_md5 = md5.md5() + self._handshake_sha = sha.sha() + + #TLS Protocol Version + self.version = (0,0) #read-only + self._versionCheck = False #Once we choose a version, this is True + + #Current and Pending connection states + self._writeState = _ConnectionState() + self._readState = _ConnectionState() + self._pendingWriteState = _ConnectionState() + self._pendingReadState = _ConnectionState() + + #Is the connection open? + self.closed = True #read-only + self._refCount = 0 #Used to trigger closure + + #Is this a resumed (or shared-key) session? + self.resumed = False #read-only + + #What username did the client claim in his handshake? + self.allegedSharedKeyUsername = None + self.allegedSrpUsername = None + + #On a call to close(), do we close the socket? (writeable) + self.closeSocket = False + + #If the socket is abruptly closed, do we ignore it + #and pretend the connection was shut down properly? (writeable) + self.ignoreAbruptClose = False + + #Fault we will induce, for testing purposes + self.fault = None + + #********************************************************* + # Public Functions START + #********************************************************* + + def read(self, max=None, min=1): + """Read some data from the TLS connection. + + This function will block until at least 'min' bytes are + available (or the connection is closed). + + If an exception is raised, the connection will have been + automatically closed. + + @type max: int + @param max: The maximum number of bytes to return. 
+ + @type min: int + @param min: The minimum number of bytes to return + + @rtype: str + @return: A string of no more than 'max' bytes, and no fewer + than 'min' (unless the connection has been closed, in which + case fewer than 'min' bytes may be returned). + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + """ + for result in self.readAsync(max, min): + pass + return result + + def readAsync(self, max=None, min=1): + """Start a read operation on the TLS connection. + + This function returns a generator which behaves similarly to + read(). Successive invocations of the generator will return 0 + if it is waiting to read from the socket, 1 if it is waiting + to write to the socket, or a string if the read operation has + completed. + + @rtype: iterable + @return: A generator; see above for details. + """ + try: + while len(self._readBuffer)= len(s): + break + if endIndex > len(s): + endIndex = len(s) + block = stringToBytes(s[startIndex : endIndex]) + applicationData = ApplicationData().create(block) + for result in self._sendMsg(applicationData, skipEmptyFrag): + yield result + skipEmptyFrag = True #only send an empy fragment on 1st message + index += 1 + except: + self._shutdown(False) + raise + + def close(self): + """Close the TLS connection. + + This function will block until it has exchanged close_notify + alerts with the other party. After doing so, it will shut down the + TLS connection. Further attempts to read through this connection + will return "". Further attempts to write through this connection + will raise ValueError. + + If makefile() has been called on this connection, the connection + will be not be closed until the connection object and all file + objects have been closed. + + Even if an exception is raised, the connection will have been + closed. 
+ + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + """ + if not self.closed: + for result in self._decrefAsync(): + pass + + def closeAsync(self): + """Start a close operation on the TLS connection. + + This function returns a generator which behaves similarly to + close(). Successive invocations of the generator will return 0 + if it is waiting to read from the socket, 1 if it is waiting + to write to the socket, or will raise StopIteration if the + close operation has completed. + + @rtype: iterable + @return: A generator; see above for details. + """ + if not self.closed: + for result in self._decrefAsync(): + yield result + + def _decrefAsync(self): + self._refCount -= 1 + if self._refCount == 0 and not self.closed: + try: + for result in self._sendMsg(Alert().create(\ + AlertDescription.close_notify, AlertLevel.warning)): + yield result + alert = None + while not alert: + for result in self._getMsg((ContentType.alert, \ + ContentType.application_data)): + if result in (0,1): + yield result + if result.contentType == ContentType.alert: + alert = result + if alert.description == AlertDescription.close_notify: + self._shutdown(True) + else: + raise TLSRemoteAlert(alert) + except (socket.error, TLSAbruptCloseError): + #If the other side closes the socket, that's okay + self._shutdown(True) + except: + self._shutdown(False) + raise + + def getCipherName(self): + """Get the name of the cipher used with this connection. + + @rtype: str + @return: The name of the cipher used with this connection. + Either 'aes128', 'aes256', 'rc4', or '3des'. + """ + if not self._writeState.encContext: + return None + return self._writeState.encContext.name + + def getCipherImplementation(self): + """Get the name of the cipher implementation used with + this connection. 
+ + @rtype: str + @return: The name of the cipher implementation used with + this connection. Either 'python', 'cryptlib', 'openssl', + or 'pycrypto'. + """ + if not self._writeState.encContext: + return None + return self._writeState.encContext.implementation + + + + #Emulate a socket, somewhat - + def send(self, s): + """Send data to the TLS connection (socket emulation). + + @raise socket.error: If a socket error occurs. + """ + self.write(s) + return len(s) + + def sendall(self, s): + """Send data to the TLS connection (socket emulation). + + @raise socket.error: If a socket error occurs. + """ + self.write(s) + + def recv(self, bufsize): + """Get some data from the TLS connection (socket emulation). + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + """ + return self.read(bufsize) + + def makefile(self, mode='r', bufsize=-1): + """Create a file object for the TLS connection (socket emulation). 
+ + @rtype: L{tlslite.FileObject.FileObject} + """ + self._refCount += 1 + return FileObject(self, mode, bufsize) + + def getsockname(self): + """Return the socket's own address (socket emulation).""" + return self.sock.getsockname() + + def getpeername(self): + """Return the remote address to which the socket is connected + (socket emulation).""" + return self.sock.getpeername() + + def settimeout(self, value): + """Set a timeout on blocking socket operations (socket emulation).""" + return self.sock.settimeout(value) + + def gettimeout(self): + """Return the timeout associated with socket operations (socket + emulation).""" + return self.sock.gettimeout() + + def setsockopt(self, level, optname, value): + """Set the value of the given socket option (socket emulation).""" + return self.sock.setsockopt(level, optname, value) + + + #********************************************************* + # Public Functions END + #********************************************************* + + def _shutdown(self, resumable): + self._writeState = _ConnectionState() + self._readState = _ConnectionState() + #Don't do this: self._readBuffer = "" + self.version = (0,0) + self._versionCheck = False + self.closed = True + if self.closeSocket: + self.sock.close() + + #Even if resumable is False, we'll never toggle this on + if not resumable and self.session: + self.session.resumable = False + + + def _sendError(self, alertDescription, errorStr=None): + alert = Alert().create(alertDescription, AlertLevel.fatal) + for result in self._sendMsg(alert): + yield result + self._shutdown(False) + raise TLSLocalAlert(alert, errorStr) + + def _sendMsgs(self, msgs): + skipEmptyFrag = False + for msg in msgs: + for result in self._sendMsg(msg, skipEmptyFrag): + yield result + skipEmptyFrag = True + + def _sendMsg(self, msg, skipEmptyFrag=False): + bytes = msg.write() + contentType = msg.contentType + + #Whenever we're connected and asked to send a message, + #we first send an empty Application Data 
message. This prevents + #an attacker from launching a chosen-plaintext attack based on + #knowing the next IV. + if not self.closed and not skipEmptyFrag and self.version == (3,1): + if self._writeState.encContext: + if self._writeState.encContext.isBlockCipher: + for result in self._sendMsg(ApplicationData(), + skipEmptyFrag=True): + yield result + + #Update handshake hashes + if contentType == ContentType.handshake: + bytesStr = bytesToString(bytes) + self._handshake_md5.update(bytesStr) + self._handshake_sha.update(bytesStr) + + #Calculate MAC + if self._writeState.macContext: + seqnumStr = self._writeState.getSeqNumStr() + bytesStr = bytesToString(bytes) + mac = self._writeState.macContext.copy() + mac.update(seqnumStr) + mac.update(chr(contentType)) + if self.version == (3,0): + mac.update( chr( int(len(bytes)/256) ) ) + mac.update( chr( int(len(bytes)%256) ) ) + elif self.version in ((3,1), (3,2)): + mac.update(chr(self.version[0])) + mac.update(chr(self.version[1])) + mac.update( chr( int(len(bytes)/256) ) ) + mac.update( chr( int(len(bytes)%256) ) ) + else: + raise AssertionError() + mac.update(bytesStr) + macString = mac.digest() + macBytes = stringToBytes(macString) + if self.fault == Fault.badMAC: + macBytes[0] = (macBytes[0]+1) % 256 + + #Encrypt for Block or Stream Cipher + if self._writeState.encContext: + #Add padding and encrypt (for Block Cipher): + if self._writeState.encContext.isBlockCipher: + + #Add TLS 1.1 fixed block + if self.version == (3,2): + bytes = self.fixedIVBlock + bytes + + #Add padding: bytes = bytes + (macBytes + paddingBytes) + currentLength = len(bytes) + len(macBytes) + 1 + blockLength = self._writeState.encContext.block_size + paddingLength = blockLength-(currentLength % blockLength) + + paddingBytes = createByteArraySequence([paddingLength] * \ + (paddingLength+1)) + if self.fault == Fault.badPadding: + paddingBytes[0] = (paddingBytes[0]+1) % 256 + endBytes = concatArrays(macBytes, paddingBytes) + bytes = concatArrays(bytes, 
endBytes) + #Encrypt + plaintext = stringToBytes(bytes) + ciphertext = self._writeState.encContext.encrypt(plaintext) + bytes = stringToBytes(ciphertext) + + #Encrypt (for Stream Cipher) + else: + bytes = concatArrays(bytes, macBytes) + plaintext = bytesToString(bytes) + ciphertext = self._writeState.encContext.encrypt(plaintext) + bytes = stringToBytes(ciphertext) + + #Add record header and send + r = RecordHeader3().create(self.version, contentType, len(bytes)) + s = bytesToString(concatArrays(r.write(), bytes)) + while 1: + try: + bytesSent = self.sock.send(s) #Might raise socket.error + except socket.error, why: + if why[0] == errno.EWOULDBLOCK: + yield 1 + continue + else: + raise + if bytesSent == len(s): + return + s = s[bytesSent:] + yield 1 + + + def _getMsg(self, expectedType, secondaryType=None, constructorType=None): + try: + if not isinstance(expectedType, tuple): + expectedType = (expectedType,) + + #Spin in a loop, until we've got a non-empty record of a type we + #expect. The loop will be repeated if: + # - we receive a renegotiation attempt; we send no_renegotiation, + # then try again + # - we receive an empty application-data fragment; we try again + while 1: + for result in self._getNextRecord(): + if result in (0,1): + yield result + recordHeader, p = result + + #If this is an empty application-data fragment, try again + if recordHeader.type == ContentType.application_data: + if p.index == len(p.bytes): + continue + + #If we received an unexpected record type... + if recordHeader.type not in expectedType: + + #If we received an alert... + if recordHeader.type == ContentType.alert: + alert = Alert().parse(p) + + #We either received a fatal error, a warning, or a + #close_notify. In any case, we're going to close the + #connection. In the latter two cases we respond with + #a close_notify, but ignore any socket errors, since + #the other side might have already closed the socket. 
+ if alert.level == AlertLevel.warning or \ + alert.description == AlertDescription.close_notify: + + #If the sendMsg() call fails because the socket has + #already been closed, we will be forgiving and not + #report the error nor invalidate the "resumability" + #of the session. + try: + alertMsg = Alert() + alertMsg.create(AlertDescription.close_notify, + AlertLevel.warning) + for result in self._sendMsg(alertMsg): + yield result + except socket.error: + pass + + if alert.description == \ + AlertDescription.close_notify: + self._shutdown(True) + elif alert.level == AlertLevel.warning: + self._shutdown(False) + + else: #Fatal alert: + self._shutdown(False) + + #Raise the alert as an exception + raise TLSRemoteAlert(alert) + + #If we received a renegotiation attempt... + if recordHeader.type == ContentType.handshake: + subType = p.get(1) + reneg = False + if self._client: + if subType == HandshakeType.hello_request: + reneg = True + else: + if subType == HandshakeType.client_hello: + reneg = True + #Send no_renegotiation, then try again + if reneg: + alertMsg = Alert() + alertMsg.create(AlertDescription.no_renegotiation, + AlertLevel.warning) + for result in self._sendMsg(alertMsg): + yield result + continue + + #Otherwise: this is an unexpected record, but neither an + #alert nor renegotiation + for result in self._sendError(\ + AlertDescription.unexpected_message, + "received type=%d" % recordHeader.type): + yield result + + break + + #Parse based on content_type + if recordHeader.type == ContentType.change_cipher_spec: + yield ChangeCipherSpec().parse(p) + elif recordHeader.type == ContentType.alert: + yield Alert().parse(p) + elif recordHeader.type == ContentType.application_data: + yield ApplicationData().parse(p) + elif recordHeader.type == ContentType.handshake: + #Convert secondaryType to tuple, if it isn't already + if not isinstance(secondaryType, tuple): + secondaryType = (secondaryType,) + + #If it's a handshake message, check handshake header + if 
recordHeader.ssl2: + subType = p.get(1) + if subType != HandshakeType.client_hello: + for result in self._sendError(\ + AlertDescription.unexpected_message, + "Can only handle SSLv2 ClientHello messages"): + yield result + if HandshakeType.client_hello not in secondaryType: + for result in self._sendError(\ + AlertDescription.unexpected_message): + yield result + subType = HandshakeType.client_hello + else: + subType = p.get(1) + if subType not in secondaryType: + for result in self._sendError(\ + AlertDescription.unexpected_message, + "Expecting %s, got %s" % (str(secondaryType), subType)): + yield result + + #Update handshake hashes + sToHash = bytesToString(p.bytes) + self._handshake_md5.update(sToHash) + self._handshake_sha.update(sToHash) + + #Parse based on handshake type + if subType == HandshakeType.client_hello: + yield ClientHello(recordHeader.ssl2).parse(p) + elif subType == HandshakeType.server_hello: + yield ServerHello().parse(p) + elif subType == HandshakeType.certificate: + yield Certificate(constructorType).parse(p) + elif subType == HandshakeType.certificate_request: + yield CertificateRequest().parse(p) + elif subType == HandshakeType.certificate_verify: + yield CertificateVerify().parse(p) + elif subType == HandshakeType.server_key_exchange: + yield ServerKeyExchange(constructorType).parse(p) + elif subType == HandshakeType.server_hello_done: + yield ServerHelloDone().parse(p) + elif subType == HandshakeType.client_key_exchange: + yield ClientKeyExchange(constructorType, \ + self.version).parse(p) + elif subType == HandshakeType.finished: + yield Finished(self.version).parse(p) + else: + raise AssertionError() + + #If an exception was raised by a Parser or Message instance: + except SyntaxError, e: + for result in self._sendError(AlertDescription.decode_error, + formatExceptionTrace(e)): + yield result + + + #Returns next record or next handshake message + def _getNextRecord(self): + + #If there's a handshake message waiting, return it + if 
self._handshakeBuffer: + recordHeader, bytes = self._handshakeBuffer[0] + self._handshakeBuffer = self._handshakeBuffer[1:] + yield (recordHeader, Parser(bytes)) + return + + #Otherwise... + #Read the next record header + bytes = createByteArraySequence([]) + recordHeaderLength = 1 + ssl2 = False + while 1: + try: + s = self.sock.recv(recordHeaderLength-len(bytes)) + except socket.error, why: + if why[0] == errno.EWOULDBLOCK: + yield 0 + continue + else: + raise + + #If the connection was abruptly closed, raise an error + if len(s)==0: + raise TLSAbruptCloseError() + + bytes += stringToBytes(s) + if len(bytes)==1: + if bytes[0] in ContentType.all: + ssl2 = False + recordHeaderLength = 5 + elif bytes[0] == 128: + ssl2 = True + recordHeaderLength = 2 + else: + raise SyntaxError() + if len(bytes) == recordHeaderLength: + break + + #Parse the record header + if ssl2: + r = RecordHeader2().parse(Parser(bytes)) + else: + r = RecordHeader3().parse(Parser(bytes)) + + #Check the record header fields + if r.length > 18432: + for result in self._sendError(AlertDescription.record_overflow): + yield result + + #Read the record contents + bytes = createByteArraySequence([]) + while 1: + try: + s = self.sock.recv(r.length - len(bytes)) + except socket.error, why: + if why[0] == errno.EWOULDBLOCK: + yield 0 + continue + else: + raise + + #If the connection is closed, raise a socket error + if len(s)==0: + raise TLSAbruptCloseError() + + bytes += stringToBytes(s) + if len(bytes) == r.length: + break + + #Check the record header fields (2) + #We do this after reading the contents from the socket, so that + #if there's an error, we at least don't leave extra bytes in the + #socket.. + # + # THIS CHECK HAS NO SECURITY RELEVANCE (?), BUT COULD HURT INTEROP. + # SO WE LEAVE IT OUT FOR NOW. 
+ # + #if self._versionCheck and r.version != self.version: + # for result in self._sendError(AlertDescription.protocol_version, + # "Version in header field: %s, should be %s" % (str(r.version), + # str(self.version))): + # yield result + + #Decrypt the record + for result in self._decryptRecord(r.type, bytes): + if result in (0,1): + yield result + else: + break + bytes = result + p = Parser(bytes) + + #If it doesn't contain handshake messages, we can just return it + if r.type != ContentType.handshake: + yield (r, p) + #If it's an SSLv2 ClientHello, we can return it as well + elif r.ssl2: + yield (r, p) + else: + #Otherwise, we loop through and add the handshake messages to the + #handshake buffer + while 1: + if p.index == len(bytes): #If we're at the end + if not self._handshakeBuffer: + for result in self._sendError(\ + AlertDescription.decode_error, \ + "Received empty handshake record"): + yield result + break + #There needs to be at least 4 bytes to get a header + if p.index+4 > len(bytes): + for result in self._sendError(\ + AlertDescription.decode_error, + "A record has a partial handshake message (1)"): + yield result + p.get(1) # skip handshake type + msgLength = p.get(3) + if p.index+msgLength > len(bytes): + for result in self._sendError(\ + AlertDescription.decode_error, + "A record has a partial handshake message (2)"): + yield result + + handshakePair = (r, bytes[p.index-4 : p.index+msgLength]) + self._handshakeBuffer.append(handshakePair) + p.index += msgLength + + #We've moved at least one handshake message into the + #handshakeBuffer, return the first one + recordHeader, bytes = self._handshakeBuffer[0] + self._handshakeBuffer = self._handshakeBuffer[1:] + yield (recordHeader, Parser(bytes)) + + + def _decryptRecord(self, recordType, bytes): + if self._readState.encContext: + + #Decrypt if it's a block cipher + if self._readState.encContext.isBlockCipher: + blockLength = self._readState.encContext.block_size + if len(bytes) % blockLength != 0: 
+ for result in self._sendError(\ + AlertDescription.decryption_failed, + "Encrypted data not a multiple of blocksize"): + yield result + ciphertext = bytesToString(bytes) + plaintext = self._readState.encContext.decrypt(ciphertext) + if self.version == (3,2): #For TLS 1.1, remove explicit IV + plaintext = plaintext[self._readState.encContext.block_size : ] + bytes = stringToBytes(plaintext) + + #Check padding + paddingGood = True + paddingLength = bytes[-1] + if (paddingLength+1) > len(bytes): + paddingGood=False + totalPaddingLength = 0 + else: + if self.version == (3,0): + totalPaddingLength = paddingLength+1 + elif self.version in ((3,1), (3,2)): + totalPaddingLength = paddingLength+1 + paddingBytes = bytes[-totalPaddingLength:-1] + for byte in paddingBytes: + if byte != paddingLength: + paddingGood = False + totalPaddingLength = 0 + else: + raise AssertionError() + + #Decrypt if it's a stream cipher + else: + paddingGood = True + ciphertext = bytesToString(bytes) + plaintext = self._readState.encContext.decrypt(ciphertext) + bytes = stringToBytes(plaintext) + totalPaddingLength = 0 + + #Check MAC + macGood = True + macLength = self._readState.macContext.digest_size + endLength = macLength + totalPaddingLength + if endLength > len(bytes): + macGood = False + else: + #Read MAC + startIndex = len(bytes) - endLength + endIndex = startIndex + macLength + checkBytes = bytes[startIndex : endIndex] + + #Calculate MAC + seqnumStr = self._readState.getSeqNumStr() + bytes = bytes[:-endLength] + bytesStr = bytesToString(bytes) + mac = self._readState.macContext.copy() + mac.update(seqnumStr) + mac.update(chr(recordType)) + if self.version == (3,0): + mac.update( chr( int(len(bytes)/256) ) ) + mac.update( chr( int(len(bytes)%256) ) ) + elif self.version in ((3,1), (3,2)): + mac.update(chr(self.version[0])) + mac.update(chr(self.version[1])) + mac.update( chr( int(len(bytes)/256) ) ) + mac.update( chr( int(len(bytes)%256) ) ) + else: + raise AssertionError() + 
mac.update(bytesStr) + macString = mac.digest() + macBytes = stringToBytes(macString) + + #Compare MACs + if macBytes != checkBytes: + macGood = False + + if not (paddingGood and macGood): + for result in self._sendError(AlertDescription.bad_record_mac, + "MAC failure (or padding failure)"): + yield result + + yield bytes + + def _handshakeStart(self, client): + self._client = client + self._handshake_md5 = md5.md5() + self._handshake_sha = sha.sha() + self._handshakeBuffer = [] + self.allegedSharedKeyUsername = None + self.allegedSrpUsername = None + self._refCount = 1 + + def _handshakeDone(self, resumed): + self.resumed = resumed + self.closed = False + + def _calcPendingStates(self, clientRandom, serverRandom, implementations): + if self.session.cipherSuite in CipherSuite.aes128Suites: + macLength = 20 + keyLength = 16 + ivLength = 16 + createCipherFunc = createAES + elif self.session.cipherSuite in CipherSuite.aes256Suites: + macLength = 20 + keyLength = 32 + ivLength = 16 + createCipherFunc = createAES + elif self.session.cipherSuite in CipherSuite.rc4Suites: + macLength = 20 + keyLength = 16 + ivLength = 0 + createCipherFunc = createRC4 + elif self.session.cipherSuite in CipherSuite.tripleDESSuites: + macLength = 20 + keyLength = 24 + ivLength = 8 + createCipherFunc = createTripleDES + else: + raise AssertionError() + + if self.version == (3,0): + createMACFunc = MAC_SSL + elif self.version in ((3,1), (3,2)): + createMACFunc = hmac.HMAC + + outputLength = (macLength*2) + (keyLength*2) + (ivLength*2) + + #Calculate Keying Material from Master Secret + if self.version == (3,0): + keyBlock = PRF_SSL(self.session.masterSecret, + concatArrays(serverRandom, clientRandom), + outputLength) + elif self.version in ((3,1), (3,2)): + keyBlock = PRF(self.session.masterSecret, + "key expansion", + concatArrays(serverRandom,clientRandom), + outputLength) + else: + raise AssertionError() + + #Slice up Keying Material + clientPendingState = _ConnectionState() + 
serverPendingState = _ConnectionState() + p = Parser(keyBlock) + clientMACBlock = bytesToString(p.getFixBytes(macLength)) + serverMACBlock = bytesToString(p.getFixBytes(macLength)) + clientKeyBlock = bytesToString(p.getFixBytes(keyLength)) + serverKeyBlock = bytesToString(p.getFixBytes(keyLength)) + clientIVBlock = bytesToString(p.getFixBytes(ivLength)) + serverIVBlock = bytesToString(p.getFixBytes(ivLength)) + clientPendingState.macContext = createMACFunc(clientMACBlock, + digestmod=sha) + serverPendingState.macContext = createMACFunc(serverMACBlock, + digestmod=sha) + clientPendingState.encContext = createCipherFunc(clientKeyBlock, + clientIVBlock, + implementations) + serverPendingState.encContext = createCipherFunc(serverKeyBlock, + serverIVBlock, + implementations) + + #Assign new connection states to pending states + if self._client: + self._pendingWriteState = clientPendingState + self._pendingReadState = serverPendingState + else: + self._pendingWriteState = serverPendingState + self._pendingReadState = clientPendingState + + if self.version == (3,2) and ivLength: + #Choose fixedIVBlock for TLS 1.1 (this is encrypted with the CBC + #residue to create the IV for each sent block) + self.fixedIVBlock = getRandomBytes(ivLength) + + def _changeWriteState(self): + self._writeState = self._pendingWriteState + self._pendingWriteState = _ConnectionState() + + def _changeReadState(self): + self._readState = self._pendingReadState + self._pendingReadState = _ConnectionState() + + def _sendFinished(self): + #Send ChangeCipherSpec + for result in self._sendMsg(ChangeCipherSpec()): + yield result + + #Switch to pending write state + self._changeWriteState() + + #Calculate verification data + verifyData = self._calcFinished(True) + if self.fault == Fault.badFinished: + verifyData[0] = (verifyData[0]+1)%256 + + #Send Finished message under new state + finished = Finished(self.version).create(verifyData) + for result in self._sendMsg(finished): + yield result + + def 
_getFinished(self): + #Get and check ChangeCipherSpec + for result in self._getMsg(ContentType.change_cipher_spec): + if result in (0,1): + yield result + changeCipherSpec = result + + if changeCipherSpec.type != 1: + for result in self._sendError(AlertDescription.illegal_parameter, + "ChangeCipherSpec type incorrect"): + yield result + + #Switch to pending read state + self._changeReadState() + + #Calculate verification data + verifyData = self._calcFinished(False) + + #Get and check Finished message under new state + for result in self._getMsg(ContentType.handshake, + HandshakeType.finished): + if result in (0,1): + yield result + finished = result + if finished.verify_data != verifyData: + for result in self._sendError(AlertDescription.decrypt_error, + "Finished message is incorrect"): + yield result + + def _calcFinished(self, send=True): + if self.version == (3,0): + if (self._client and send) or (not self._client and not send): + senderStr = "\x43\x4C\x4E\x54" + else: + senderStr = "\x53\x52\x56\x52" + + verifyData = self._calcSSLHandshakeHash(self.session.masterSecret, + senderStr) + return verifyData + + elif self.version in ((3,1), (3,2)): + if (self._client and send) or (not self._client and not send): + label = "client finished" + else: + label = "server finished" + + handshakeHashes = stringToBytes(self._handshake_md5.digest() + \ + self._handshake_sha.digest()) + verifyData = PRF(self.session.masterSecret, label, handshakeHashes, + 12) + return verifyData + else: + raise AssertionError() + + #Used for Finished messages and CertificateVerify messages in SSL v3 + def _calcSSLHandshakeHash(self, masterSecret, label): + masterSecretStr = bytesToString(masterSecret) + + imac_md5 = self._handshake_md5.copy() + imac_sha = self._handshake_sha.copy() + + imac_md5.update(label + masterSecretStr + '\x36'*48) + imac_sha.update(label + masterSecretStr + '\x36'*40) + + md5Str = md5.md5(masterSecretStr + ('\x5c'*48) + \ + imac_md5.digest()).digest() + shaStr = 
sha.sha(masterSecretStr + ('\x5c'*40) + \ + imac_sha.digest()).digest() + + return stringToBytes(md5Str + shaStr) + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/VerifierDB.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/VerifierDB.py new file mode 100644 index 0000000..f706b17 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/VerifierDB.py @@ -0,0 +1,90 @@ +"""Class for storing SRP password verifiers.""" + +from utils.cryptomath import * +from utils.compat import * +import mathtls +from BaseDB import BaseDB + +class VerifierDB(BaseDB): + """This class represent an in-memory or on-disk database of SRP + password verifiers. + + A VerifierDB can be passed to a server handshake to authenticate + a client based on one of the verifiers. + + This class is thread-safe. + """ + def __init__(self, filename=None): + """Create a new VerifierDB instance. + + @type filename: str + @param filename: Filename for an on-disk database, or None for + an in-memory database. If the filename already exists, follow + this with a call to open(). To create a new on-disk database, + follow this with a call to create(). + """ + BaseDB.__init__(self, filename, "verifier") + + def _getItem(self, username, valueStr): + (N, g, salt, verifier) = valueStr.split(" ") + N = base64ToNumber(N) + g = base64ToNumber(g) + salt = base64ToString(salt) + verifier = base64ToNumber(verifier) + return (N, g, salt, verifier) + + def __setitem__(self, username, verifierEntry): + """Add a verifier entry to the database. + + @type username: str + @param username: The username to associate the verifier with. + Must be less than 256 characters in length. Must not already + be in the database. + + @type verifierEntry: tuple + @param verifierEntry: The verifier entry to add. Use + L{tlslite.VerifierDB.VerifierDB.makeVerifier} to create a + verifier entry. 
+ """ + BaseDB.__setitem__(self, username, verifierEntry) + + + def _setItem(self, username, value): + if len(username)>=256: + raise ValueError("username too long") + N, g, salt, verifier = value + N = numberToBase64(N) + g = numberToBase64(g) + salt = stringToBase64(salt) + verifier = numberToBase64(verifier) + valueStr = " ".join( (N, g, salt, verifier) ) + return valueStr + + def _checkItem(self, value, username, param): + (N, g, salt, verifier) = value + x = mathtls.makeX(salt, username, param) + v = powMod(g, x, N) + return (verifier == v) + + + def makeVerifier(username, password, bits): + """Create a verifier entry which can be stored in a VerifierDB. + + @type username: str + @param username: The username for this verifier. Must be less + than 256 characters in length. + + @type password: str + @param password: The password for this verifier. + + @type bits: int + @param bits: This values specifies which SRP group parameters + to use. It must be one of (1024, 1536, 2048, 3072, 4096, 6144, + 8192). Larger values are more secure but slower. 2048 is a + good compromise between safety and speed. + + @rtype: tuple + @return: A tuple which may be stored in a VerifierDB. + """ + return mathtls.makeVerifier(username, password, bits) + makeVerifier = staticmethod(makeVerifier) \ No newline at end of file diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/X509.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/X509.py new file mode 100644 index 0000000..a47ddcf --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/X509.py @@ -0,0 +1,133 @@ +"""Class representing an X.509 certificate.""" + +from utils.ASN1Parser import ASN1Parser +from utils.cryptomath import * +from utils.keyfactory import _createPublicRSAKey + + +class X509: + """This class represents an X.509 certificate. 
+ + @type bytes: L{array.array} of unsigned bytes + @ivar bytes: The DER-encoded ASN.1 certificate + + @type publicKey: L{tlslite.utils.RSAKey.RSAKey} + @ivar publicKey: The subject public key from the certificate. + """ + + def __init__(self): + self.bytes = createByteArraySequence([]) + self.publicKey = None + + def parse(self, s): + """Parse a PEM-encoded X.509 certificate. + + @type s: str + @param s: A PEM-encoded X.509 certificate (i.e. a base64-encoded + certificate wrapped with "-----BEGIN CERTIFICATE-----" and + "-----END CERTIFICATE-----" tags). + """ + + start = s.find("-----BEGIN CERTIFICATE-----") + end = s.find("-----END CERTIFICATE-----") + if start == -1: + raise SyntaxError("Missing PEM prefix") + if end == -1: + raise SyntaxError("Missing PEM postfix") + s = s[start+len("-----BEGIN CERTIFICATE-----") : end] + + bytes = base64ToBytes(s) + self.parseBinary(bytes) + return self + + def parseBinary(self, bytes): + """Parse a DER-encoded X.509 certificate. + + @type bytes: str or L{array.array} of unsigned bytes + @param bytes: A DER-encoded X.509 certificate. + """ + + if isinstance(bytes, type("")): + bytes = stringToBytes(bytes) + + self.bytes = bytes + p = ASN1Parser(bytes) + + #Get the tbsCertificate + tbsCertificateP = p.getChild(0) + + #Is the optional version field present? + #This determines which index the key is at. 
+ if tbsCertificateP.value[0]==0xA0: + subjectPublicKeyInfoIndex = 6 + else: + subjectPublicKeyInfoIndex = 5 + + #Get the subjectPublicKeyInfo + subjectPublicKeyInfoP = tbsCertificateP.getChild(\ + subjectPublicKeyInfoIndex) + + #Get the algorithm + algorithmP = subjectPublicKeyInfoP.getChild(0) + rsaOID = algorithmP.value + if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]: + raise SyntaxError("Unrecognized AlgorithmIdentifier") + + #Get the subjectPublicKey + subjectPublicKeyP = subjectPublicKeyInfoP.getChild(1) + + #Adjust for BIT STRING encapsulation + if (subjectPublicKeyP.value[0] !=0): + raise SyntaxError() + subjectPublicKeyP = ASN1Parser(subjectPublicKeyP.value[1:]) + + #Get the modulus and exponent + modulusP = subjectPublicKeyP.getChild(0) + publicExponentP = subjectPublicKeyP.getChild(1) + + #Decode them into numbers + n = bytesToNumber(modulusP.value) + e = bytesToNumber(publicExponentP.value) + + #Create a public key instance + self.publicKey = _createPublicRSAKey(n, e) + + def getFingerprint(self): + """Get the hex-encoded fingerprint of this certificate. + + @rtype: str + @return: A hex-encoded fingerprint. + """ + return sha.sha(self.bytes).hexdigest() + + def getCommonName(self): + """Get the Subject's Common Name from the certificate. + + The cryptlib_py module must be installed in order to use this + function. + + @rtype: str or None + @return: The CN component of the certificate's subject DN, if + present. 
+ """ + import cryptlib_py + import array + c = cryptlib_py.cryptImportCert(self.bytes, cryptlib_py.CRYPT_UNUSED) + name = cryptlib_py.CRYPT_CERTINFO_COMMONNAME + try: + try: + length = cryptlib_py.cryptGetAttributeString(c, name, None) + returnVal = array.array('B', [0] * length) + cryptlib_py.cryptGetAttributeString(c, name, returnVal) + returnVal = returnVal.tostring() + except cryptlib_py.CryptException, e: + if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND: + returnVal = None + return returnVal + finally: + cryptlib_py.cryptDestroyCert(c) + + def writeBytes(self): + return self.bytes + + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/X509CertChain.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/X509CertChain.py new file mode 100644 index 0000000..d5f0b4d --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/X509CertChain.py @@ -0,0 +1,181 @@ +"""Class representing an X.509 certificate chain.""" + +from utils import cryptomath + +class X509CertChain: + """This class represents a chain of X.509 certificates. + + @type x509List: list + @ivar x509List: A list of L{tlslite.X509.X509} instances, + starting with the end-entity certificate and with every + subsequent certificate certifying the previous. + """ + + def __init__(self, x509List=None): + """Create a new X509CertChain. + + @type x509List: list + @param x509List: A list of L{tlslite.X509.X509} instances, + starting with the end-entity certificate and with every + subsequent certificate certifying the previous. + """ + if x509List: + self.x509List = x509List + else: + self.x509List = [] + + def getNumCerts(self): + """Get the number of certificates in this chain. + + @rtype: int + """ + return len(self.x509List) + + def getEndEntityPublicKey(self): + """Get the public key from the end-entity certificate. 
+ + @rtype: L{tlslite.utils.RSAKey.RSAKey} + """ + if self.getNumCerts() == 0: + raise AssertionError() + return self.x509List[0].publicKey + + def getFingerprint(self): + """Get the hex-encoded fingerprint of the end-entity certificate. + + @rtype: str + @return: A hex-encoded fingerprint. + """ + if self.getNumCerts() == 0: + raise AssertionError() + return self.x509List[0].getFingerprint() + + def getCommonName(self): + """Get the Subject's Common Name from the end-entity certificate. + + The cryptlib_py module must be installed in order to use this + function. + + @rtype: str or None + @return: The CN component of the certificate's subject DN, if + present. + """ + if self.getNumCerts() == 0: + raise AssertionError() + return self.x509List[0].getCommonName() + + def validate(self, x509TrustList): + """Check the validity of the certificate chain. + + This checks that every certificate in the chain validates with + the subsequent one, until some certificate validates with (or + is identical to) one of the passed-in root certificates. + + The cryptlib_py module must be installed in order to use this + function. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + certificate chain must extend to one of these certificates to + be considered valid. + """ + + import cryptlib_py + c1 = None + c2 = None + lastC = None + rootC = None + + try: + rootFingerprints = [c.getFingerprint() for c in x509TrustList] + + #Check that every certificate in the chain validates with the + #next one + for cert1, cert2 in zip(self.x509List, self.x509List[1:]): + + #If we come upon a root certificate, we're done. 
+ if cert1.getFingerprint() in rootFingerprints: + return True + + c1 = cryptlib_py.cryptImportCert(cert1.writeBytes(), + cryptlib_py.CRYPT_UNUSED) + c2 = cryptlib_py.cryptImportCert(cert2.writeBytes(), + cryptlib_py.CRYPT_UNUSED) + try: + cryptlib_py.cryptCheckCert(c1, c2) + except: + return False + cryptlib_py.cryptDestroyCert(c1) + c1 = None + cryptlib_py.cryptDestroyCert(c2) + c2 = None + + #If the last certificate is one of the root certificates, we're + #done. + if self.x509List[-1].getFingerprint() in rootFingerprints: + return True + + #Otherwise, find a root certificate that the last certificate + #chains to, and validate them. + lastC = cryptlib_py.cryptImportCert(self.x509List[-1].writeBytes(), + cryptlib_py.CRYPT_UNUSED) + for rootCert in x509TrustList: + rootC = cryptlib_py.cryptImportCert(rootCert.writeBytes(), + cryptlib_py.CRYPT_UNUSED) + if self._checkChaining(lastC, rootC): + try: + cryptlib_py.cryptCheckCert(lastC, rootC) + return True + except: + return False + return False + finally: + if not (c1 is None): + cryptlib_py.cryptDestroyCert(c1) + if not (c2 is None): + cryptlib_py.cryptDestroyCert(c2) + if not (lastC is None): + cryptlib_py.cryptDestroyCert(lastC) + if not (rootC is None): + cryptlib_py.cryptDestroyCert(rootC) + + + + def _checkChaining(self, lastC, rootC): + import cryptlib_py + import array + def compareNames(name): + try: + length = cryptlib_py.cryptGetAttributeString(lastC, name, None) + lastName = array.array('B', [0] * length) + cryptlib_py.cryptGetAttributeString(lastC, name, lastName) + lastName = lastName.tostring() + except cryptlib_py.CryptException, e: + if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND: + lastName = None + try: + length = cryptlib_py.cryptGetAttributeString(rootC, name, None) + rootName = array.array('B', [0] * length) + cryptlib_py.cryptGetAttributeString(rootC, name, rootName) + rootName = rootName.tostring() + except cryptlib_py.CryptException, e: + if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND: + rootName 
= None + + return lastName == rootName + + cryptlib_py.cryptSetAttribute(lastC, + cryptlib_py.CRYPT_CERTINFO_ISSUERNAME, + cryptlib_py.CRYPT_UNUSED) + + if not compareNames(cryptlib_py.CRYPT_CERTINFO_COUNTRYNAME): + return False + if not compareNames(cryptlib_py.CRYPT_CERTINFO_LOCALITYNAME): + return False + if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONNAME): + return False + if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONALUNITNAME): + return False + if not compareNames(cryptlib_py.CRYPT_CERTINFO_COMMONNAME): + return False + return True \ No newline at end of file diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/__init__.py new file mode 100644 index 0000000..47cfd1c --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/__init__.py @@ -0,0 +1,39 @@ +""" +TLS Lite is a free python library that implements SSL v3, TLS v1, and +TLS v1.1. TLS Lite supports non-traditional authentication methods +such as SRP, shared keys, and cryptoIDs, in addition to X.509 +certificates. TLS Lite is pure python, however it can access OpenSSL, +cryptlib, pycrypto, and GMPY for faster crypto operations. TLS Lite +integrates with httplib, xmlrpclib, poplib, imaplib, smtplib, +SocketServer, asyncore, and Twisted. + +To use, do:: + + from tlslite.api import * + +Then use the L{tlslite.TLSConnection.TLSConnection} class with a socket, +or use one of the integration classes in L{tlslite.integration}. 
+ +@version: 0.3.8 +""" +__version__ = "0.3.8" + +__all__ = ["api", + "BaseDB", + "Checker", + "constants", + "errors", + "FileObject", + "HandshakeSettings", + "mathtls", + "messages", + "Session", + "SessionCache", + "SharedKeyDB", + "TLSConnection", + "TLSRecordLayer", + "VerifierDB", + "X509", + "X509CertChain", + "integration", + "utils"] diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/api.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/api.py new file mode 100644 index 0000000..eebfbc6 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/api.py @@ -0,0 +1,75 @@ +"""Import this module for easy access to TLS Lite objects. + +The TLS Lite API consists of classes, functions, and variables spread +throughout this package. Instead of importing them individually with:: + + from tlslite.TLSConnection import TLSConnection + from tlslite.HandshakeSettings import HandshakeSettings + from tlslite.errors import * + . + . + +It's easier to do:: + + from tlslite.api import * + +This imports all the important objects (TLSConnection, Checker, +HandshakeSettings, etc.) into the global namespace. 
In particular, it +imports:: + + from constants import AlertLevel, AlertDescription, Fault + from errors import * + from Checker import Checker + from HandshakeSettings import HandshakeSettings + from Session import Session + from SessionCache import SessionCache + from SharedKeyDB import SharedKeyDB + from TLSConnection import TLSConnection + from VerifierDB import VerifierDB + from X509 import X509 + from X509CertChain import X509CertChain + + from integration.HTTPTLSConnection import HTTPTLSConnection + from integration.POP3_TLS import POP3_TLS + from integration.IMAP4_TLS import IMAP4_TLS + from integration.SMTP_TLS import SMTP_TLS + from integration.XMLRPCTransport import XMLRPCTransport + from integration.TLSSocketServerMixIn import TLSSocketServerMixIn + from integration.TLSAsyncDispatcherMixIn import TLSAsyncDispatcherMixIn + from integration.TLSTwistedProtocolWrapper import TLSTwistedProtocolWrapper + from utils.cryptomath import cryptlibpyLoaded, m2cryptoLoaded, + gmpyLoaded, pycryptoLoaded, prngName + from utils.keyfactory import generateRSAKey, parsePEMKey, parseXMLKey, + parseAsPublicKey, parsePrivateKey +""" + +from constants import AlertLevel, AlertDescription, Fault +from errors import * +from Checker import Checker +from HandshakeSettings import HandshakeSettings +from Session import Session +from SessionCache import SessionCache +from SharedKeyDB import SharedKeyDB +from TLSConnection import TLSConnection +from VerifierDB import VerifierDB +from X509 import X509 +from X509CertChain import X509CertChain + +from integration.HTTPTLSConnection import HTTPTLSConnection +from integration.TLSSocketServerMixIn import TLSSocketServerMixIn +from integration.TLSAsyncDispatcherMixIn import TLSAsyncDispatcherMixIn +from integration.POP3_TLS import POP3_TLS +from integration.IMAP4_TLS import IMAP4_TLS +from integration.SMTP_TLS import SMTP_TLS +from integration.XMLRPCTransport import XMLRPCTransport +try: + import twisted + del(twisted) + from 
integration.TLSTwistedProtocolWrapper import TLSTwistedProtocolWrapper +except ImportError: + pass + +from utils.cryptomath import cryptlibpyLoaded, m2cryptoLoaded, gmpyLoaded, \ + pycryptoLoaded, prngName +from utils.keyfactory import generateRSAKey, parsePEMKey, parseXMLKey, \ + parseAsPublicKey, parsePrivateKey diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/constants.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/constants.py new file mode 100644 index 0000000..8f2d559 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/constants.py @@ -0,0 +1,225 @@ +"""Constants used in various places.""" + +class CertificateType: + x509 = 0 + openpgp = 1 + cryptoID = 2 + +class HandshakeType: + hello_request = 0 + client_hello = 1 + server_hello = 2 + certificate = 11 + server_key_exchange = 12 + certificate_request = 13 + server_hello_done = 14 + certificate_verify = 15 + client_key_exchange = 16 + finished = 20 + +class ContentType: + change_cipher_spec = 20 + alert = 21 + handshake = 22 + application_data = 23 + all = (20,21,22,23) + +class AlertLevel: + warning = 1 + fatal = 2 + +class AlertDescription: + """ + @cvar bad_record_mac: A TLS record failed to decrypt properly. + + If this occurs during a shared-key or SRP handshake it most likely + indicates a bad password. It may also indicate an implementation + error, or some tampering with the data in transit. + + This alert will be signalled by the server if the SRP password is bad. It + may also be signalled by the server if the SRP username is unknown to the + server, but it doesn't wish to reveal that fact. + + This alert will be signalled by the client if the shared-key username is + bad. + + @cvar handshake_failure: A problem occurred while handshaking. + + This typically indicates a lack of common ciphersuites between client and + server, or some other disagreement (about SRP parameters or key sizes, + for example). 
+ + @cvar protocol_version: The other party's SSL/TLS version was unacceptable. + + This indicates that the client and server couldn't agree on which version + of SSL or TLS to use. + + @cvar user_canceled: The handshake is being cancelled for some reason. + + """ + + close_notify = 0 + unexpected_message = 10 + bad_record_mac = 20 + decryption_failed = 21 + record_overflow = 22 + decompression_failure = 30 + handshake_failure = 40 + no_certificate = 41 #SSLv3 + bad_certificate = 42 + unsupported_certificate = 43 + certificate_revoked = 44 + certificate_expired = 45 + certificate_unknown = 46 + illegal_parameter = 47 + unknown_ca = 48 + access_denied = 49 + decode_error = 50 + decrypt_error = 51 + export_restriction = 60 + protocol_version = 70 + insufficient_security = 71 + internal_error = 80 + user_canceled = 90 + no_renegotiation = 100 + unknown_srp_username = 120 + missing_srp_username = 121 + untrusted_srp_parameters = 122 + +class CipherSuite: + TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA = 0x0050 + TLS_SRP_SHA_WITH_AES_128_CBC_SHA = 0x0053 + TLS_SRP_SHA_WITH_AES_256_CBC_SHA = 0x0056 + + TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA = 0x0051 + TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA = 0x0054 + TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA = 0x0057 + + TLS_RSA_WITH_3DES_EDE_CBC_SHA = 0x000A + TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F + TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035 + TLS_RSA_WITH_RC4_128_SHA = 0x0005 + + srpSuites = [] + srpSuites.append(TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA) + srpSuites.append(TLS_SRP_SHA_WITH_AES_128_CBC_SHA) + srpSuites.append(TLS_SRP_SHA_WITH_AES_256_CBC_SHA) + def getSrpSuites(ciphers): + suites = [] + for cipher in ciphers: + if cipher == "aes128": + suites.append(CipherSuite.TLS_SRP_SHA_WITH_AES_128_CBC_SHA) + elif cipher == "aes256": + suites.append(CipherSuite.TLS_SRP_SHA_WITH_AES_256_CBC_SHA) + elif cipher == "3des": + suites.append(CipherSuite.TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA) + return suites + getSrpSuites = staticmethod(getSrpSuites) + + srpRsaSuites = [] + 
srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA) + srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA) + srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA) + def getSrpRsaSuites(ciphers): + suites = [] + for cipher in ciphers: + if cipher == "aes128": + suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA) + elif cipher == "aes256": + suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA) + elif cipher == "3des": + suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA) + return suites + getSrpRsaSuites = staticmethod(getSrpRsaSuites) + + rsaSuites = [] + rsaSuites.append(TLS_RSA_WITH_3DES_EDE_CBC_SHA) + rsaSuites.append(TLS_RSA_WITH_AES_128_CBC_SHA) + rsaSuites.append(TLS_RSA_WITH_AES_256_CBC_SHA) + rsaSuites.append(TLS_RSA_WITH_RC4_128_SHA) + def getRsaSuites(ciphers): + suites = [] + for cipher in ciphers: + if cipher == "aes128": + suites.append(CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA) + elif cipher == "aes256": + suites.append(CipherSuite.TLS_RSA_WITH_AES_256_CBC_SHA) + elif cipher == "rc4": + suites.append(CipherSuite.TLS_RSA_WITH_RC4_128_SHA) + elif cipher == "3des": + suites.append(CipherSuite.TLS_RSA_WITH_3DES_EDE_CBC_SHA) + return suites + getRsaSuites = staticmethod(getRsaSuites) + + tripleDESSuites = [] + tripleDESSuites.append(TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA) + tripleDESSuites.append(TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA) + tripleDESSuites.append(TLS_RSA_WITH_3DES_EDE_CBC_SHA) + + aes128Suites = [] + aes128Suites.append(TLS_SRP_SHA_WITH_AES_128_CBC_SHA) + aes128Suites.append(TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA) + aes128Suites.append(TLS_RSA_WITH_AES_128_CBC_SHA) + + aes256Suites = [] + aes256Suites.append(TLS_SRP_SHA_WITH_AES_256_CBC_SHA) + aes256Suites.append(TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA) + aes256Suites.append(TLS_RSA_WITH_AES_256_CBC_SHA) + + rc4Suites = [] + rc4Suites.append(TLS_RSA_WITH_RC4_128_SHA) + + +class Fault: + badUsername = 101 + badPassword = 102 + badA = 103 + clientSrpFaults 
= range(101,104) + + badVerifyMessage = 601 + clientCertFaults = range(601,602) + + badPremasterPadding = 501 + shortPremasterSecret = 502 + clientNoAuthFaults = range(501,503) + + badIdentifier = 401 + badSharedKey = 402 + clientSharedKeyFaults = range(401,403) + + badB = 201 + serverFaults = range(201,202) + + badFinished = 300 + badMAC = 301 + badPadding = 302 + genericFaults = range(300,303) + + faultAlerts = {\ + badUsername: (AlertDescription.unknown_srp_username, \ + AlertDescription.bad_record_mac),\ + badPassword: (AlertDescription.bad_record_mac,),\ + badA: (AlertDescription.illegal_parameter,),\ + badIdentifier: (AlertDescription.handshake_failure,),\ + badSharedKey: (AlertDescription.bad_record_mac,),\ + badPremasterPadding: (AlertDescription.bad_record_mac,),\ + shortPremasterSecret: (AlertDescription.bad_record_mac,),\ + badVerifyMessage: (AlertDescription.decrypt_error,),\ + badFinished: (AlertDescription.decrypt_error,),\ + badMAC: (AlertDescription.bad_record_mac,),\ + badPadding: (AlertDescription.bad_record_mac,) + } + + faultNames = {\ + badUsername: "bad username",\ + badPassword: "bad password",\ + badA: "bad A",\ + badIdentifier: "bad identifier",\ + badSharedKey: "bad sharedkey",\ + badPremasterPadding: "bad premaster padding",\ + shortPremasterSecret: "short premaster secret",\ + badVerifyMessage: "bad verify message",\ + badFinished: "bad finished message",\ + badMAC: "bad MAC",\ + badPadding: "bad padding" + } diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/errors.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/errors.py new file mode 100644 index 0000000..c7f7ba8 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/errors.py @@ -0,0 +1,149 @@ +"""Exception classes. 
"""Exception classes.

@sort: TLSError, TLSAbruptCloseError, TLSAlert, TLSLocalAlert, TLSRemoteAlert,
TLSAuthenticationError, TLSNoAuthenticationError, TLSAuthenticationTypeError,
TLSFingerprintError, TLSAuthorizationError, TLSValidationError, TLSFaultError
"""

from constants import AlertDescription, AlertLevel

class TLSError(Exception):
    """Base class for all TLS Lite exceptions."""
    pass

class TLSAbruptCloseError(TLSError):
    """The socket was closed without a proper TLS shutdown.

    The TLS specification mandates that an alert of some sort
    must be sent before the underlying socket is closed.  If the socket
    is closed without this, it could signify that an attacker is trying
    to truncate the connection.  It could also signify a misbehaving
    TLS implementation, or a random network failure.
    """
    pass

class TLSAlert(TLSError):
    """A TLS alert has been signalled."""

    # Maps AlertDescription codes to their protocol names; used by the
    # __str__ methods of TLSLocalAlert and TLSRemoteAlert below.
    # (A dead `pass` statement that preceded this dict was removed.)
    _descriptionStr = {
        AlertDescription.close_notify: "close_notify",
        AlertDescription.unexpected_message: "unexpected_message",
        AlertDescription.bad_record_mac: "bad_record_mac",
        AlertDescription.decryption_failed: "decryption_failed",
        AlertDescription.record_overflow: "record_overflow",
        AlertDescription.decompression_failure: "decompression_failure",
        AlertDescription.handshake_failure: "handshake_failure",
        AlertDescription.no_certificate: "no certificate",
        AlertDescription.bad_certificate: "bad_certificate",
        AlertDescription.unsupported_certificate: "unsupported_certificate",
        AlertDescription.certificate_revoked: "certificate_revoked",
        AlertDescription.certificate_expired: "certificate_expired",
        AlertDescription.certificate_unknown: "certificate_unknown",
        AlertDescription.illegal_parameter: "illegal_parameter",
        AlertDescription.unknown_ca: "unknown_ca",
        AlertDescription.access_denied: "access_denied",
        AlertDescription.decode_error: "decode_error",
        AlertDescription.decrypt_error: "decrypt_error",
        AlertDescription.export_restriction: "export_restriction",
        AlertDescription.protocol_version: "protocol_version",
        AlertDescription.insufficient_security: "insufficient_security",
        AlertDescription.internal_error: "internal_error",
        AlertDescription.user_canceled: "user_canceled",
        AlertDescription.no_renegotiation: "no_renegotiation",
        AlertDescription.unknown_srp_username: "unknown_srp_username",
        AlertDescription.missing_srp_username: "missing_srp_username"}

class TLSLocalAlert(TLSAlert):
    """A TLS alert has been signalled by the local implementation.

    @type description: int
    @ivar description: Set to one of the constants in
    L{tlslite.constants.AlertDescription}

    @type level: int
    @ivar level: Set to one of the constants in
    L{tlslite.constants.AlertLevel}

    @type message: str
    @ivar message: Description of what went wrong.
    """
    def __init__(self, alert, message=None):
        self.description = alert.description
        self.level = alert.level
        self.message = message

    def __str__(self):
        # Fall back to the numeric code if the description is unknown.
        alertStr = TLSAlert._descriptionStr.get(self.description)
        if alertStr is None:
            alertStr = str(self.description)
        if self.message:
            return alertStr + ": " + self.message
        else:
            return alertStr

class TLSRemoteAlert(TLSAlert):
    """A TLS alert has been signalled by the remote implementation.

    @type description: int
    @ivar description: Set to one of the constants in
    L{tlslite.constants.AlertDescription}

    @type level: int
    @ivar level: Set to one of the constants in
    L{tlslite.constants.AlertLevel}
    """
    def __init__(self, alert):
        self.description = alert.description
        self.level = alert.level

    def __str__(self):
        # Fall back to the numeric code if the description is unknown.
        alertStr = TLSAlert._descriptionStr.get(self.description)
        if alertStr is None:
            alertStr = str(self.description)
        return alertStr

class TLSAuthenticationError(TLSError):
    """The handshake succeeded, but the other party's authentication
    was inadequate.

    This exception will only be raised when a
    L{tlslite.Checker.Checker} has been passed to a handshake function.
    The Checker will be invoked once the handshake completes, and if
    the Checker objects to how the other party authenticated, a
    subclass of this exception will be raised.
    """
    pass

class TLSNoAuthenticationError(TLSAuthenticationError):
    """The Checker was expecting the other party to authenticate with a
    certificate chain, but this did not occur."""
    pass

class TLSAuthenticationTypeError(TLSAuthenticationError):
    """The Checker was expecting the other party to authenticate with a
    different type of certificate chain."""
    pass

class TLSFingerprintError(TLSAuthenticationError):
    """The Checker was expecting the other party to authenticate with a
    certificate chain that matches a different fingerprint."""
    pass

class TLSAuthorizationError(TLSAuthenticationError):
    """The Checker was expecting the other party to authenticate with a
    certificate chain that has a different authorization."""
    pass

class TLSValidationError(TLSAuthenticationError):
    """The Checker has determined that the other party's certificate
    chain is invalid."""
    pass

class TLSFaultError(TLSError):
    """The other party responded incorrectly to an induced fault.

    This exception will only occur during fault testing, when a
    TLSConnection's fault variable is set to induce some sort of
    faulty behavior, and the other party doesn't respond appropriately.
    """
    pass
+""" + +class AsyncStateMachine: + """ + This is an abstract class that's used to integrate TLS Lite with + asyncore and Twisted. + + This class signals wantsReadsEvent() and wantsWriteEvent(). When + the underlying socket has become readable or writeable, the event + should be passed to this class by calling inReadEvent() or + inWriteEvent(). This class will then try to read or write through + the socket, and will update its state appropriately. + + This class will forward higher-level events to its subclass. For + example, when a complete TLS record has been received, + outReadEvent() will be called with the decrypted data. + """ + + def __init__(self): + self._clear() + + def _clear(self): + #These store the various asynchronous operations (i.e. + #generators). Only one of them, at most, is ever active at a + #time. + self.handshaker = None + self.closer = None + self.reader = None + self.writer = None + + #This stores the result from the last call to the + #currently active operation. If 0 it indicates that the + #operation wants to read, if 1 it indicates that the + #operation wants to write. If None, there is no active + #operation. + self.result = None + + def _checkAssert(self, maxActive=1): + #This checks that only one operation, at most, is + #active, and that self.result is set appropriately. + activeOps = 0 + if self.handshaker: + activeOps += 1 + if self.closer: + activeOps += 1 + if self.reader: + activeOps += 1 + if self.writer: + activeOps += 1 + + if self.result == None: + if activeOps != 0: + raise AssertionError() + elif self.result in (0,1): + if activeOps != 1: + raise AssertionError() + else: + raise AssertionError() + if activeOps > maxActive: + raise AssertionError() + + def wantsReadEvent(self): + """If the state machine wants to read. + + If an operation is active, this returns whether or not the + operation wants to read from the socket. If an operation is + not active, this returns None. 
+ + @rtype: bool or None + @return: If the state machine wants to read. + """ + if self.result != None: + return self.result == 0 + return None + + def wantsWriteEvent(self): + """If the state machine wants to write. + + If an operation is active, this returns whether or not the + operation wants to write to the socket. If an operation is + not active, this returns None. + + @rtype: bool or None + @return: If the state machine wants to write. + """ + if self.result != None: + return self.result == 1 + return None + + def outConnectEvent(self): + """Called when a handshake operation completes. + + May be overridden in subclass. + """ + pass + + def outCloseEvent(self): + """Called when a close operation completes. + + May be overridden in subclass. + """ + pass + + def outReadEvent(self, readBuffer): + """Called when a read operation completes. + + May be overridden in subclass.""" + pass + + def outWriteEvent(self): + """Called when a write operation completes. + + May be overridden in subclass.""" + pass + + def inReadEvent(self): + """Tell the state machine it can read from the socket.""" + try: + self._checkAssert() + if self.handshaker: + self._doHandshakeOp() + elif self.closer: + self._doCloseOp() + elif self.reader: + self._doReadOp() + elif self.writer: + self._doWriteOp() + else: + self.reader = self.tlsConnection.readAsync(16384) + self._doReadOp() + except: + self._clear() + raise + + def inWriteEvent(self): + """Tell the state machine it can write to the socket.""" + try: + self._checkAssert() + if self.handshaker: + self._doHandshakeOp() + elif self.closer: + self._doCloseOp() + elif self.reader: + self._doReadOp() + elif self.writer: + self._doWriteOp() + else: + self.outWriteEvent() + except: + self._clear() + raise + + def _doHandshakeOp(self): + try: + self.result = self.handshaker.next() + except StopIteration: + self.handshaker = None + self.result = None + self.outConnectEvent() + + def _doCloseOp(self): + try: + self.result = self.closer.next() 
+ except StopIteration: + self.closer = None + self.result = None + self.outCloseEvent() + + def _doReadOp(self): + self.result = self.reader.next() + if not self.result in (0,1): + readBuffer = self.result + self.reader = None + self.result = None + self.outReadEvent(readBuffer) + + def _doWriteOp(self): + try: + self.result = self.writer.next() + except StopIteration: + self.writer = None + self.result = None + + def setHandshakeOp(self, handshaker): + """Start a handshake operation. + + @type handshaker: generator + @param handshaker: A generator created by using one of the + asynchronous handshake functions (i.e. handshakeServerAsync, or + handshakeClientxxx(..., async=True). + """ + try: + self._checkAssert(0) + self.handshaker = handshaker + self._doHandshakeOp() + except: + self._clear() + raise + + def setServerHandshakeOp(self, **args): + """Start a handshake operation. + + The arguments passed to this function will be forwarded to + L{tlslite.TLSConnection.TLSConnection.handshakeServerAsync}. + """ + handshaker = self.tlsConnection.handshakeServerAsync(**args) + self.setHandshakeOp(handshaker) + + def setCloseOp(self): + """Start a close operation. + """ + try: + self._checkAssert(0) + self.closer = self.tlsConnection.closeAsync() + self._doCloseOp() + except: + self._clear() + raise + + def setWriteOp(self, writeBuffer): + """Start a write operation. + + @type writeBuffer: str + @param writeBuffer: The string to transmit. 
+ """ + try: + self._checkAssert(0) + self.writer = self.tlsConnection.writeAsync(writeBuffer) + self._doWriteOp() + except: + self._clear() + raise + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/ClientHelper.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/ClientHelper.py new file mode 100644 index 0000000..58e0152 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/ClientHelper.py @@ -0,0 +1,163 @@ +""" +A helper class for using TLS Lite with stdlib clients +(httplib, xmlrpclib, imaplib, poplib). +""" + +from gdata.tlslite.Checker import Checker + +class ClientHelper: + """This is a helper class used to integrate TLS Lite with various + TLS clients (e.g. poplib, smtplib, httplib, etc.)""" + + def __init__(self, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings = None): + """ + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. + + The constructor does not perform the TLS handshake itself, but + simply stores these arguments for later. The handshake is + performed only when this class needs to connect with the + server. Then you should be prepared to handle TLS-specific + exceptions. 
class ClientHelper:
    """Helper that stores TLS authentication arguments for stdlib-based
    clients (poplib, smtplib, httplib, imaplib, ...) and performs the
    client-side handshake on demand.

    Client authentication -- pass exactly one combination (or none):
      - username + password       (SRP)
      - username + sharedKey      (shared-key)
      - certChain + privateKey    (certificate)

    Server authentication -- either rely on the implicit mutual
    authentication of SRP/shared-keys, or pass one of:
      - cryptoID [+ protocol]             (requires cryptoIDlib)
      - x509Fingerprint
      - x509TrustList [+ x509CommonName]  (requires cryptlib_py)

    Certificate-based server authentication is compatible with SRP or
    certificate-based client authentication, but not with shared keys.

    The constructor only records the arguments; the handshake runs when
    the owning client actually connects, so callers should be prepared
    for the TLS-specific exceptions documented on the client handshake
    functions in L{tlslite.TLSConnection.TLSConnection}.
    """

    def __init__(self,
                 username=None, password=None, sharedKey=None,
                 certChain=None, privateKey=None,
                 cryptoID=None, protocol=None,
                 x509Fingerprint=None,
                 x509TrustList=None, x509CommonName=None,
                 settings=None):
        """Validate and store the authentication arguments.

        @type username: str
        @param username: SRP or shared-key username.  Requires the
            'password' or 'sharedKey' argument.
        @type password: str
        @param password: SRP password.  Requires 'username'.
        @type sharedKey: str
        @param sharedKey: Shared key.  Requires 'username'.
        @type certChain: L{tlslite.X509CertChain.X509CertChain} or
            L{cryptoIDlib.CertChain.CertChain}
        @param certChain: Client certificate chain.  Requires 'privateKey'.
        @type privateKey: L{tlslite.utils.RSAKey.RSAKey}
        @param privateKey: Client private key.  Requires 'certChain'.
        @type cryptoID: str
        @param cryptoID: cryptoID for server authentication.
        @type protocol: str
        @param protocol: cryptoID protocol URI.  Requires 'cryptoID'.
        @type x509Fingerprint: str
        @param x509Fingerprint: Hex-encoded X.509 server fingerprint.
        @type x509TrustList: list of L{tlslite.X509.X509}
        @param x509TrustList: Trusted root certificates (requires
            cryptlib_py).
        @type x509CommonName: str
        @param x509CommonName: Required server certificate 'CN'.
            Requires 'x509TrustList'.
        @type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
        @param settings: Controls ciphersuites, certificate types and
            SSL/TLS versions offered by the client.
        @raise ValueError: on an unsupported argument combination.
        """

        self.username = None
        self.password = None
        self.sharedKey = None
        self.certChain = None
        self.privateKey = None
        self.checker = None

        #SRP Authentication
        if username and password and not \
                (sharedKey or certChain or privateKey):
            self.username = username
            self.password = password

        #Shared Key Authentication
        elif username and sharedKey and not \
                (password or certChain or privateKey):
            self.username = username
            self.sharedKey = sharedKey

        #Certificate Chain Authentication
        elif certChain and privateKey and not \
                (username or password or sharedKey):
            self.certChain = certChain
            self.privateKey = privateKey

        #No Authentication
        elif not password and not username and not \
                sharedKey and not certChain and not privateKey:
            pass

        else:
            raise ValueError("Bad parameters")

        #Shared keys perform implicit mutual authentication; they can't be
        #combined with certificate-based server authentication.
        #Fixed: the two literals were previously joined without a space,
        #producing "...forms ofauthentication".
        if sharedKey and (cryptoID or protocol or x509Fingerprint):
            raise ValueError("Can't use shared keys with other forms of "
                             "authentication")

        self.checker = Checker(cryptoID, protocol, x509Fingerprint,
                               x509TrustList, x509CommonName)
        self.settings = settings

        #Session cached across handshakes for resumption.
        self.tlsSession = None

    def _handshake(self, tlsConnection):
        # Pick the handshake type from the stored arguments, then remember
        # the resulting session for later resumption.
        if self.username and self.password:
            tlsConnection.handshakeClientSRP(username=self.username,
                                             password=self.password,
                                             checker=self.checker,
                                             settings=self.settings,
                                             session=self.tlsSession)
        elif self.username and self.sharedKey:
            tlsConnection.handshakeClientSharedKey(username=self.username,
                                                   sharedKey=self.sharedKey,
                                                   settings=self.settings)
        else:
            tlsConnection.handshakeClientCert(certChain=self.certChain,
                                              privateKey=self.privateKey,
                                              checker=self.checker,
                                              settings=self.settings,
                                              session=self.tlsSession)
        self.tlsSession = tlsConnection.session
class HTTPTLSConnection(HTTPBaseTLSConnection, ClientHelper):
    """An L{HTTPBaseTLSConnection} supporting the common handshake types.

    Client authentication -- pass one combination (or none):
      - username + password       (SRP)
      - username + sharedKey      (shared-key)
      - certChain + privateKey    (certificate)

    Certificate-based server authentication -- pass one of:
      - cryptoID [+ protocol]             (requires cryptoIDlib)
      - x509Fingerprint
      - x509TrustList [+ x509CommonName]  (requires cryptlib_py)

    Certificate-based server authentication is compatible with SRP or
    certificate-based client authentication, but not with shared keys.
    The constructor only stores these arguments; the handshake happens
    when the connection is actually made, so be prepared for the
    TLS-specific exceptions (see the client handshake functions in
    L{tlslite.TLSConnection.TLSConnection}) when calling request(),
    connect(), send() and friends.
    """

    def __init__(self, host, port=None,
                 username=None, password=None, sharedKey=None,
                 certChain=None, privateKey=None,
                 cryptoID=None, protocol=None,
                 x509Fingerprint=None,
                 x509TrustList=None, x509CommonName=None,
                 settings=None):
        """Create a new HTTPTLSConnection.

        @param host: Server to connect to.
        @param port: Port to connect to.
        @param username: SRP or shared-key username (with 'password' or
            'sharedKey').
        @param password: SRP password (with 'username').
        @param sharedKey: Shared key (with 'username').
        @param certChain: Client certificate chain (with 'privateKey').
        @param privateKey: Client private key (with 'certChain').
        @param cryptoID: cryptoID for server authentication.
        @param protocol: cryptoID protocol URI (with 'cryptoID').
        @param x509Fingerprint: Hex-encoded X.509 server fingerprint.
        @param x509TrustList: Trusted root certificates (requires
            cryptlib_py).
        @param x509CommonName: Required server certificate 'CN' (with
            'x509TrustList').
        @param settings: L{tlslite.HandshakeSettings.HandshakeSettings}
            controlling ciphersuites, certificate types and SSL/TLS
            versions.
        """
        # Set up the HTTP layer first, then record the TLS arguments; the
        # handshake itself happens in _handshake() at connect time.
        HTTPBaseTLSConnection.__init__(self, host, port)

        ClientHelper.__init__(self,
                              username, password, sharedKey,
                              certChain, privateKey,
                              cryptoID, protocol,
                              x509Fingerprint,
                              x509TrustList, x509CommonName,
                              settings)

    def _handshake(self, tlsConnection):
        # ClientHelper chooses the handshake type from the stored args.
        ClientHelper._handshake(self, tlsConnection)
class IMAP4_TLS(IMAP4, ClientHelper):
    """L{imaplib.IMAP4} subclass that talks over TLS Lite.

    Client authentication -- pass one combination (or none):
      - username + password       (SRP)
      - username + sharedKey      (shared-key)
      - certChain + privateKey    (certificate)

    Certificate-based server authentication -- pass one of:
      - cryptoID [+ protocol]             (requires cryptoIDlib)
      - x509Fingerprint
      - x509TrustList [+ x509CommonName]  (requires cryptlib_py)

    Certificate-based server authentication is compatible with SRP or
    certificate-based client authentication, but not with shared keys.
    The caller should be prepared for the TLS-specific exceptions raised
    by the client handshake functions in
    L{tlslite.TLSConnection.TLSConnection}.
    """

    def __init__(self, host='', port=IMAP4_TLS_PORT,
                 username=None, password=None, sharedKey=None,
                 certChain=None, privateKey=None,
                 cryptoID=None, protocol=None,
                 x509Fingerprint=None,
                 x509TrustList=None, x509CommonName=None,
                 settings=None):
        """Create a new IMAP4_TLS.

        @param host: Server to connect to.
        @param port: Port to connect to (defaults to 993).
        @param username: SRP or shared-key username (with 'password' or
            'sharedKey').
        @param password: SRP password (with 'username').
        @param sharedKey: Shared key (with 'username').
        @param certChain: Client certificate chain (with 'privateKey').
        @param privateKey: Client private key (with 'certChain').
        @param cryptoID: cryptoID for server authentication.
        @param protocol: cryptoID protocol URI (with 'cryptoID').
        @param x509Fingerprint: Hex-encoded X.509 server fingerprint.
        @param x509TrustList: Trusted root certificates (requires
            cryptlib_py).
        @param x509CommonName: Required server certificate 'CN' (with
            'x509TrustList').
        @param settings: L{tlslite.HandshakeSettings.HandshakeSettings}
            controlling ciphersuites, certificate types and SSL/TLS
            versions.
        """
        # ClientHelper must be initialised first: IMAP4.__init__
        # establishes the connection via open(), which performs the TLS
        # handshake and needs the ClientHelper state in place.
        ClientHelper.__init__(self,
                              username, password, sharedKey,
                              certChain, privateKey,
                              cryptoID, protocol,
                              x509Fingerprint,
                              x509TrustList, x509CommonName,
                              settings)

        IMAP4.__init__(self, host, port)

    def open(self, host='', port=IMAP4_TLS_PORT):
        """Set up a TLS connection to "host:port".

        The resulting connection is used by the routines:
        read, readline, send, shutdown.
        """
        self.host = host
        self.port = port
        plainSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        plainSock.connect((host, port))
        # Wrap the plain socket; TLSConnection emulates the socket API.
        self.sock = TLSConnection(plainSock)
        self.sock.closeSocket = True
        ClientHelper._handshake(self, self.sock)
        self.file = self.sock.makefile('rb')
+ """ + self.host = host + self.port = port + self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.sock.connect((host, port)) + self.sock = TLSConnection(self.sock) + self.sock.closeSocket = True + ClientHelper._handshake(self, self.sock) + self.file = self.sock.makefile('rb') diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/IntegrationHelper.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/IntegrationHelper.py new file mode 100644 index 0000000..af5193b --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/IntegrationHelper.py @@ -0,0 +1,52 @@ + +class IntegrationHelper: + + def __init__(self, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings = None): + + self.username = None + self.password = None + self.sharedKey = None + self.certChain = None + self.privateKey = None + self.checker = None + + #SRP Authentication + if username and password and not \ + (sharedKey or certChain or privateKey): + self.username = username + self.password = password + + #Shared Key Authentication + elif username and sharedKey and not \ + (password or certChain or privateKey): + self.username = username + self.sharedKey = sharedKey + + #Certificate Chain Authentication + elif certChain and privateKey and not \ + (username or password or sharedKey): + self.certChain = certChain + self.privateKey = privateKey + + #No Authentication + elif not password and not username and not \ + sharedKey and not certChain and not privateKey: + pass + + else: + raise ValueError("Bad parameters") + + #Authenticate the server based on its cryptoID or fingerprint + if sharedKey and (cryptoID or protocol or x509Fingerprint): + raise ValueError("Can't use shared keys with other forms of"\ + "authentication") + + self.checker = Checker(cryptoID, protocol, 
x509Fingerprint, + x509TrustList, x509CommonName) + self.settings = settings \ No newline at end of file diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/POP3_TLS.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/POP3_TLS.py new file mode 100644 index 0000000..26b37fd --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/POP3_TLS.py @@ -0,0 +1,142 @@ +"""TLS Lite + poplib.""" + +import socket +from poplib import POP3 +from gdata.tlslite.TLSConnection import TLSConnection +from gdata.tlslite.integration.ClientHelper import ClientHelper + +# POP TLS PORT +POP3_TLS_PORT = 995 + +class POP3_TLS(POP3, ClientHelper): + """This class extends L{poplib.POP3} with TLS support.""" + + def __init__(self, host, port = POP3_TLS_PORT, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings=None): + """Create a new POP3_TLS. + + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. + + The caller should be prepared to handle TLS-specific + exceptions. See the client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type host: str + @param host: Server to connect to. 
+ + @type port: int + @param port: Port to connect to. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. + + @type password: str + @param password: SRP password for mutual authentication. + Requires the 'username' argument. + + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. + + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. Mutually + exclusive with the 'x509...' arguments. + + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. Requires the 'cryptoID' argument. + + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed to use this parameter. Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. 
+ + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + """ + + self.host = host + self.port = port + msg = "getaddrinfo returns an empty list" + self.sock = None + for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + try: + self.sock = socket.socket(af, socktype, proto) + self.sock.connect(sa) + except socket.error, msg: + if self.sock: + self.sock.close() + self.sock = None + continue + break + if not self.sock: + raise socket.error, msg + + ### New code below (all else copied from poplib) + ClientHelper.__init__(self, + username, password, sharedKey, + certChain, privateKey, + cryptoID, protocol, + x509Fingerprint, + x509TrustList, x509CommonName, + settings) + + self.sock = TLSConnection(self.sock) + self.sock.closeSocket = True + ClientHelper._handshake(self, self.sock) + ### + + self.file = self.sock.makefile('rb') + self._debugging = 0 + self.welcome = self._getresp() diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/SMTP_TLS.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/SMTP_TLS.py new file mode 100644 index 0000000..67e0feb --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/SMTP_TLS.py @@ -0,0 +1,114 @@ +"""TLS Lite + smtplib.""" + +from smtplib import SMTP +from gdata.tlslite.TLSConnection import TLSConnection +from gdata.tlslite.integration.ClientHelper import ClientHelper + +class SMTP_TLS(SMTP): + """This class extends L{smtplib.SMTP} with TLS support.""" + + def starttls(self, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings=None): + """Puts the connection to the SMTP server into TLS mode. 
+ + If the server supports TLS, this will encrypt the rest of the SMTP + session. + + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. + + The caller should be prepared to handle TLS-specific + exceptions. See the client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. + + @type password: str + @param password: SRP password for mutual authentication. + Requires the 'username' argument. + + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. + + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. Mutually + exclusive with the 'x509...' arguments. 
+ + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. Requires the 'cryptoID' argument. + + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed to use this parameter. Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. 
+ """ + (resp, reply) = self.docmd("STARTTLS") + if resp == 220: + helper = ClientHelper( + username, password, sharedKey, + certChain, privateKey, + cryptoID, protocol, + x509Fingerprint, + x509TrustList, x509CommonName, + settings) + conn = TLSConnection(self.sock) + conn.closeSocket = True + helper._handshake(conn) + self.sock = conn + self.file = conn.makefile('rb') + return (resp, reply) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/TLSAsyncDispatcherMixIn.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/TLSAsyncDispatcherMixIn.py new file mode 100644 index 0000000..f732f62 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/TLSAsyncDispatcherMixIn.py @@ -0,0 +1,139 @@ +"""TLS Lite + asyncore.""" + + +import asyncore +from gdata.tlslite.TLSConnection import TLSConnection +from AsyncStateMachine import AsyncStateMachine + + +class TLSAsyncDispatcherMixIn(AsyncStateMachine): + """This class can be "mixed in" with an + L{asyncore.dispatcher} to add TLS support. + + This class essentially sits between the dispatcher and the select + loop, intercepting events and only calling the dispatcher when + applicable. + + In the case of handle_read(), a read operation will be activated, + and when it completes, the bytes will be placed in a buffer where + the dispatcher can retrieve them by calling recv(), and the + dispatcher's handle_read() will be called. + + In the case of handle_write(), the dispatcher's handle_write() will + be called, and when it calls send(), a write operation will be + activated. + + To use this class, you must combine it with an asyncore.dispatcher, + and pass in a handshake operation with setServerHandshakeOp(). + + Below is an example of using this class with medusa. This class is + mixed in with http_channel to create http_tls_channel. Note: + 1. the mix-in is listed first in the inheritance list + + 2. 
the input buffer size must be at least 16K, otherwise the + dispatcher might not read all the bytes from the TLS layer, + leaving some bytes in limbo. + + 3. IE seems to have a problem receiving a whole HTTP response in a + single TLS record, so HTML pages containing '\\r\\n\\r\\n' won't + be displayed on IE. + + Add the following text into 'start_medusa.py', in the 'HTTP Server' + section:: + + from tlslite.api import * + s = open("./serverX509Cert.pem").read() + x509 = X509() + x509.parse(s) + certChain = X509CertChain([x509]) + + s = open("./serverX509Key.pem").read() + privateKey = parsePEMKey(s, private=True) + + class http_tls_channel(TLSAsyncDispatcherMixIn, + http_server.http_channel): + ac_in_buffer_size = 16384 + + def __init__ (self, server, conn, addr): + http_server.http_channel.__init__(self, server, conn, addr) + TLSAsyncDispatcherMixIn.__init__(self, conn) + self.tlsConnection.ignoreAbruptClose = True + self.setServerHandshakeOp(certChain=certChain, + privateKey=privateKey) + + hs.channel_class = http_tls_channel + + If the TLS layer raises an exception, the exception will be caught + in asyncore.dispatcher, which will call close() on this class. The + TLS layer always closes the TLS connection before raising an + exception, so the close operation will complete right away, causing + asyncore.dispatcher.close() to be called, which closes the socket + and removes this instance from the asyncore loop. + + """ + + + def __init__(self, sock=None): + AsyncStateMachine.__init__(self) + + if sock: + self.tlsConnection = TLSConnection(sock) + + #Calculate the sibling I'm being mixed in with. + #This is necessary since we override functions + #like readable(), handle_read(), etc., but we + #also want to call the sibling's versions. 
+ for cl in self.__class__.__bases__: + if cl != TLSAsyncDispatcherMixIn and cl != AsyncStateMachine: + self.siblingClass = cl + break + else: + raise AssertionError() + + def readable(self): + result = self.wantsReadEvent() + if result != None: + return result + return self.siblingClass.readable(self) + + def writable(self): + result = self.wantsWriteEvent() + if result != None: + return result + return self.siblingClass.writable(self) + + def handle_read(self): + self.inReadEvent() + + def handle_write(self): + self.inWriteEvent() + + def outConnectEvent(self): + self.siblingClass.handle_connect(self) + + def outCloseEvent(self): + asyncore.dispatcher.close(self) + + def outReadEvent(self, readBuffer): + self.readBuffer = readBuffer + self.siblingClass.handle_read(self) + + def outWriteEvent(self): + self.siblingClass.handle_write(self) + + def recv(self, bufferSize=16384): + if bufferSize < 16384 or self.readBuffer == None: + raise AssertionError() + returnValue = self.readBuffer + self.readBuffer = None + return returnValue + + def send(self, writeBuffer): + self.setWriteOp(writeBuffer) + return len(writeBuffer) + + def close(self): + if hasattr(self, "tlsConnection"): + self.setCloseOp() + else: + asyncore.dispatcher.close(self) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/TLSSocketServerMixIn.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/TLSSocketServerMixIn.py new file mode 100644 index 0000000..10224b6 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/TLSSocketServerMixIn.py @@ -0,0 +1,59 @@ +"""TLS Lite + SocketServer.""" + +from gdata.tlslite.TLSConnection import TLSConnection + +class TLSSocketServerMixIn: + """ + This class can be mixed in with any L{SocketServer.TCPServer} to + add TLS support. + + To use this class, define a new class that inherits from it and + some L{SocketServer.TCPServer} (with the mix-in first). 
Then + implement the handshake() method, doing some sort of server + handshake on the connection argument. If the handshake method + returns True, the RequestHandler will be triggered. Below is a + complete example of a threaded HTTPS server:: + + from SocketServer import * + from BaseHTTPServer import * + from SimpleHTTPServer import * + from tlslite.api import * + + s = open("./serverX509Cert.pem").read() + x509 = X509() + x509.parse(s) + certChain = X509CertChain([x509]) + + s = open("./serverX509Key.pem").read() + privateKey = parsePEMKey(s, private=True) + + sessionCache = SessionCache() + + class MyHTTPServer(ThreadingMixIn, TLSSocketServerMixIn, + HTTPServer): + def handshake(self, tlsConnection): + try: + tlsConnection.handshakeServer(certChain=certChain, + privateKey=privateKey, + sessionCache=sessionCache) + tlsConnection.ignoreAbruptClose = True + return True + except TLSError, error: + print "Handshake failure:", str(error) + return False + + httpd = MyHTTPServer(('localhost', 443), SimpleHTTPRequestHandler) + httpd.serve_forever() + """ + + + def finish_request(self, sock, client_address): + tlsConnection = TLSConnection(sock) + if self.handshake(tlsConnection) == True: + self.RequestHandlerClass(tlsConnection, client_address, self) + tlsConnection.close() + + #Implement this method to do some form of handshaking. Return True + #if the handshake finishes properly and the request is authorized. 
+ def handshake(self, tlsConnection): + raise NotImplementedError() diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/TLSTwistedProtocolWrapper.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/TLSTwistedProtocolWrapper.py new file mode 100644 index 0000000..c88703c --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/TLSTwistedProtocolWrapper.py @@ -0,0 +1,196 @@ +"""TLS Lite + Twisted.""" + +from twisted.protocols.policies import ProtocolWrapper, WrappingFactory +from twisted.python.failure import Failure + +from AsyncStateMachine import AsyncStateMachine +from gdata.tlslite.TLSConnection import TLSConnection +from gdata.tlslite.errors import * + +import socket +import errno + + +#The TLSConnection is created around a "fake socket" that +#plugs it into the underlying Twisted transport +class _FakeSocket: + def __init__(self, wrapper): + self.wrapper = wrapper + self.data = "" + + def send(self, data): + ProtocolWrapper.write(self.wrapper, data) + return len(data) + + def recv(self, numBytes): + if self.data == "": + raise socket.error, (errno.EWOULDBLOCK, "") + returnData = self.data[:numBytes] + self.data = self.data[numBytes:] + return returnData + +class TLSTwistedProtocolWrapper(ProtocolWrapper, AsyncStateMachine): + """This class can wrap Twisted protocols to add TLS support. + + Below is a complete example of using TLS Lite with a Twisted echo + server. + + There are two server implementations below. Echo is the original + protocol, which is oblivious to TLS. Echo1 subclasses Echo and + negotiates TLS when the client connects. 
Echo2 subclasses Echo and + negotiates TLS when the client sends "STARTTLS":: + + from twisted.internet.protocol import Protocol, Factory + from twisted.internet import reactor + from twisted.protocols.policies import WrappingFactory + from twisted.protocols.basic import LineReceiver + from twisted.python import log + from twisted.python.failure import Failure + import sys + from tlslite.api import * + + s = open("./serverX509Cert.pem").read() + x509 = X509() + x509.parse(s) + certChain = X509CertChain([x509]) + + s = open("./serverX509Key.pem").read() + privateKey = parsePEMKey(s, private=True) + + verifierDB = VerifierDB("verifierDB") + verifierDB.open() + + class Echo(LineReceiver): + def connectionMade(self): + self.transport.write("Welcome to the echo server!\\r\\n") + + def lineReceived(self, line): + self.transport.write(line + "\\r\\n") + + class Echo1(Echo): + def connectionMade(self): + if not self.transport.tlsStarted: + self.transport.setServerHandshakeOp(certChain=certChain, + privateKey=privateKey, + verifierDB=verifierDB) + else: + Echo.connectionMade(self) + + def connectionLost(self, reason): + pass #Handle any TLS exceptions here + + class Echo2(Echo): + def lineReceived(self, data): + if data == "STARTTLS": + self.transport.setServerHandshakeOp(certChain=certChain, + privateKey=privateKey, + verifierDB=verifierDB) + else: + Echo.lineReceived(self, data) + + def connectionLost(self, reason): + pass #Handle any TLS exceptions here + + factory = Factory() + factory.protocol = Echo1 + #factory.protocol = Echo2 + + wrappingFactory = WrappingFactory(factory) + wrappingFactory.protocol = TLSTwistedProtocolWrapper + + log.startLogging(sys.stdout) + reactor.listenTCP(1079, wrappingFactory) + reactor.run() + + This class works as follows: + + Data comes in and is given to the AsyncStateMachine for handling. 
+ AsyncStateMachine will forward events to this class, and we'll + pass them on to the ProtocolHandler, which will proxy them to the + wrapped protocol. The wrapped protocol may then call back into + this class, and these calls will be proxied into the + AsyncStateMachine. + + The call graph looks like this: + - self.dataReceived + - AsyncStateMachine.inReadEvent + - self.out(Connect|Close|Read)Event + - ProtocolWrapper.(connectionMade|loseConnection|dataReceived) + - self.(loseConnection|write|writeSequence) + - AsyncStateMachine.(setCloseOp|setWriteOp) + """ + + #WARNING: IF YOU COPY-AND-PASTE THE ABOVE CODE, BE SURE TO REMOVE + #THE EXTRA ESCAPING AROUND "\\r\\n" + + def __init__(self, factory, wrappedProtocol): + ProtocolWrapper.__init__(self, factory, wrappedProtocol) + AsyncStateMachine.__init__(self) + self.fakeSocket = _FakeSocket(self) + self.tlsConnection = TLSConnection(self.fakeSocket) + self.tlsStarted = False + self.connectionLostCalled = False + + def connectionMade(self): + try: + ProtocolWrapper.connectionMade(self) + except TLSError, e: + self.connectionLost(Failure(e)) + ProtocolWrapper.loseConnection(self) + + def dataReceived(self, data): + try: + if not self.tlsStarted: + ProtocolWrapper.dataReceived(self, data) + else: + self.fakeSocket.data += data + while self.fakeSocket.data: + AsyncStateMachine.inReadEvent(self) + except TLSError, e: + self.connectionLost(Failure(e)) + ProtocolWrapper.loseConnection(self) + + def connectionLost(self, reason): + if not self.connectionLostCalled: + ProtocolWrapper.connectionLost(self, reason) + self.connectionLostCalled = True + + + def outConnectEvent(self): + ProtocolWrapper.connectionMade(self) + + def outCloseEvent(self): + ProtocolWrapper.loseConnection(self) + + def outReadEvent(self, data): + if data == "": + ProtocolWrapper.loseConnection(self) + else: + ProtocolWrapper.dataReceived(self, data) + + + def setServerHandshakeOp(self, **args): + self.tlsStarted = True + 
AsyncStateMachine.setServerHandshakeOp(self, **args) + + def loseConnection(self): + if not self.tlsStarted: + ProtocolWrapper.loseConnection(self) + else: + AsyncStateMachine.setCloseOp(self) + + def write(self, data): + if not self.tlsStarted: + ProtocolWrapper.write(self, data) + else: + #Because of the FakeSocket, write operations are guaranteed to + #terminate immediately. + AsyncStateMachine.setWriteOp(self, data) + + def writeSequence(self, seq): + if not self.tlsStarted: + ProtocolWrapper.writeSequence(self, seq) + else: + #Because of the FakeSocket, write operations are guaranteed to + #terminate immediately. + AsyncStateMachine.setWriteOp(self, "".join(seq)) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/XMLRPCTransport.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/XMLRPCTransport.py new file mode 100644 index 0000000..3f025e4 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/XMLRPCTransport.py @@ -0,0 +1,137 @@ +"""TLS Lite + xmlrpclib.""" + +import xmlrpclib +import httplib +from gdata.tlslite.integration.HTTPTLSConnection import HTTPTLSConnection +from gdata.tlslite.integration.ClientHelper import ClientHelper + + +class XMLRPCTransport(xmlrpclib.Transport, ClientHelper): + """Handles an HTTPS transaction to an XML-RPC server.""" + + def __init__(self, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings=None): + """Create a new XMLRPCTransport. 
+ + An instance of this class can be passed to L{xmlrpclib.ServerProxy} + to use TLS with XML-RPC calls:: + + from tlslite.api import XMLRPCTransport + from xmlrpclib import ServerProxy + + transport = XMLRPCTransport(user="alice", password="abra123") + server = ServerProxy("https://localhost", transport) + + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. + + The constructor does not perform the TLS handshake itself, but + simply stores these arguments for later. The handshake is + performed only when this class needs to connect with the + server. Thus you should be prepared to handle TLS-specific + exceptions when calling methods of L{xmlrpclib.ServerProxy}. See the + client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. + + @type password: str + @param password: SRP password for mutual authentication. + Requires the 'username' argument. + + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. 
Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. + + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. Mutually + exclusive with the 'x509...' arguments. + + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. Requires the 'cryptoID' argument. + + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed to use this parameter. Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. 
+ """ + + ClientHelper.__init__(self, + username, password, sharedKey, + certChain, privateKey, + cryptoID, protocol, + x509Fingerprint, + x509TrustList, x509CommonName, + settings) + + + def make_connection(self, host): + # create a HTTPS connection object from a host descriptor + host, extra_headers, x509 = self.get_host_info(host) + http = HTTPTLSConnection(host, None, + self.username, self.password, + self.sharedKey, + self.certChain, self.privateKey, + self.checker.cryptoID, + self.checker.protocol, + self.checker.x509Fingerprint, + self.checker.x509TrustList, + self.checker.x509CommonName, + self.settings) + http2 = httplib.HTTP() + http2._setup(http) + return http2 diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/__init__.py new file mode 100644 index 0000000..960f406 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/integration/__init__.py @@ -0,0 +1,17 @@ +"""Classes for integrating TLS Lite with other packages.""" + +__all__ = ["AsyncStateMachine", + "HTTPTLSConnection", + "POP3_TLS", + "IMAP4_TLS", + "SMTP_TLS", + "XMLRPCTransport", + "TLSSocketServerMixIn", + "TLSAsyncDispatcherMixIn", + "TLSTwistedProtocolWrapper"] + +try: + import twisted + del twisted +except ImportError: + del __all__[__all__.index("TLSTwistedProtocolWrapper")] diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/mathtls.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/mathtls.py new file mode 100644 index 0000000..3b8ede6 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/mathtls.py @@ -0,0 +1,170 @@ +"""Miscellaneous helper functions.""" + +from utils.compat import * +from utils.cryptomath import * + +import hmac +import md5 +import sha + +#1024, 1536, 2048, 3072, 4096, 6144, and 8192 bit groups] +goodGroupParameters = 
[(2,0xEEAF0AB9ADB38DD69C33F80AFA8FC5E86072618775FF3C0B9EA2314C9C256576D674DF7496EA81D3383B4813D692C6E0E0D5D8E250B98BE48E495C1D6089DAD15DC7D7B46154D6B6CE8EF4AD69B15D4982559B297BCF1885C529F566660E57EC68EDBC3C05726CC02FD4CBF4976EAA9AFD5138FE8376435B9FC61D2FC0EB06E3),\ + (2,0x9DEF3CAFB939277AB1F12A8617A47BBBDBA51DF499AC4C80BEEEA9614B19CC4D5F4F5F556E27CBDE51C6A94BE4607A291558903BA0D0F84380B655BB9A22E8DCDF028A7CEC67F0D08134B1C8B97989149B609E0BE3BAB63D47548381DBC5B1FC764E3F4B53DD9DA1158BFD3E2B9C8CF56EDF019539349627DB2FD53D24B7C48665772E437D6C7F8CE442734AF7CCB7AE837C264AE3A9BEB87F8A2FE9B8B5292E5A021FFF5E91479E8CE7A28C2442C6F315180F93499A234DCF76E3FED135F9BB),\ + (2,0xAC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF73),\ + (2,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF),\ + 
(5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF),\ + 
(5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF),\ + 
(5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E438777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F5683423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD922222E04A4037C0713EB57A81A23F0C73473FC646CEA306B4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A364597E899A0255DC164F31CC50846851DF9AB48195DED7EA1B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F924009438B481C6CD7889A002ED5EE382BC9190DA6FC026E479558E4475677E9AA9E3050E2765
694DFC81F56E880B96E7160C980DD98EDD3DFFFFFFFFFFFFFFFFF)] + +def P_hash(hashModule, secret, seed, length): + bytes = createByteArrayZeros(length) + secret = bytesToString(secret) + seed = bytesToString(seed) + A = seed + index = 0 + while 1: + A = hmac.HMAC(secret, A, hashModule).digest() + output = hmac.HMAC(secret, A+seed, hashModule).digest() + for c in output: + if index >= length: + return bytes + bytes[index] = ord(c) + index += 1 + return bytes + +def PRF(secret, label, seed, length): + #Split the secret into left and right halves + S1 = secret[ : int(math.ceil(len(secret)/2.0))] + S2 = secret[ int(math.floor(len(secret)/2.0)) : ] + + #Run the left half through P_MD5 and the right half through P_SHA1 + p_md5 = P_hash(md5, S1, concatArrays(stringToBytes(label), seed), length) + p_sha1 = P_hash(sha, S2, concatArrays(stringToBytes(label), seed), length) + + #XOR the output values and return the result + for x in range(length): + p_md5[x] ^= p_sha1[x] + return p_md5 + + +def PRF_SSL(secret, seed, length): + secretStr = bytesToString(secret) + seedStr = bytesToString(seed) + bytes = createByteArrayZeros(length) + index = 0 + for x in range(26): + A = chr(ord('A')+x) * (x+1) # 'A', 'BB', 'CCC', etc.. 
+ input = secretStr + sha.sha(A + secretStr + seedStr).digest() + output = md5.md5(input).digest() + for c in output: + if index >= length: + return bytes + bytes[index] = ord(c) + index += 1 + return bytes + +def makeX(salt, username, password): + if len(username)>=256: + raise ValueError("username too long") + if len(salt)>=256: + raise ValueError("salt too long") + return stringToNumber(sha.sha(salt + sha.sha(username + ":" + password)\ + .digest()).digest()) + +#This function is used by VerifierDB.makeVerifier +def makeVerifier(username, password, bits): + bitsIndex = {1024:0, 1536:1, 2048:2, 3072:3, 4096:4, 6144:5, 8192:6}[bits] + g,N = goodGroupParameters[bitsIndex] + salt = bytesToString(getRandomBytes(16)) + x = makeX(salt, username, password) + verifier = powMod(g, x, N) + return N, g, salt, verifier + +def PAD(n, x): + nLength = len(numberToString(n)) + s = numberToString(x) + if len(s) < nLength: + s = ("\0" * (nLength-len(s))) + s + return s + +def makeU(N, A, B): + return stringToNumber(sha.sha(PAD(N, A) + PAD(N, B)).digest()) + +def makeK(N, g): + return stringToNumber(sha.sha(numberToString(N) + PAD(N, g)).digest()) + + +""" +MAC_SSL +Modified from Python HMAC by Trevor +""" + +class MAC_SSL: + """MAC_SSL class. + + This supports the API for Cryptographic Hash Functions (PEP 247). + """ + + def __init__(self, key, msg = None, digestmod = None): + """Create a new MAC_SSL object. + + key: key for the keyed hash object. + msg: Initial input for the hash, if provided. + digestmod: A module supporting PEP 247. Defaults to the md5 module. 
+ """ + if digestmod is None: + import md5 + digestmod = md5 + + if key == None: #TREVNEW - for faster copying + return #TREVNEW + + self.digestmod = digestmod + self.outer = digestmod.new() + self.inner = digestmod.new() + self.digest_size = digestmod.digest_size + + ipad = "\x36" * 40 + opad = "\x5C" * 40 + + self.inner.update(key) + self.inner.update(ipad) + self.outer.update(key) + self.outer.update(opad) + if msg is not None: + self.update(msg) + + + def update(self, msg): + """Update this hashing object with the string msg. + """ + self.inner.update(msg) + + def copy(self): + """Return a separate copy of this hashing object. + + An update to this copy won't affect the original object. + """ + other = MAC_SSL(None) #TREVNEW - for faster copying + other.digest_size = self.digest_size #TREVNEW + other.digestmod = self.digestmod + other.inner = self.inner.copy() + other.outer = self.outer.copy() + return other + + def digest(self): + """Return the hash value of this hashing object. + + This returns a string containing 8-bit data. The object is + not altered in any way by this function; you can continue + updating the object after calling this function. + """ + h = self.outer.copy() + h.update(self.inner.digest()) + return h.digest() + + def hexdigest(self): + """Like digest(), but returns a string of hexadecimal digits instead. 
+ """ + return "".join([hex(ord(x))[2:].zfill(2) + for x in tuple(self.digest())]) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/messages.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/messages.py new file mode 100644 index 0000000..afccc79 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/messages.py @@ -0,0 +1,561 @@ +"""Classes representing TLS messages.""" + +from utils.compat import * +from utils.cryptomath import * +from errors import * +from utils.codec import * +from constants import * +from X509 import X509 +from X509CertChain import X509CertChain + +import sha +import md5 + +class RecordHeader3: + def __init__(self): + self.type = 0 + self.version = (0,0) + self.length = 0 + self.ssl2 = False + + def create(self, version, type, length): + self.type = type + self.version = version + self.length = length + return self + + def write(self): + w = Writer(5) + w.add(self.type, 1) + w.add(self.version[0], 1) + w.add(self.version[1], 1) + w.add(self.length, 2) + return w.bytes + + def parse(self, p): + self.type = p.get(1) + self.version = (p.get(1), p.get(1)) + self.length = p.get(2) + self.ssl2 = False + return self + +class RecordHeader2: + def __init__(self): + self.type = 0 + self.version = (0,0) + self.length = 0 + self.ssl2 = True + + def parse(self, p): + if p.get(1)!=128: + raise SyntaxError() + self.type = ContentType.handshake + self.version = (2,0) + #We don't support 2-byte-length-headers; could be a problem + self.length = p.get(1) + return self + + +class Msg: + def preWrite(self, trial): + if trial: + w = Writer() + else: + length = self.write(True) + w = Writer(length) + return w + + def postWrite(self, w, trial): + if trial: + return w.index + else: + return w.bytes + +class Alert(Msg): + def __init__(self): + self.contentType = ContentType.alert + self.level = 0 + self.description = 0 + + def create(self, description, level=AlertLevel.fatal): + self.level = level + self.description = 
description + return self + + def parse(self, p): + p.setLengthCheck(2) + self.level = p.get(1) + self.description = p.get(1) + p.stopLengthCheck() + return self + + def write(self): + w = Writer(2) + w.add(self.level, 1) + w.add(self.description, 1) + return w.bytes + + +class HandshakeMsg(Msg): + def preWrite(self, handshakeType, trial): + if trial: + w = Writer() + w.add(handshakeType, 1) + w.add(0, 3) + else: + length = self.write(True) + w = Writer(length) + w.add(handshakeType, 1) + w.add(length-4, 3) + return w + + +class ClientHello(HandshakeMsg): + def __init__(self, ssl2=False): + self.contentType = ContentType.handshake + self.ssl2 = ssl2 + self.client_version = (0,0) + self.random = createByteArrayZeros(32) + self.session_id = createByteArraySequence([]) + self.cipher_suites = [] # a list of 16-bit values + self.certificate_types = [CertificateType.x509] + self.compression_methods = [] # a list of 8-bit values + self.srp_username = None # a string + + def create(self, version, random, session_id, cipher_suites, + certificate_types=None, srp_username=None): + self.client_version = version + self.random = random + self.session_id = session_id + self.cipher_suites = cipher_suites + self.certificate_types = certificate_types + self.compression_methods = [0] + self.srp_username = srp_username + return self + + def parse(self, p): + if self.ssl2: + self.client_version = (p.get(1), p.get(1)) + cipherSpecsLength = p.get(2) + sessionIDLength = p.get(2) + randomLength = p.get(2) + self.cipher_suites = p.getFixList(3, int(cipherSpecsLength/3)) + self.session_id = p.getFixBytes(sessionIDLength) + self.random = p.getFixBytes(randomLength) + if len(self.random) < 32: + zeroBytes = 32-len(self.random) + self.random = createByteArrayZeros(zeroBytes) + self.random + self.compression_methods = [0]#Fake this value + + #We're not doing a stopLengthCheck() for SSLv2, oh well.. 
+ else: + p.startLengthCheck(3) + self.client_version = (p.get(1), p.get(1)) + self.random = p.getFixBytes(32) + self.session_id = p.getVarBytes(1) + self.cipher_suites = p.getVarList(2, 2) + self.compression_methods = p.getVarList(1, 1) + if not p.atLengthCheck(): + totalExtLength = p.get(2) + soFar = 0 + while soFar != totalExtLength: + extType = p.get(2) + extLength = p.get(2) + if extType == 6: + self.srp_username = bytesToString(p.getVarBytes(1)) + elif extType == 7: + self.certificate_types = p.getVarList(1, 1) + else: + p.getFixBytes(extLength) + soFar += 4 + extLength + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.client_hello, trial) + w.add(self.client_version[0], 1) + w.add(self.client_version[1], 1) + w.addFixSeq(self.random, 1) + w.addVarSeq(self.session_id, 1, 1) + w.addVarSeq(self.cipher_suites, 2, 2) + w.addVarSeq(self.compression_methods, 1, 1) + + extLength = 0 + if self.certificate_types and self.certificate_types != \ + [CertificateType.x509]: + extLength += 5 + len(self.certificate_types) + if self.srp_username: + extLength += 5 + len(self.srp_username) + if extLength > 0: + w.add(extLength, 2) + + if self.certificate_types and self.certificate_types != \ + [CertificateType.x509]: + w.add(7, 2) + w.add(len(self.certificate_types)+1, 2) + w.addVarSeq(self.certificate_types, 1, 1) + if self.srp_username: + w.add(6, 2) + w.add(len(self.srp_username)+1, 2) + w.addVarSeq(stringToBytes(self.srp_username), 1, 1) + + return HandshakeMsg.postWrite(self, w, trial) + + +class ServerHello(HandshakeMsg): + def __init__(self): + self.contentType = ContentType.handshake + self.server_version = (0,0) + self.random = createByteArrayZeros(32) + self.session_id = createByteArraySequence([]) + self.cipher_suite = 0 + self.certificate_type = CertificateType.x509 + self.compression_method = 0 + + def create(self, version, random, session_id, cipher_suite, + certificate_type): + 
self.server_version = version + self.random = random + self.session_id = session_id + self.cipher_suite = cipher_suite + self.certificate_type = certificate_type + self.compression_method = 0 + return self + + def parse(self, p): + p.startLengthCheck(3) + self.server_version = (p.get(1), p.get(1)) + self.random = p.getFixBytes(32) + self.session_id = p.getVarBytes(1) + self.cipher_suite = p.get(2) + self.compression_method = p.get(1) + if not p.atLengthCheck(): + totalExtLength = p.get(2) + soFar = 0 + while soFar != totalExtLength: + extType = p.get(2) + extLength = p.get(2) + if extType == 7: + self.certificate_type = p.get(1) + else: + p.getFixBytes(extLength) + soFar += 4 + extLength + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.server_hello, trial) + w.add(self.server_version[0], 1) + w.add(self.server_version[1], 1) + w.addFixSeq(self.random, 1) + w.addVarSeq(self.session_id, 1, 1) + w.add(self.cipher_suite, 2) + w.add(self.compression_method, 1) + + extLength = 0 + if self.certificate_type and self.certificate_type != \ + CertificateType.x509: + extLength += 5 + + if extLength != 0: + w.add(extLength, 2) + + if self.certificate_type and self.certificate_type != \ + CertificateType.x509: + w.add(7, 2) + w.add(1, 2) + w.add(self.certificate_type, 1) + + return HandshakeMsg.postWrite(self, w, trial) + +class Certificate(HandshakeMsg): + def __init__(self, certificateType): + self.certificateType = certificateType + self.contentType = ContentType.handshake + self.certChain = None + + def create(self, certChain): + self.certChain = certChain + return self + + def parse(self, p): + p.startLengthCheck(3) + if self.certificateType == CertificateType.x509: + chainLength = p.get(3) + index = 0 + certificate_list = [] + while index != chainLength: + certBytes = p.getVarBytes(3) + x509 = X509() + x509.parseBinary(certBytes) + certificate_list.append(x509) + index += len(certBytes)+3 + if 
certificate_list: + self.certChain = X509CertChain(certificate_list) + elif self.certificateType == CertificateType.cryptoID: + s = bytesToString(p.getVarBytes(2)) + if s: + try: + import cryptoIDlib.CertChain + except ImportError: + raise SyntaxError(\ + "cryptoID cert chain received, cryptoIDlib not present") + self.certChain = cryptoIDlib.CertChain.CertChain().parse(s) + else: + raise AssertionError() + + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.certificate, trial) + if self.certificateType == CertificateType.x509: + chainLength = 0 + if self.certChain: + certificate_list = self.certChain.x509List + else: + certificate_list = [] + #determine length + for cert in certificate_list: + bytes = cert.writeBytes() + chainLength += len(bytes)+3 + #add bytes + w.add(chainLength, 3) + for cert in certificate_list: + bytes = cert.writeBytes() + w.addVarSeq(bytes, 1, 3) + elif self.certificateType == CertificateType.cryptoID: + if self.certChain: + bytes = stringToBytes(self.certChain.write()) + else: + bytes = createByteArraySequence([]) + w.addVarSeq(bytes, 1, 2) + else: + raise AssertionError() + return HandshakeMsg.postWrite(self, w, trial) + +class CertificateRequest(HandshakeMsg): + def __init__(self): + self.contentType = ContentType.handshake + self.certificate_types = [] + #treat as opaque bytes for now + self.certificate_authorities = createByteArraySequence([]) + + def create(self, certificate_types, certificate_authorities): + self.certificate_types = certificate_types + self.certificate_authorities = certificate_authorities + return self + + def parse(self, p): + p.startLengthCheck(3) + self.certificate_types = p.getVarList(1, 1) + self.certificate_authorities = p.getVarBytes(2) + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.certificate_request, + trial) + w.addVarSeq(self.certificate_types, 1, 1) + 
w.addVarSeq(self.certificate_authorities, 1, 2) + return HandshakeMsg.postWrite(self, w, trial) + +class ServerKeyExchange(HandshakeMsg): + def __init__(self, cipherSuite): + self.cipherSuite = cipherSuite + self.contentType = ContentType.handshake + self.srp_N = 0L + self.srp_g = 0L + self.srp_s = createByteArraySequence([]) + self.srp_B = 0L + self.signature = createByteArraySequence([]) + + def createSRP(self, srp_N, srp_g, srp_s, srp_B): + self.srp_N = srp_N + self.srp_g = srp_g + self.srp_s = srp_s + self.srp_B = srp_B + return self + + def parse(self, p): + p.startLengthCheck(3) + self.srp_N = bytesToNumber(p.getVarBytes(2)) + self.srp_g = bytesToNumber(p.getVarBytes(2)) + self.srp_s = p.getVarBytes(1) + self.srp_B = bytesToNumber(p.getVarBytes(2)) + if self.cipherSuite in CipherSuite.srpRsaSuites: + self.signature = p.getVarBytes(2) + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.server_key_exchange, + trial) + w.addVarSeq(numberToBytes(self.srp_N), 1, 2) + w.addVarSeq(numberToBytes(self.srp_g), 1, 2) + w.addVarSeq(self.srp_s, 1, 1) + w.addVarSeq(numberToBytes(self.srp_B), 1, 2) + if self.cipherSuite in CipherSuite.srpRsaSuites: + w.addVarSeq(self.signature, 1, 2) + return HandshakeMsg.postWrite(self, w, trial) + + def hash(self, clientRandom, serverRandom): + oldCipherSuite = self.cipherSuite + self.cipherSuite = None + try: + bytes = clientRandom + serverRandom + self.write()[4:] + s = bytesToString(bytes) + return stringToBytes(md5.md5(s).digest() + sha.sha(s).digest()) + finally: + self.cipherSuite = oldCipherSuite + +class ServerHelloDone(HandshakeMsg): + def __init__(self): + self.contentType = ContentType.handshake + + def create(self): + return self + + def parse(self, p): + p.startLengthCheck(3) + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.server_hello_done, trial) + return HandshakeMsg.postWrite(self, w, 
trial) + +class ClientKeyExchange(HandshakeMsg): + def __init__(self, cipherSuite, version=None): + self.cipherSuite = cipherSuite + self.version = version + self.contentType = ContentType.handshake + self.srp_A = 0 + self.encryptedPreMasterSecret = createByteArraySequence([]) + + def createSRP(self, srp_A): + self.srp_A = srp_A + return self + + def createRSA(self, encryptedPreMasterSecret): + self.encryptedPreMasterSecret = encryptedPreMasterSecret + return self + + def parse(self, p): + p.startLengthCheck(3) + if self.cipherSuite in CipherSuite.srpSuites + \ + CipherSuite.srpRsaSuites: + self.srp_A = bytesToNumber(p.getVarBytes(2)) + elif self.cipherSuite in CipherSuite.rsaSuites: + if self.version in ((3,1), (3,2)): + self.encryptedPreMasterSecret = p.getVarBytes(2) + elif self.version == (3,0): + self.encryptedPreMasterSecret = \ + p.getFixBytes(len(p.bytes)-p.index) + else: + raise AssertionError() + else: + raise AssertionError() + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.client_key_exchange, + trial) + if self.cipherSuite in CipherSuite.srpSuites + \ + CipherSuite.srpRsaSuites: + w.addVarSeq(numberToBytes(self.srp_A), 1, 2) + elif self.cipherSuite in CipherSuite.rsaSuites: + if self.version in ((3,1), (3,2)): + w.addVarSeq(self.encryptedPreMasterSecret, 1, 2) + elif self.version == (3,0): + w.addFixSeq(self.encryptedPreMasterSecret, 1) + else: + raise AssertionError() + else: + raise AssertionError() + return HandshakeMsg.postWrite(self, w, trial) + +class CertificateVerify(HandshakeMsg): + def __init__(self): + self.contentType = ContentType.handshake + self.signature = createByteArraySequence([]) + + def create(self, signature): + self.signature = signature + return self + + def parse(self, p): + p.startLengthCheck(3) + self.signature = p.getVarBytes(2) + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, 
HandshakeType.certificate_verify, + trial) + w.addVarSeq(self.signature, 1, 2) + return HandshakeMsg.postWrite(self, w, trial) + +class ChangeCipherSpec(Msg): + def __init__(self): + self.contentType = ContentType.change_cipher_spec + self.type = 1 + + def create(self): + self.type = 1 + return self + + def parse(self, p): + p.setLengthCheck(1) + self.type = p.get(1) + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = Msg.preWrite(self, trial) + w.add(self.type,1) + return Msg.postWrite(self, w, trial) + + +class Finished(HandshakeMsg): + def __init__(self, version): + self.contentType = ContentType.handshake + self.version = version + self.verify_data = createByteArraySequence([]) + + def create(self, verify_data): + self.verify_data = verify_data + return self + + def parse(self, p): + p.startLengthCheck(3) + if self.version == (3,0): + self.verify_data = p.getFixBytes(36) + elif self.version in ((3,1), (3,2)): + self.verify_data = p.getFixBytes(12) + else: + raise AssertionError() + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.finished, trial) + w.addFixSeq(self.verify_data, 1) + return HandshakeMsg.postWrite(self, w, trial) + +class ApplicationData(Msg): + def __init__(self): + self.contentType = ContentType.application_data + self.bytes = createByteArraySequence([]) + + def create(self, bytes): + self.bytes = bytes + return self + + def parse(self, p): + self.bytes = p.bytes + return self + + def write(self): + return self.bytes \ No newline at end of file diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/AES.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/AES.py new file mode 100644 index 0000000..8413f4c --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/AES.py @@ -0,0 +1,31 @@ +"""Abstract class for AES.""" + +class AES: + def __init__(self, key, mode, IV, implementation): + if len(key) not in 
(16, 24, 32): + raise AssertionError() + if mode != 2: + raise AssertionError() + if len(IV) != 16: + raise AssertionError() + self.isBlockCipher = True + self.block_size = 16 + self.implementation = implementation + if len(key)==16: + self.name = "aes128" + elif len(key)==24: + self.name = "aes192" + elif len(key)==32: + self.name = "aes256" + else: + raise AssertionError() + + #CBC-Mode encryption, returns ciphertext + #WARNING: *MAY* modify the input as well + def encrypt(self, plaintext): + assert(len(plaintext) % 16 == 0) + + #CBC-Mode decryption, returns plaintext + #WARNING: *MAY* modify the input as well + def decrypt(self, ciphertext): + assert(len(ciphertext) % 16 == 0) \ No newline at end of file diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/ASN1Parser.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/ASN1Parser.py new file mode 100644 index 0000000..16b50f2 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/ASN1Parser.py @@ -0,0 +1,34 @@ +"""Class for parsing ASN.1""" +from compat import * +from codec import * + +#Takes a byte array which has a DER TLV field at its head +class ASN1Parser: + def __init__(self, bytes): + p = Parser(bytes) + p.get(1) #skip Type + + #Get Length + self.length = self._getASN1Length(p) + + #Get Value + self.value = p.getFixBytes(self.length) + + #Assuming this is a sequence... 
+ def getChild(self, which): + p = Parser(self.value) + for x in range(which+1): + markIndex = p.index + p.get(1) #skip Type + length = self._getASN1Length(p) + p.getFixBytes(length) + return ASN1Parser(p.bytes[markIndex : p.index]) + + #Decode the ASN.1 DER length field + def _getASN1Length(self, p): + firstLength = p.get(1) + if firstLength<=127: + return firstLength + else: + lengthLength = firstLength & 0x7F + return p.get(lengthLength) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/Cryptlib_AES.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/Cryptlib_AES.py new file mode 100644 index 0000000..9e101fc --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/Cryptlib_AES.py @@ -0,0 +1,34 @@ +"""Cryptlib AES implementation.""" + +from cryptomath import * +from AES import * + +if cryptlibpyLoaded: + + def new(key, mode, IV): + return Cryptlib_AES(key, mode, IV) + + class Cryptlib_AES(AES): + + def __init__(self, key, mode, IV): + AES.__init__(self, key, mode, IV, "cryptlib") + self.context = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED, cryptlib_py.CRYPT_ALGO_AES) + cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_MODE, cryptlib_py.CRYPT_MODE_CBC) + cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_KEYSIZE, len(key)) + cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_KEY, key) + cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_IV, IV) + + def __del__(self): + cryptlib_py.cryptDestroyContext(self.context) + + def encrypt(self, plaintext): + AES.encrypt(self, plaintext) + bytes = stringToBytes(plaintext) + cryptlib_py.cryptEncrypt(self.context, bytes) + return bytesToString(bytes) + + def decrypt(self, ciphertext): + AES.decrypt(self, ciphertext) + bytes = stringToBytes(ciphertext) + cryptlib_py.cryptDecrypt(self.context, bytes) + return bytesToString(bytes) diff --git 
a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/Cryptlib_RC4.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/Cryptlib_RC4.py new file mode 100644 index 0000000..7c6d087 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/Cryptlib_RC4.py @@ -0,0 +1,28 @@ +"""Cryptlib RC4 implementation.""" + +from cryptomath import * +from RC4 import RC4 + +if cryptlibpyLoaded: + + def new(key): + return Cryptlib_RC4(key) + + class Cryptlib_RC4(RC4): + + def __init__(self, key): + RC4.__init__(self, key, "cryptlib") + self.context = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED, cryptlib_py.CRYPT_ALGO_RC4) + cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_KEYSIZE, len(key)) + cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_KEY, key) + + def __del__(self): + cryptlib_py.cryptDestroyContext(self.context) + + def encrypt(self, plaintext): + bytes = stringToBytes(plaintext) + cryptlib_py.cryptEncrypt(self.context, bytes) + return bytesToString(bytes) + + def decrypt(self, ciphertext): + return self.encrypt(ciphertext) \ No newline at end of file diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/Cryptlib_TripleDES.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/Cryptlib_TripleDES.py new file mode 100644 index 0000000..a4f8155 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/Cryptlib_TripleDES.py @@ -0,0 +1,35 @@ +"""Cryptlib 3DES implementation.""" + +from cryptomath import * + +from TripleDES import * + +if cryptlibpyLoaded: + + def new(key, mode, IV): + return Cryptlib_TripleDES(key, mode, IV) + + class Cryptlib_TripleDES(TripleDES): + + def __init__(self, key, mode, IV): + TripleDES.__init__(self, key, mode, IV, "cryptlib") + self.context = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED, cryptlib_py.CRYPT_ALGO_3DES) + cryptlib_py.cryptSetAttribute(self.context, 
cryptlib_py.CRYPT_CTXINFO_MODE, cryptlib_py.CRYPT_MODE_CBC) + cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_KEYSIZE, len(key)) + cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_KEY, key) + cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_IV, IV) + + def __del__(self): + cryptlib_py.cryptDestroyContext(self.context) + + def encrypt(self, plaintext): + TripleDES.encrypt(self, plaintext) + bytes = stringToBytes(plaintext) + cryptlib_py.cryptEncrypt(self.context, bytes) + return bytesToString(bytes) + + def decrypt(self, ciphertext): + TripleDES.decrypt(self, ciphertext) + bytes = stringToBytes(ciphertext) + cryptlib_py.cryptDecrypt(self.context, bytes) + return bytesToString(bytes) \ No newline at end of file diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/OpenSSL_AES.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/OpenSSL_AES.py new file mode 100644 index 0000000..e60679b --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/OpenSSL_AES.py @@ -0,0 +1,49 @@ +"""OpenSSL/M2Crypto AES implementation.""" + +from cryptomath import * +from AES import * + +if m2cryptoLoaded: + + def new(key, mode, IV): + return OpenSSL_AES(key, mode, IV) + + class OpenSSL_AES(AES): + + def __init__(self, key, mode, IV): + AES.__init__(self, key, mode, IV, "openssl") + self.key = key + self.IV = IV + + def _createContext(self, encrypt): + context = m2.cipher_ctx_new() + if len(self.key)==16: + cipherType = m2.aes_128_cbc() + if len(self.key)==24: + cipherType = m2.aes_192_cbc() + if len(self.key)==32: + cipherType = m2.aes_256_cbc() + m2.cipher_init(context, cipherType, self.key, self.IV, encrypt) + return context + + def encrypt(self, plaintext): + AES.encrypt(self, plaintext) + context = self._createContext(1) + ciphertext = m2.cipher_update(context, plaintext) + m2.cipher_ctx_free(context) + self.IV = ciphertext[-self.block_size:] + return 
ciphertext + + def decrypt(self, ciphertext): + AES.decrypt(self, ciphertext) + context = self._createContext(0) + #I think M2Crypto has a bug - it fails to decrypt and return the last block passed in. + #To work around this, we append sixteen zeros to the string, below: + plaintext = m2.cipher_update(context, ciphertext+('\0'*16)) + + #If this bug is ever fixed, then plaintext will end up having a garbage + #plaintext block on the end. That's okay - the below code will discard it. + plaintext = plaintext[:len(ciphertext)] + m2.cipher_ctx_free(context) + self.IV = ciphertext[-self.block_size:] + return plaintext diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/OpenSSL_RC4.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/OpenSSL_RC4.py new file mode 100644 index 0000000..ac433aa --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/OpenSSL_RC4.py @@ -0,0 +1,25 @@ +"""OpenSSL/M2Crypto RC4 implementation.""" + +from cryptomath import * +from RC4 import RC4 + +if m2cryptoLoaded: + + def new(key): + return OpenSSL_RC4(key) + + class OpenSSL_RC4(RC4): + + def __init__(self, key): + RC4.__init__(self, key, "openssl") + self.rc4 = m2.rc4_new() + m2.rc4_set_key(self.rc4, key) + + def __del__(self): + m2.rc4_free(self.rc4) + + def encrypt(self, plaintext): + return m2.rc4_update(self.rc4, plaintext) + + def decrypt(self, ciphertext): + return self.encrypt(ciphertext) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/OpenSSL_RSAKey.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/OpenSSL_RSAKey.py new file mode 100644 index 0000000..fe1a3cd --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/OpenSSL_RSAKey.py @@ -0,0 +1,148 @@ +"""OpenSSL/M2Crypto RSA implementation.""" + +from cryptomath import * + +from RSAKey import * +from Python_RSAKey import Python_RSAKey + +#copied from M2Crypto.util.py, so when we load the local copy of m2 +#we can still 
use it +def password_callback(v, prompt1='Enter private key passphrase:', + prompt2='Verify passphrase:'): + from getpass import getpass + while 1: + try: + p1=getpass(prompt1) + if v: + p2=getpass(prompt2) + if p1==p2: + break + else: + break + except KeyboardInterrupt: + return None + return p1 + + +if m2cryptoLoaded: + class OpenSSL_RSAKey(RSAKey): + def __init__(self, n=0, e=0): + self.rsa = None + self._hasPrivateKey = False + if (n and not e) or (e and not n): + raise AssertionError() + if n and e: + self.rsa = m2.rsa_new() + m2.rsa_set_n(self.rsa, numberToMPI(n)) + m2.rsa_set_e(self.rsa, numberToMPI(e)) + + def __del__(self): + if self.rsa: + m2.rsa_free(self.rsa) + + def __getattr__(self, name): + if name == 'e': + if not self.rsa: + return 0 + return mpiToNumber(m2.rsa_get_e(self.rsa)) + elif name == 'n': + if not self.rsa: + return 0 + return mpiToNumber(m2.rsa_get_n(self.rsa)) + else: + raise AttributeError + + def hasPrivateKey(self): + return self._hasPrivateKey + + def hash(self): + return Python_RSAKey(self.n, self.e).hash() + + def _rawPrivateKeyOp(self, m): + s = numberToString(m) + byteLength = numBytes(self.n) + if len(s)== byteLength: + pass + elif len(s) == byteLength-1: + s = '\0' + s + else: + raise AssertionError() + c = stringToNumber(m2.rsa_private_encrypt(self.rsa, s, + m2.no_padding)) + return c + + def _rawPublicKeyOp(self, c): + s = numberToString(c) + byteLength = numBytes(self.n) + if len(s)== byteLength: + pass + elif len(s) == byteLength-1: + s = '\0' + s + else: + raise AssertionError() + m = stringToNumber(m2.rsa_public_decrypt(self.rsa, s, + m2.no_padding)) + return m + + def acceptsPassword(self): return True + + def write(self, password=None): + bio = m2.bio_new(m2.bio_s_mem()) + if self._hasPrivateKey: + if password: + def f(v): return password + m2.rsa_write_key(self.rsa, bio, m2.des_ede_cbc(), f) + else: + def f(): pass + m2.rsa_write_key_no_cipher(self.rsa, bio, f) + else: + if password: + raise AssertionError() + 
m2.rsa_write_pub_key(self.rsa, bio) + s = m2.bio_read(bio, m2.bio_ctrl_pending(bio)) + m2.bio_free(bio) + return s + + def writeXMLPublicKey(self, indent=''): + return Python_RSAKey(self.n, self.e).write(indent) + + def generate(bits): + key = OpenSSL_RSAKey() + def f():pass + key.rsa = m2.rsa_generate_key(bits, 3, f) + key._hasPrivateKey = True + return key + generate = staticmethod(generate) + + def parse(s, passwordCallback=None): + if s.startswith("-----BEGIN "): + if passwordCallback==None: + callback = password_callback + else: + def f(v, prompt1=None, prompt2=None): + return passwordCallback() + callback = f + bio = m2.bio_new(m2.bio_s_mem()) + try: + m2.bio_write(bio, s) + key = OpenSSL_RSAKey() + if s.startswith("-----BEGIN RSA PRIVATE KEY-----"): + def f():pass + key.rsa = m2.rsa_read_key(bio, callback) + if key.rsa == None: + raise SyntaxError() + key._hasPrivateKey = True + elif s.startswith("-----BEGIN PUBLIC KEY-----"): + key.rsa = m2.rsa_read_pub_key(bio) + if key.rsa == None: + raise SyntaxError() + key._hasPrivateKey = False + else: + raise SyntaxError() + return key + finally: + m2.bio_free(bio) + else: + raise SyntaxError() + + parse = staticmethod(parse) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/OpenSSL_TripleDES.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/OpenSSL_TripleDES.py new file mode 100644 index 0000000..f5ba165 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/OpenSSL_TripleDES.py @@ -0,0 +1,44 @@ +"""OpenSSL/M2Crypto 3DES implementation.""" + +from cryptomath import * +from TripleDES import * + +if m2cryptoLoaded: + + def new(key, mode, IV): + return OpenSSL_TripleDES(key, mode, IV) + + class OpenSSL_TripleDES(TripleDES): + + def __init__(self, key, mode, IV): + TripleDES.__init__(self, key, mode, IV, "openssl") + self.key = key + self.IV = IV + + def _createContext(self, encrypt): + context = m2.cipher_ctx_new() + cipherType = m2.des_ede3_cbc() + 
m2.cipher_init(context, cipherType, self.key, self.IV, encrypt) + return context + + def encrypt(self, plaintext): + TripleDES.encrypt(self, plaintext) + context = self._createContext(1) + ciphertext = m2.cipher_update(context, plaintext) + m2.cipher_ctx_free(context) + self.IV = ciphertext[-self.block_size:] + return ciphertext + + def decrypt(self, ciphertext): + TripleDES.decrypt(self, ciphertext) + context = self._createContext(0) + #I think M2Crypto has a bug - it fails to decrypt and return the last block passed in. + #To work around this, we append sixteen zeros to the string, below: + plaintext = m2.cipher_update(context, ciphertext+('\0'*16)) + + #If this bug is ever fixed, then plaintext will end up having a garbage + #plaintext block on the end. That's okay - the below code will ignore it. + plaintext = plaintext[:len(ciphertext)] + m2.cipher_ctx_free(context) + self.IV = ciphertext[-self.block_size:] + return plaintext \ No newline at end of file diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/PyCrypto_AES.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/PyCrypto_AES.py new file mode 100644 index 0000000..e38b19d --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/PyCrypto_AES.py @@ -0,0 +1,22 @@ +"""PyCrypto AES implementation.""" + +from cryptomath import * +from AES import * + +if pycryptoLoaded: + import Crypto.Cipher.AES + + def new(key, mode, IV): + return PyCrypto_AES(key, mode, IV) + + class PyCrypto_AES(AES): + + def __init__(self, key, mode, IV): + AES.__init__(self, key, mode, IV, "pycrypto") + self.context = Crypto.Cipher.AES.new(key, mode, IV) + + def encrypt(self, plaintext): + return self.context.encrypt(plaintext) + + def decrypt(self, ciphertext): + return self.context.decrypt(ciphertext) \ No newline at end of file diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/PyCrypto_RC4.py 
b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/PyCrypto_RC4.py new file mode 100644 index 0000000..6c6d86a --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/PyCrypto_RC4.py @@ -0,0 +1,22 @@ +"""PyCrypto RC4 implementation.""" + +from cryptomath import * +from RC4 import * + +if pycryptoLoaded: + import Crypto.Cipher.ARC4 + + def new(key): + return PyCrypto_RC4(key) + + class PyCrypto_RC4(RC4): + + def __init__(self, key): + RC4.__init__(self, key, "pycrypto") + self.context = Crypto.Cipher.ARC4.new(key) + + def encrypt(self, plaintext): + return self.context.encrypt(plaintext) + + def decrypt(self, ciphertext): + return self.context.decrypt(ciphertext) \ No newline at end of file diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/PyCrypto_RSAKey.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/PyCrypto_RSAKey.py new file mode 100644 index 0000000..48b5cef --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/PyCrypto_RSAKey.py @@ -0,0 +1,61 @@ +"""PyCrypto RSA implementation.""" + +from cryptomath import * + +from RSAKey import * +from Python_RSAKey import Python_RSAKey + +if pycryptoLoaded: + + from Crypto.PublicKey import RSA + + class PyCrypto_RSAKey(RSAKey): + def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0): + if not d: + self.rsa = RSA.construct( (n, e) ) + else: + self.rsa = RSA.construct( (n, e, d, p, q) ) + + def __getattr__(self, name): + return getattr(self.rsa, name) + + def hasPrivateKey(self): + return self.rsa.has_private() + + def hash(self): + return Python_RSAKey(self.n, self.e).hash() + + def _rawPrivateKeyOp(self, m): + s = numberToString(m) + byteLength = numBytes(self.n) + if len(s)== byteLength: + pass + elif len(s) == byteLength-1: + s = '\0' + s + else: + raise AssertionError() + c = stringToNumber(self.rsa.decrypt((s,))) + return c + + def _rawPublicKeyOp(self, c): + s = numberToString(c) + byteLength = 
numBytes(self.n) + if len(s)== byteLength: + pass + elif len(s) == byteLength-1: + s = '\0' + s + else: + raise AssertionError() + m = stringToNumber(self.rsa.encrypt(s, None)[0]) + return m + + def writeXMLPublicKey(self, indent=''): + return Python_RSAKey(self.n, self.e).write(indent) + + def generate(bits): + key = PyCrypto_RSAKey() + def f(numBytes): + return bytesToString(getRandomBytes(numBytes)) + key.rsa = RSA.generate(bits, f) + return key + generate = staticmethod(generate) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/PyCrypto_TripleDES.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/PyCrypto_TripleDES.py new file mode 100644 index 0000000..8c22bb8 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/PyCrypto_TripleDES.py @@ -0,0 +1,22 @@ +"""PyCrypto 3DES implementation.""" + +from cryptomath import * +from TripleDES import * + +if pycryptoLoaded: + import Crypto.Cipher.DES3 + + def new(key, mode, IV): + return PyCrypto_TripleDES(key, mode, IV) + + class PyCrypto_TripleDES(TripleDES): + + def __init__(self, key, mode, IV): + TripleDES.__init__(self, key, mode, IV, "pycrypto") + self.context = Crypto.Cipher.DES3.new(key, mode, IV) + + def encrypt(self, plaintext): + return self.context.encrypt(plaintext) + + def decrypt(self, ciphertext): + return self.context.decrypt(ciphertext) \ No newline at end of file diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/Python_AES.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/Python_AES.py new file mode 100644 index 0000000..657152f --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/Python_AES.py @@ -0,0 +1,68 @@ +"""Pure-Python AES implementation.""" + +from cryptomath import * + +from AES import * +from rijndael import rijndael + +def new(key, mode, IV): + return Python_AES(key, mode, IV) + +class Python_AES(AES): + def __init__(self, key, mode, IV): + AES.__init__(self, key, 
mode, IV, "python") + self.rijndael = rijndael(key, 16) + self.IV = IV + + def encrypt(self, plaintext): + AES.encrypt(self, plaintext) + + plaintextBytes = stringToBytes(plaintext) + chainBytes = stringToBytes(self.IV) + + #CBC Mode: For each block... + for x in range(len(plaintextBytes)/16): + + #XOR with the chaining block + blockBytes = plaintextBytes[x*16 : (x*16)+16] + for y in range(16): + blockBytes[y] ^= chainBytes[y] + blockString = bytesToString(blockBytes) + + #Encrypt it + encryptedBytes = stringToBytes(self.rijndael.encrypt(blockString)) + + #Overwrite the input with the output + for y in range(16): + plaintextBytes[(x*16)+y] = encryptedBytes[y] + + #Set the next chaining block + chainBytes = encryptedBytes + + self.IV = bytesToString(chainBytes) + return bytesToString(plaintextBytes) + + def decrypt(self, ciphertext): + AES.decrypt(self, ciphertext) + + ciphertextBytes = stringToBytes(ciphertext) + chainBytes = stringToBytes(self.IV) + + #CBC Mode: For each block... + for x in range(len(ciphertextBytes)/16): + + #Decrypt it + blockBytes = ciphertextBytes[x*16 : (x*16)+16] + blockString = bytesToString(blockBytes) + decryptedBytes = stringToBytes(self.rijndael.decrypt(blockString)) + + #XOR with the chaining block and overwrite the input with output + for y in range(16): + decryptedBytes[y] ^= chainBytes[y] + ciphertextBytes[(x*16)+y] = decryptedBytes[y] + + #Set the next chaining block + chainBytes = blockBytes + + self.IV = bytesToString(chainBytes) + return bytesToString(ciphertextBytes) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/Python_RC4.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/Python_RC4.py new file mode 100644 index 0000000..56ce5fb --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/Python_RC4.py @@ -0,0 +1,39 @@ +"""Pure-Python RC4 implementation.""" + +from RC4 import RC4 +from cryptomath import * + +def new(key): + return Python_RC4(key) + +class 
Python_RC4(RC4): + def __init__(self, key): + RC4.__init__(self, key, "python") + keyBytes = stringToBytes(key) + S = [i for i in range(256)] + j = 0 + for i in range(256): + j = (j + S[i] + keyBytes[i % len(keyBytes)]) % 256 + S[i], S[j] = S[j], S[i] + + self.S = S + self.i = 0 + self.j = 0 + + def encrypt(self, plaintext): + plaintextBytes = stringToBytes(plaintext) + S = self.S + i = self.i + j = self.j + for x in range(len(plaintextBytes)): + i = (i + 1) % 256 + j = (j + S[i]) % 256 + S[i], S[j] = S[j], S[i] + t = (S[i] + S[j]) % 256 + plaintextBytes[x] ^= S[t] + self.i = i + self.j = j + return bytesToString(plaintextBytes) + + def decrypt(self, ciphertext): + return self.encrypt(ciphertext) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/Python_RSAKey.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/Python_RSAKey.py new file mode 100644 index 0000000..2c469b5 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/Python_RSAKey.py @@ -0,0 +1,209 @@ +"""Pure-Python RSA implementation.""" + +from cryptomath import * +import xmltools +from ASN1Parser import ASN1Parser +from RSAKey import * + +class Python_RSAKey(RSAKey): + def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0): + if (n and not e) or (e and not n): + raise AssertionError() + self.n = n + self.e = e + self.d = d + self.p = p + self.q = q + self.dP = dP + self.dQ = dQ + self.qInv = qInv + self.blinder = 0 + self.unblinder = 0 + + def hasPrivateKey(self): + return self.d != 0 + + def hash(self): + s = self.writeXMLPublicKey('\t\t') + return hashAndBase64(s.strip()) + + def _rawPrivateKeyOp(self, m): + #Create blinding values, on the first pass: + if not self.blinder: + self.unblinder = getRandomNumber(2, self.n) + self.blinder = powMod(invMod(self.unblinder, self.n), self.e, + self.n) + + #Blind the input + m = (m * self.blinder) % self.n + + #Perform the RSA operation + c = self._rawPrivateKeyOpHelper(m) + + #Unblind the 
output + c = (c * self.unblinder) % self.n + + #Update blinding values + self.blinder = (self.blinder * self.blinder) % self.n + self.unblinder = (self.unblinder * self.unblinder) % self.n + + #Return the output + return c + + + def _rawPrivateKeyOpHelper(self, m): + #Non-CRT version + #c = powMod(m, self.d, self.n) + + #CRT version (~3x faster) + s1 = powMod(m, self.dP, self.p) + s2 = powMod(m, self.dQ, self.q) + h = ((s1 - s2) * self.qInv) % self.p + c = s2 + self.q * h + return c + + def _rawPublicKeyOp(self, c): + m = powMod(c, self.e, self.n) + return m + + def acceptsPassword(self): return False + + def write(self, indent=''): + if self.d: + s = indent+'\n' + else: + s = indent+'\n' + s += indent+'\t%s\n' % numberToBase64(self.n) + s += indent+'\t%s\n' % numberToBase64(self.e) + if self.d: + s += indent+'\t%s\n' % numberToBase64(self.d) + s += indent+'\t

%s

\n' % numberToBase64(self.p) + s += indent+'\t%s\n' % numberToBase64(self.q) + s += indent+'\t%s\n' % numberToBase64(self.dP) + s += indent+'\t%s\n' % numberToBase64(self.dQ) + s += indent+'\t%s\n' % numberToBase64(self.qInv) + s += indent+'
' + else: + s += indent+'' + #Only add \n if part of a larger structure + if indent != '': + s += '\n' + return s + + def writeXMLPublicKey(self, indent=''): + return Python_RSAKey(self.n, self.e).write(indent) + + def generate(bits): + key = Python_RSAKey() + p = getRandomPrime(bits/2, False) + q = getRandomPrime(bits/2, False) + t = lcm(p-1, q-1) + key.n = p * q + key.e = 3L #Needed to be long, for Java + key.d = invMod(key.e, t) + key.p = p + key.q = q + key.dP = key.d % (p-1) + key.dQ = key.d % (q-1) + key.qInv = invMod(q, p) + return key + generate = staticmethod(generate) + + def parsePEM(s, passwordCallback=None): + """Parse a string containing a or , or + PEM-encoded key.""" + + start = s.find("-----BEGIN PRIVATE KEY-----") + if start != -1: + end = s.find("-----END PRIVATE KEY-----") + if end == -1: + raise SyntaxError("Missing PEM Postfix") + s = s[start+len("-----BEGIN PRIVATE KEY -----") : end] + bytes = base64ToBytes(s) + return Python_RSAKey._parsePKCS8(bytes) + else: + start = s.find("-----BEGIN RSA PRIVATE KEY-----") + if start != -1: + end = s.find("-----END RSA PRIVATE KEY-----") + if end == -1: + raise SyntaxError("Missing PEM Postfix") + s = s[start+len("-----BEGIN RSA PRIVATE KEY -----") : end] + bytes = base64ToBytes(s) + return Python_RSAKey._parseSSLeay(bytes) + raise SyntaxError("Missing PEM Prefix") + parsePEM = staticmethod(parsePEM) + + def parseXML(s): + element = xmltools.parseAndStripWhitespace(s) + return Python_RSAKey._parseXML(element) + parseXML = staticmethod(parseXML) + + def _parsePKCS8(bytes): + p = ASN1Parser(bytes) + + version = p.getChild(0).value[0] + if version != 0: + raise SyntaxError("Unrecognized PKCS8 version") + + rsaOID = p.getChild(1).value + if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]: + raise SyntaxError("Unrecognized AlgorithmIdentifier") + + #Get the privateKey + privateKeyP = p.getChild(2) + + #Adjust for OCTET STRING encapsulation + privateKeyP = ASN1Parser(privateKeyP.value) + + 
return Python_RSAKey._parseASN1PrivateKey(privateKeyP) + _parsePKCS8 = staticmethod(_parsePKCS8) + + def _parseSSLeay(bytes): + privateKeyP = ASN1Parser(bytes) + return Python_RSAKey._parseASN1PrivateKey(privateKeyP) + _parseSSLeay = staticmethod(_parseSSLeay) + + def _parseASN1PrivateKey(privateKeyP): + version = privateKeyP.getChild(0).value[0] + if version != 0: + raise SyntaxError("Unrecognized RSAPrivateKey version") + n = bytesToNumber(privateKeyP.getChild(1).value) + e = bytesToNumber(privateKeyP.getChild(2).value) + d = bytesToNumber(privateKeyP.getChild(3).value) + p = bytesToNumber(privateKeyP.getChild(4).value) + q = bytesToNumber(privateKeyP.getChild(5).value) + dP = bytesToNumber(privateKeyP.getChild(6).value) + dQ = bytesToNumber(privateKeyP.getChild(7).value) + qInv = bytesToNumber(privateKeyP.getChild(8).value) + return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv) + _parseASN1PrivateKey = staticmethod(_parseASN1PrivateKey) + + def _parseXML(element): + try: + xmltools.checkName(element, "privateKey") + except SyntaxError: + xmltools.checkName(element, "publicKey") + + #Parse attributes + xmltools.getReqAttribute(element, "xmlns", "http://trevp.net/rsa\Z") + xmltools.checkNoMoreAttributes(element) + + #Parse public values ( and ) + n = base64ToNumber(xmltools.getText(xmltools.getChild(element, 0, "n"), xmltools.base64RegEx)) + e = base64ToNumber(xmltools.getText(xmltools.getChild(element, 1, "e"), xmltools.base64RegEx)) + d = 0 + p = 0 + q = 0 + dP = 0 + dQ = 0 + qInv = 0 + #Parse private values, if present + if element.childNodes.length>=3: + d = base64ToNumber(xmltools.getText(xmltools.getChild(element, 2, "d"), xmltools.base64RegEx)) + p = base64ToNumber(xmltools.getText(xmltools.getChild(element, 3, "p"), xmltools.base64RegEx)) + q = base64ToNumber(xmltools.getText(xmltools.getChild(element, 4, "q"), xmltools.base64RegEx)) + dP = base64ToNumber(xmltools.getText(xmltools.getChild(element, 5, "dP"), xmltools.base64RegEx)) + dQ = 
base64ToNumber(xmltools.getText(xmltools.getChild(element, 6, "dQ"), xmltools.base64RegEx)) + qInv = base64ToNumber(xmltools.getText(xmltools.getLastChild(element, 7, "qInv"), xmltools.base64RegEx)) + return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv) + _parseXML = staticmethod(_parseXML) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/RC4.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/RC4.py new file mode 100644 index 0000000..5506923 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/RC4.py @@ -0,0 +1,17 @@ +"""Abstract class for RC4.""" + +from compat import * #For False + +class RC4: + def __init__(self, keyBytes, implementation): + if len(keyBytes) < 16 or len(keyBytes) > 256: + raise ValueError() + self.isBlockCipher = False + self.name = "rc4" + self.implementation = implementation + + def encrypt(self, plaintext): + raise NotImplementedError() + + def decrypt(self, ciphertext): + raise NotImplementedError() \ No newline at end of file diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/RSAKey.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/RSAKey.py new file mode 100644 index 0000000..37c292d --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/RSAKey.py @@ -0,0 +1,264 @@ +"""Abstract class for RSA.""" + +from cryptomath import * + + +class RSAKey: + """This is an abstract base class for RSA keys. + + Particular implementations of RSA keys, such as + L{OpenSSL_RSAKey.OpenSSL_RSAKey}, + L{Python_RSAKey.Python_RSAKey}, and + L{PyCrypto_RSAKey.PyCrypto_RSAKey}, + inherit from this. + + To create or parse an RSA key, don't use one of these classes + directly. Instead, use the factory functions in + L{tlslite.utils.keyfactory}. + """ + + def __init__(self, n=0, e=0): + """Create a new RSA key. + + If n and e are passed in, the new key will be initialized. + + @type n: int + @param n: RSA modulus. 
+ + @type e: int + @param e: RSA public exponent. + """ + raise NotImplementedError() + + def __len__(self): + """Return the length of this key in bits. + + @rtype: int + """ + return numBits(self.n) + + def hasPrivateKey(self): + """Return whether or not this key has a private component. + + @rtype: bool + """ + raise NotImplementedError() + + def hash(self): + """Return the cryptoID value corresponding to this + key. + + @rtype: str + """ + raise NotImplementedError() + + def getSigningAlgorithm(self): + """Return the cryptoID sigAlgo value corresponding to this key. + + @rtype: str + """ + return "pkcs1-sha1" + + def hashAndSign(self, bytes): + """Hash and sign the passed-in bytes. + + This requires the key to have a private component. It performs + a PKCS1-SHA1 signature on the passed-in data. + + @type bytes: str or L{array.array} of unsigned bytes + @param bytes: The value which will be hashed and signed. + + @rtype: L{array.array} of unsigned bytes. + @return: A PKCS1-SHA1 signature on the passed-in data. + """ + if not isinstance(bytes, type("")): + bytes = bytesToString(bytes) + hashBytes = stringToBytes(sha.sha(bytes).digest()) + prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes) + sigBytes = self.sign(prefixedHashBytes) + return sigBytes + + def hashAndVerify(self, sigBytes, bytes): + """Hash and verify the passed-in bytes with the signature. + + This verifies a PKCS1-SHA1 signature on the passed-in data. + + @type sigBytes: L{array.array} of unsigned bytes + @param sigBytes: A PKCS1-SHA1 signature. + + @type bytes: str or L{array.array} of unsigned bytes + @param bytes: The value which will be hashed and verified. + + @rtype: bool + @return: Whether the signature matches the passed-in data. 
+ """ + if not isinstance(bytes, type("")): + bytes = bytesToString(bytes) + hashBytes = stringToBytes(sha.sha(bytes).digest()) + prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes) + return self.verify(sigBytes, prefixedHashBytes) + + def sign(self, bytes): + """Sign the passed-in bytes. + + This requires the key to have a private component. It performs + a PKCS1 signature on the passed-in data. + + @type bytes: L{array.array} of unsigned bytes + @param bytes: The value which will be signed. + + @rtype: L{array.array} of unsigned bytes. + @return: A PKCS1 signature on the passed-in data. + """ + if not self.hasPrivateKey(): + raise AssertionError() + paddedBytes = self._addPKCS1Padding(bytes, 1) + m = bytesToNumber(paddedBytes) + if m >= self.n: + raise ValueError() + c = self._rawPrivateKeyOp(m) + sigBytes = numberToBytes(c) + return sigBytes + + def verify(self, sigBytes, bytes): + """Verify the passed-in bytes with the signature. + + This verifies a PKCS1 signature on the passed-in data. + + @type sigBytes: L{array.array} of unsigned bytes + @param sigBytes: A PKCS1 signature. + + @type bytes: L{array.array} of unsigned bytes + @param bytes: The value which will be verified. + + @rtype: bool + @return: Whether the signature matches the passed-in data. + """ + paddedBytes = self._addPKCS1Padding(bytes, 1) + c = bytesToNumber(sigBytes) + if c >= self.n: + return False + m = self._rawPublicKeyOp(c) + checkBytes = numberToBytes(m) + return checkBytes == paddedBytes + + def encrypt(self, bytes): + """Encrypt the passed-in bytes. + + This performs PKCS1 encryption of the passed-in data. + + @type bytes: L{array.array} of unsigned bytes + @param bytes: The value which will be encrypted. + + @rtype: L{array.array} of unsigned bytes. + @return: A PKCS1 encryption of the passed-in data. 
+ """ + paddedBytes = self._addPKCS1Padding(bytes, 2) + m = bytesToNumber(paddedBytes) + if m >= self.n: + raise ValueError() + c = self._rawPublicKeyOp(m) + encBytes = numberToBytes(c) + return encBytes + + def decrypt(self, encBytes): + """Decrypt the passed-in bytes. + + This requires the key to have a private component. It performs + PKCS1 decryption of the passed-in data. + + @type encBytes: L{array.array} of unsigned bytes + @param encBytes: The value which will be decrypted. + + @rtype: L{array.array} of unsigned bytes or None. + @return: A PKCS1 decryption of the passed-in data or None if + the data is not properly formatted. + """ + if not self.hasPrivateKey(): + raise AssertionError() + c = bytesToNumber(encBytes) + if c >= self.n: + return None + m = self._rawPrivateKeyOp(c) + decBytes = numberToBytes(m) + if (len(decBytes) != numBytes(self.n)-1): #Check first byte + return None + if decBytes[0] != 2: #Check second byte + return None + for x in range(len(decBytes)-1): #Scan through for zero separator + if decBytes[x]== 0: + break + else: + return None + return decBytes[x+1:] #Return everything after the separator + + def _rawPrivateKeyOp(self, m): + raise NotImplementedError() + + def _rawPublicKeyOp(self, c): + raise NotImplementedError() + + def acceptsPassword(self): + """Return True if the write() method accepts a password for use + in encrypting the private key. + + @rtype: bool + """ + raise NotImplementedError() + + def write(self, password=None): + """Return a string containing the key. + + @rtype: str + @return: A string describing the key, in whichever format (PEM + or XML) is native to the implementation. + """ + raise NotImplementedError() + + def writeXMLPublicKey(self, indent=''): + """Return a string containing the key. + + @rtype: str + @return: A string describing the public key, in XML format. + """ + return Python_RSAKey(self.n, self.e).write(indent) + + def generate(bits): + """Generate a new key with the specified bit length. 
+ + @rtype: L{tlslite.utils.RSAKey.RSAKey} + """ + raise NotImplementedError() + generate = staticmethod(generate) + + + # ************************************************************************** + # Helper Functions for RSA Keys + # ************************************************************************** + + def _addPKCS1SHA1Prefix(self, bytes): + prefixBytes = createByteArraySequence(\ + [48,33,48,9,6,5,43,14,3,2,26,5,0,4,20]) + prefixedBytes = prefixBytes + bytes + return prefixedBytes + + def _addPKCS1Padding(self, bytes, blockType): + padLength = (numBytes(self.n) - (len(bytes)+3)) + if blockType == 1: #Signature padding + pad = [0xFF] * padLength + elif blockType == 2: #Encryption padding + pad = createByteArraySequence([]) + while len(pad) < padLength: + padBytes = getRandomBytes(padLength * 2) + pad = [b for b in padBytes if b != 0] + pad = pad[:padLength] + else: + raise AssertionError() + + #NOTE: To be proper, we should add [0,blockType]. However, + #the zero is lost when the returned padding is converted + #to a number, so we don't even bother with it. 
Also, + #adding it would cause a misalignment in verify() + padding = createByteArraySequence([blockType] + pad + [0]) + paddedBytes = padding + bytes + return paddedBytes diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/TripleDES.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/TripleDES.py new file mode 100644 index 0000000..2db4588 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/TripleDES.py @@ -0,0 +1,26 @@ +"""Abstract class for 3DES.""" + +from compat import * #For True + +class TripleDES: + def __init__(self, key, mode, IV, implementation): + if len(key) != 24: + raise ValueError() + if mode != 2: + raise ValueError() + if len(IV) != 8: + raise ValueError() + self.isBlockCipher = True + self.block_size = 8 + self.implementation = implementation + self.name = "3des" + + #CBC-Mode encryption, returns ciphertext + #WARNING: *MAY* modify the input as well + def encrypt(self, plaintext): + assert(len(plaintext) % 8 == 0) + + #CBC-Mode decryption, returns plaintext + #WARNING: *MAY* modify the input as well + def decrypt(self, ciphertext): + assert(len(ciphertext) % 8 == 0) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/__init__.py new file mode 100644 index 0000000..e96b4be --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/__init__.py @@ -0,0 +1,31 @@ +"""Toolkit for crypto and other stuff.""" + +__all__ = ["AES", + "ASN1Parser", + "cipherfactory", + "codec", + "Cryptlib_AES", + "Cryptlib_RC4", + "Cryptlib_TripleDES", + "cryptomath: cryptomath module", + "dateFuncs", + "hmac", + "JCE_RSAKey", + "compat", + "keyfactory", + "OpenSSL_AES", + "OpenSSL_RC4", + "OpenSSL_RSAKey", + "OpenSSL_TripleDES", + "PyCrypto_AES", + "PyCrypto_RC4", + "PyCrypto_RSAKey", + "PyCrypto_TripleDES", + "Python_AES", + "Python_RC4", + "Python_RSAKey", + "RC4", + "rijndael", + "RSAKey", + 
"TripleDES", + "xmltools"] diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/cipherfactory.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/cipherfactory.py new file mode 100644 index 0000000..ccbb6b5 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/cipherfactory.py @@ -0,0 +1,111 @@ +"""Factory functions for symmetric cryptography.""" + +import os + +import Python_AES +import Python_RC4 + +import cryptomath + +tripleDESPresent = False + +if cryptomath.m2cryptoLoaded: + import OpenSSL_AES + import OpenSSL_RC4 + import OpenSSL_TripleDES + tripleDESPresent = True + +if cryptomath.cryptlibpyLoaded: + import Cryptlib_AES + import Cryptlib_RC4 + import Cryptlib_TripleDES + tripleDESPresent = True + +if cryptomath.pycryptoLoaded: + import PyCrypto_AES + import PyCrypto_RC4 + import PyCrypto_TripleDES + tripleDESPresent = True + +# ************************************************************************** +# Factory Functions for AES +# ************************************************************************** + +def createAES(key, IV, implList=None): + """Create a new AES object. + + @type key: str + @param key: A 16, 24, or 32 byte string. + + @type IV: str + @param IV: A 16 byte string + + @rtype: L{tlslite.utils.AES} + @return: An AES object. + """ + if implList == None: + implList = ["cryptlib", "openssl", "pycrypto", "python"] + + for impl in implList: + if impl == "cryptlib" and cryptomath.cryptlibpyLoaded: + return Cryptlib_AES.new(key, 2, IV) + elif impl == "openssl" and cryptomath.m2cryptoLoaded: + return OpenSSL_AES.new(key, 2, IV) + elif impl == "pycrypto" and cryptomath.pycryptoLoaded: + return PyCrypto_AES.new(key, 2, IV) + elif impl == "python": + return Python_AES.new(key, 2, IV) + raise NotImplementedError() + +def createRC4(key, IV, implList=None): + """Create a new RC4 object. + + @type key: str + @param key: A 16 to 32 byte string. 
+ + @type IV: object + @param IV: Ignored, whatever it is. + + @rtype: L{tlslite.utils.RC4} + @return: An RC4 object. + """ + if implList == None: + implList = ["cryptlib", "openssl", "pycrypto", "python"] + + if len(IV) != 0: + raise AssertionError() + for impl in implList: + if impl == "cryptlib" and cryptomath.cryptlibpyLoaded: + return Cryptlib_RC4.new(key) + elif impl == "openssl" and cryptomath.m2cryptoLoaded: + return OpenSSL_RC4.new(key) + elif impl == "pycrypto" and cryptomath.pycryptoLoaded: + return PyCrypto_RC4.new(key) + elif impl == "python": + return Python_RC4.new(key) + raise NotImplementedError() + +#Create a new TripleDES instance +def createTripleDES(key, IV, implList=None): + """Create a new 3DES object. + + @type key: str + @param key: A 24 byte string. + + @type IV: str + @param IV: An 8 byte string + + @rtype: L{tlslite.utils.TripleDES} + @return: A 3DES object. + """ + if implList == None: + implList = ["cryptlib", "openssl", "pycrypto"] + + for impl in implList: + if impl == "cryptlib" and cryptomath.cryptlibpyLoaded: + return Cryptlib_TripleDES.new(key, 2, IV) + elif impl == "openssl" and cryptomath.m2cryptoLoaded: + return OpenSSL_TripleDES.new(key, 2, IV) + elif impl == "pycrypto" and cryptomath.pycryptoLoaded: + return PyCrypto_TripleDES.new(key, 2, IV) + raise NotImplementedError() \ No newline at end of file diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/codec.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/codec.py new file mode 100644 index 0000000..13022a0 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/codec.py @@ -0,0 +1,94 @@ +"""Classes for reading/writing binary data (such as TLS records).""" + +from compat import * + +class Writer: + def __init__(self, length=0): + #If length is zero, then this is just a "trial run" to determine length + self.index = 0 + self.bytes = createByteArrayZeros(length) + + def add(self, x, length): + if self.bytes: + 
newIndex = self.index+length-1 + while newIndex >= self.index: + self.bytes[newIndex] = x & 0xFF + x >>= 8 + newIndex -= 1 + self.index += length + + def addFixSeq(self, seq, length): + if self.bytes: + for e in seq: + self.add(e, length) + else: + self.index += len(seq)*length + + def addVarSeq(self, seq, length, lengthLength): + if self.bytes: + self.add(len(seq)*length, lengthLength) + for e in seq: + self.add(e, length) + else: + self.index += lengthLength + (len(seq)*length) + + +class Parser: + def __init__(self, bytes): + self.bytes = bytes + self.index = 0 + + def get(self, length): + if self.index + length > len(self.bytes): + raise SyntaxError() + x = 0 + for count in range(length): + x <<= 8 + x |= self.bytes[self.index] + self.index += 1 + return x + + def getFixBytes(self, lengthBytes): + bytes = self.bytes[self.index : self.index+lengthBytes] + self.index += lengthBytes + return bytes + + def getVarBytes(self, lengthLength): + lengthBytes = self.get(lengthLength) + return self.getFixBytes(lengthBytes) + + def getFixList(self, length, lengthList): + l = [0] * lengthList + for x in range(lengthList): + l[x] = self.get(length) + return l + + def getVarList(self, length, lengthLength): + lengthList = self.get(lengthLength) + if lengthList % length != 0: + raise SyntaxError() + lengthList = int(lengthList/length) + l = [0] * lengthList + for x in range(lengthList): + l[x] = self.get(length) + return l + + def startLengthCheck(self, lengthLength): + self.lengthCheck = self.get(lengthLength) + self.indexCheck = self.index + + def setLengthCheck(self, length): + self.lengthCheck = length + self.indexCheck = self.index + + def stopLengthCheck(self): + if (self.index - self.indexCheck) != self.lengthCheck: + raise SyntaxError() + + def atLengthCheck(self): + if (self.index - self.indexCheck) < self.lengthCheck: + return False + elif (self.index - self.indexCheck) == self.lengthCheck: + return True + else: + raise SyntaxError() \ No newline at end of file diff 
--git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/compat.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/compat.py new file mode 100644 index 0000000..7d2d925 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/compat.py @@ -0,0 +1,140 @@ +"""Miscellaneous functions to mask Python version differences.""" + +import sys +import os + +if sys.version_info < (2,2): + raise AssertionError("Python 2.2 or later required") + +if sys.version_info < (2,3): + + def enumerate(collection): + return zip(range(len(collection)), collection) + + class Set: + def __init__(self, seq=None): + self.values = {} + if seq: + for e in seq: + self.values[e] = None + + def add(self, e): + self.values[e] = None + + def discard(self, e): + if e in self.values.keys(): + del(self.values[e]) + + def union(self, s): + ret = Set() + for e in self.values.keys(): + ret.values[e] = None + for e in s.values.keys(): + ret.values[e] = None + return ret + + def issubset(self, other): + for e in self.values.keys(): + if e not in other.values.keys(): + return False + return True + + def __nonzero__( self): + return len(self.values.keys()) + + def __contains__(self, e): + return e in self.values.keys() + + def __iter__(self): + return iter(set.values.keys()) + + +if os.name != "java": + + import array + def createByteArraySequence(seq): + return array.array('B', seq) + def createByteArrayZeros(howMany): + return array.array('B', [0] * howMany) + def concatArrays(a1, a2): + return a1+a2 + + def bytesToString(bytes): + return bytes.tostring() + def stringToBytes(s): + bytes = createByteArrayZeros(0) + bytes.fromstring(s) + return bytes + + import math + def numBits(n): + if n==0: + return 0 + s = "%x" % n + return ((len(s)-1)*4) + \ + {'0':0, '1':1, '2':2, '3':2, + '4':3, '5':3, '6':3, '7':3, + '8':4, '9':4, 'a':4, 'b':4, + 'c':4, 'd':4, 'e':4, 'f':4, + }[s[0]] + return int(math.floor(math.log(n, 2))+1) + + BaseException = Exception + import sys + 
import traceback + def formatExceptionTrace(e): + newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)) + return newStr + +else: + #Jython 2.1 is missing lots of python 2.3 stuff, + #which we have to emulate here: + #NOTE: JYTHON SUPPORT NO LONGER WORKS, DUE TO USE OF GENERATORS. + #THIS CODE IS LEFT IN SO THAT ONE JYTHON UPDATES TO 2.2, IT HAS A + #CHANCE OF WORKING AGAIN. + + import java + import jarray + + def createByteArraySequence(seq): + if isinstance(seq, type("")): #If it's a string, convert + seq = [ord(c) for c in seq] + return jarray.array(seq, 'h') #use short instead of bytes, cause bytes are signed + def createByteArrayZeros(howMany): + return jarray.zeros(howMany, 'h') #use short instead of bytes, cause bytes are signed + def concatArrays(a1, a2): + l = list(a1)+list(a2) + return createByteArraySequence(l) + + #WAY TOO SLOW - MUST BE REPLACED------------ + def bytesToString(bytes): + return "".join([chr(b) for b in bytes]) + + def stringToBytes(s): + bytes = createByteArrayZeros(len(s)) + for count, c in enumerate(s): + bytes[count] = ord(c) + return bytes + #WAY TOO SLOW - MUST BE REPLACED------------ + + def numBits(n): + if n==0: + return 0 + n= 1L * n; #convert to long, if it isn't already + return n.__tojava__(java.math.BigInteger).bitLength() + + #Adjust the string to an array of bytes + def stringToJavaByteArray(s): + bytes = jarray.zeros(len(s), 'b') + for count, c in enumerate(s): + x = ord(c) + if x >= 128: x -= 256 + bytes[count] = x + return bytes + + BaseException = java.lang.Exception + import sys + import traceback + def formatExceptionTrace(e): + newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)) + return newStr \ No newline at end of file diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/cryptomath.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/cryptomath.py new file mode 100644 index 0000000..51d6dff --- 
/dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/cryptomath.py @@ -0,0 +1,400 @@ +"""cryptomath module + +This module has basic math/crypto code.""" + +import os +import math +import base64 +import binascii +import sha + +from compat import * + + +# ************************************************************************** +# Load Optional Modules +# ************************************************************************** + +# Try to load M2Crypto/OpenSSL +try: + from M2Crypto import m2 + m2cryptoLoaded = True + +except ImportError: + m2cryptoLoaded = False + + +# Try to load cryptlib +try: + import cryptlib_py + try: + cryptlib_py.cryptInit() + except cryptlib_py.CryptException, e: + #If tlslite and cryptoIDlib are both present, + #they might each try to re-initialize this, + #so we're tolerant of that. + if e[0] != cryptlib_py.CRYPT_ERROR_INITED: + raise + cryptlibpyLoaded = True + +except ImportError: + cryptlibpyLoaded = False + +#Try to load GMPY +try: + import gmpy + gmpyLoaded = True +except ImportError: + gmpyLoaded = False + +#Try to load pycrypto +try: + import Crypto.Cipher.AES + pycryptoLoaded = True +except ImportError: + pycryptoLoaded = False + + +# ************************************************************************** +# PRNG Functions +# ************************************************************************** + +# Get os.urandom PRNG +try: + os.urandom(1) + def getRandomBytes(howMany): + return stringToBytes(os.urandom(howMany)) + prngName = "os.urandom" + +except: + # Else get cryptlib PRNG + if cryptlibpyLoaded: + def getRandomBytes(howMany): + randomKey = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED, + cryptlib_py.CRYPT_ALGO_AES) + cryptlib_py.cryptSetAttribute(randomKey, + cryptlib_py.CRYPT_CTXINFO_MODE, + cryptlib_py.CRYPT_MODE_OFB) + cryptlib_py.cryptGenerateKey(randomKey) + bytes = createByteArrayZeros(howMany) + cryptlib_py.cryptEncrypt(randomKey, bytes) + return bytes + prngName = "cryptlib" + 
+ else: + #Else get UNIX /dev/urandom PRNG + try: + devRandomFile = open("/dev/urandom", "rb") + def getRandomBytes(howMany): + return stringToBytes(devRandomFile.read(howMany)) + prngName = "/dev/urandom" + except IOError: + #Else get Win32 CryptoAPI PRNG + try: + import win32prng + def getRandomBytes(howMany): + s = win32prng.getRandomBytes(howMany) + if len(s) != howMany: + raise AssertionError() + return stringToBytes(s) + prngName ="CryptoAPI" + except ImportError: + #Else no PRNG :-( + def getRandomBytes(howMany): + raise NotImplementedError("No Random Number Generator "\ + "available.") + prngName = "None" + +# ************************************************************************** +# Converter Functions +# ************************************************************************** + +def bytesToNumber(bytes): + total = 0L + multiplier = 1L + for count in range(len(bytes)-1, -1, -1): + byte = bytes[count] + total += multiplier * byte + multiplier *= 256 + return total + +def numberToBytes(n): + howManyBytes = numBytes(n) + bytes = createByteArrayZeros(howManyBytes) + for count in range(howManyBytes-1, -1, -1): + bytes[count] = int(n % 256) + n >>= 8 + return bytes + +def bytesToBase64(bytes): + s = bytesToString(bytes) + return stringToBase64(s) + +def base64ToBytes(s): + s = base64ToString(s) + return stringToBytes(s) + +def numberToBase64(n): + bytes = numberToBytes(n) + return bytesToBase64(bytes) + +def base64ToNumber(s): + bytes = base64ToBytes(s) + return bytesToNumber(bytes) + +def stringToNumber(s): + bytes = stringToBytes(s) + return bytesToNumber(bytes) + +def numberToString(s): + bytes = numberToBytes(s) + return bytesToString(bytes) + +def base64ToString(s): + try: + return base64.decodestring(s) + except binascii.Error, e: + raise SyntaxError(e) + except binascii.Incomplete, e: + raise SyntaxError(e) + +def stringToBase64(s): + return base64.encodestring(s).replace("\n", "") + +def mpiToNumber(mpi): #mpi is an openssl-format bignum string + if 
(ord(mpi[4]) & 0x80) !=0: #Make sure this is a positive number + raise AssertionError() + bytes = stringToBytes(mpi[4:]) + return bytesToNumber(bytes) + +def numberToMPI(n): + bytes = numberToBytes(n) + ext = 0 + #If the high-order bit is going to be set, + #add an extra byte of zeros + if (numBits(n) & 0x7)==0: + ext = 1 + length = numBytes(n) + ext + bytes = concatArrays(createByteArrayZeros(4+ext), bytes) + bytes[0] = (length >> 24) & 0xFF + bytes[1] = (length >> 16) & 0xFF + bytes[2] = (length >> 8) & 0xFF + bytes[3] = length & 0xFF + return bytesToString(bytes) + + + +# ************************************************************************** +# Misc. Utility Functions +# ************************************************************************** + +def numBytes(n): + if n==0: + return 0 + bits = numBits(n) + return int(math.ceil(bits / 8.0)) + +def hashAndBase64(s): + return stringToBase64(sha.sha(s).digest()) + +def getBase64Nonce(numChars=22): #defaults to an 132 bit nonce + bytes = getRandomBytes(numChars) + bytesStr = "".join([chr(b) for b in bytes]) + return stringToBase64(bytesStr)[:numChars] + + +# ************************************************************************** +# Big Number Math +# ************************************************************************** + +def getRandomNumber(low, high): + if low >= high: + raise AssertionError() + howManyBits = numBits(high) + howManyBytes = numBytes(high) + lastBits = howManyBits % 8 + while 1: + bytes = getRandomBytes(howManyBytes) + if lastBits: + bytes[0] = bytes[0] % (1 << lastBits) + n = bytesToNumber(bytes) + if n >= low and n < high: + return n + +def gcd(a,b): + a, b = max(a,b), min(a,b) + while b: + a, b = b, a % b + return a + +def lcm(a, b): + #This will break when python division changes, but we can't use // cause + #of Jython + return (a * b) / gcd(a, b) + +#Returns inverse of a mod b, zero if none +#Uses Extended Euclidean Algorithm +def invMod(a, b): + c, d = a, b + uc, ud = 1, 0 + while 
c != 0: + #This will break when python division changes, but we can't use // + #cause of Jython + q = d / c + c, d = d-(q*c), c + uc, ud = ud - (q * uc), uc + if d == 1: + return ud % b + return 0 + + +if gmpyLoaded: + def powMod(base, power, modulus): + base = gmpy.mpz(base) + power = gmpy.mpz(power) + modulus = gmpy.mpz(modulus) + result = pow(base, power, modulus) + return long(result) + +else: + #Copied from Bryan G. Olson's post to comp.lang.python + #Does left-to-right instead of pow()'s right-to-left, + #thus about 30% faster than the python built-in with small bases + def powMod(base, power, modulus): + nBitScan = 5 + + """ Return base**power mod modulus, using multi bit scanning + with nBitScan bits at a time.""" + + #TREV - Added support for negative exponents + negativeResult = False + if (power < 0): + power *= -1 + negativeResult = True + + exp2 = 2**nBitScan + mask = exp2 - 1 + + # Break power into a list of digits of nBitScan bits. + # The list is recursive so easy to read in reverse direction. 
+ nibbles = None + while power: + nibbles = int(power & mask), nibbles + power = power >> nBitScan + + # Make a table of powers of base up to 2**nBitScan - 1 + lowPowers = [1] + for i in xrange(1, exp2): + lowPowers.append((lowPowers[i-1] * base) % modulus) + + # To exponentiate by the first nibble, look it up in the table + nib, nibbles = nibbles + prod = lowPowers[nib] + + # For the rest, square nBitScan times, then multiply by + # base^nibble + while nibbles: + nib, nibbles = nibbles + for i in xrange(nBitScan): + prod = (prod * prod) % modulus + if nib: prod = (prod * lowPowers[nib]) % modulus + + #TREV - Added support for negative exponents + if negativeResult: + prodInv = invMod(prod, modulus) + #Check to make sure the inverse is correct + if (prod * prodInv) % modulus != 1: + raise AssertionError() + return prodInv + return prod + + +#Pre-calculate a sieve of the ~100 primes < 1000: +def makeSieve(n): + sieve = range(n) + for count in range(2, int(math.sqrt(n))): + if sieve[count] == 0: + continue + x = sieve[count] * 2 + while x < len(sieve): + sieve[x] = 0 + x += sieve[count] + sieve = [x for x in sieve[2:] if x] + return sieve + +sieve = makeSieve(1000) + +def isPrime(n, iterations=5, display=False): + #Trial division with sieve + for x in sieve: + if x >= n: return True + if n % x == 0: return False + #Passed trial division, proceed to Rabin-Miller + #Rabin-Miller implemented per Ferguson & Schneier + #Compute s, t for Rabin-Miller + if display: print "*", + s, t = n-1, 0 + while s % 2 == 0: + s, t = s/2, t+1 + #Repeat Rabin-Miller x times + a = 2 #Use 2 as a base for first iteration speedup, per HAC + for count in range(iterations): + v = powMod(a, s, n) + if v==1: + continue + i = 0 + while v != n-1: + if i == t-1: + return False + else: + v, i = powMod(v, 2, n), i+1 + a = getRandomNumber(2, n) + return True + +def getRandomPrime(bits, display=False): + if bits < 10: + raise AssertionError() + #The 1.5 ensures the 2 MSBs are set + #Thus, when used for 
p,q in RSA, n will have its MSB set + # + #Since 30 is lcm(2,3,5), we'll set our test numbers to + #29 % 30 and keep them there + low = (2L ** (bits-1)) * 3/2 + high = 2L ** bits - 30 + p = getRandomNumber(low, high) + p += 29 - (p % 30) + while 1: + if display: print ".", + p += 30 + if p >= high: + p = getRandomNumber(low, high) + p += 29 - (p % 30) + if isPrime(p, display=display): + return p + +#Unused at the moment... +def getRandomSafePrime(bits, display=False): + if bits < 10: + raise AssertionError() + #The 1.5 ensures the 2 MSBs are set + #Thus, when used for p,q in RSA, n will have its MSB set + # + #Since 30 is lcm(2,3,5), we'll set our test numbers to + #29 % 30 and keep them there + low = (2 ** (bits-2)) * 3/2 + high = (2 ** (bits-1)) - 30 + q = getRandomNumber(low, high) + q += 29 - (q % 30) + while 1: + if display: print ".", + q += 30 + if (q >= high): + q = getRandomNumber(low, high) + q += 29 - (q % 30) + #Ideas from Tom Wu's SRP code + #Do trial division on p and q before Rabin-Miller + if isPrime(q, 0, display=display): + p = (2 * q) + 1 + if isPrime(p, display=display): + if isPrime(q, display=display): + return p diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/dateFuncs.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/dateFuncs.py new file mode 100644 index 0000000..38812eb --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/dateFuncs.py @@ -0,0 +1,75 @@ + +import os + +#Functions for manipulating datetime objects +#CCYY-MM-DDThh:mm:ssZ +def parseDateClass(s): + year, month, day = s.split("-") + day, tail = day[:2], day[2:] + hour, minute, second = tail[1:].split(":") + second = second[:2] + year, month, day = int(year), int(month), int(day) + hour, minute, second = int(hour), int(minute), int(second) + return createDateClass(year, month, day, hour, minute, second) + + +if os.name != "java": + from datetime import datetime, timedelta + + #Helper functions for working with a 
date/time class + def createDateClass(year, month, day, hour, minute, second): + return datetime(year, month, day, hour, minute, second) + + def printDateClass(d): + #Split off fractional seconds, append 'Z' + return d.isoformat().split(".")[0]+"Z" + + def getNow(): + return datetime.utcnow() + + def getHoursFromNow(hours): + return datetime.utcnow() + timedelta(hours=hours) + + def getMinutesFromNow(minutes): + return datetime.utcnow() + timedelta(minutes=minutes) + + def isDateClassExpired(d): + return d < datetime.utcnow() + + def isDateClassBefore(d1, d2): + return d1 < d2 + +else: + #Jython 2.1 is missing lots of python 2.3 stuff, + #which we have to emulate here: + import java + import jarray + + def createDateClass(year, month, day, hour, minute, second): + c = java.util.Calendar.getInstance() + c.setTimeZone(java.util.TimeZone.getTimeZone("UTC")) + c.set(year, month-1, day, hour, minute, second) + return c + + def printDateClass(d): + return "%04d-%02d-%02dT%02d:%02d:%02dZ" % \ + (d.get(d.YEAR), d.get(d.MONTH)+1, d.get(d.DATE), \ + d.get(d.HOUR_OF_DAY), d.get(d.MINUTE), d.get(d.SECOND)) + + def getNow(): + c = java.util.Calendar.getInstance() + c.setTimeZone(java.util.TimeZone.getTimeZone("UTC")) + c.get(c.HOUR) #force refresh? + return c + + def getHoursFromNow(hours): + d = getNow() + d.add(d.HOUR, hours) + return d + + def isDateClassExpired(d): + n = getNow() + return d.before(n) + + def isDateClassBefore(d1, d2): + return d1.before(d2) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/hmac.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/hmac.py new file mode 100644 index 0000000..fe8feec --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/hmac.py @@ -0,0 +1,104 @@ +"""HMAC (Keyed-Hashing for Message Authentication) Python module. + +Implements the HMAC algorithm as described by RFC 2104. 
+ +(This file is modified from the standard library version to do faster +copying) +""" + +def _strxor(s1, s2): + """Utility method. XOR the two strings s1 and s2 (must have same length). + """ + return "".join(map(lambda x, y: chr(ord(x) ^ ord(y)), s1, s2)) + +# The size of the digests returned by HMAC depends on the underlying +# hashing module used. +digest_size = None + +class HMAC: + """RFC2104 HMAC class. + + This supports the API for Cryptographic Hash Functions (PEP 247). + """ + + def __init__(self, key, msg = None, digestmod = None): + """Create a new HMAC object. + + key: key for the keyed hash object. + msg: Initial input for the hash, if provided. + digestmod: A module supporting PEP 247. Defaults to the md5 module. + """ + if digestmod is None: + import md5 + digestmod = md5 + + if key == None: #TREVNEW - for faster copying + return #TREVNEW + + self.digestmod = digestmod + self.outer = digestmod.new() + self.inner = digestmod.new() + self.digest_size = digestmod.digest_size + + blocksize = 64 + ipad = "\x36" * blocksize + opad = "\x5C" * blocksize + + if len(key) > blocksize: + key = digestmod.new(key).digest() + + key = key + chr(0) * (blocksize - len(key)) + self.outer.update(_strxor(key, opad)) + self.inner.update(_strxor(key, ipad)) + if msg is not None: + self.update(msg) + +## def clear(self): +## raise NotImplementedError, "clear() method not available in HMAC." + + def update(self, msg): + """Update this hashing object with the string msg. + """ + self.inner.update(msg) + + def copy(self): + """Return a separate copy of this hashing object. + + An update to this copy won't affect the original object. + """ + other = HMAC(None) #TREVNEW - for faster copying + other.digest_size = self.digest_size #TREVNEW + other.digestmod = self.digestmod + other.inner = self.inner.copy() + other.outer = self.outer.copy() + return other + + def digest(self): + """Return the hash value of this hashing object. + + This returns a string containing 8-bit data. 
The object is + not altered in any way by this function; you can continue + updating the object after calling this function. + """ + h = self.outer.copy() + h.update(self.inner.digest()) + return h.digest() + + def hexdigest(self): + """Like digest(), but returns a string of hexadecimal digits instead. + """ + return "".join([hex(ord(x))[2:].zfill(2) + for x in tuple(self.digest())]) + +def new(key, msg = None, digestmod = None): + """Create a new hashing object and return it. + + key: The starting key for the hash. + msg: if available, will immediately be hashed into the object's starting + state. + + You can now feed arbitrary strings into the object using its update() + method, and can ask for the hash value at any time by calling its digest() + method. + """ + return HMAC(key, msg, digestmod) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/jython_compat.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/jython_compat.py new file mode 100644 index 0000000..1245183 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/jython_compat.py @@ -0,0 +1,195 @@ +"""Miscellaneous functions to mask Python/Jython differences.""" + +import os +import sha + +if os.name != "java": + BaseException = Exception + + from sets import Set + import array + import math + + def createByteArraySequence(seq): + return array.array('B', seq) + def createByteArrayZeros(howMany): + return array.array('B', [0] * howMany) + def concatArrays(a1, a2): + return a1+a2 + + def bytesToString(bytes): + return bytes.tostring() + + def stringToBytes(s): + bytes = createByteArrayZeros(0) + bytes.fromstring(s) + return bytes + + def numBits(n): + if n==0: + return 0 + return int(math.floor(math.log(n, 2))+1) + + class CertChainBase: pass + class SelfTestBase: pass + class ReportFuncBase: pass + + #Helper functions for working with sets (from Python 2.3) + def iterSet(set): + return iter(set) + + def getListFromSet(set): + return list(set) + 
+ #Factory function for getting a SHA1 object + def getSHA1(s): + return sha.sha(s) + + import sys + import traceback + + def formatExceptionTrace(e): + newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)) + return newStr + +else: + #Jython 2.1 is missing lots of python 2.3 stuff, + #which we have to emulate here: + import java + import jarray + + BaseException = java.lang.Exception + + def createByteArraySequence(seq): + if isinstance(seq, type("")): #If it's a string, convert + seq = [ord(c) for c in seq] + return jarray.array(seq, 'h') #use short instead of bytes, cause bytes are signed + def createByteArrayZeros(howMany): + return jarray.zeros(howMany, 'h') #use short instead of bytes, cause bytes are signed + def concatArrays(a1, a2): + l = list(a1)+list(a2) + return createByteArraySequence(l) + + #WAY TOO SLOW - MUST BE REPLACED------------ + def bytesToString(bytes): + return "".join([chr(b) for b in bytes]) + + def stringToBytes(s): + bytes = createByteArrayZeros(len(s)) + for count, c in enumerate(s): + bytes[count] = ord(c) + return bytes + #WAY TOO SLOW - MUST BE REPLACED------------ + + def numBits(n): + if n==0: + return 0 + n= 1L * n; #convert to long, if it isn't already + return n.__tojava__(java.math.BigInteger).bitLength() + + #This properly creates static methods for Jython + class staticmethod: + def __init__(self, anycallable): self.__call__ = anycallable + + #Properties are not supported for Jython + class property: + def __init__(self, anycallable): pass + + #True and False have to be specially defined + False = 0 + True = 1 + + class StopIteration(Exception): pass + + def enumerate(collection): + return zip(range(len(collection)), collection) + + class Set: + def __init__(self, seq=None): + self.values = {} + if seq: + for e in seq: + self.values[e] = None + + def add(self, e): + self.values[e] = None + + def discard(self, e): + if e in self.values.keys(): + del(self.values[e]) + + def union(self, s): 
+ ret = Set() + for e in self.values.keys(): + ret.values[e] = None + for e in s.values.keys(): + ret.values[e] = None + return ret + + def issubset(self, other): + for e in self.values.keys(): + if e not in other.values.keys(): + return False + return True + + def __nonzero__( self): + return len(self.values.keys()) + + def __contains__(self, e): + return e in self.values.keys() + + def iterSet(set): + return set.values.keys() + + def getListFromSet(set): + return set.values.keys() + + """ + class JCE_SHA1: + def __init__(self, s=None): + self.md = java.security.MessageDigest.getInstance("SHA1") + if s: + self.update(s) + + def update(self, s): + self.md.update(s) + + def copy(self): + sha1 = JCE_SHA1() + sha1.md = self.md.clone() + return sha1 + + def digest(self): + digest = self.md.digest() + bytes = jarray.zeros(20, 'h') + for count in xrange(20): + x = digest[count] + if x < 0: x += 256 + bytes[count] = x + return bytes + """ + + #Factory function for getting a SHA1 object + #The JCE_SHA1 class is way too slow... + #the sha.sha object we use instead is broken in the jython 2.1 + #release, and needs to be patched + def getSHA1(s): + #return JCE_SHA1(s) + return sha.sha(s) + + + #Adjust the string to an array of bytes + def stringToJavaByteArray(s): + bytes = jarray.zeros(len(s), 'b') + for count, c in enumerate(s): + x = ord(c) + if x >= 128: x -= 256 + bytes[count] = x + return bytes + + import sys + import traceback + + def formatExceptionTrace(e): + newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)) + return newStr diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/keyfactory.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/keyfactory.py new file mode 100644 index 0000000..5005af7 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/keyfactory.py @@ -0,0 +1,243 @@ +"""Factory functions for asymmetric cryptography. 
+@sort: generateRSAKey, parseXMLKey, parsePEMKey, parseAsPublicKey, +parseAsPrivateKey +""" + +from compat import * + +from RSAKey import RSAKey +from Python_RSAKey import Python_RSAKey +import cryptomath + +if cryptomath.m2cryptoLoaded: + from OpenSSL_RSAKey import OpenSSL_RSAKey + +if cryptomath.pycryptoLoaded: + from PyCrypto_RSAKey import PyCrypto_RSAKey + +# ************************************************************************** +# Factory Functions for RSA Keys +# ************************************************************************** + +def generateRSAKey(bits, implementations=["openssl", "python"]): + """Generate an RSA key with the specified bit length. + + @type bits: int + @param bits: Desired bit length of the new key's modulus. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: A new RSA private key. + """ + for implementation in implementations: + if implementation == "openssl" and cryptomath.m2cryptoLoaded: + return OpenSSL_RSAKey.generate(bits) + elif implementation == "python": + return Python_RSAKey.generate(bits) + raise ValueError("No acceptable implementations") + +def parseXMLKey(s, private=False, public=False, implementations=["python"]): + """Parse an XML-format key. + + The XML format used here is specific to tlslite and cryptoIDlib. The + format can store the public component of a key, or the public and + private components. For example:: + + + 4a5yzB8oGNlHo866CAspAC47M4Fvx58zwK8pou... + Aw== + + + + 4a5yzB8oGNlHo866CAspAC47M4Fvx58zwK8pou... + Aw== + JZ0TIgUxWXmL8KJ0VqyG1V0J3ern9pqIoB0xmy... +

5PreIj6z6ldIGL1V4+1C36dQFHNCQHJvW52GXc... + /E/wDit8YXPCxx126zTq2ilQ3IcW54NJYyNjiZ... + mKc+wX8inDowEH45Qp4slRo1YveBgExKPROu6... + qDVKtBz9lk0shL5PR3ickXDgkwS576zbl2ztB... + j6E8EA7dNsTImaXexAmLA1DoeArsYeFAInr... + + + @type s: str + @param s: A string containing an XML public or private key. + + @type private: bool + @param private: If True, a L{SyntaxError} will be raised if the private + key component is not present. + + @type public: bool + @param public: If True, the private key component (if present) will be + discarded, so this function will always return a public key. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: An RSA key. + + @raise SyntaxError: If the key is not properly formatted. + """ + for implementation in implementations: + if implementation == "python": + key = Python_RSAKey.parseXML(s) + break + else: + raise ValueError("No acceptable implementations") + + return _parseKeyHelper(key, private, public) + +#Parse as an OpenSSL or Python key +def parsePEMKey(s, private=False, public=False, passwordCallback=None, + implementations=["openssl", "python"]): + """Parse a PEM-format key. + + The PEM format is used by OpenSSL and other tools. The + format is typically used to store both the public and private + components of a key. 
For example:: + + -----BEGIN RSA PRIVATE KEY----- + MIICXQIBAAKBgQDYscuoMzsGmW0pAYsmyHltxB2TdwHS0dImfjCMfaSDkfLdZY5+ + dOWORVns9etWnr194mSGA1F0Pls/VJW8+cX9+3vtJV8zSdANPYUoQf0TP7VlJxkH + dSRkUbEoz5bAAs/+970uos7n7iXQIni+3erUTdYEk2iWnMBjTljfgbK/dQIDAQAB + AoGAJHoJZk75aKr7DSQNYIHuruOMdv5ZeDuJvKERWxTrVJqE32/xBKh42/IgqRrc + esBN9ZregRCd7YtxoL+EVUNWaJNVx2mNmezEznrc9zhcYUrgeaVdFO2yBF1889zO + gCOVwrO8uDgeyj6IKa25H6c1N13ih/o7ZzEgWbGG+ylU1yECQQDv4ZSJ4EjSh/Fl + aHdz3wbBa/HKGTjC8iRy476Cyg2Fm8MZUe9Yy3udOrb5ZnS2MTpIXt5AF3h2TfYV + VoFXIorjAkEA50FcJmzT8sNMrPaV8vn+9W2Lu4U7C+K/O2g1iXMaZms5PC5zV5aV + CKXZWUX1fq2RaOzlbQrpgiolhXpeh8FjxwJBAOFHzSQfSsTNfttp3KUpU0LbiVvv + i+spVSnA0O4rq79KpVNmK44Mq67hsW1P11QzrzTAQ6GVaUBRv0YS061td1kCQHnP + wtN2tboFR6lABkJDjxoGRvlSt4SOPr7zKGgrWjeiuTZLHXSAnCY+/hr5L9Q3ZwXG + 6x6iBdgLjVIe4BZQNtcCQQDXGv/gWinCNTN3MPWfTW/RGzuMYVmyBFais0/VrgdH + h1dLpztmpQqfyH/zrBXQ9qL/zR4ojS6XYneO/U18WpEe + -----END RSA PRIVATE KEY----- + + To generate a key like this with OpenSSL, run:: + + openssl genrsa 2048 > key.pem + + This format also supports password-encrypted private keys. TLS + Lite can only handle password-encrypted private keys when OpenSSL + and M2Crypto are installed. In this case, passwordCallback will be + invoked to query the user for the password. + + @type s: str + @param s: A string containing a PEM-encoded public or private key. + + @type private: bool + @param private: If True, a L{SyntaxError} will be raised if the + private key component is not present. + + @type public: bool + @param public: If True, the private key component (if present) will + be discarded, so this function will always return a public key. + + @type passwordCallback: callable + @param passwordCallback: This function will be called, with no + arguments, if the PEM-encoded private key is password-encrypted. + The callback should return the password string. If the password is + incorrect, SyntaxError will be raised. 
If no callback is passed + and the key is password-encrypted, a prompt will be displayed at + the console. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: An RSA key. + + @raise SyntaxError: If the key is not properly formatted. + """ + for implementation in implementations: + if implementation == "openssl" and cryptomath.m2cryptoLoaded: + key = OpenSSL_RSAKey.parse(s, passwordCallback) + break + elif implementation == "python": + key = Python_RSAKey.parsePEM(s) + break + else: + raise ValueError("No acceptable implementations") + + return _parseKeyHelper(key, private, public) + + +def _parseKeyHelper(key, private, public): + if private: + if not key.hasPrivateKey(): + raise SyntaxError("Not a private key!") + + if public: + return _createPublicKey(key) + + if private: + if hasattr(key, "d"): + return _createPrivateKey(key) + else: + return key + + return key + +def parseAsPublicKey(s): + """Parse an XML or PEM-formatted public key. + + @type s: str + @param s: A string containing an XML or PEM-encoded public or private key. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: An RSA public key. + + @raise SyntaxError: If the key is not properly formatted. + """ + try: + return parsePEMKey(s, public=True) + except: + return parseXMLKey(s, public=True) + +def parsePrivateKey(s): + """Parse an XML or PEM-formatted private key. + + @type s: str + @param s: A string containing an XML or PEM-encoded private key. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: An RSA private key. + + @raise SyntaxError: If the key is not properly formatted. + """ + try: + return parsePEMKey(s, private=True) + except: + return parseXMLKey(s, private=True) + +def _createPublicKey(key): + """ + Create a new public key. Discard any private component, + and return the most efficient key possible. + """ + if not isinstance(key, RSAKey): + raise AssertionError() + return _createPublicRSAKey(key.n, key.e) + +def _createPrivateKey(key): + """ + Create a new private key. 
Return the most efficient key possible. + """ + if not isinstance(key, RSAKey): + raise AssertionError() + if not key.hasPrivateKey(): + raise AssertionError() + return _createPrivateRSAKey(key.n, key.e, key.d, key.p, key.q, key.dP, + key.dQ, key.qInv) + +def _createPublicRSAKey(n, e, implementations = ["openssl", "pycrypto", + "python"]): + for implementation in implementations: + if implementation == "openssl" and cryptomath.m2cryptoLoaded: + return OpenSSL_RSAKey(n, e) + elif implementation == "pycrypto" and cryptomath.pycryptoLoaded: + return PyCrypto_RSAKey(n, e) + elif implementation == "python": + return Python_RSAKey(n, e) + raise ValueError("No acceptable implementations") + +def _createPrivateRSAKey(n, e, d, p, q, dP, dQ, qInv, + implementations = ["pycrypto", "python"]): + for implementation in implementations: + if implementation == "pycrypto" and cryptomath.pycryptoLoaded: + return PyCrypto_RSAKey(n, e, d, p, q, dP, dQ, qInv) + elif implementation == "python": + return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv) + raise ValueError("No acceptable implementations") diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/rijndael.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/rijndael.py new file mode 100644 index 0000000..cb2f547 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/tlslite/utils/rijndael.py @@ -0,0 +1,392 @@ +""" +A pure python (slow) implementation of rijndael with a decent interface + +To include - + +from rijndael import rijndael + +To do a key setup - + +r = rijndael(key, block_size = 16) + +key must be a string of length 16, 24, or 32 +blocksize must be 16, 24, or 32. 
# NOTE(review): this chunk opens mid-way through the rijndael module
# docstring (it started on the previous line of this dump).  Usage, for the
# record: r = rijndael(key, block_size); ciphertext = r.encrypt(plaintext);
# plaintext = r.decrypt(ciphertext).  Keys must be strings of length 16, 24
# or 32 and blocks must match block_size; anything else raises ValueError.

# ported from the Java reference code by Bram Cohen, bram@gawth.com, April 2001
# this code is public domain, unless someone makes
# an intellectual property claim against the reference
# code, in which case it can be made public domain by
# deleting all the comments and renaming all the variables

import copy


#-----------------------
# TREV - ADDED BECAUSE THERE'S WARNINGS ABOUT INT OVERFLOW BEHAVIOR CHANGING
# IN 2.4.  The ``exceptions`` module only exists on Python 2, so guard the
# import to keep this module importable on Python 3 (where the FutureWarning
# in question no longer exists and nothing needs silencing).
import os
if os.name != "java":
    try:
        import exceptions
        if hasattr(exceptions, "FutureWarning"):
            import warnings
            warnings.filterwarnings("ignore", category=FutureWarning, append=1)
    except ImportError:
        pass
#-----------------------


# Per-row rotation amounts indexed by [block-size class][row]; column 0 is
# used for encryption, column 1 for decryption.
shifts = [[[0, 0], [1, 3], [2, 2], [3, 1]],
          [[0, 0], [1, 5], [2, 4], [3, 3]],
          [[0, 0], [1, 7], [3, 5], [4, 4]]]

# num_rounds[key size in bytes][block size in bytes]
num_rounds = {16: {16: 10, 24: 12, 32: 14},
              24: {16: 12, 24: 12, 32: 14},
              32: {16: 14, 24: 14, 32: 14}}

A = [[1, 1, 1, 1, 1, 0, 0, 0],
     [0, 1, 1, 1, 1, 1, 0, 0],
     [0, 0, 1, 1, 1, 1, 1, 0],
     [0, 0, 0, 1, 1, 1, 1, 1],
     [1, 0, 0, 0, 1, 1, 1, 1],
     [1, 1, 0, 0, 0, 1, 1, 1],
     [1, 1, 1, 0, 0, 0, 1, 1],
     [1, 1, 1, 1, 0, 0, 0, 1]]

# produce log and alog tables, needed for multiplying in the
# field GF(2^m) (generator = 3)
alog = [1]
for i in range(255):
    j = (alog[-1] << 1) ^ alog[-1]
    if j & 0x100 != 0:
        j ^= 0x11B
    alog.append(j)

log = [0] * 256
for i in range(1, 255):
    log[alog[i]] = i

# multiply two elements of GF(2^m)
def mul(a, b):
    if a == 0 or b == 0:
        return 0
    return alog[(log[a & 0xFF] + log[b & 0xFF]) % 255]

# substitution box based on F^{-1}(x)
box = [[0] * 8 for i in range(256)]
box[1][7] = 1
for i in range(2, 256):
    j = alog[255 - log[i]]
    for t in range(8):
        box[i][t] = (j >> (7 - t)) & 0x01

B = [0, 1, 1, 0, 0, 0, 1, 1]

# affine transform:  box[i] <- B + A*box[i]
cox = [[0] * 8 for i in range(256)]
for i in range(256):
    for t in range(8):
        cox[i][t] = B[t]
        for j in range(8):
            cox[i][t] ^= A[t][j] * box[i][j]

# S-boxes and inverse S-boxes
S = [0] * 256
Si = [0] * 256
for i in range(256):
    S[i] = cox[i][0] << 7
    for t in range(1, 8):
        S[i] ^= cox[i][t] << (7 - t)
    Si[S[i] & 0xFF] = i

# T-boxes
G = [[2, 1, 1, 3],
     [3, 2, 1, 1],
     [1, 3, 2, 1],
     [1, 1, 3, 2]]

AA = [[0] * 8 for i in range(4)]

for i in range(4):
    for j in range(4):
        AA[i][j] = G[i][j]
        AA[i][i + 4] = 1

# Gauss-Jordan elimination over GF(2^8) to invert G into iG.
for i in range(4):
    pivot = AA[i][i]
    if pivot == 0:
        t = i + 1
        while AA[t][i] == 0 and t < 4:
            t += 1
        assert t != 4, 'G matrix must be invertible'
        for j in range(8):
            AA[i][j], AA[t][j] = AA[t][j], AA[i][j]
        pivot = AA[i][i]
    for j in range(8):
        if AA[i][j] != 0:
            AA[i][j] = alog[(255 + log[AA[i][j] & 0xFF] -
                             log[pivot & 0xFF]) % 255]
    for t in range(4):
        if i != t:
            for j in range(i + 1, 8):
                AA[t][j] ^= mul(AA[i][j], AA[t][i])
            AA[t][i] = 0

iG = [[0] * 4 for i in range(4)]

for i in range(4):
    for j in range(4):
        iG[i][j] = AA[i][j + 4]

def mul4(a, bs):
    if a == 0:
        return 0
    r = 0
    for b in bs:
        r <<= 8
        if b != 0:
            r = r | mul(a, b)
    return r

T1 = []
T2 = []
T3 = []
T4 = []
T5 = []
T6 = []
T7 = []
T8 = []
U1 = []
U2 = []
U3 = []
U4 = []

for t in range(256):
    s = S[t]
    T1.append(mul4(s, G[0]))
    T2.append(mul4(s, G[1]))
    T3.append(mul4(s, G[2]))
    T4.append(mul4(s, G[3]))

    s = Si[t]
    T5.append(mul4(s, iG[0]))
    T6.append(mul4(s, iG[1]))
    T7.append(mul4(s, iG[2]))
    T8.append(mul4(s, iG[3]))

    U1.append(mul4(t, iG[0]))
    U2.append(mul4(t, iG[1]))
    U3.append(mul4(t, iG[2]))
    U4.append(mul4(t, iG[3]))

# round constants
rcon = [1]
r = 1
for t in range(1, 30):
    r = mul(2, r)
    rcon.append(r)

# Drop the scratch names used during table generation; only the S/Si/T/U
# tables, rcon, shifts and num_rounds are needed at run time.
del A
del AA
del pivot
del B
del G
del box
del log
del alog
del i
del j
del r
del s
del t
del mul
del mul4
del cox
del iG

class rijndael:
    """Rijndael cipher with a precomputed key schedule.

    One instance per (key, block_size) pair; encrypt/decrypt operate on a
    single raw block (ECB, no padding, no chaining).
    """

    def __init__(self, key, block_size=16):
        if block_size != 16 and block_size != 24 and block_size != 32:
            raise ValueError('Invalid block size: ' + str(block_size))
        if len(key) != 16 and len(key) != 24 and len(key) != 32:
            raise ValueError('Invalid key size: ' + str(len(key)))
        self.block_size = block_size

        ROUNDS = num_rounds[len(key)][block_size]
        BC = block_size // 4
        # encryption round keys
        Ke = [[0] * BC for i in range(ROUNDS + 1)]
        # decryption round keys
        Kd = [[0] * BC for i in range(ROUNDS + 1)]
        ROUND_KEY_COUNT = (ROUNDS + 1) * BC
        KC = len(key) // 4

        # copy user material bytes into temporary ints
        tk = []
        for i in range(0, KC):
            tk.append((ord(key[i * 4]) << 24) | (ord(key[i * 4 + 1]) << 16) |
                (ord(key[i * 4 + 2]) << 8) | ord(key[i * 4 + 3]))

        # copy values into round key arrays
        t = 0
        j = 0
        while j < KC and t < ROUND_KEY_COUNT:
            Ke[t // BC][t % BC] = tk[j]
            Kd[ROUNDS - (t // BC)][t % BC] = tk[j]
            j += 1
            t += 1
        tt = 0
        rconpointer = 0
        while t < ROUND_KEY_COUNT:
            # extrapolate using phi (the round key evolution function)
            tt = tk[KC - 1]
            tk[0] ^= (S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ \
                     (S[(tt >>  8) & 0xFF] & 0xFF) << 16 ^ \
                     (S[ tt        & 0xFF] & 0xFF) <<  8 ^ \
                     (S[(tt >> 24) & 0xFF] & 0xFF)       ^ \
                     (rcon[rconpointer]    & 0xFF) << 24
            rconpointer += 1
            if KC != 8:
                for i in range(1, KC):
                    tk[i] ^= tk[i - 1]
            else:
                for i in range(1, KC // 2):
                    tk[i] ^= tk[i - 1]
                tt = tk[KC // 2 - 1]
                tk[KC // 2] ^= (S[ tt        & 0xFF] & 0xFF)       ^ \
                               (S[(tt >>  8) & 0xFF] & 0xFF) <<  8 ^ \
                               (S[(tt >> 16) & 0xFF] & 0xFF) << 16 ^ \
                               (S[(tt >> 24) & 0xFF] & 0xFF) << 24
                for i in range(KC // 2 + 1, KC):
                    tk[i] ^= tk[i - 1]
            # copy values into round key arrays
            j = 0
            while j < KC and t < ROUND_KEY_COUNT:
                Ke[t // BC][t % BC] = tk[j]
                Kd[ROUNDS - (t // BC)][t % BC] = tk[j]
                j += 1
                t += 1
        # inverse MixColumn where needed
        for r in range(1, ROUNDS):
            for j in range(BC):
                tt = Kd[r][j]
                Kd[r][j] = U1[(tt >> 24) & 0xFF] ^ \
                           U2[(tt >> 16) & 0xFF] ^ \
                           U3[(tt >>  8) & 0xFF] ^ \
                           U4[ tt        & 0xFF]
        self.Ke = Ke
        self.Kd = Kd

    def encrypt(self, plaintext):
        """Encrypts a single block; raises ValueError on wrong length."""
        if len(plaintext) != self.block_size:
            raise ValueError('wrong block length, expected ' +
                str(self.block_size) + ' got ' + str(len(plaintext)))
        Ke = self.Ke

        BC = self.block_size // 4
        ROUNDS = len(Ke) - 1
        if BC == 4:
            SC = 0
        elif BC == 6:
            SC = 1
        else:
            SC = 2
        s1 = shifts[SC][1][0]
        s2 = shifts[SC][2][0]
        s3 = shifts[SC][3][0]
        a = [0] * BC
        # temporary work array
        t = []
        # plaintext to ints + key
        for i in range(BC):
            t.append((ord(plaintext[i * 4    ]) << 24 |
                      ord(plaintext[i * 4 + 1]) << 16 |
                      ord(plaintext[i * 4 + 2]) <<  8 |
                      ord(plaintext[i * 4 + 3])        ) ^ Ke[0][i])
        # apply round transforms
        for r in range(1, ROUNDS):
            for i in range(BC):
                a[i] = (T1[(t[ i           ] >> 24) & 0xFF] ^
                        T2[(t[(i + s1) % BC] >> 16) & 0xFF] ^
                        T3[(t[(i + s2) % BC] >>  8) & 0xFF] ^
                        T4[ t[(i + s3) % BC]        & 0xFF]  ) ^ Ke[r][i]
            t = copy.copy(a)
        # last round is special
        result = []
        for i in range(BC):
            tt = Ke[ROUNDS][i]
            result.append((S[(t[ i           ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
            result.append((S[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
            result.append((S[(t[(i + s2) % BC] >>  8) & 0xFF] ^ (tt >>  8)) & 0xFF)
            result.append((S[ t[(i + s3) % BC]        & 0xFF] ^  tt       ) & 0xFF)
        return ''.join(map(chr, result))

    def decrypt(self, ciphertext):
        """Decrypts a single block; raises ValueError on wrong length."""
        if len(ciphertext) != self.block_size:
            # BUG FIX: the original formatted this message with
            # len(plaintext) -- a name that does not exist in decrypt() --
            # so a wrong-length input raised NameError instead of ValueError.
            raise ValueError('wrong block length, expected ' +
                str(self.block_size) + ' got ' + str(len(ciphertext)))
        Kd = self.Kd

        BC = self.block_size // 4
        ROUNDS = len(Kd) - 1
        if BC == 4:
            SC = 0
        elif BC == 6:
            SC = 1
        else:
            SC = 2
        s1 = shifts[SC][1][1]
        s2 = shifts[SC][2][1]
        s3 = shifts[SC][3][1]
        a = [0] * BC
        # temporary work array
        t = [0] * BC
        # ciphertext to ints + key
        for i in range(BC):
            t[i] = (ord(ciphertext[i * 4    ]) << 24 |
                    ord(ciphertext[i * 4 + 1]) << 16 |
                    ord(ciphertext[i * 4 + 2]) <<  8 |
                    ord(ciphertext[i * 4 + 3])        ) ^ Kd[0][i]
        # apply round transforms
        for r in range(1, ROUNDS):
            for i in range(BC):
                a[i] = (T5[(t[ i           ] >> 24) & 0xFF] ^
                        T6[(t[(i + s1) % BC] >> 16) & 0xFF] ^
                        T7[(t[(i + s2) % BC] >>  8) & 0xFF] ^
                        T8[ t[(i + s3) % BC]        & 0xFF]  ) ^ Kd[r][i]
            t = copy.copy(a)
        # last round is special
        result = []
        for i in range(BC):
            tt = Kd[ROUNDS][i]
            result.append((Si[(t[ i           ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
            result.append((Si[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
            result.append((Si[(t[(i + s2) % BC] >>  8) & 0xFF] ^ (tt >>  8)) & 0xFF)
            result.append((Si[ t[(i + s3) % BC]        & 0xFF] ^  tt       ) & 0xFF)
        return ''.join(map(chr, result))

def encrypt(key, block):
    """One-shot encryption of a single block (block size taken from block)."""
    return rijndael(key, len(block)).encrypt(block)

def decrypt(key, block):
    """One-shot decryption of a single block (block size taken from block)."""
    return rijndael(key, len(block)).decrypt(block)

def test():
    """Round-trip self-test over all key-size/block-size combinations."""
    def t(kl, bl):
        b = 'b' * bl
        r = rijndael('a' * kl, bl)
        assert r.decrypt(r.encrypt(b)) == b
    t(16, 16)
    t(16, 24)
    t(16, 32)
    t(24, 16)
    t(24, 24)
    t(24, 32)
    t(32, 16)
    t(32, 24)
    t(32, 32)
helper functions for working with XML DOM nodes.""" + +import re +from compat import * + +import os +if os.name != "java": + from xml.dom import minidom + from xml.sax import saxutils + + def parseDocument(s): + return minidom.parseString(s) +else: + from javax.xml.parsers import * + import java + + builder = DocumentBuilderFactory.newInstance().newDocumentBuilder() + + def parseDocument(s): + stream = java.io.ByteArrayInputStream(java.lang.String(s).getBytes()) + return builder.parse(stream) + +def parseAndStripWhitespace(s): + try: + element = parseDocument(s).documentElement + except BaseException, e: + raise SyntaxError(str(e)) + stripWhitespace(element) + return element + +#Goes through a DOM tree and removes whitespace besides child elements, +#as long as this whitespace is correctly tab-ified +def stripWhitespace(element, tab=0): + element.normalize() + + lastSpacer = "\n" + ("\t"*tab) + spacer = lastSpacer + "\t" + + #Zero children aren't allowed (i.e. ) + #This makes writing output simpler, and matches Canonical XML + if element.childNodes.length==0: #DON'T DO len(element.childNodes) - doesn't work in Jython + raise SyntaxError("Empty XML elements not allowed") + + #If there's a single child, it must be text context + if element.childNodes.length==1: + if element.firstChild.nodeType == element.firstChild.TEXT_NODE: + #If it's an empty element, remove + if element.firstChild.data == lastSpacer: + element.removeChild(element.firstChild) + return + #If not text content, give an error + elif element.firstChild.nodeType == element.firstChild.ELEMENT_NODE: + raise SyntaxError("Bad whitespace under '%s'" % element.tagName) + else: + raise SyntaxError("Unexpected node type in XML document") + + #Otherwise there's multiple child element + child = element.firstChild + while child: + if child.nodeType == child.ELEMENT_NODE: + stripWhitespace(child, tab+1) + child = child.nextSibling + elif child.nodeType == child.TEXT_NODE: + if child == element.lastChild: + if 
child.data != lastSpacer: + raise SyntaxError("Bad whitespace under '%s'" % element.tagName) + elif child.data != spacer: + raise SyntaxError("Bad whitespace under '%s'" % element.tagName) + next = child.nextSibling + element.removeChild(child) + child = next + else: + raise SyntaxError("Unexpected node type in XML document") + + +def checkName(element, name): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Missing element: '%s'" % name) + + if name == None: + return + + if element.tagName != name: + raise SyntaxError("Wrong element name: should be '%s', is '%s'" % (name, element.tagName)) + +def getChild(element, index, name=None): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getChild()") + + child = element.childNodes.item(index) + if child == None: + raise SyntaxError("Missing child: '%s'" % name) + checkName(child, name) + return child + +def getChildIter(element, index): + class ChildIter: + def __init__(self, element, index): + self.element = element + self.index = index + + def next(self): + if self.index < len(self.element.childNodes): + retVal = self.element.childNodes.item(self.index) + self.index += 1 + else: + retVal = None + return retVal + + def checkEnd(self): + if self.index != len(self.element.childNodes): + raise SyntaxError("Too many elements under: '%s'" % self.element.tagName) + return ChildIter(element, index) + +def getChildOrNone(element, index): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getChild()") + child = element.childNodes.item(index) + return child + +def getLastChild(element, index, name=None): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getLastChild()") + + child = element.childNodes.item(index) + if child == None: + raise SyntaxError("Missing child: '%s'" % name) + if child != element.lastChild: + raise SyntaxError("Too many elements under: '%s'" % element.tagName) + checkName(child, 
name) + return child + +#Regular expressions for syntax-checking attribute and element content +nsRegEx = "http://trevp.net/cryptoID\Z" +cryptoIDRegEx = "([a-km-z3-9]{5}\.){3}[a-km-z3-9]{5}\Z" +urlRegEx = "http(s)?://.{1,100}\Z" +sha1Base64RegEx = "[A-Za-z0-9+/]{27}=\Z" +base64RegEx = "[A-Za-z0-9+/]+={0,4}\Z" +certsListRegEx = "(0)?(1)?(2)?(3)?(4)?(5)?(6)?(7)?(8)?(9)?\Z" +keyRegEx = "[A-Z]\Z" +keysListRegEx = "(A)?(B)?(C)?(D)?(E)?(F)?(G)?(H)?(I)?(J)?(K)?(L)?(M)?(N)?(O)?(P)?(Q)?(R)?(S)?(T)?(U)?(V)?(W)?(X)?(Y)?(Z)?\Z" +dateTimeRegEx = "\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ\Z" +shortStringRegEx = ".{1,100}\Z" +exprRegEx = "[a-zA-Z0-9 ,()]{1,200}\Z" +notAfterDeltaRegEx = "0|([1-9][0-9]{0,8})\Z" #A number from 0 to (1 billion)-1 +booleanRegEx = "(true)|(false)" + +def getReqAttribute(element, attrName, regEx=""): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getReqAttribute()") + + value = element.getAttribute(attrName) + if not value: + raise SyntaxError("Missing Attribute: " + attrName) + if not re.match(regEx, value): + raise SyntaxError("Bad Attribute Value for '%s': '%s' " % (attrName, value)) + element.removeAttribute(attrName) + return str(value) #de-unicode it; this is needed for bsddb, for example + +def getAttribute(element, attrName, regEx=""): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getAttribute()") + + value = element.getAttribute(attrName) + if value: + if not re.match(regEx, value): + raise SyntaxError("Bad Attribute Value for '%s': '%s' " % (attrName, value)) + element.removeAttribute(attrName) + return str(value) #de-unicode it; this is needed for bsddb, for example + +def checkNoMoreAttributes(element): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in checkNoMoreAttributes()") + + if element.attributes.length!=0: + raise SyntaxError("Extra attributes on '%s'" % element.tagName) + +def getText(element, regEx=""): + textNode = 
element.firstChild + if textNode == None: + raise SyntaxError("Empty element '%s'" % element.tagName) + if textNode.nodeType != textNode.TEXT_NODE: + raise SyntaxError("Non-text node: '%s'" % element.tagName) + if not re.match(regEx, textNode.data): + raise SyntaxError("Bad Text Value for '%s': '%s' " % (element.tagName, textNode.data)) + return str(textNode.data) #de-unicode it; this is needed for bsddb, for example + +#Function for adding tabs to a string +def indent(s, steps, ch="\t"): + tabs = ch*steps + if s[-1] != "\n": + s = tabs + s.replace("\n", "\n"+tabs) + else: + s = tabs + s.replace("\n", "\n"+tabs) + s = s[ : -len(tabs)] + return s + +def escape(s): + return saxutils.escape(s) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/urlfetch.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/urlfetch.py new file mode 100644 index 0000000..890b257 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/urlfetch.py @@ -0,0 +1,247 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Provides HTTP functions for gdata.service to use on Google App Engine + +AppEngineHttpClient: Provides an HTTP request method which uses App Engine's + urlfetch API. Set the http_client member of a GDataService object to an + instance of an AppEngineHttpClient to allow the gdata library to run on + Google App Engine. 
+ +run_on_appengine: Function which will modify an existing GDataService object + to allow it to run on App Engine. It works by creating a new instance of + the AppEngineHttpClient and replacing the GDataService object's + http_client. + +HttpRequest: Function that wraps google.appengine.api.urlfetch.Fetch in a + common interface which is used by gdata.service.GDataService. In other + words, this module can be used as the gdata service request handler so + that all HTTP requests will be performed by the hosting Google App Engine + server. +""" + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import StringIO +import atom.service +import atom.http_interface +from google.appengine.api import urlfetch + + +def run_on_appengine(gdata_service): + """Modifies a GDataService object to allow it to run on App Engine. + + Args: + gdata_service: An instance of AtomService, GDataService, or any + of their subclasses which has an http_client member. + """ + gdata_service.http_client = AppEngineHttpClient() + + +class AppEngineHttpClient(atom.http_interface.GenericHttpClient): + def __init__(self, headers=None): + self.debug = False + self.headers = headers or {} + + def request(self, operation, url, data=None, headers=None): + """Performs an HTTP call to the server, supports GET, POST, PUT, and + DELETE. + + Usage example, perform and HTTP GET on http://www.google.com/: + import atom.http + client = atom.http.HttpClient() + http_response = client.request('GET', 'http://www.google.com/') + + Args: + operation: str The HTTP operation to be performed. This is usually one + of 'GET', 'POST', 'PUT', or 'DELETE' + data: filestream, list of parts, or other object which can be converted + to a string. Should be set to None when performing a GET or DELETE. + If data is a file-like object which can be read, this method will + read a chunk of 100K bytes at a time and send them. + If the data is a list of parts to be sent, each part will be + evaluated and sent. 
+ url: The full URL to which the request should be sent. Can be a string + or atom.url.Url. + headers: dict of strings. HTTP headers which should be sent + in the request. + """ + all_headers = self.headers.copy() + if headers: + all_headers.update(headers) + + # Construct the full payload. + # Assume that data is None or a string. + data_str = data + if data: + if isinstance(data, list): + # If data is a list of different objects, convert them all to strings + # and join them together. + converted_parts = [__ConvertDataPart(x) for x in data] + data_str = ''.join(converted_parts) + else: + data_str = __ConvertDataPart(data) + + # If the list of headers does not include a Content-Length, attempt to + # calculate it based on the data object. + if data and 'Content-Length' not in all_headers: + all_headers['Content-Length'] = len(data_str) + + # Set the content type to the default value if none was set. + if 'Content-Type' not in all_headers: + all_headers['Content-Type'] = 'application/atom+xml' + + # Lookup the urlfetch operation which corresponds to the desired HTTP verb. + if operation == 'GET': + method = urlfetch.GET + elif operation == 'POST': + method = urlfetch.POST + elif operation == 'PUT': + method = urlfetch.PUT + elif operation == 'DELETE': + method = urlfetch.DELETE + else: + method = None + return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str, + method=method, headers=all_headers)) + + +def HttpRequest(service, operation, data, uri, extra_headers=None, + url_params=None, escape_params=True, content_type='application/atom+xml'): + """Performs an HTTP call to the server, supports GET, POST, PUT, and DELETE. + + This function is deprecated, use AppEngineHttpClient.request instead. + + To use this module with gdata.service, you can set this module to be the + http_request_handler so that HTTP requests use Google App Engine's urlfetch. 
+ import gdata.service + import gdata.urlfetch + gdata.service.http_request_handler = gdata.urlfetch + + Args: + service: atom.AtomService object which contains some of the parameters + needed to make the request. The following members are used to + construct the HTTP call: server (str), additional_headers (dict), + port (int), and ssl (bool). + operation: str The HTTP operation to be performed. This is usually one of + 'GET', 'POST', 'PUT', or 'DELETE' + data: filestream, list of parts, or other object which can be + converted to a string. + Should be set to None when performing a GET or PUT. + If data is a file-like object which can be read, this method will read + a chunk of 100K bytes at a time and send them. + If the data is a list of parts to be sent, each part will be evaluated + and sent. + uri: The beginning of the URL to which the request should be sent. + Examples: '/', '/base/feeds/snippets', + '/m8/feeds/contacts/default/base' + extra_headers: dict of strings. HTTP headers which should be sent + in the request. These headers are in addition to those stored in + service.additional_headers. + url_params: dict of strings. Key value pairs to be added to the URL as + URL parameters. For example {'foo':'bar', 'test':'param'} will + become ?foo=bar&test=param. + escape_params: bool default True. If true, the keys and values in + url_params will be URL escaped when the form is constructed + (Special characters converted to %XX form.) + content_type: str The MIME type for the data being sent. Defaults to + 'application/atom+xml', this is only used if data is set. + """ + full_uri = atom.service.BuildUri(uri, url_params, escape_params) + (server, port, ssl, partial_uri) = atom.service.ProcessUrl(service, full_uri) + # Construct the full URL for the request. + if ssl: + full_url = 'https://%s%s' % (server, partial_uri) + else: + full_url = 'http://%s%s' % (server, partial_uri) + + # Construct the full payload. + # Assume that data is None or a string. 
+ data_str = data + if data: + if isinstance(data, list): + # If data is a list of different objects, convert them all to strings + # and join them together. + converted_parts = [__ConvertDataPart(x) for x in data] + data_str = ''.join(converted_parts) + else: + data_str = __ConvertDataPart(data) + + # Construct the dictionary of HTTP headers. + headers = {} + if isinstance(service.additional_headers, dict): + headers = service.additional_headers.copy() + if isinstance(extra_headers, dict): + for header, value in extra_headers.iteritems(): + headers[header] = value + # Add the content type header (we don't need to calculate content length, + # since urlfetch.Fetch will calculate for us). + if content_type: + headers['Content-Type'] = content_type + + # Lookup the urlfetch operation which corresponds to the desired HTTP verb. + if operation == 'GET': + method = urlfetch.GET + elif operation == 'POST': + method = urlfetch.POST + elif operation == 'PUT': + method = urlfetch.PUT + elif operation == 'DELETE': + method = urlfetch.DELETE + else: + method = None + return HttpResponse(urlfetch.Fetch(url=full_url, payload=data_str, + method=method, headers=headers)) + + +def __ConvertDataPart(data): + if not data or isinstance(data, str): + return data + elif hasattr(data, 'read'): + # data is a file like object, so read it completely. + return data.read() + # The data object was not a file. + # Try to convert to a string and send the data. + return str(data) + + +class HttpResponse(object): + """Translates a urlfetch resoinse to look like an hhtplib resoinse. + + Used to allow the resoinse from HttpRequest to be usable by gdata.service + methods. 
+ """ + + def __init__(self, urlfetch_response): + self.body = StringIO.StringIO(urlfetch_response.content) + self.headers = urlfetch_response.headers + self.status = urlfetch_response.status_code + self.reason = '' + + def read(self, length=None): + if not length: + return self.body.read() + else: + return self.body.read(length) + + def getheader(self, name): + if not self.headers.has_key(name): + return self.headers[name.lower()] + return self.headers[name] + diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/webmastertools/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/webmastertools/__init__.py new file mode 100644 index 0000000..c40a641 --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/webmastertools/__init__.py @@ -0,0 +1,542 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Yu-Jie Lin +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains extensions to Atom objects used with Google Webmaster Tools.""" + + +__author__ = 'livibetter (Yu-Jie Lin)' + + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import atom +import gdata + + +# XML namespaces which are often used in Google Webmaster Tools entities. 
+GWEBMASTERTOOLS_NAMESPACE = 'http://schemas.google.com/webmasters/tools/2007' +GWEBMASTERTOOLS_TEMPLATE = '{http://schemas.google.com/webmasters/tools/2007}%s' + + +class Indexed(atom.AtomBase): + _tag = 'indexed' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def IndexedFromString(xml_string): + return atom.CreateClassFromXMLString(Indexed, xml_string) + + +class Crawled(atom.Date): + _tag = 'crawled' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def CrawledFromString(xml_string): + return atom.CreateClassFromXMLString(Crawled, xml_string) + + +class GeoLocation(atom.AtomBase): + _tag = 'geolocation' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def GeoLocationFromString(xml_string): + return atom.CreateClassFromXMLString(GeoLocation, xml_string) + + +class PreferredDomain(atom.AtomBase): + _tag = 'preferred-domain' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def PreferredDomainFromString(xml_string): + return atom.CreateClassFromXMLString(PreferredDomain, xml_string) + + +class CrawlRate(atom.AtomBase): + _tag = 'crawl-rate' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def CrawlRateFromString(xml_string): + return atom.CreateClassFromXMLString(CrawlRate, xml_string) + + +class EnhancedImageSearch(atom.AtomBase): + _tag = 'enhanced-image-search' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def EnhancedImageSearchFromString(xml_string): + return atom.CreateClassFromXMLString(EnhancedImageSearch, xml_string) + + +class Verified(atom.AtomBase): + _tag = 'verified' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def VerifiedFromString(xml_string): + return atom.CreateClassFromXMLString(Verified, xml_string) + + +class VerificationMethodMeta(atom.AtomBase): + _tag = 'meta' + _namespace = atom.ATOM_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + _attributes['content'] = 'content' + + def __init__(self, text=None, name=None, content=None, + extension_elements=None, 
extension_attributes=None): + self.text = text + self.name = name + self.content = content + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def VerificationMethodMetaFromString(xml_string): + return atom.CreateClassFromXMLString(VerificationMethodMeta, xml_string) + + +class VerificationMethod(atom.AtomBase): + _tag = 'verification-method' + _namespace = GWEBMASTERTOOLS_NAMESPACE + _children = atom.Text._children.copy() + _attributes = atom.Text._attributes.copy() + _children['{%s}meta' % atom.ATOM_NAMESPACE] = ( + 'meta', VerificationMethodMeta) + _attributes['in-use'] = 'in_use' + + def __init__(self, text=None, in_use=None, meta=None, + extension_elements=None, extension_attributes=None): + self.text = text + self.in_use = in_use + self.meta = meta + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def VerificationMethodFromString(xml_string): + return atom.CreateClassFromXMLString(VerificationMethod, xml_string) + + +class MarkupLanguage(atom.AtomBase): + _tag = 'markup-language' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def MarkupLanguageFromString(xml_string): + return atom.CreateClassFromXMLString(MarkupLanguage, xml_string) + + +class SitemapMobile(atom.AtomBase): + _tag = 'sitemap-mobile' + _namespace = GWEBMASTERTOOLS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}markup-language' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'markup_language', [MarkupLanguage]) + + def __init__(self, markup_language=None, + extension_elements=None, extension_attributes=None, text=None): + + self.markup_language = markup_language or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SitemapMobileFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapMobile, xml_string) + + 
+class SitemapMobileMarkupLanguage(atom.AtomBase): + _tag = 'sitemap-mobile-markup-language' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def SitemapMobileMarkupLanguageFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapMobileMarkupLanguage, xml_string) + + +class PublicationLabel(atom.AtomBase): + _tag = 'publication-label' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def PublicationLabelFromString(xml_string): + return atom.CreateClassFromXMLString(PublicationLabel, xml_string) + + +class SitemapNews(atom.AtomBase): + _tag = 'sitemap-news' + _namespace = GWEBMASTERTOOLS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}publication-label' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'publication_label', [PublicationLabel]) + + def __init__(self, publication_label=None, + extension_elements=None, extension_attributes=None, text=None): + + self.publication_label = publication_label or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SitemapNewsFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapNews, xml_string) + + +class SitemapNewsPublicationLabel(atom.AtomBase): + _tag = 'sitemap-news-publication-label' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def SitemapNewsPublicationLabelFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapNewsPublicationLabel, xml_string) + + +class SitemapLastDownloaded(atom.Date): + _tag = 'sitemap-last-downloaded' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def SitemapLastDownloadedFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapLastDownloaded, xml_string) + + +class SitemapType(atom.AtomBase): + _tag = 'sitemap-type' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def SitemapTypeFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapType, xml_string) + + +class SitemapStatus(atom.AtomBase): 
+ _tag = 'sitemap-status' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def SitemapStatusFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapStatus, xml_string) + + +class SitemapUrlCount(atom.AtomBase): + _tag = 'sitemap-url-count' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def SitemapUrlCountFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapUrlCount, xml_string) + + +class LinkFinder(atom.LinkFinder): + """An "interface" providing methods to find link elements + + SitesEntry elements often contain multiple links which differ in the rel + attribute or content type. Often, developers are interested in a specific + type of link so this class provides methods to find specific classes of links. + + This class is used as a mixin in SitesEntry. + """ + + def GetSelfLink(self): + """Find the first link with rel set to 'self' + + Returns: + An atom.Link or none if none of the links had rel equal to 'self' + """ + + for a_link in self.link: + if a_link.rel == 'self': + return a_link + return None + + def GetEditLink(self): + for a_link in self.link: + if a_link.rel == 'edit': + return a_link + return None + + def GetPostLink(self): + """Get a link containing the POST target URL. + + The POST target URL is used to insert new entries. + + Returns: + A link object with a rel matching the POST type. 
+ """ + for a_link in self.link: + if a_link.rel == 'http://schemas.google.com/g/2005#post': + return a_link + return None + + def GetFeedLink(self): + for a_link in self.link: + if a_link.rel == 'http://schemas.google.com/g/2005#feed': + return a_link + return None + + +class SitesEntry(atom.Entry, LinkFinder): + """A Google Webmaster Tools meta Entry flavor of an Atom Entry """ + + _tag = atom.Entry._tag + _namespace = atom.Entry._namespace + _children = atom.Entry._children.copy() + _attributes = atom.Entry._attributes.copy() + _children['{%s}entryLink' % gdata.GDATA_NAMESPACE] = ( + 'entry_link', [gdata.EntryLink]) + _children['{%s}indexed' % GWEBMASTERTOOLS_NAMESPACE] = ('indexed', Indexed) + _children['{%s}crawled' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'crawled', Crawled) + _children['{%s}geolocation' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'geolocation', GeoLocation) + _children['{%s}preferred-domain' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'preferred_domain', PreferredDomain) + _children['{%s}crawl-rate' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'crawl_rate', CrawlRate) + _children['{%s}enhanced-image-search' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'enhanced_image_search', EnhancedImageSearch) + _children['{%s}verified' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'verified', Verified) + _children['{%s}verification-method' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'verification_method', [VerificationMethod]) + + def __GetId(self): + return self.__id + + # This method was created to strip the unwanted whitespace from the id's + # text node. 
+ def __SetId(self, id): + self.__id = id + if id is not None and id.text is not None: + self.__id.text = id.text.strip() + + id = property(__GetId, __SetId) + + def __init__(self, category=None, content=None, + atom_id=None, link=None, title=None, updated=None, + entry_link=None, indexed=None, crawled=None, + geolocation=None, preferred_domain=None, crawl_rate=None, + enhanced_image_search=None, + verified=None, verification_method=None, + extension_elements=None, extension_attributes=None, text=None): + atom.Entry.__init__(self, category=category, + content=content, atom_id=atom_id, link=link, + title=title, updated=updated, text=text) + + self.entry_link = entry_link or [] + self.indexed = indexed + self.crawled = crawled + self.geolocation = geolocation + self.preferred_domain = preferred_domain + self.crawl_rate = crawl_rate + self.enhanced_image_search = enhanced_image_search + self.verified = verified + self.verification_method = verification_method or [] + + +def SitesEntryFromString(xml_string): + return atom.CreateClassFromXMLString(SitesEntry, xml_string) + + +class SitesFeed(atom.Feed, LinkFinder): + """A Google Webmaster Tools meta Sites feed flavor of an Atom Feed""" + + _tag = atom.Feed._tag + _namespace = atom.Feed._namespace + _children = atom.Feed._children.copy() + _attributes = atom.Feed._attributes.copy() + _children['{%s}startIndex' % gdata.OPENSEARCH_NAMESPACE] = ( + 'start_index', gdata.StartIndex) + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [SitesEntry]) + del _children['{%s}generator' % atom.ATOM_NAMESPACE] + del _children['{%s}author' % atom.ATOM_NAMESPACE] + del _children['{%s}contributor' % atom.ATOM_NAMESPACE] + del _children['{%s}logo' % atom.ATOM_NAMESPACE] + del _children['{%s}icon' % atom.ATOM_NAMESPACE] + del _children['{%s}rights' % atom.ATOM_NAMESPACE] + del _children['{%s}subtitle' % atom.ATOM_NAMESPACE] + + def __GetId(self): + return self.__id + + def __SetId(self, id): + self.__id = id + if id is not None and 
id.text is not None: + self.__id.text = id.text.strip() + + id = property(__GetId, __SetId) + + def __init__(self, start_index=None, atom_id=None, title=None, entry=None, + category=None, link=None, updated=None, + extension_elements=None, extension_attributes=None, text=None): + """Constructor for Source + + Args: + category: list (optional) A list of Category instances + id: Id (optional) The entry's Id element + link: list (optional) A list of Link instances + title: Title (optional) the entry's title element + updated: Updated (optional) the entry's updated element + entry: list (optional) A list of the Entry instances contained in the + feed. + text: String (optional) The text contents of the element. This is the + contents of the Entry's XML text node. + (Example: This is the text) + extension_elements: list (optional) A list of ExtensionElement instances + which are children of this element. + extension_attributes: dict (optional) A dictionary of strings which are + the values for additional XML attributes of this element. 
+ """ + + self.start_index = start_index + self.category = category or [] + self.id = atom_id + self.link = link or [] + self.title = title + self.updated = updated + self.entry = entry or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SitesFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SitesFeed, xml_string) + + +class SitemapsEntry(atom.Entry, LinkFinder): + """A Google Webmaster Tools meta Sitemaps Entry flavor of an Atom Entry """ + + _tag = atom.Entry._tag + _namespace = atom.Entry._namespace + _children = atom.Entry._children.copy() + _attributes = atom.Entry._attributes.copy() + _children['{%s}sitemap-type' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_type', SitemapType) + _children['{%s}sitemap-status' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_status', SitemapStatus) + _children['{%s}sitemap-last-downloaded' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_last_downloaded', SitemapLastDownloaded) + _children['{%s}sitemap-url-count' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_url_count', SitemapUrlCount) + _children['{%s}sitemap-mobile-markup-language' % GWEBMASTERTOOLS_NAMESPACE] \ + = ('sitemap_mobile_markup_language', SitemapMobileMarkupLanguage) + _children['{%s}sitemap-news-publication-label' % GWEBMASTERTOOLS_NAMESPACE] \ + = ('sitemap_news_publication_label', SitemapNewsPublicationLabel) + + def __GetId(self): + return self.__id + + # This method was created to strip the unwanted whitespace from the id's + # text node. 
+ def __SetId(self, id): + self.__id = id + if id is not None and id.text is not None: + self.__id.text = id.text.strip() + + id = property(__GetId, __SetId) + + def __init__(self, category=None, content=None, + atom_id=None, link=None, title=None, updated=None, + sitemap_type=None, sitemap_status=None, sitemap_last_downloaded=None, + sitemap_url_count=None, sitemap_mobile_markup_language=None, + sitemap_news_publication_label=None, + extension_elements=None, extension_attributes=None, text=None): + atom.Entry.__init__(self, category=category, + content=content, atom_id=atom_id, link=link, + title=title, updated=updated, text=text) + + self.sitemap_type = sitemap_type + self.sitemap_status = sitemap_status + self.sitemap_last_downloaded = sitemap_last_downloaded + self.sitemap_url_count = sitemap_url_count + self.sitemap_mobile_markup_language = sitemap_mobile_markup_language + self.sitemap_news_publication_label = sitemap_news_publication_label + + +def SitemapsEntryFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapsEntry, xml_string) + + +class SitemapsFeed(atom.Feed, LinkFinder): + """A Google Webmaster Tools meta Sitemaps feed flavor of an Atom Feed""" + + _tag = atom.Feed._tag + _namespace = atom.Feed._namespace + _children = atom.Feed._children.copy() + _attributes = atom.Feed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [SitemapsEntry]) + _children['{%s}sitemap-mobile' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_mobile', SitemapMobile) + _children['{%s}sitemap-news' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_news', SitemapNews) + del _children['{%s}generator' % atom.ATOM_NAMESPACE] + del _children['{%s}author' % atom.ATOM_NAMESPACE] + del _children['{%s}contributor' % atom.ATOM_NAMESPACE] + del _children['{%s}logo' % atom.ATOM_NAMESPACE] + del _children['{%s}icon' % atom.ATOM_NAMESPACE] + del _children['{%s}rights' % atom.ATOM_NAMESPACE] + del _children['{%s}subtitle' % atom.ATOM_NAMESPACE] + + def 
__GetId(self): + return self.__id + + def __SetId(self, id): + self.__id = id + if id is not None and id.text is not None: + self.__id.text = id.text.strip() + + id = property(__GetId, __SetId) + + def __init__(self, category=None, content=None, + atom_id=None, link=None, title=None, updated=None, + entry=None, sitemap_mobile=None, sitemap_news=None, + extension_elements=None, extension_attributes=None, text=None): + + self.category = category or [] + self.id = atom_id + self.link = link or [] + self.title = title + self.updated = updated + self.entry = entry or [] + self.text = text + self.sitemap_mobile = sitemap_mobile + self.sitemap_news = sitemap_news + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SitemapsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapsFeed, xml_string) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/webmastertools/service.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/webmastertools/service.py new file mode 100644 index 0000000..cc51acd --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/webmastertools/service.py @@ -0,0 +1,507 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Yu-Jie Lin +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""GWebmasterToolsService extends the GDataService to streamline +Google Webmaster Tools operations. + + GWebmasterToolsService: Provides methods to query feeds and manipulate items. 
+ Extends GDataService. +""" + +__author__ = 'livibetter (Yu-Jie Lin)' + +import urllib +import gdata +import atom.service +import gdata.service +import gdata.webmastertools as webmastertools +import atom + + +FEED_BASE = 'https://www.google.com/webmasters/tools/feeds/' +SITES_FEED = FEED_BASE + 'sites/' +SITE_TEMPLATE = SITES_FEED + '%s' +SITEMAPS_FEED_TEMPLATE = FEED_BASE + '%(site_id)s/sitemaps/' +SITEMAP_TEMPLATE = SITEMAPS_FEED_TEMPLATE + '%(sitemap_id)s' + + +class Error(Exception): + pass + + +class RequestError(Error): + pass + + +class GWebmasterToolsService(gdata.service.GDataService): + """Client for the Google Webmaster Tools service.""" + + def __init__(self, email=None, password=None, source=None, + server='www.google.com', api_key=None, + additional_headers=None, handler=None): + gdata.service.GDataService.__init__(self, email=email, password=password, + service='sitemaps', source=source, + server=server, + additional_headers=additional_headers, + handler=handler) + + def GetSitesFeed(self, uri=SITES_FEED, + converter=webmastertools.SitesFeedFromString): + """Gets sites feed. + + Args: + uri: str (optional) URI to retrieve sites feed. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitesFeedFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesFeed object. + """ + return self.Get(uri, converter=converter) + + def AddSite(self, site_uri, uri=SITES_FEED, + url_params=None, escape_params=True, converter=None): + """Adds a site to Google Webmaster Tools. + + Args: + site_uri: str URI of which site to add. + uri: str (optional) URI to add a site. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. 
+ escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitesEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. + """ + + site_entry = webmastertools.SitesEntry() + site_entry.content = atom.Content(src=site_uri) + response = self.Post(site_entry, uri, + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + def DeleteSite(self, site_uri, uri=SITE_TEMPLATE, + url_params=None, escape_params=True): + """Removes a site from Google Webmaster Tools. + + Args: + site_uri: str URI of which site to remove. + uri: str (optional) A URI template to send DELETE request. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + True if the delete succeeded. + """ + + return self.Delete( + uri % urllib.quote_plus(site_uri), + url_params=url_params, escape_params=escape_params) + + def VerifySite(self, site_uri, verification_method, uri=SITE_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Requests a verification of a site. + + Args: + site_uri: str URI of which site to add sitemap for. + verification_method: str The method to verify a site. Valid values are + 'htmlpage', and 'metatag'. + uri: str (optional) URI template to update a site. + Default SITE_TEMPLATE. 
+ url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. + """ + + site_entry = webmastertools.SitesEntry( + atom_id=atom.Id(text=site_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sites-info'), + verification_method=webmastertools.VerificationMethod( + type=verification_method, in_user='true') + ) + response = self.Put( + site_entry, + uri % urllib.quote_plus(site_uri), + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + + def UpdateGeoLocation(self, site_uri, geolocation, uri=SITE_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Updates geolocation setting of a site. + + Args: + site_uri: str URI of which site to add sitemap for. + geolocation: str The geographic location. Valid values are listed in + http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 + uri: str (optional) URI template to update a site. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. 
Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. + """ + + site_entry = webmastertools.SitesEntry( + atom_id=atom.Id(text=site_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sites-info'), + geolocation=webmastertools.GeoLocation(text=geolocation) + ) + response = self.Put( + site_entry, + uri % urllib.quote_plus(site_uri), + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + def UpdateCrawlRate(self, site_uri, crawl_rate, uri=SITE_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Updates crawl rate setting of a site. + + Args: + site_uri: str URI of which site to add sitemap for. + crawl_rate: str The crawl rate for a site. Valid values are 'slower', + 'normal', and 'faster'. + uri: str (optional) URI template to update a site. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. 
+ """ + + site_entry = webmastertools.SitesEntry( + atom_id=atom.Id(text=site_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sites-info'), + crawl_rate=webmastertools.CrawlRate(text=crawl_rate) + ) + response = self.Put( + site_entry, + uri % urllib.quote_plus(site_uri), + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + def UpdatePreferredDomain(self, site_uri, preferred_domain, uri=SITE_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Updates preferred domain setting of a site. + + Note that if using 'preferwww', will also need www.example.com in account to + take effect. + + Args: + site_uri: str URI of which site to add sitemap for. + preferred_domain: str The preferred domain for a site. Valid values are 'none', + 'preferwww', and 'prefernowww'. + uri: str (optional) URI template to update a site. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. 
+ """ + + site_entry = webmastertools.SitesEntry( + atom_id=atom.Id(text=site_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sites-info'), + preferred_domain=webmastertools.PreferredDomain(text=preferred_domain) + ) + response = self.Put( + site_entry, + uri % urllib.quote_plus(site_uri), + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + def UpdateEnhancedImageSearch(self, site_uri, enhanced_image_search, + uri=SITE_TEMPLATE, url_params=None, escape_params=True, converter=None): + """Updates enhanced image search setting of a site. + + Args: + site_uri: str URI of which site to add sitemap for. + enhanced_image_search: str The enhanced image search setting for a site. + Valid values are 'true', and 'false'. + uri: str (optional) URI template to update a site. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. 
+ """ + + site_entry = webmastertools.SitesEntry( + atom_id=atom.Id(text=site_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sites-info'), + enhanced_image_search=webmastertools.EnhancedImageSearch( + text=enhanced_image_search) + ) + response = self.Put( + site_entry, + uri % urllib.quote_plus(site_uri), + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + def GetSitemapsFeed(self, site_uri, uri=SITEMAPS_FEED_TEMPLATE, + converter=webmastertools.SitemapsFeedFromString): + """Gets sitemaps feed of a site. + + Args: + site_uri: str (optional) URI of which site to retrieve its sitemaps feed. + uri: str (optional) URI to retrieve sites feed. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsFeedFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitemapsFeed object. + """ + return self.Get(uri % {'site_id': urllib.quote_plus(site_uri)}, + converter=converter) + + def AddSitemap(self, site_uri, sitemap_uri, sitemap_type='WEB', + uri=SITEMAPS_FEED_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Adds a regular sitemap to a site. + + Args: + site_uri: str URI of which site to add sitemap for. + sitemap_uri: str URI of sitemap to add to a site. + sitemap_type: str Type of added sitemap. Valid types: WEB, VIDEO, or CODE. + uri: str (optional) URI template to add a sitemap. + Default SITEMAP_FEED_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. 
+ escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitemapsEntry object. + """ + + sitemap_entry = webmastertools.SitemapsEntry( + atom_id=atom.Id(text=sitemap_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sitemap-regular'), + sitemap_type=webmastertools.SitemapType(text=sitemap_type)) + response = self.Post( + sitemap_entry, + uri % {'site_id': urllib.quote_plus(site_uri)}, + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitemapsEntryFromString(response.ToString()) + return response + + def AddMobileSitemap(self, site_uri, sitemap_uri, + sitemap_mobile_markup_language='XHTML', uri=SITEMAPS_FEED_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Adds a mobile sitemap to a site. + + Args: + site_uri: str URI of which site to add sitemap for. + sitemap_uri: str URI of sitemap to add to a site. + sitemap_mobile_markup_language: str Format of added sitemap. Valid types: + XHTML, WML, or cHTML. + uri: str (optional) URI template to add a sitemap. + Default SITEMAP_FEED_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. 
Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitemapsEntry object. + """ + # FIXME + sitemap_entry = webmastertools.SitemapsEntry( + atom_id=atom.Id(text=sitemap_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sitemap-mobile'), + sitemap_mobile_markup_language=\ + webmastertools.SitemapMobileMarkupLanguage( + text=sitemap_mobile_markup_language)) + print sitemap_entry + response = self.Post( + sitemap_entry, + uri % {'site_id': urllib.quote_plus(site_uri)}, + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitemapsEntryFromString(response.ToString()) + return response + + def AddNewsSitemap(self, site_uri, sitemap_uri, + sitemap_news_publication_label, uri=SITEMAPS_FEED_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Adds a news sitemap to a site. + + Args: + site_uri: str URI of which site to add sitemap for. + sitemap_uri: str URI of sitemap to add to a site. + sitemap_news_publication_label: str, list of str Publication Labels for + sitemap. + uri: str (optional) URI template to add a sitemap. + Default SITEMAP_FEED_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. 
+ + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitemapsEntry object. + """ + + sitemap_entry = webmastertools.SitemapsEntry( + atom_id=atom.Id(text=sitemap_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sitemap-news'), + sitemap_news_publication_label=[], + ) + if isinstance(sitemap_news_publication_label, str): + sitemap_news_publication_label = [sitemap_news_publication_label] + for label in sitemap_news_publication_label: + sitemap_entry.sitemap_news_publication_label.append( + webmastertools.SitemapNewsPublicationLabel(text=label)) + print sitemap_entry + response = self.Post( + sitemap_entry, + uri % {'site_id': urllib.quote_plus(site_uri)}, + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitemapsEntryFromString(response.ToString()) + return response + + def DeleteSitemap(self, site_uri, sitemap_uri, uri=SITEMAP_TEMPLATE, + url_params=None, escape_params=True): + """Removes a sitemap from a site. + + Args: + site_uri: str URI of which site to remove a sitemap from. + sitemap_uri: str URI of sitemap to remove from a site. + uri: str (optional) A URI template to send DELETE request. + Default SITEMAP_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + True if the delete succeeded. 
+ """ + + return self.Delete( + uri % {'site_id': urllib.quote_plus(site_uri), + 'sitemap_id': urllib.quote_plus(sitemap_uri)}, + url_params=url_params, escape_params=escape_params) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/youtube/__init__.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/youtube/__init__.py new file mode 100644 index 0000000..c41aaea --- /dev/null +++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/youtube/__init__.py @@ -0,0 +1,684 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__author__ = ('api.stephaniel@gmail.com (Stephanie Liu)' + ', api.jhartmann@gmail.com (Jochen Hartmann)') + +import atom +import gdata +import gdata.media as Media +import gdata.geo as Geo + +YOUTUBE_NAMESPACE = 'http://gdata.youtube.com/schemas/2007' +YOUTUBE_FORMAT = '{http://gdata.youtube.com/schemas/2007}format' +YOUTUBE_DEVELOPER_TAG_SCHEME = '%s/%s' % (YOUTUBE_NAMESPACE, + 'developertags.cat') +YOUTUBE_SUBSCRIPTION_TYPE_SCHEME = '%s/%s' % (YOUTUBE_NAMESPACE, + 'subscriptiontypes.cat') + +class Username(atom.AtomBase): + """The YouTube Username element""" + _tag = 'username' + _namespace = YOUTUBE_NAMESPACE + +class QueryString(atom.AtomBase): + """The YouTube QueryString element""" + _tag = 'queryString' + _namespace = YOUTUBE_NAMESPACE + + +class FirstName(atom.AtomBase): + """The YouTube FirstName element""" + _tag = 'firstName' + _namespace = YOUTUBE_NAMESPACE + + +class LastName(atom.AtomBase): + """The YouTube LastName element""" + _tag = 'lastName' + _namespace = YOUTUBE_NAMESPACE + + +class Age(atom.AtomBase): + """The YouTube Age element""" + _tag = 'age' + _namespace = YOUTUBE_NAMESPACE + + +class Books(atom.AtomBase): + """The YouTube Books element""" + _tag = 'books' + _namespace = YOUTUBE_NAMESPACE + + +class Gender(atom.AtomBase): + """The YouTube Gender element""" + _tag = 'gender' + _namespace = YOUTUBE_NAMESPACE + + +class Company(atom.AtomBase): + """The YouTube Company element""" + _tag = 'company' + _namespace = YOUTUBE_NAMESPACE + + +class Hobbies(atom.AtomBase): + """The YouTube Hobbies element""" + _tag = 'hobbies' + _namespace = YOUTUBE_NAMESPACE + + +class Hometown(atom.AtomBase): + """The YouTube Hometown element""" + _tag = 'hometown' + _namespace = YOUTUBE_NAMESPACE + + +class Location(atom.AtomBase): + """The YouTube Location element""" + _tag = 'location' + _namespace = YOUTUBE_NAMESPACE + + +class Movies(atom.AtomBase): + """The YouTube Movies element""" + _tag = 'movies' + _namespace = YOUTUBE_NAMESPACE + + +class 
Music(atom.AtomBase): + """The YouTube Music element""" + _tag = 'music' + _namespace = YOUTUBE_NAMESPACE + + +class Occupation(atom.AtomBase): + """The YouTube Occupation element""" + _tag = 'occupation' + _namespace = YOUTUBE_NAMESPACE + + +class School(atom.AtomBase): + """The YouTube School element""" + _tag = 'school' + _namespace = YOUTUBE_NAMESPACE + + +class Relationship(atom.AtomBase): + """The YouTube Relationship element""" + _tag = 'relationship' + _namespace = YOUTUBE_NAMESPACE + + +class Recorded(atom.AtomBase): + """The YouTube Recorded element""" + _tag = 'recorded' + _namespace = YOUTUBE_NAMESPACE + + +class Statistics(atom.AtomBase): + """The YouTube Statistics element.""" + _tag = 'statistics' + _namespace = YOUTUBE_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['viewCount'] = 'view_count' + _attributes['videoWatchCount'] = 'video_watch_count' + _attributes['subscriberCount'] = 'subscriber_count' + _attributes['lastWebAccess'] = 'last_web_access' + _attributes['favoriteCount'] = 'favorite_count' + + def __init__(self, view_count=None, video_watch_count=None, + favorite_count=None, subscriber_count=None, last_web_access=None, + extension_elements=None, extension_attributes=None, text=None): + + self.view_count = view_count + self.video_watch_count = video_watch_count + self.subscriber_count = subscriber_count + self.last_web_access = last_web_access + self.favorite_count = favorite_count + + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class Status(atom.AtomBase): + """The YouTube Status element""" + _tag = 'status' + _namespace = YOUTUBE_NAMESPACE + + +class Position(atom.AtomBase): + """The YouTube Position element. 
The position in a playlist feed.""" + _tag = 'position' + _namespace = YOUTUBE_NAMESPACE + + +class Racy(atom.AtomBase): + """The YouTube Racy element.""" + _tag = 'racy' + _namespace = YOUTUBE_NAMESPACE + +class Description(atom.AtomBase): + """The YouTube Description element.""" + _tag = 'description' + _namespace = YOUTUBE_NAMESPACE + + +class Private(atom.AtomBase): + """The YouTube Private element.""" + _tag = 'private' + _namespace = YOUTUBE_NAMESPACE + + +class NoEmbed(atom.AtomBase): + """The YouTube VideoShare element. Whether a video can be embedded or not.""" + _tag = 'noembed' + _namespace = YOUTUBE_NAMESPACE + + +class Comments(atom.AtomBase): + """The GData Comments element""" + _tag = 'comments' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + + def __init__(self, feed_link=None, extension_elements=None, + extension_attributes=None, text=None): + + self.feed_link = feed_link + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class Rating(atom.AtomBase): + """The GData Rating element""" + _tag = 'rating' + _namespace = gdata.GDATA_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['min'] = 'min' + _attributes['max'] = 'max' + _attributes['numRaters'] = 'num_raters' + _attributes['average'] = 'average' + + def __init__(self, min=None, max=None, + num_raters=None, average=None, extension_elements=None, + extension_attributes=None, text=None): + + self.min = min + self.max = max + self.num_raters = num_raters + self.average = average + + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class YouTubePlaylistVideoEntry(gdata.GDataEntry): + """Represents a YouTubeVideoEntry on a YouTubePlaylist.""" + _tag = 
gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + _children['{%s}description' % YOUTUBE_NAMESPACE] = ('description', + Description) + _children['{%s}rating' % gdata.GDATA_NAMESPACE] = ('rating', Rating) + _children['{%s}comments' % gdata.GDATA_NAMESPACE] = ('comments', Comments) + _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics) + _children['{%s}location' % YOUTUBE_NAMESPACE] = ('location', Location) + _children['{%s}position' % YOUTUBE_NAMESPACE] = ('position', Position) + _children['{%s}group' % gdata.media.MEDIA_NAMESPACE] = ('media', Media.Group) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, title=None, + updated=None, feed_link=None, description=None, + rating=None, comments=None, statistics=None, + location=None, position=None, media=None, + extension_elements=None, extension_attributes=None): + + self.feed_link = feed_link + self.description = description + self.rating = rating + self.comments = comments + self.statistics = statistics + self.location = location + self.position = position + self.media = media + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, + link=link, published=published, title=title, + updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + +class YouTubeVideoCommentEntry(gdata.GDataEntry): + """Represents a comment on YouTube.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + +class YouTubeSubscriptionEntry(gdata.GDataEntry): + """Represents a subscription entry on YouTube.""" + _tag = gdata.GDataEntry._tag + 
_namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}username' % YOUTUBE_NAMESPACE] = ('username', Username) + _children['{%s}queryString' % YOUTUBE_NAMESPACE] = ( + 'query_string', QueryString) + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, title=None, + updated=None, username=None, query_string=None, feed_link=None, + extension_elements=None, extension_attributes=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, updated=updated) + + self.username = username + self.query_string = query_string + self.feed_link = feed_link + + + def GetSubscriptionType(self): + """Retrieve the type of this subscription. + + Returns: + A string that is either 'channel, 'query' or 'favorites' + """ + for category in self.category: + if category.scheme == YOUTUBE_SUBSCRIPTION_TYPE_SCHEME: + return category.term + + +class YouTubeVideoResponseEntry(gdata.GDataEntry): + """Represents a video response. 
""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}rating' % gdata.GDATA_NAMESPACE] = ('rating', Rating) + _children['{%s}noembed' % YOUTUBE_NAMESPACE] = ('noembed', NoEmbed) + _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics) + _children['{%s}racy' % YOUTUBE_NAMESPACE] = ('racy', Racy) + _children['{%s}group' % gdata.media.MEDIA_NAMESPACE] = ('media', Media.Group) + + def __init__(self, author=None, category=None, content=None, atom_id=None, + link=None, published=None, title=None, updated=None, rating=None, + noembed=None, statistics=None, racy=None, media=None, + extension_elements=None, extension_attributes=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, updated=updated) + + self.rating = rating + self.noembed = noembed + self.statistics = statistics + self.racy = racy + self.media = media or Media.Group() + + +class YouTubeContactEntry(gdata.GDataEntry): + """Represents a contact entry.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}username' % YOUTUBE_NAMESPACE] = ('username', Username) + _children['{%s}status' % YOUTUBE_NAMESPACE] = ('status', Status) + + + def __init__(self, author=None, category=None, content=None, atom_id=None, + link=None, published=None, title=None, updated=None, + username=None, status=None, extension_elements=None, + extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, updated=updated) + + self.username = username + self.status = status + + +class 
YouTubeVideoEntry(gdata.GDataEntry): + """Represents a video on YouTube.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}rating' % gdata.GDATA_NAMESPACE] = ('rating', Rating) + _children['{%s}comments' % gdata.GDATA_NAMESPACE] = ('comments', Comments) + _children['{%s}noembed' % YOUTUBE_NAMESPACE] = ('noembed', NoEmbed) + _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics) + _children['{%s}recorded' % YOUTUBE_NAMESPACE] = ('recorded', Recorded) + _children['{%s}racy' % YOUTUBE_NAMESPACE] = ('racy', Racy) + _children['{%s}group' % gdata.media.MEDIA_NAMESPACE] = ('media', Media.Group) + _children['{%s}where' % gdata.geo.GEORSS_NAMESPACE] = ('geo', Geo.Where) + + def __init__(self, author=None, category=None, content=None, atom_id=None, + link=None, published=None, title=None, updated=None, rating=None, + noembed=None, statistics=None, racy=None, media=None, geo=None, + recorded=None, comments=None, extension_elements=None, + extension_attributes=None): + + self.rating = rating + self.noembed = noembed + self.statistics = statistics + self.racy = racy + self.comments = comments + self.media = media or Media.Group() + self.geo = geo + self.recorded = recorded + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + def GetSwfUrl(self): + """Return the URL for the embeddable Video + + Returns: + URL of the embeddable video + """ + if self.media.content: + for content in self.media.content: + if content.extension_attributes[YOUTUBE_FORMAT] == '5': + return content.url + else: + return None + + def AddDeveloperTags(self, developer_tags): + """Add a developer tag for this entry. 
+ + Developer tags can only be set during the initial upload. + + Arguments: + developer_tags: A list of developer tags as strings. + + Returns: + A list of all developer tags for this video entry. + """ + for tag_text in developer_tags: + self.media.category.append(gdata.media.Category( + text=tag_text, label=tag_text, scheme=YOUTUBE_DEVELOPER_TAG_SCHEME)) + + return self.GetDeveloperTags() + + def GetDeveloperTags(self): + """Retrieve developer tags for this video entry.""" + developer_tags = [] + for category in self.media.category: + if category.scheme == YOUTUBE_DEVELOPER_TAG_SCHEME: + developer_tags.append(category) + if len(developer_tags) > 0: + return developer_tags + + def GetYouTubeCategoryAsString(self): + """Convenience method to return the YouTube category as string. + + YouTubeVideoEntries can contain multiple Category objects with differing + schemes. This method returns only the category with the correct + scheme, ignoring developer tags. + """ + for category in self.media.category: + if category.scheme != YOUTUBE_DEVELOPER_TAG_SCHEME: + return category.text + +class YouTubeUserEntry(gdata.GDataEntry): + """Represents a user on YouTube.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}username' % YOUTUBE_NAMESPACE] = ('username', Username) + _children['{%s}firstName' % YOUTUBE_NAMESPACE] = ('first_name', FirstName) + _children['{%s}lastName' % YOUTUBE_NAMESPACE] = ('last_name', LastName) + _children['{%s}age' % YOUTUBE_NAMESPACE] = ('age', Age) + _children['{%s}books' % YOUTUBE_NAMESPACE] = ('books', Books) + _children['{%s}gender' % YOUTUBE_NAMESPACE] = ('gender', Gender) + _children['{%s}company' % YOUTUBE_NAMESPACE] = ('company', Company) + _children['{%s}description' % YOUTUBE_NAMESPACE] = ('description', + Description) + _children['{%s}hobbies' % YOUTUBE_NAMESPACE] = ('hobbies', Hobbies) + 
_children['{%s}hometown' % YOUTUBE_NAMESPACE] = ('hometown', Hometown) + _children['{%s}location' % YOUTUBE_NAMESPACE] = ('location', Location) + _children['{%s}movies' % YOUTUBE_NAMESPACE] = ('movies', Movies) + _children['{%s}music' % YOUTUBE_NAMESPACE] = ('music', Music) + _children['{%s}occupation' % YOUTUBE_NAMESPACE] = ('occupation', Occupation) + _children['{%s}school' % YOUTUBE_NAMESPACE] = ('school', School) + _children['{%s}relationship' % YOUTUBE_NAMESPACE] = ('relationship', + Relationship) + _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics) + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + _children['{%s}thumbnail' % gdata.media.MEDIA_NAMESPACE] = ('thumbnail', + Media.Thumbnail) + + def __init__(self, author=None, category=None, content=None, atom_id=None, + link=None, published=None, title=None, updated=None, + username=None, first_name=None, last_name=None, age=None, + books=None, gender=None, company=None, description=None, + hobbies=None, hometown=None, location=None, movies=None, + music=None, occupation=None, school=None, relationship=None, + statistics=None, feed_link=None, extension_elements=None, + extension_attributes=None, text=None): + + self.username = username + self.first_name = first_name + self.last_name = last_name + self.age = age + self.books = books + self.gender = gender + self.company = company + self.description = description + self.hobbies = hobbies + self.hometown = hometown + self.location = location + self.movies = movies + self.music = music + self.occupation = occupation + self.school = school + self.relationship = relationship + self.statistics = statistics + self.feed_link = feed_link + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, + link=link, published=published, + title=title, updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + 
+class YouTubeVideoFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a video feed on YouTube.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [YouTubeVideoEntry]) + +class YouTubePlaylistEntry(gdata.GDataEntry): + """Represents a playlist in YouTube.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}description' % YOUTUBE_NAMESPACE] = ('description', + Description) + _children['{%s}private' % YOUTUBE_NAMESPACE] = ('private', + Private) + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, title=None, + updated=None, private=None, feed_link=None, + description=None, extension_elements=None, + extension_attributes=None): + + self.description = description + self.private = private + self.feed_link = feed_link + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, + link=link, published=published, title=title, + updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + + +class YouTubePlaylistFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of a user's playlists """ + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubePlaylistEntry]) + + +class YouTubePlaylistVideoFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of video entry on a playlist.""" + _tag = gdata.GDataFeed._tag + _namespace = 
gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubePlaylistVideoEntry]) + + +class YouTubeContactFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of a users contacts.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubeContactEntry]) + + +class YouTubeSubscriptionFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of a users subscriptions.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubeSubscriptionEntry]) + + +class YouTubeVideoCommentFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of comments for a video.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubeVideoCommentEntry]) + + +class YouTubeVideoResponseFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of video responses.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubeVideoResponseEntry]) + + +def YouTubeVideoFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoFeed, xml_string) + + +def YouTubeVideoEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoEntry, xml_string) + + +def 
YouTubeContactFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeContactFeed, xml_string) + + +def YouTubeContactEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeContactEntry, xml_string) + + +def YouTubeVideoCommentFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoCommentFeed, xml_string) + + +def YouTubeVideoCommentEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoCommentEntry, xml_string) + + +def YouTubeUserFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoFeed, xml_string) + + +def YouTubeUserEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeUserEntry, xml_string) + + +def YouTubePlaylistFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubePlaylistFeed, xml_string) + + +def YouTubePlaylistVideoFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubePlaylistVideoFeed, xml_string) + + +def YouTubePlaylistEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubePlaylistEntry, xml_string) + + +def YouTubePlaylistVideoEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubePlaylistVideoEntry, xml_string) + + +def YouTubeSubscriptionFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeSubscriptionFeed, xml_string) + + +def YouTubeSubscriptionEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeSubscriptionEntry, xml_string) + + +def YouTubeVideoResponseFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoResponseFeed, xml_string) + + +def YouTubeVideoResponseEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoResponseEntry, xml_string) diff --git a/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/youtube/service.py b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/youtube/service.py new file mode 100644 index 0000000..ef7c0f1 --- /dev/null 
+++ b/gdata.py-1.2.3/build/lib.linux-i686-2.7/gdata/youtube/service.py @@ -0,0 +1,1545 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""YouTubeService extends GDataService to streamline YouTube operations. + + YouTubeService: Provides methods to perform CRUD operations on YouTube feeds. + Extends GDataService. +""" + +__author__ = ('api.stephaniel@gmail.com (Stephanie Liu), ' + 'api.jhartmann@gmail.com (Jochen Hartmann)') + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import os +import atom +import gdata +import gdata.service +import gdata.youtube + +YOUTUBE_SERVER = 'gdata.youtube.com' +YOUTUBE_SERVICE = 'youtube' +YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL = 'https://www.google.com/youtube/accounts/ClientLogin' +YOUTUBE_SUPPORTED_UPLOAD_TYPES = ('mov', 'avi', 'wmv', 'mpg', 'quicktime', + 'flv') +YOUTUBE_QUERY_VALID_TIME_PARAMETERS = ('today', 'this_week', 'this_month', + 'all_time') +YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS = ('published', 'viewCount', 'rating', + 'relevance') +YOUTUBE_QUERY_VALID_RACY_PARAMETERS = ('include', 'exclude') +YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS = ('1', '5', '6') +YOUTUBE_STANDARDFEEDS = ('most_recent', 'recently_featured', + 'top_rated', 'most_viewed','watch_on_mobile') 
+YOUTUBE_UPLOAD_URI = 'http://uploads.gdata.youtube.com/feeds/api/users' +YOUTUBE_UPLOAD_TOKEN_URI = 'http://gdata.youtube.com/action/GetUploadToken' +YOUTUBE_VIDEO_URI = 'http://gdata.youtube.com/feeds/api/videos' +YOUTUBE_USER_FEED_URI = 'http://gdata.youtube.com/feeds/api/users' +YOUTUBE_PLAYLIST_FEED_URI = 'http://gdata.youtube.com/feeds/api/playlists' + +YOUTUBE_STANDARD_FEEDS = 'http://gdata.youtube.com/feeds/api/standardfeeds' +YOUTUBE_STANDARD_TOP_RATED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, 'top_rated') +YOUTUBE_STANDARD_MOST_VIEWED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'most_viewed') +YOUTUBE_STANDARD_RECENTLY_FEATURED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'recently_featured') +YOUTUBE_STANDARD_WATCH_ON_MOBILE_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'watch_on_mobile') +YOUTUBE_STANDARD_TOP_FAVORITES_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'top_favorites') +YOUTUBE_STANDARD_MOST_RECENT_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'most_recent') +YOUTUBE_STANDARD_MOST_DISCUSSED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'most_discussed') +YOUTUBE_STANDARD_MOST_LINKED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'most_linked') +YOUTUBE_STANDARD_MOST_RESPONDED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'most_responded') +YOUTUBE_SCHEMA = 'http://gdata.youtube.com/schemas' + +YOUTUBE_RATING_LINK_REL = '%s#video.ratings' % YOUTUBE_SCHEMA + +YOUTUBE_COMPLAINT_CATEGORY_SCHEME = '%s/%s' % (YOUTUBE_SCHEMA, + 'complaint-reasons.cat') +YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME = '%s/%s' % (YOUTUBE_SCHEMA, + 'subscriptiontypes.cat') + +YOUTUBE_COMPLAINT_CATEGORY_TERMS = ('PORN', 'VIOLENCE', 'HATE', 'DANGEROUS', + 'RIGHTS', 'SPAM') +YOUTUBE_CONTACT_STATUS = ('accepted', 'rejected') +YOUTUBE_CONTACT_CATEGORY = ('Friends', 'Family') + +UNKOWN_ERROR = 1000 +YOUTUBE_BAD_REQUEST = 400 +YOUTUBE_CONFLICT = 409 +YOUTUBE_INTERNAL_SERVER_ERROR = 500 +YOUTUBE_INVALID_ARGUMENT = 601 +YOUTUBE_INVALID_CONTENT_TYPE = 602 +YOUTUBE_NOT_A_VIDEO = 603 +YOUTUBE_INVALID_KIND = 604 
+ + +class Error(Exception): + """Base class for errors within the YouTube service.""" + pass + +class RequestError(Error): + """Error class that is thrown in response to an invalid HTTP Request.""" + pass + +class YouTubeError(Error): + """YouTube service specific error class.""" + pass + +class YouTubeService(gdata.service.GDataService): + + """Client for the YouTube service. + + Performs all documented Google Data YouTube API functions, such as inserting, + updating and deleting videos, comments, playlist, subscriptions etc. + YouTube Service requires authentication for any write, update or delete + actions. + + Attributes: + email: An optional string identifying the user. Required only for + authenticated actions. + password: An optional string identifying the user's password. + source: An optional string identifying the name of your application. + server: An optional address of the YouTube API server. gdata.youtube.com + is provided as the default value. + additional_headers: An optional dictionary containing additional headers + to be passed along with each request. Use to store developer key. + client_id: An optional string identifying your application, required for + authenticated requests, along with a developer key. + developer_key: An optional string value. Register your application at + http://code.google.com/apis/youtube/dashboard to obtain a (free) key. 
+ """ + + def __init__(self, email=None, password=None, source=None, + server=YOUTUBE_SERVER, additional_headers=None, client_id=None, + developer_key=None): + self.additional_headers = {} + if client_id is not None and developer_key is not None: + self.additional_headers = {'X-Gdata-Client': client_id, + 'X-GData-Key': 'key=%s' % developer_key} + + gdata.service.GDataService.__init__( + self, email=email, password=password, service=YOUTUBE_SERVICE, + source=source, server=server, + additional_headers=self.additional_headers) + elif developer_key and not client_id: + raise YouTubeError('You must also specify the clientId') + else: + gdata.service.GDataService.__init__( + self, email=email, password=password, service=YOUTUBE_SERVICE, + source=source, server=server, additional_headers=additional_headers) + self.auth_service_url = YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL + + def GetYouTubeVideoFeed(self, uri): + """Retrieve a YouTubeVideoFeed. + + Args: + uri: A string representing the URI of the feed that is to be retrieved. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.Get(uri, converter=gdata.youtube.YouTubeVideoFeedFromString) + + def GetYouTubeVideoEntry(self, uri=None, video_id=None): + """Retrieve a YouTubeVideoEntry. + + Either a uri or a video_id must be provided. + + Args: + uri: An optional string representing the URI of the entry that is to + be retrieved. + video_id: An optional string representing the ID of the video. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a video_id to the + GetYouTubeVideoEntry() method. 
+ """ + if uri is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the GetYouTubeVideoEntry() method') + elif video_id and not uri: + uri = '%s/%s' % (YOUTUBE_VIDEO_URI, video_id) + return self.Get(uri, converter=gdata.youtube.YouTubeVideoEntryFromString) + + def GetYouTubeContactFeed(self, uri=None, username='default'): + """Retrieve a YouTubeContactFeed. + + Either a uri or a username must be provided. + + Args: + uri: An optional string representing the URI of the contact feed that + is to be retrieved. + username: An optional string representing the username. Defaults to the + currently authenticated user. + + Returns: + A YouTubeContactFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a username to the + GetYouTubeContactFeed() method. + """ + if uri is None: + uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'contacts') + return self.Get(uri, converter=gdata.youtube.YouTubeContactFeedFromString) + + def GetYouTubeContactEntry(self, uri): + """Retrieve a YouTubeContactEntry. + + Args: + uri: A string representing the URI of the contact entry that is to + be retrieved. + + Returns: + A YouTubeContactEntry if successfully retrieved. + """ + return self.Get(uri, converter=gdata.youtube.YouTubeContactEntryFromString) + + def GetYouTubeVideoCommentFeed(self, uri=None, video_id=None): + """Retrieve a YouTubeVideoCommentFeed. + + Either a uri or a video_id must be provided. + + Args: + uri: An optional string representing the URI of the comment feed that + is to be retrieved. + video_id: An optional string representing the ID of the video for which + to retrieve the comment feed. + + Returns: + A YouTubeVideoCommentFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a video_id to the + GetYouTubeVideoCommentFeed() method. 
+ """ + if uri is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the GetYouTubeVideoCommentFeed() method') + elif video_id and not uri: + uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'comments') + return self.Get( + uri, converter=gdata.youtube.YouTubeVideoCommentFeedFromString) + + def GetYouTubeVideoCommentEntry(self, uri): + """Retrieve a YouTubeVideoCommentEntry. + + Args: + uri: A string representing the URI of the comment entry that is to + be retrieved. + + Returns: + A YouTubeCommentEntry if successfully retrieved. + """ + return self.Get( + uri, converter=gdata.youtube.YouTubeVideoCommentEntryFromString) + + def GetYouTubeUserFeed(self, uri=None, username=None): + """Retrieve a YouTubeUserFeed. + + Either a uri or a username must be provided. + + Args: + uri: An optional string representing the URI of the user feed that is + to be retrieved. + username: An optional string representing the username. + + Returns: + A YouTubeUserFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a username to the + GetYouTubeUserFeed() method. + """ + if uri is None and username is None: + raise YouTubeError('You must provide at least a uri or a username ' + 'to the GetYouTubeUserFeed() method') + elif username and not uri: + uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'uploads') + return self.Get(uri, converter=gdata.youtube.YouTubeUserFeedFromString) + + def GetYouTubeUserEntry(self, uri=None, username=None): + """Retrieve a YouTubeUserEntry. + + Either a uri or a username must be provided. + + Args: + uri: An optional string representing the URI of the user entry that is + to be retrieved. + username: An optional string representing the username. + + Returns: + A YouTubeUserEntry if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a username to the + GetYouTubeUserEntry() method. 
+ """ + if uri is None and username is None: + raise YouTubeError('You must provide at least a uri or a username ' + 'to the GetYouTubeUserEntry() method') + elif username and not uri: + uri = '%s/%s' % (YOUTUBE_USER_FEED_URI, username) + return self.Get(uri, converter=gdata.youtube.YouTubeUserEntryFromString) + + def GetYouTubePlaylistFeed(self, uri=None, username='default'): + """Retrieve a YouTubePlaylistFeed (a feed of playlists for a user). + + Either a uri or a username must be provided. + + Args: + uri: An optional string representing the URI of the playlist feed that + is to be retrieved. + username: An optional string representing the username. Defaults to the + currently authenticated user. + + Returns: + A YouTubePlaylistFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a username to the + GetYouTubePlaylistFeed() method. + """ + if uri is None: + uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'playlists') + return self.Get(uri, converter=gdata.youtube.YouTubePlaylistFeedFromString) + + def GetYouTubePlaylistEntry(self, uri): + """Retrieve a YouTubePlaylistEntry. + + Args: + uri: A string representing the URI of the playlist feed that is to + be retrieved. + + Returns: + A YouTubePlaylistEntry if successfully retrieved. + """ + return self.Get(uri, converter=gdata.youtube.YouTubePlaylistEntryFromString) + + def GetYouTubePlaylistVideoFeed(self, uri=None, playlist_id=None): + """Retrieve a YouTubePlaylistVideoFeed (a feed of videos on a playlist). + + Either a uri or a playlist_id must be provided. + + Args: + uri: An optional string representing the URI of the playlist video feed + that is to be retrieved. + playlist_id: An optional string representing the Id of the playlist whose + playlist video feed is to be retrieved. + + Returns: + A YouTubePlaylistVideoFeed if successfully retrieved. 
+ + Raises: + YouTubeError: You must provide at least a uri or a playlist_id to the + GetYouTubePlaylistVideoFeed() method. + """ + if uri is None and playlist_id is None: + raise YouTubeError('You must provide at least a uri or a playlist_id ' + 'to the GetYouTubePlaylistVideoFeed() method') + elif playlist_id and not uri: + uri = '%s/%s' % (YOUTUBE_PLAYLIST_FEED_URI, playlist_id) + return self.Get( + uri, converter=gdata.youtube.YouTubePlaylistVideoFeedFromString) + + def GetYouTubeVideoResponseFeed(self, uri=None, video_id=None): + """Retrieve a YouTubeVideoResponseFeed. + + Either a uri or a playlist_id must be provided. + + Args: + uri: An optional string representing the URI of the video response feed + that is to be retrieved. + video_id: An optional string representing the ID of the video whose + response feed is to be retrieved. + + Returns: + A YouTubeVideoResponseFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a video_id to the + GetYouTubeVideoResponseFeed() method. + """ + if uri is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the GetYouTubeVideoResponseFeed() method') + elif video_id and not uri: + uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'responses') + return self.Get( + uri, converter=gdata.youtube.YouTubeVideoResponseFeedFromString) + + def GetYouTubeVideoResponseEntry(self, uri): + """Retrieve a YouTubeVideoResponseEntry. + + Args: + uri: A string representing the URI of the video response entry that + is to be retrieved. + + Returns: + A YouTubeVideoResponseEntry if successfully retrieved. + """ + return self.Get( + uri, converter=gdata.youtube.YouTubeVideoResponseEntryFromString) + + def GetYouTubeSubscriptionFeed(self, uri=None, username='default'): + """Retrieve a YouTubeSubscriptionFeed. + + Either the uri of the feed or a username must be provided. 
+ + Args: + uri: An optional string representing the URI of the feed that is to + be retrieved. + username: An optional string representing the username whose subscription + feed is to be retrieved. Defaults to the currently authenticted user. + + Returns: + A YouTubeVideoSubscriptionFeed if successfully retrieved. + """ + if uri is None: + uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'subscriptions') + return self.Get( + uri, converter=gdata.youtube.YouTubeSubscriptionFeedFromString) + + def GetYouTubeSubscriptionEntry(self, uri): + """Retrieve a YouTubeSubscriptionEntry. + + Args: + uri: A string representing the URI of the entry that is to be retrieved. + + Returns: + A YouTubeVideoSubscriptionEntry if successfully retrieved. + """ + return self.Get( + uri, converter=gdata.youtube.YouTubeSubscriptionEntryFromString) + + def GetYouTubeRelatedVideoFeed(self, uri=None, video_id=None): + """Retrieve a YouTubeRelatedVideoFeed. + + Either a uri for the feed or a video_id is required. + + Args: + uri: An optional string representing the URI of the feed that is to + be retrieved. + video_id: An optional string representing the ID of the video for which + to retrieve the related video feed. + + Returns: + A YouTubeRelatedVideoFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a video_id to the + GetYouTubeRelatedVideoFeed() method. + """ + if uri is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the GetYouTubeRelatedVideoFeed() method') + elif video_id and not uri: + uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'related') + return self.Get( + uri, converter=gdata.youtube.YouTubeVideoFeedFromString) + + def GetTopRatedVideoFeed(self): + """Retrieve the 'top_rated' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. 
+ """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_TOP_RATED_URI) + + def GetMostViewedVideoFeed(self): + """Retrieve the 'most_viewed' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_VIEWED_URI) + + def GetRecentlyFeaturedVideoFeed(self): + """Retrieve the 'recently_featured' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_RECENTLY_FEATURED_URI) + + def GetWatchOnMobileVideoFeed(self): + """Retrieve the 'watch_on_mobile' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_WATCH_ON_MOBILE_URI) + + def GetTopFavoritesVideoFeed(self): + """Retrieve the 'top_favorites' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_TOP_FAVORITES_URI) + + def GetMostRecentVideoFeed(self): + """Retrieve the 'most_recent' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_RECENT_URI) + + def GetMostDiscussedVideoFeed(self): + """Retrieve the 'most_discussed' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_DISCUSSED_URI) + + def GetMostLinkedVideoFeed(self): + """Retrieve the 'most_linked' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_LINKED_URI) + + def GetMostRespondedVideoFeed(self): + """Retrieve the 'most_responded' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. 
+ """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_RESPONDED_URI) + + def GetUserFavoritesFeed(self, username='default'): + """Retrieve the favorites feed for a given user. + + Args: + username: An optional string representing the username whose favorites + feed is to be retrieved. Defaults to the currently authenticated user. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + favorites_feed_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, + 'favorites') + return self.GetYouTubeVideoFeed(favorites_feed_uri) + + def InsertVideoEntry(self, video_entry, filename_or_handle, + youtube_username='default', + content_type='video/quicktime'): + """Upload a new video to YouTube using the direct upload mechanism. + + Needs authentication. + + Args: + video_entry: The YouTubeVideoEntry to upload. + filename_or_handle: A file-like object or file name where the video + will be read from. + youtube_username: An optional string representing the username into whose + account this video is to be uploaded to. Defaults to the currently + authenticated user. + content_type: An optional string representing internet media type + (a.k.a. mime type) of the media object. Currently the YouTube API + supports these types: + o video/mpeg + o video/quicktime + o video/x-msvideo + o video/mp4 + o video/x-flv + + Returns: + The newly created YouTubeVideoEntry if successful. + + Raises: + AssertionError: video_entry must be a gdata.youtube.VideoEntry instance. + YouTubeError: An error occurred trying to read the video file provided. + gdata.service.RequestError: An error occurred trying to upload the video + to the API server. + """ + + # We need to perform a series of checks on the video_entry and on the + # file that we plan to upload, such as checking whether we have a valid + # video_entry and that the file is the correct type and readable, prior + # to performing the actual POST request. 
+ + try: + assert(isinstance(video_entry, gdata.youtube.YouTubeVideoEntry)) + except AssertionError: + raise YouTubeError({'status':YOUTUBE_INVALID_ARGUMENT, + 'body':'`video_entry` must be a gdata.youtube.VideoEntry instance', + 'reason':'Found %s, not VideoEntry' % type(video_entry) + }) + majtype, mintype = content_type.split('/') + + try: + assert(mintype in YOUTUBE_SUPPORTED_UPLOAD_TYPES) + except (ValueError, AssertionError): + raise YouTubeError({'status':YOUTUBE_INVALID_CONTENT_TYPE, + 'body':'This is not a valid content type: %s' % content_type, + 'reason':'Accepted content types: %s' % + ['video/%s' % (t) for t in YOUTUBE_SUPPORTED_UPLOAD_TYPES]}) + + if (isinstance(filename_or_handle, (str, unicode)) + and os.path.exists(filename_or_handle)): + mediasource = gdata.MediaSource() + mediasource.setFile(filename_or_handle, content_type) + elif hasattr(filename_or_handle, 'read'): + import StringIO + if hasattr(filename_or_handle, 'seek'): + filename_or_handle.seek(0) + file_handle = StringIO.StringIO(filename_or_handle.read()) + name = 'video' + if hasattr(filename_or_handle, 'name'): + name = filename_or_handle.name + mediasource = gdata.MediaSource(file_handle, content_type, + content_length=file_handle.len, file_name=name) + else: + raise YouTubeError({'status':YOUTUBE_INVALID_ARGUMENT, 'body': + '`filename_or_handle` must be a path name or a file-like object', + 'reason': ('Found %s, not path name or object ' + 'with a .read() method' % type(filename_or_handle))}) + upload_uri = '%s/%s/%s' % (YOUTUBE_UPLOAD_URI, youtube_username, + 'uploads') + self.additional_headers['Slug'] = mediasource.file_name + + # Using a nested try statement to retain Python 2.4 compatibility + try: + try: + return self.Post(video_entry, uri=upload_uri, media_source=mediasource, + converter=gdata.youtube.YouTubeVideoEntryFromString) + except gdata.service.RequestError, e: + raise YouTubeError(e.args[0]) + finally: + del(self.additional_headers['Slug']) + + def 
CheckUploadStatus(self, video_entry=None, video_id=None): + """Check upload status on a recently uploaded video entry. + + Needs authentication. Either video_entry or video_id must be provided. + + Args: + video_entry: An optional YouTubeVideoEntry whose upload status to check + video_id: An optional string representing the ID of the uploaded video + whose status is to be checked. + + Returns: + A tuple containing (video_upload_state, detailed_message) or None if + no status information is found. + + Raises: + YouTubeError: You must provide at least a video_entry or a video_id to the + CheckUploadStatus() method. + """ + if video_entry is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the CheckUploadStatus() method') + elif video_id and not video_entry: + video_entry = self.GetYouTubeVideoEntry(video_id=video_id) + + control = video_entry.control + if control is not None: + draft = control.draft + if draft is not None: + if draft.text == 'yes': + yt_state = control.extension_elements[0] + if yt_state is not None: + state_value = yt_state.attributes['name'] + message = '' + if yt_state.text is not None: + message = yt_state.text + + return (state_value, message) + + def GetFormUploadToken(self, video_entry, uri=YOUTUBE_UPLOAD_TOKEN_URI): + """Receives a YouTube Token and a YouTube PostUrl from a YouTubeVideoEntry. + + Needs authentication. + + Args: + video_entry: The YouTubeVideoEntry to upload (meta-data only). + uri: An optional string representing the URI from where to fetch the + token information. Defaults to the YOUTUBE_UPLOADTOKEN_URI. + + Returns: + A tuple containing the URL to which to post your video file, along + with the youtube token that must be included with your upload in the + form of: (post_url, youtube_token). 
+ """ + try: + response = self.Post(video_entry, uri) + except gdata.service.RequestError, e: + raise YouTubeError(e.args[0]) + + tree = ElementTree.fromstring(response) + + for child in tree: + if child.tag == 'url': + post_url = child.text + elif child.tag == 'token': + youtube_token = child.text + return (post_url, youtube_token) + + def UpdateVideoEntry(self, video_entry): + """Updates a video entry's meta-data. + + Needs authentication. + + Args: + video_entry: The YouTubeVideoEntry to update, containing updated + meta-data. + + Returns: + An updated YouTubeVideoEntry on success or None. + """ + for link in video_entry.link: + if link.rel == 'edit': + edit_uri = link.href + return self.Put(video_entry, uri=edit_uri, + converter=gdata.youtube.YouTubeVideoEntryFromString) + + def DeleteVideoEntry(self, video_entry): + """Deletes a video entry. + + Needs authentication. + + Args: + video_entry: The YouTubeVideoEntry to be deleted. + + Returns: + True if entry was deleted successfully. + """ + for link in video_entry.link: + if link.rel == 'edit': + edit_uri = link.href + return self.Delete(edit_uri) + + def AddRating(self, rating_value, video_entry): + """Add a rating to a video entry. + + Needs authentication. + + Args: + rating_value: The integer value for the rating (between 1 and 5). + video_entry: The YouTubeVideoEntry to be rated. + + Returns: + True if the rating was added successfully. + + Raises: + YouTubeError: rating_value must be between 1 and 5 in AddRating(). 
+ """ + if rating_value < 1 or rating_value > 5: + raise YouTubeError('rating_value must be between 1 and 5 in AddRating()') + + entry = gdata.GDataEntry() + rating = gdata.youtube.Rating(min='1', max='5') + rating.extension_attributes['name'] = 'value' + rating.extension_attributes['value'] = str(rating_value) + entry.extension_elements.append(rating) + + for link in video_entry.link: + if link.rel == YOUTUBE_RATING_LINK_REL: + rating_uri = link.href + + return self.Post(entry, uri=rating_uri) + + def AddComment(self, comment_text, video_entry): + """Add a comment to a video entry. + + Needs authentication. Note that each comment that is posted must contain + the video entry that it is to be posted to. + + Args: + comment_text: A string representing the text of the comment. + video_entry: The YouTubeVideoEntry to be commented on. + + Returns: + True if the comment was added successfully. + """ + content = atom.Content(text=comment_text) + comment_entry = gdata.youtube.YouTubeVideoCommentEntry(content=content) + comment_post_uri = video_entry.comments.feed_link[0].href + + return self.Post(comment_entry, uri=comment_post_uri) + + def AddVideoResponse(self, video_id_to_respond_to, video_response): + """Add a video response. + + Needs authentication. + + Args: + video_id_to_respond_to: A string representing the ID of the video to be + responded to. + video_response: YouTubeVideoEntry to be posted as a response. + + Returns: + True if video response was posted successfully. + """ + post_uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id_to_respond_to, + 'responses') + return self.Post(video_response, uri=post_uri) + + def DeleteVideoResponse(self, video_id, response_video_id): + """Delete a video response. + + Needs authentication. + + Args: + video_id: A string representing the ID of video that contains the + response. + response_video_id: A string representing the ID of the video that was + posted as a response. 
+ + Returns: + True if video response was deleted succcessfully. + """ + delete_uri = '%s/%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'responses', + response_video_id) + return self.Delete(delete_uri) + + def AddComplaint(self, complaint_text, complaint_term, video_id): + """Add a complaint for a particular video entry. + + Needs authentication. + + Args: + complaint_text: A string representing the complaint text. + complaint_term: A string representing the complaint category term. + video_id: A string representing the ID of YouTubeVideoEntry to + complain about. + + Returns: + True if posted successfully. + + Raises: + YouTubeError: Your complaint_term is not valid. + """ + if complaint_term not in YOUTUBE_COMPLAINT_CATEGORY_TERMS: + raise YouTubeError('Your complaint_term is not valid') + + content = atom.Content(text=complaint_text) + category = atom.Category(term=complaint_term, + scheme=YOUTUBE_COMPLAINT_CATEGORY_SCHEME) + + complaint_entry = gdata.GDataEntry(content=content, category=[category]) + post_uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'complaints') + + return self.Post(complaint_entry, post_uri) + + def AddVideoEntryToFavorites(self, video_entry, username='default'): + """Add a video entry to a users favorite feed. + + Needs authentication. + + Args: + video_entry: The YouTubeVideoEntry to add. + username: An optional string representing the username to whose favorite + feed you wish to add the entry. Defaults to the currently + authenticated user. + Returns: + The posted YouTubeVideoEntry if successfully posted. + """ + post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'favorites') + + return self.Post(video_entry, post_uri, + converter=gdata.youtube.YouTubeVideoEntryFromString) + + def DeleteVideoEntryFromFavorites(self, video_id, username='default'): + """Delete a video entry from the users favorite feed. + + Needs authentication. 
+ + Args: + video_id: A string representing the ID of the video that is to be removed + username: An optional string representing the username of the user's + favorite feed. Defaults to the currently authenticated user. + + Returns: + True if entry was successfully deleted. + """ + edit_link = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'favorites', + video_id) + return self.Delete(edit_link) + + def AddPlaylist(self, playlist_title, playlist_description, + playlist_private=None): + """Add a new playlist to the currently authenticated users account. + + Needs authentication. + + Args: + playlist_title: A string representing the title for the new playlist. + playlist_description: A string representing the description of the + playlist. + playlist_private: An optional boolean, set to True if the playlist is + to be private. + + Returns: + The YouTubePlaylistEntry if successfully posted. + """ + playlist_entry = gdata.youtube.YouTubePlaylistEntry( + title=atom.Title(text=playlist_title), + description=gdata.youtube.Description(text=playlist_description)) + if playlist_private: + playlist_entry.private = gdata.youtube.Private() + + playlist_post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, 'default', + 'playlists') + return self.Post(playlist_entry, playlist_post_uri, + converter=gdata.youtube.YouTubePlaylistEntryFromString) + + def UpdatePlaylist(self, playlist_id, new_playlist_title, + new_playlist_description, playlist_private=None, + username='default'): + """Update a playlist with new meta-data. + + Needs authentication. + + Args: + playlist_id: A string representing the ID of the playlist to be updated. + new_playlist_title: A string representing a new title for the playlist. + new_playlist_description: A string representing a new description for the + playlist. + playlist_private: An optional boolean, set to True if the playlist is + to be private. + username: An optional string representing the username whose playlist is + to be updated. 
Defaults to the currently authenticated user. + + Returns: + A YouTubePlaylistEntry if the update was successful. + """ + updated_playlist = gdata.youtube.YouTubePlaylistEntry( + title=atom.Title(text=new_playlist_title), + description=gdata.youtube.Description(text=new_playlist_description)) + if playlist_private: + updated_playlist.private = gdata.youtube.Private() + + playlist_put_uri = '%s/%s/playlists/%s' % (YOUTUBE_USER_FEED_URI, username, + playlist_id) + + return self.Put(updated_playlist, playlist_put_uri, + converter=gdata.youtube.YouTubePlaylistEntryFromString) + + def DeletePlaylist(self, playlist_uri): + """Delete a playlist from the currently authenticated users playlists. + + Needs authentication. + + Args: + playlist_uri: A string representing the URI of the playlist that is + to be deleted. + + Returns: + True if successfully deleted. + """ + return self.Delete(playlist_uri) + + def AddPlaylistVideoEntryToPlaylist( + self, playlist_uri, video_id, custom_video_title=None, + custom_video_description=None): + """Add a video entry to a playlist, optionally providing a custom title + and description. + + Needs authentication. + + Args: + playlist_uri: A string representing the URI of the playlist to which this + video entry is to be added. + video_id: A string representing the ID of the video entry to add. + custom_video_title: An optional string representing a custom title for + the video (only shown on the playlist). + custom_video_description: An optional string representing a custom + description for the video (only shown on the playlist). + + Returns: + A YouTubePlaylistVideoEntry if successfully posted. 
+ """ + playlist_video_entry = gdata.youtube.YouTubePlaylistVideoEntry( + atom_id=atom.Id(text=video_id)) + if custom_video_title: + playlist_video_entry.title = atom.Title(text=custom_video_title) + if custom_video_description: + playlist_video_entry.description = gdata.youtube.Description( + text=custom_video_description) + + return self.Post(playlist_video_entry, playlist_uri, + converter=gdata.youtube.YouTubePlaylistVideoEntryFromString) + + def UpdatePlaylistVideoEntryMetaData( + self, playlist_uri, playlist_entry_id, new_video_title, + new_video_description, new_video_position): + """Update the meta data for a YouTubePlaylistVideoEntry. + + Needs authentication. + + Args: + playlist_uri: A string representing the URI of the playlist that contains + the entry to be updated. + playlist_entry_id: A string representing the ID of the entry to be + updated. + new_video_title: A string representing the new title for the video entry. + new_video_description: A string representing the new description for + the video entry. + new_video_position: An integer representing the new position on the + playlist for the video. + + Returns: + A YouTubePlaylistVideoEntry if the update was successful. + """ + playlist_video_entry = gdata.youtube.YouTubePlaylistVideoEntry( + title=atom.Title(text=new_video_title), + description=gdata.youtube.Description(text=new_video_description), + position=gdata.youtube.Position(text=str(new_video_position))) + + playlist_put_uri = playlist_uri + '/' + playlist_entry_id + + return self.Put(playlist_video_entry, playlist_put_uri, + converter=gdata.youtube.YouTubePlaylistVideoEntryFromString) + + def DeletePlaylistVideoEntry(self, playlist_uri, playlist_video_entry_id): + """Delete a playlist video entry from a playlist. + + Needs authentication. + + Args: + playlist_uri: A URI representing the playlist from which the playlist + video entry is to be removed from. 
+ playlist_video_entry_id: A string representing id of the playlist video + entry that is to be removed. + + Returns: + True if entry was successfully deleted. + """ + delete_uri = '%s/%s' % (playlist_uri, playlist_video_entry_id) + return self.Delete(delete_uri) + + def AddSubscriptionToChannel(self, username_to_subscribe_to, + my_username = 'default'): + """Add a new channel subscription to the currently authenticated users + account. + + Needs authentication. + + Args: + username_to_subscribe_to: A string representing the username of the + channel to which we want to subscribe to. + my_username: An optional string representing the name of the user which + we want to subscribe. Defaults to currently authenticated user. + + Returns: + A new YouTubeSubscriptionEntry if successfully posted. + """ + subscription_category = atom.Category( + scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME, + term='channel') + subscription_username = gdata.youtube.Username( + text=username_to_subscribe_to) + + subscription_entry = gdata.youtube.YouTubeSubscriptionEntry( + category=subscription_category, + username=subscription_username) + + post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'subscriptions') + + return self.Post(subscription_entry, post_uri, + converter=gdata.youtube.YouTubeSubscriptionEntryFromString) + + def AddSubscriptionToFavorites(self, username, my_username = 'default'): + """Add a new subscription to a users favorites to the currently + authenticated user's account. + + Needs authentication + + Args: + username: A string representing the username of the user's favorite feed + to subscribe to. + my_username: An optional string representing the username of the user + that is to be subscribed. Defaults to currently authenticated user. + + Returns: + A new YouTubeSubscriptionEntry if successful. 
+ """ + subscription_category = atom.Category( + scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME, + term='favorites') + subscription_username = gdata.youtube.Username(text=username) + + subscription_entry = gdata.youtube.YouTubeSubscriptionEntry( + category=subscription_category, + username=subscription_username) + + post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'subscriptions') + + return self.Post(subscription_entry, post_uri, + converter=gdata.youtube.YouTubeSubscriptionEntryFromString) + + def AddSubscriptionToQuery(self, query, my_username = 'default'): + """Add a new subscription to a specific keyword query to the currently + authenticated user's account. + + Needs authentication + + Args: + query: A string representing the keyword query to subscribe to. + my_username: An optional string representing the username of the user + that is to be subscribed. Defaults to currently authenticated user. + + Returns: + A new YouTubeSubscriptionEntry if successful. + """ + subscription_category = atom.Category( + scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME, + term='query') + subscription_query_string = gdata.youtube.QueryString(text=query) + + subscription_entry = gdata.youtube.YouTubeSubscriptionEntry( + category=subscription_category, + query_string=subscription_query_string) + + post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'subscriptions') + + return self.Post(subscription_entry, post_uri, + converter=gdata.youtube.YouTubeSubscriptionEntryFromString) + + + + def DeleteSubscription(self, subscription_uri): + """Delete a subscription from the currently authenticated user's account. + + Needs authentication. + + Args: + subscription_uri: A string representing the URI of the subscription that + is to be deleted. + + Returns: + True if deleted successfully. + """ + return self.Delete(subscription_uri) + + def AddContact(self, contact_username, my_username='default'): + """Add a new contact to the currently authenticated user's contact feed. 
+ + Needs authentication. + + Args: + contact_username: A string representing the username of the contact + that you wish to add. + my_username: An optional string representing the username to whose + contact the new contact is to be added. + + Returns: + A YouTubeContactEntry if added successfully. + """ + contact_category = atom.Category( + scheme = 'http://gdata.youtube.com/schemas/2007/contact.cat', + term = 'Friends') + contact_username = gdata.youtube.Username(text=contact_username) + contact_entry = gdata.youtube.YouTubeContactEntry( + category=contact_category, + username=contact_username) + + contact_post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'contacts') + + return self.Post(contact_entry, contact_post_uri, + converter=gdata.youtube.YouTubeContactEntryFromString) + + def UpdateContact(self, contact_username, new_contact_status, + new_contact_category, my_username='default'): + """Update a contact, providing a new status and a new category. + + Needs authentication. + + Args: + contact_username: A string representing the username of the contact + that is to be updated. + new_contact_status: A string representing the new status of the contact. + This can either be set to 'accepted' or 'rejected'. + new_contact_category: A string representing the new category for the + contact, either 'Friends' or 'Family'. + my_username: An optional string representing the username of the user + whose contact feed we are modifying. Defaults to the currently + authenticated user. + + Returns: + A YouTubeContactEntry if updated succesfully. + + Raises: + YouTubeError: New contact status must be within the accepted values. Or + new contact category must be within the accepted categories. 
+ """ + if new_contact_status not in YOUTUBE_CONTACT_STATUS: + raise YouTubeError('New contact status must be one of %s' % + (' '.join(YOUTUBE_CONTACT_STATUS))) + if new_contact_category not in YOUTUBE_CONTACT_CATEGORY: + raise YouTubeError('New contact category must be one of %s' % + (' '.join(YOUTUBE_CONTACT_CATEGORY))) + + contact_category = atom.Category( + scheme='http://gdata.youtube.com/schemas/2007/contact.cat', + term=new_contact_category) + + contact_status = gdata.youtube.Status(text=new_contact_status) + contact_entry = gdata.youtube.YouTubeContactEntry( + category=contact_category, + status=contact_status) + + contact_put_uri = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'contacts', contact_username) + + return self.Put(contact_entry, contact_put_uri, + converter=gdata.youtube.YouTubeContactEntryFromString) + + def DeleteContact(self, contact_username, my_username='default'): + """Delete a contact from a users contact feed. + + Needs authentication. + + Args: + contact_username: A string representing the username of the contact + that is to be deleted. + my_username: An optional string representing the username of the user's + contact feed from which to delete the contact. Defaults to the + currently authenticated user. + + Returns: + True if the contact was deleted successfully + """ + contact_edit_uri = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'contacts', contact_username) + return self.Delete(contact_edit_uri) + + def _GetDeveloperKey(self): + """Getter for Developer Key property. + + Returns: + If the developer key has been set, a string representing the developer key + is returned or None. + """ + if 'X-GData-Key' in self.additional_headers: + return self.additional_headers['X-GData-Key'][4:] + else: + return None + + def _SetDeveloperKey(self, developer_key): + """Setter for Developer Key property. + + Sets the developer key in the 'X-GData-Key' header. 
The actual value that + is set is 'key=' plus the developer_key that was passed. + """ + self.additional_headers['X-GData-Key'] = 'key=' + developer_key + + developer_key = property(_GetDeveloperKey, _SetDeveloperKey, + doc="""The Developer Key property""") + + def _GetClientId(self): + """Getter for Client Id property. + + Returns: + If the client_id has been set, a string representing it is returned + or None. + """ + if 'X-Gdata-Client' in self.additional_headers: + return self.additional_headers['X-Gdata-Client'] + else: + return None + + def _SetClientId(self, client_id): + """Setter for Client Id property. + + Sets the 'X-Gdata-Client' header. + """ + self.additional_headers['X-Gdata-Client'] = client_id + + client_id = property(_GetClientId, _SetClientId, + doc="""The ClientId property""") + + def Query(self, uri): + """Performs a query and returns a resulting feed or entry. + + Args: + uri: A string representing the URI of the feed that is to be queried. + + Returns: + On success, a tuple in the form: + (boolean succeeded=True, ElementTree._Element result) + On failure, a tuple in the form: + (boolean succeeded=False, {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response}) + """ + result = self.Get(uri) + return result + + def YouTubeQuery(self, query): + """Performs a YouTube specific query and returns a resulting feed or entry. + + Args: + query: A Query object or one if its sub-classes (YouTubeVideoQuery, + YouTubeUserQuery or YouTubePlaylistQuery). + + Returns: + Depending on the type of Query object submitted returns either a + YouTubeVideoFeed, a YouTubeUserFeed, a YouTubePlaylistFeed. If the + Query object provided was not YouTube-related, a tuple is returned. 
+ On success the tuple will be in this form: + (boolean succeeded=True, ElementTree._Element result) + On failure, the tuple will be in this form: + (boolean succeeded=False, {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server response}) + """ + result = self.Query(query.ToUri()) + if isinstance(query, YouTubeVideoQuery): + return gdata.youtube.YouTubeVideoFeedFromString(result.ToString()) + elif isinstance(query, YouTubeUserQuery): + return gdata.youtube.YouTubeUserFeedFromString(result.ToString()) + elif isinstance(query, YouTubePlaylistQuery): + return gdata.youtube.YouTubePlaylistFeedFromString(result.ToString()) + else: + return result + +class YouTubeVideoQuery(gdata.service.Query): + + """Subclasses gdata.service.Query to represent a YouTube Data API query. + + Attributes are set dynamically via properties. Properties correspond to + the standard Google Data API query parameters with YouTube Data API + extensions. Please refer to the API documentation for details. + + Attributes: + vq: The vq parameter, which is only supported for video feeds, specifies a + search query term. Refer to API documentation for further details. + orderby: The orderby parameter, which is only supported for video feeds, + specifies the value that will be used to sort videos in the search + result set. Valid values for this parameter are relevance, published, + viewCount and rating. + time: The time parameter, which is only available for the top_rated, + top_favorites, most_viewed, most_discussed, most_linked and + most_responded standard feeds, restricts the search to videos uploaded + within the specified time. Valid values for this parameter are today + (1 day), this_week (7 days), this_month (1 month) and all_time. + The default value for this parameter is all_time. + format: The format parameter specifies that videos must be available in a + particular video format. Refer to the API documentation for details. 
+ racy: The racy parameter allows a search result set to include restricted + content as well as standard content. Valid values for this parameter + are include and exclude. By default, restricted content is excluded. + lr: The lr parameter restricts the search to videos that have a title, + description or keywords in a specific language. Valid values for the lr + parameter are ISO 639-1 two-letter language codes. + restriction: The restriction parameter identifies the IP address that + should be used to filter videos that can only be played in specific + countries. + location: A string of geo coordinates. Note that this is not used when the + search is performed but rather to filter the returned videos for ones + that match to the location entered. + """ + + def __init__(self, video_id=None, feed_type=None, text_query=None, + params=None, categories=None): + + if feed_type in YOUTUBE_STANDARDFEEDS: + feed = 'http://%s/feeds/standardfeeds/%s' % (YOUTUBE_SERVER, feed_type) + elif feed_type is 'responses' or feed_type is 'comments' and video_id: + feed = 'http://%s/feeds/videos/%s/%s' % (YOUTUBE_SERVER, video_id, + feed_type) + else: + feed = 'http://%s/feeds/videos' % (YOUTUBE_SERVER) + + gdata.service.Query.__init__(self, feed, text_query=text_query, + params=params, categories=categories) + + def _GetVideoQuery(self): + if 'vq' in self: + return self['vq'] + else: + return None + + def _SetVideoQuery(self, val): + self['vq'] = val + + vq = property(_GetVideoQuery, _SetVideoQuery, + doc="""The video query (vq) query parameter""") + + def _GetOrderBy(self): + if 'orderby' in self: + return self['orderby'] + else: + return None + + def _SetOrderBy(self, val): + if val not in YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS: + if val.startswith('relevance_lang_') is False: + raise YouTubeError('OrderBy must be one of: %s ' % + ' '.join(YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS)) + self['orderby'] = val + + orderby = property(_GetOrderBy, _SetOrderBy, + doc="""The orderby query 
parameter""") + + def _GetTime(self): + if 'time' in self: + return self['time'] + else: + return None + + def _SetTime(self, val): + if val not in YOUTUBE_QUERY_VALID_TIME_PARAMETERS: + raise YouTubeError('Time must be one of: %s ' % + ' '.join(YOUTUBE_QUERY_VALID_TIME_PARAMETERS)) + self['time'] = val + + time = property(_GetTime, _SetTime, + doc="""The time query parameter""") + + def _GetFormat(self): + if 'format' in self: + return self['format'] + else: + return None + + def _SetFormat(self, val): + if val not in YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS: + raise YouTubeError('Format must be one of: %s ' % + ' '.join(YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS)) + self['format'] = val + + format = property(_GetFormat, _SetFormat, + doc="""The format query parameter""") + + def _GetRacy(self): + if 'racy' in self: + return self['racy'] + else: + return None + + def _SetRacy(self, val): + if val not in YOUTUBE_QUERY_VALID_RACY_PARAMETERS: + raise YouTubeError('Racy must be one of: %s ' % + ' '.join(YOUTUBE_QUERY_VALID_RACY_PARAMETERS)) + self['racy'] = val + + racy = property(_GetRacy, _SetRacy, + doc="""The racy query parameter""") + + def _GetLanguageRestriction(self): + if 'lr' in self: + return self['lr'] + else: + return None + + def _SetLanguageRestriction(self, val): + self['lr'] = val + + lr = property(_GetLanguageRestriction, _SetLanguageRestriction, + doc="""The lr (language restriction) query parameter""") + + def _GetIPRestriction(self): + if 'restriction' in self: + return self['restriction'] + else: + return None + + def _SetIPRestriction(self, val): + self['restriction'] = val + + restriction = property(_GetIPRestriction, _SetIPRestriction, + doc="""The restriction query parameter""") + + def _GetLocation(self): + if 'location' in self: + return self['location'] + else: + return None + + def _SetLocation(self, val): + self['location'] = val + + location = property(_GetLocation, _SetLocation, + doc="""The location query parameter""") + + + +class 
YouTubeUserQuery(YouTubeVideoQuery): + + """Subclasses YouTubeVideoQuery to perform user-specific queries. + + Attributes are set dynamically via properties. Properties correspond to + the standard Google Data API query parameters with YouTube Data API + extensions. + """ + + def __init__(self, username=None, feed_type=None, subscription_id=None, + text_query=None, params=None, categories=None): + + uploads_favorites_playlists = ('uploads', 'favorites', 'playlists') + + if feed_type is 'subscriptions' and subscription_id and username: + feed = "http://%s/feeds/users/%s/%s/%s" % (YOUTUBE_SERVER, username, + feed_type, subscription_id) + elif feed_type is 'subscriptions' and not subscription_id and username: + feed = "http://%s/feeds/users/%s/%s" % (YOUTUBE_SERVER, username, + feed_type) + elif feed_type in uploads_favorites_playlists: + feed = "http://%s/feeds/users/%s/%s" % (YOUTUBE_SERVER, username, + feed_type) + else: + feed = "http://%s/feeds/users" % (YOUTUBE_SERVER) + + YouTubeVideoQuery.__init__(self, feed, text_query=text_query, + params=params, categories=categories) + + +class YouTubePlaylistQuery(YouTubeVideoQuery): + + """Subclasses YouTubeVideoQuery to perform playlist-specific queries. + + Attributes are set dynamically via properties. Properties correspond to + the standard Google Data API query parameters with YouTube Data API + extensions. + """ + + def __init__(self, playlist_id, text_query=None, params=None, + categories=None): + if playlist_id: + feed = "http://%s/feeds/playlists/%s" % (YOUTUBE_SERVER, playlist_id) + else: + feed = "http://%s/feeds/playlists" % (YOUTUBE_SERVER) + + YouTubeVideoQuery.__init__(self, feed, text_query=text_query, + params=params, categories=categories) diff --git a/gdata.py-1.2.3/pydocs/atom.html b/gdata.py-1.2.3/pydocs/atom.html new file mode 100644 index 0000000..60e3346 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/atom.html @@ -0,0 +1,2030 @@ + + +Python: package atom + + + + +
 
+ 
atom
index
/usr/local/svn/gdata-python-client/src/atom/__init__.py
+

Contains classes representing Atom elements.

+Module objective: provide data classes for Atom constructs. These classes hide
+the XML-ness of Atom and provide a set of native Python classes to interact 
+with.

+Conversions to and from XML should only be necessary when the Atom classes
+"touch the wire" and are sent over HTTP. For this reason this module 
+provides  methods and functions to convert Atom classes to and from strings.

+For more information on the Atom data model, see RFC 4287 
+(http://www.ietf.org/rfc/rfc4287.txt)

+AtomBase: A foundation class on which Atom classes are built. It 
+    handles the parsing of attributes and children which are common to all
+    Atom classes. By default, the AtomBase class translates all XML child 
+    nodes into ExtensionElements.

+ExtensionElement: Atom allows Atom objects to contain XML which is not part 
+    of the Atom specification, these are called extension elements. If a 
+    class's parser encounters an unexpected XML construct, it is translated
+    into an ExtensionElement instance. ExtensionElement is designed to fully
+    capture the information in the XML. Child nodes in an XML extension are
+    turned into ExtensionElements as well.

+

+ + + + + +
 
+Package Contents
       
core
+http
+
http_interface
+mock_http
+
mock_service
+service
+
token_store
+url
+

+ + + + + +
 
+Classes
       
+
__builtin__.object +
+
+
ExtensionContainer +
+
+
AtomBase +
+
+
Category +
Control +
Date +
+
+
Published +
Updated +
+
+
Draft +
Email +
FeedEntryParent(AtomBase, LinkFinder) +
+
+
Entry +
Source +
+
+
Feed +
+
+
+
+
Generator +
Icon +
Id +
Link +
Logo +
Name +
Person +
+
+
Author +
Contributor +
+
+
Text +
+
+
Content +
Rights +
Subtitle +
Summary +
Title +
+
+
Uri +
+
+
+
+
ExtensionElement +
LinkFinder +
+
+
+

+ + + + + +
 
+class AtomBase(ExtensionContainer)
    
Method resolution order:
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Author(Person)
   The atom:author element

+An author is a required element in Feed.
 
 
Method resolution order:
+
Author
+
Person
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, name=None, email=None, uri=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Author

+Args:
+  name: Name
+  email: Email
+  uri: Uri
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+  text: str The text data in the this element
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Category(AtomBase)
   The atom:category element
 
 
Method resolution order:
+
Category
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, term=None, scheme=None, label=None, text=None, extension_elements=None, extension_attributes=None)
Constructor for Category

+Args:
+  term: str
+  scheme: str
+  label: str
+  text: str The text data in the this element
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Content(Text)
   The atom:content element
 
 
Method resolution order:
+
Content
+
Text
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, content_type=None, src=None, text=None, extension_elements=None, extension_attributes=None)
Constructor for Content

+Args:
+  content_type: string
+  src: string
+  text: str The text data in the this element
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Contributor(Person)
   The atom:contributor element
 
 
Method resolution order:
+
Contributor
+
Person
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, name=None, email=None, uri=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Contributor

+Args:
+  name: Name
+  email: Email
+  uri: Uri
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+  text: str The text data in the this element
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Control(AtomBase)
   The app:control element indicating restrictions on publication.

+The APP control element may contain a draft element indicating whether or
+not this entry should be publicly available.
 
 
Method resolution order:
+
Control
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, draft=None, text=None, extension_elements=None, extension_attributes=None)
Constructor for app:control
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Date(AtomBase)
   A parent class for atom:updated, published, etc.
 
 
Method resolution order:
+
Date
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Draft(AtomBase)
   The app:draft element which indicates if this entry should be public.
 
 
Method resolution order:
+
Draft
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text=None, extension_elements=None, extension_attributes=None)
Constructor for app:draft

+Args:
+  text: str The text data in the this element
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Email(AtomBase)
   The atom:email element
 
 
Method resolution order:
+
Email
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text=None, extension_elements=None, extension_attributes=None)
Constructor for Email

+Args:
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+  text: str The text data in the this element
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Entry(FeedEntryParent)
   The atom:entry element
 
 
Method resolution order:
+
Entry
+
FeedEntryParent
+
AtomBase
+
ExtensionContainer
+
LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, contributor=None, atom_id=None, link=None, published=None, rights=None, source=None, summary=None, control=None, title=None, updated=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for atom:entry

+Args:
+  author: list A list of Author instances which belong to this class.
+  category: list A list of Category instances
+  content: Content The entry's Content
+  contributor: list A list on Contributor instances
+  id: Id The entry's Id element
+  link: list A list of Link instances
+  published: Published The entry's Published element
+  rights: Rights The entry's Rights element
+  source: Source the entry's source element
+  summary: Summary the entry's summary element
+  title: Title the entry's title element
+  updated: Updated the entry's updated element
+  control: The entry's app:control element which can be used to mark an 
+      entry as a draft which should not be publicly viewable.
+  text: String The text contents of the element. This is the contents
+      of the Entry's XML text node. (Example: <foo>This is the text</foo>)
+  extension_elements: list A list of ExtensionElement instances which are
+      children of this element.
+  extension_attributes: dict A dictionary of strings which are the values
+      for additional XML attributes of this element.
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from LinkFinder:
+
GetAlternateLink(self)
+ +
GetEditLink(self)
+ +
GetLicenseLink(self)
+ +
GetNextLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +

+ + + + + +
 
+class ExtensionContainer(__builtin__.object)
    Methods defined here:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class ExtensionElement(__builtin__.object)
   Represents extra XML elements contained in Atom classes.
 
 Methods defined here:
+
FindChildren(self, tag=None, namespace=None)
Searches child nodes for objects with the desired tag/namespace.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all children in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
ToString(self)
+ +
__init__(self, tag, namespace=None, attributes=None, children=None, text=None)
Constructor for ExtensionElement

+Args:
+  namespace: string (optional) The XML namespace for this element.
+  tag: string (optional) The tag (without the namespace qualifier) for
+      this element. To reconstruct the full qualified name of the element,
+      combine this tag with the namespace.
+  attributes: dict (optional) The attribute value string pairs for the XML 
+      attributes of this element.
+  children: list (optional) A list of ExtensionElements which represent 
+      the XML child nodes of this element.
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Feed(Source)
   The atom:feed element
 
 
Method resolution order:
+
Feed
+
Source
+
FeedEntryParent
+
AtomBase
+
ExtensionContainer
+
LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, text=None, extension_elements=None, extension_attributes=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from LinkFinder:
+
GetAlternateLink(self)
+ +
GetEditLink(self)
+ +
GetLicenseLink(self)
+ +
GetNextLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +

+ + + + + + + +
 
+class FeedEntryParent(AtomBase, LinkFinder)
   A super class for atom:feed and entry, contains shared attributes
 
 
Method resolution order:
+
FeedEntryParent
+
AtomBase
+
ExtensionContainer
+
LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, contributor=None, atom_id=None, link=None, rights=None, title=None, updated=None, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from LinkFinder:
+
GetAlternateLink(self)
+ +
GetEditLink(self)
+ +
GetLicenseLink(self)
+ +
GetNextLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +

+ + + + + + + +
 
+class Generator(AtomBase)
   The atom:generator element
 
 
Method resolution order:
+
Generator
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, uri=None, version=None, text=None, extension_elements=None, extension_attributes=None)
Constructor for Generator

+Args:
+  uri: string
+  version: string
+  text: str The text data in the this element
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Icon(AtomBase)
   The atom:icon element.
 
 
Method resolution order:
+
Icon
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text=None, extension_elements=None, extension_attributes=None)
Constructor for Icon

+Args:
+  text: str The text data in the this element
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Id(AtomBase)
   The atom:id element.
 
 
Method resolution order:
+
Id
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text=None, extension_elements=None, extension_attributes=None)
Constructor for Id

+Args:
+  text: str The text data in the this element
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Link(AtomBase)
   The atom:link element
 
 
Method resolution order:
+
Link
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, href=None, rel=None, link_type=None, hreflang=None, title=None, length=None, text=None, extension_elements=None, extension_attributes=None)
Constructor for Link

+Args:
+  href: string The href attribute of the link
+  rel: string
+  type: string
+  hreflang: string The language for the href
+  title: string
+  length: string The length of the href's destination
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+  text: str The text data in the this element
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class LinkFinder(__builtin__.object)
   An "interface" providing methods to find link elements

+Entry elements often contain multiple links which differ in the rel
+attribute or content type. Often, developers are interested in a specific
+type of link so this class provides methods to find specific classes of
+links.

+This class is used as a mixin in Atom entries and feeds.
 
 Methods defined here:
+
GetAlternateLink(self)
+ +
GetEditLink(self)
+ +
GetLicenseLink(self)
+ +
GetNextLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Logo(AtomBase)
   The atom:logo element.
 
 
Method resolution order:
+
Logo
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text=None, extension_elements=None, extension_attributes=None)
Constructor for Logo

+Args:
+  text: str The text data in the this element
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Name(AtomBase)
   The atom:name element
 
 
Method resolution order:
+
Name
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text=None, extension_elements=None, extension_attributes=None)
Constructor for Name

+Args:
+  text: str The text data in the this element
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Person(AtomBase)
   A foundation class from which atom:author and atom:contributor extend.

+A person contains information like name, email address, and web page URI for
+an author or contributor to an Atom feed.
 
 
Method resolution order:
+
Person
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, name=None, email=None, uri=None, extension_elements=None, extension_attributes=None, text=None)
Foundation from which author and contributor are derived.

+The constructor is provided for illustrative purposes, you should not
+need to instantiate a Person.

+Args:
+  name: Name The person's name
+  email: Email The person's email address
+  uri: Uri The URI of the person's webpage
+  extension_elements: list A list of ExtensionElement instances which are
+      children of this element.
+  extension_attributes: dict A dictionary of strings which are the values
+      for additional XML attributes of this element.
+  text: String The text contents of the element. This is the contents
+      of the Entry's XML text node. (Example: <foo>This is the text</foo>)
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Published(Date)
   The atom:published element.
 
 
Method resolution order:
+
Published
+
Date
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text=None, extension_elements=None, extension_attributes=None)
Constructor for Published

+Args:
+  text: str The text data in the this element
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Rights(Text)
   The atom:rights element
 
 
Method resolution order:
+
Rights
+
Text
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, rights_type=None, text=None, extension_elements=None, extension_attributes=None)
Constructor for Rights

+Args:
+  rights_type: string
+  text: str The text data in the this element
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Source(FeedEntryParent)
   The atom:source element
 
 
Method resolution order:
+
Source
+
FeedEntryParent
+
AtomBase
+
ExtensionContainer
+
LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, text=None, extension_elements=None, extension_attributes=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional)
+  icon: Icon (optional)
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional)
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  text: String (optional) The text contents of the element. This is the
+      contents of the Entry's XML text node.
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are
+      the values for additional XML attributes of this element.
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from LinkFinder:
+
GetAlternateLink(self)
+ +
GetEditLink(self)
+ +
GetLicenseLink(self)
+ +
GetNextLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +

+ + + + + + + +
 
+class Subtitle(Text)
   The atom:subtitle element
 
 
Method resolution order:
+
Subtitle
+
Text
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, subtitle_type=None, text=None, extension_elements=None, extension_attributes=None)
Constructor for Subtitle

+Args:
+  subtitle_type: string
+  text: str The text data in the this element
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Summary(Text)
   The atom:summary element
 
 
Method resolution order:
+
Summary
+
Text
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, summary_type=None, text=None, extension_elements=None, extension_attributes=None)
Constructor for Summary

+Args:
+  summary_type: string
+  text: str The text data in the this element
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Text(AtomBase)
   A foundation class from which atom:title, summary, etc. extend.

+This class should never be instantiated.
 
 
Method resolution order:
+
Text
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text_type=None, text=None, extension_elements=None, extension_attributes=None)
Constructor for Text

+Args:
+  text_type: string
+  text: str The text data in the this element
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Title(Text)
   The atom:title element
 
 
Method resolution order:
+
Title
+
Text
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, title_type=None, text=None, extension_elements=None, extension_attributes=None)
Constructor for Title

+Args:
+  title_type: string
+  text: str The text data in the this element
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Updated(Date)
   The atom:updated element.
 
 
Method resolution order:
+
Updated
+
Date
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text=None, extension_elements=None, extension_attributes=None)
Constructor for Updated

+Args:
+  text: str The text data in the this element
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Uri(AtomBase)
   The atom:uri element
 
 
Method resolution order:
+
Uri
+
AtomBase
+
ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text=None, extension_elements=None, extension_attributes=None)
Constructor for Uri

+Args:
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+  text: str The text data in the this element
+ +
+Methods inherited from AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Functions
       
AuthorFromString(xml_string)
+
CategoryFromString(xml_string)
+
ContentFromString(xml_string)
+
ContributorFromString(xml_string)
+
ControlFromString(xml_string)
+
CreateClassFromXMLString(target_class, xml_string, string_encoding=None)
Creates an instance of the target class from the string contents.

+Args:
+  target_class: class The class which will be instantiated and populated
+      with the contents of the XML. This class must have a _tag and a
+      _namespace class variable.
+  xml_string: str A string which contains valid XML. The root element
+      of the XML string should match the tag and namespace of the desired
+      class.
+  string_encoding: str The character encoding which the xml_string should
+      be converted to before it is interpreted and translated into 
+      objects. The default is None in which case the string encoding
+      is not changed.

+Returns:
+  An instance of the target class with members assigned according to the
+  contents of the XML - or None if the root XML tag and namespace did not
+  match those of the target class.
+
DraftFromString(xml_string)
+
EmailFromString(xml_string)
+
EntryFromString(xml_string)
+
ExtensionElementFromString(xml_string)
+
FeedFromString(xml_string)
+
GeneratorFromString(xml_string)
+
IconFromString(xml_string)
+
IdFromString(xml_string)
+
LinkFromString(xml_string)
+
LogoFromString(xml_string)
+
NameFromString(xml_string)
+
PublishedFromString(xml_string)
+
RightsFromString(xml_string)
+
SourceFromString(xml_string)
+
SubtitleFromString(xml_string)
+
SummaryFromString(xml_string)
+
TitleFromString(xml_string)
+
UpdatedFromString(xml_string)
+
UriFromString(xml_string)
+

+ + + + + +
 
+Data
       APP_NAMESPACE = 'http://purl.org/atom/app#'
+APP_TEMPLATE = '{http://purl.org/atom/app#}%s'
+ATOM_NAMESPACE = 'http://www.w3.org/2005/Atom'
+ELEMENT_TEMPLATE = '{http://www.w3.org/2005/Atom}%s'
+MEMBER_STRING_ENCODING = 'utf-8'
+XML_STRING_ENCODING = 'utf-8'
+__author__ = 'api.jscudder (Jeffrey Scudder)'

+ + + + + +
 
+Author
       api.jscudder (Jeffrey Scudder)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/atom.http_interface.html b/gdata.py-1.2.3/pydocs/atom.http_interface.html new file mode 100644 index 0000000..b2023ab --- /dev/null +++ b/gdata.py-1.2.3/pydocs/atom.http_interface.html @@ -0,0 +1,356 @@ + + +Python: module atom.http_interface + + + + +
 
+ 
atom.http_interface
index
/usr/local/svn/gdata-python-client/src/atom/http_interface.py
+

This module provides a common interface for all HTTP requests.

+HttpResponse: Represents the server's response to an HTTP request. Provides
+    an interface identical to httplib.HTTPResponse which is the response
+    expected from higher level classes which use HttpClient.request.

+GenericHttpClient: Provides an interface (superclass) for an object 
+    responsible for making HTTP requests. Subclasses of this object are
+    used in AtomService and GDataService to make requests to the server. By
+    changing the http_client member object, the AtomService is able to make
+    HTTP requests using different logic (for example, when running on 
+    Google App Engine, the http_client makes requests using the App Engine
+    urlfetch API).

+

+ + + + + +
 
+Modules
       
StringIO
+

+ + + + + +
 
+Classes
       
+
__builtin__.object +
+
+
GenericHttpClient +
GenericToken +
HttpResponse +
+
+
exceptions.Exception(exceptions.BaseException) +
+
+
Error +
+
+
ContentLengthRequired +
UnparsableUrlObject +
+
+
+
+
+

+ + + + + +
 
+class ContentLengthRequired(Error)
    
Method resolution order:
+
ContentLengthRequired
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+class Error(exceptions.Exception)
    
Method resolution order:
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors defined here:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+class GenericHttpClient(__builtin__.object)
    Methods defined here:
+
__init__(self, http_client, headers=None)
Args:
+  http_client: An object which provides a request method to make an HTTP 
+      request. The request method in GenericHttpClient performs a 
+      call-through to the contained HTTP client object.
+  headers: A dictionary containing HTTP headers which should be included
+      in every HTTP request. Common persistent headers include 
+      'User-Agent'.
+ +
delete(self, url, headers=None)
+ +
get(self, url, headers=None)
+ +
post(self, url, data, headers=None)
+ +
put(self, url, data, headers=None)
+ +
request(self, operation, url, data=None, headers=None)
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Data and other attributes defined here:
+
debug = False
+ +

+ + + + + + + +
 
+class GenericToken(__builtin__.object)
   Represents an Authorization token to be added to HTTP requests.

+Some Authorization headers included calculated fields (digital
+signatures for example) which are based on the parameters of the HTTP
+request. Therefore the token is responsible for signing the request
+and adding the Authorization header.
 
 Methods defined here:
+
perform_request(self, http_client, operation, url, data=None, headers=None)
For the GenericToken, no Authorization token is set.
+ +
valid_for_scope(self, url)
Tells the caller if the token authorizes access to the desired URL.

+Since the generic token doesn't add an auth header, it is not valid for
+any scope.
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class HttpResponse(__builtin__.object)
    Methods defined here:
+
__init__(self, body=None, status=None, reason=None, headers=None)
Constructor for an HttpResponse object

+HttpResponse represents the server's response to an HTTP request from
+the client. The HttpClient.request method returns a httplib.HTTPResponse
+object and this HttpResponse class is designed to mirror the interface
+exposed by httplib.HTTPResponse.

+Args:
+  body: A file like object, with a read() method. The body could also
+      be a string, and the constructor will wrap it so that 
+      HttpResponse.read(self) will return the full string.
+  status: The HTTP status code as an int. Example: 200, 201, 404.
+  reason: The HTTP status message which follows the code. Example: 
+      OK, Created, Not Found
+  headers: A dictionary containing the HTTP headers in the server's 
+      response. A common header in the response is Content-Length.
+ +
getheader(self, name, default=None)
+ +
read(self, amt=None)
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class UnparsableUrlObject(Error)
    
Method resolution order:
+
UnparsableUrlObject
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+Data
       USER_AGENT = '%s GData-Python/1.2.2'
+__author__ = 'api.jscudder (Jeff Scudder)'

+ + + + + +
 
+Author
       api.jscudder (Jeff Scudder)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/atom.mock_http.html b/gdata.py-1.2.3/pydocs/atom.mock_http.html new file mode 100644 index 0000000..f8136fb --- /dev/null +++ b/gdata.py-1.2.3/pydocs/atom.mock_http.html @@ -0,0 +1,329 @@ + + +Python: module atom.mock_http + + + + +
 
+ 
atom.mock_http
index
/usr/local/svn/gdata-python-client/src/atom/mock_http.py
+

# Copyright (C) 2008 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+

+ + + + + +
 
+Modules
       
atom
+

+ + + + + +
 
+Classes
       
+
__builtin__.object +
+
+
MockRequest +
+
+
atom.http_interface.GenericHttpClient(__builtin__.object) +
+
+
MockHttpClient +
+
+
atom.http_interface.HttpResponse(__builtin__.object) +
+
+
MockResponse +
+
+
exceptions.Exception(exceptions.BaseException) +
+
+
Error +
+
+
NoRecordingFound +
+
+
+
+
+

+ + + + + +
 
+class Error(exceptions.Exception)
    
Method resolution order:
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors defined here:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+class MockHttpClient(atom.http_interface.GenericHttpClient)
    
Method resolution order:
+
MockHttpClient
+
atom.http_interface.GenericHttpClient
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, headers=None, recordings=None, real_client=None)
An HttpClient which responds to request with stored data.

+The request-response pairs are stored as tuples in a member list named
+recordings.

+The MockHttpClient can be switched from replay mode to record mode by
+setting the real_client member to an instance of an HttpClient which will
+make real HTTP requests and store the server's response in list of 
+recordings.

+Args:
+  headers: dict containing HTTP headers which should be included in all
+      HTTP requests.
+  recordings: The initial recordings to be used for responses. This list
+      contains tuples in the form: (MockRequestMockResponse)
+  real_client: An HttpClient which will make a real HTTP request. The 
+      response will be converted into a MockResponse and stored in 
+      recordings.
+ +
add_response(self, response, operation, url, data=None, headers=None)
Adds a request-response pair to the recordings list.

+After the recording is added, future matching requests will receive the
+response.

+Args:
+  response: MockResponse
+  operation: str
+  url: str
+  data: str, Currently the data is ignored when looking for matching
+      requests.
+  headers: dict of strings: Currently the headers are ignored when
+      looking for matching requests.
+ +
request(self, operation, url, data=None, headers=None)
Returns a matching MockResponse from the recordings.

+If the real_client is set, the request will be passed along and the 
+server's response will be added to the recordings and also returned. 

+If there is no match, a NoRecordingFound error will be raised.
+ +
+Methods inherited from atom.http_interface.GenericHttpClient:
+
delete(self, url, headers=None)
+ +
get(self, url, headers=None)
+ +
post(self, url, data, headers=None)
+ +
put(self, url, data, headers=None)
+ +
+Data descriptors inherited from atom.http_interface.GenericHttpClient:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Data and other attributes inherited from atom.http_interface.GenericHttpClient:
+
debug = False
+ +

+ + + + + + + +
 
+class MockRequest(__builtin__.object)
   Holds parameters of an HTTP request for matching against future requests.
 
 Methods defined here:
+
__init__(self, operation, url, data=None, headers=None)
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class MockResponse(atom.http_interface.HttpResponse)
   Simulates an httplib.HTTPResponse object.
 
 
Method resolution order:
+
MockResponse
+
atom.http_interface.HttpResponse
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, body=None, status=None, reason=None, headers=None)
+ +
read(self)
+ +
+Methods inherited from atom.http_interface.HttpResponse:
+
getheader(self, name, default=None)
+ +
+Data descriptors inherited from atom.http_interface.HttpResponse:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class NoRecordingFound(Error)
    
Method resolution order:
+
NoRecordingFound
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+Data
       __author__ = 'api.jscudder (Jeff Scudder)'

+ + + + + +
 
+Author
       api.jscudder (Jeff Scudder)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/atom.mock_service.html b/gdata.py-1.2.3/pydocs/atom.mock_service.html new file mode 100644 index 0000000..4476880 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/atom.mock_service.html @@ -0,0 +1,200 @@ + + +Python: module atom.mock_service + + + + +
 
+ 
atom.mock_service
index
/usr/local/svn/gdata-python-client/src/atom/mock_service.py
+

MockService provides CRUD ops. for mocking calls to AtomPub services.

+MockService: Exposes the publicly used methods of AtomService to provide
+    a mock interface which can be used in unit tests.

+

+ + + + + +
 
+Modules
       
atom
+
pickle
+

+ + + + + +
 
+Classes
       
+
__builtin__.object +
+
+
MockHttpResponse +
MockRequest +
+
+
+

+ + + + + + + +
 
+class MockHttpResponse(__builtin__.object)
   Returned from MockService crud methods as the server's response.
 
 Methods defined here:
+
__init__(self, body=None, status=None, reason=None, headers=None)
Construct a mock HTTPResponse and set members.

+Args:
+  body: str (optional) The HTTP body of the server's response. 
+  status: int (optional) 
+  reason: str (optional)
+  headers: dict (optional)
+ +
getheader(self, header_name)
+ +
read(self)
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class MockRequest(__builtin__.object)
   Represents a request made to an AtomPub server.

+These objects are used to determine if a client request matches a recorded
+HTTP request to determine what the mock server's response will be.
 
 Methods defined here:
+
ConcealSecrets(self, conceal_func)
Conceal secret data in this request.
+ +
IsMatch(self, other_request)
Check to see if the other_request is equivalent to this request.

+Used to determine if a recording matches an incoming request so that a
+recorded response should be sent to the client.

+The matching is not exact, only the operation and URL are examined 
+currently.

+Args:
+  other_request: MockRequest The request which we want to check this
+      (self) MockRequest against to see if they are equivalent.
+ +
__init__(self, operation, uri, host=None, ssl=False, port=None, data=None, extra_headers=None, url_params=None, escape_params=True, content_type='application/atom+xml')
Constructor for a MockRequest

+Args:
+  operation: str One of 'GET', 'POST', 'PUT', or 'DELETE' this is the
+      HTTP operation requested on the resource.
+  uri: str The URL describing the resource to be modified or feed to be
+      retrieved. This should include the protocol (http/https) and the host
+      (aka domain). For example, these are some valud full_uris:
+      'http://example.com', 'https://www.google.com/accounts/ClientLogin'
+  host: str (optional) The server name which will be placed at the 
+      beginning of the URL if the uri parameter does not begin with 'http'.
+      Examples include 'example.com', 'www.google.com', 'www.blogger.com'.
+  ssl: boolean (optional) If true, the request URL will begin with https 
+      instead of http.
+  data: ElementTree, filestream, list of parts, or other object which can be
+      converted to a string. (optional)
+      Should be set to None when performing a GET or PUT.
+      If data is a file-like object which can be read, the constructor 
+      will read the entire file into memory. If the data is a list of 
+      parts to be sent, each part will be evaluated and stored.
+  extra_headers: dict (optional) HTTP headers included in the request.
+  url_params: dict (optional) Key value pairs which should be added to 
+      the URL as URL parameters in the request. For example uri='/', 
+      url_parameters={'foo':'1','bar':'2'} could become '/?foo=1&bar=2'.
+  escape_params: boolean (optional) Perform URL escaping on the keys and 
+      values specified in url_params. Defaults to True.
+  content_type: str (optional) Provides the MIME type of the data being 
+      sent.
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Functions
       
ConcealValueWithSha(source)
+
DumpRecordings(conceal_func=<function ConcealValueWithSha at 0xaf81b8>)
+
HttpRequest(service, operation, data, uri, extra_headers=None, url_params=None, escape_params=True, content_type='application/atom+xml')
Simulates an HTTP call to the server, makes an actual HTTP request if 
+real_request_handler is set.

+This function operates in two different modes depending on if 
+real_request_handler is set or not. If real_request_handler is not set,
+HttpRequest will look in this module's recordings list to find a response
+which matches the parameters in the function call. If real_request_handler
+is set, this function will call real_request_handler.HttpRequest, add the
+response to the recordings list, and respond with the actual response.

+Args:
+  service: atom.AtomService object which contains some of the parameters
+      needed to make the request. The following members are used to
+      construct the HTTP call: server (str), additional_headers (dict),
+      port (int), and ssl (bool).
+  operation: str The HTTP operation to be performed. This is usually one of
+      'GET', 'POST', 'PUT', or 'DELETE'
+  data: ElementTree, filestream, list of parts, or other object which can be
+      converted to a string.
+      Should be set to None when performing a GET or PUT.
+      If data is a file-like object which can be read, this method will read
+      a chunk of 100K bytes at a time and send them.
+      If the data is a list of parts to be sent, each part will be evaluated
+      and sent.
+  uri: The beginning of the URL to which the request should be sent.
+      Examples: '/', '/base/feeds/snippets',
+      '/m8/feeds/contacts/default/base'
+  extra_headers: dict of strings. HTTP headers which should be sent
+      in the request. These headers are in addition to those stored in
+      service.additional_headers.
+  url_params: dict of strings. Key value pairs to be added to the URL as
+      URL parameters. For example {'foo':'bar', 'test':'param'} will
+      become ?foo=bar&test=param.
+  escape_params: bool default True. If true, the keys and values in
+      url_params will be URL escaped when the form is constructed
+      (Special characters converted to %XX form.)
+  content_type: str The MIME type for the data being sent. Defaults to
+      'application/atom+xml', this is only used if data is set.
+
LoadRecordings(recordings_file_or_string)
+

+ + + + + +
 
+Data
       __author__ = 'api.jscudder (Jeffrey Scudder)'
+real_request_handler = None
+recordings = []

+ + + + + +
 
+Author
       api.jscudder (Jeffrey Scudder)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/atom.service.html b/gdata.py-1.2.3/pydocs/atom.service.html new file mode 100644 index 0000000..4b43259 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/atom.service.html @@ -0,0 +1,412 @@ + + +Python: module atom.service + + + + +
 
+ 
atom.service
index
/usr/local/svn/gdata-python-client/src/atom/service.py
+

AtomService provides CRUD ops. in line with the Atom Publishing Protocol.

+AtomService: Encapsulates the ability to perform insert, update and delete
+             operations with the Atom Publishing Protocol on which GData is
+             based. An instance can perform query, insertion, deletion, and
+             update.

+HttpRequest: Function that performs a GET, POST, PUT, or DELETE HTTP request
+     to the specified end point. An AtomService object or a subclass can be
+     used to specify information about the request.

+

+ + + + + +
 
+Modules
       
xml.etree.cElementTree
+atom
+base64
+
httplib
+os
+re
+
socket
+urllib
+warnings
+

+ + + + + +
 
+Classes
       
+
__builtin__.object +
+
+
AtomService +
+
+
atom.http_interface.GenericToken(__builtin__.object) +
+
+
BasicAuthToken +
+
+
+

+ + + + + + + +
 
+class AtomService(__builtin__.object)
   Performs Atom Publishing Protocol CRUD operations.

+The AtomService contains methods to perform HTTP CRUD operations.
 
 Methods defined here:
+
Delete(self, uri, extra_headers=None, url_params=None, escape_params=True)
Deletes the entry at the given URI.

+Args:
+  uri: string The URI of the entry to be deleted. Example: 
+       '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.

+Returns:
+  httplib.HTTPResponse Server's response to the DELETE request.
+ +
Get(self, uri, extra_headers=None, url_params=None, escape_params=True)
Query the APP server with the given URI

+The uri is the portion of the URI after the server value 
+(server example: 'www.google.com').

+Example use:
+To perform a query against Google Base, set the server to 
+'base.google.com' and set the uri to '/base/feeds/...', where ... is 
+your query. For example, to find snippets for all digital cameras uri 
+should be set to: '/base/feeds/snippets?bq=digital+camera'

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dicty (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to 
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and 
+                 Authorization headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the query. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.

+Returns:
+  httplib.HTTPResponse The server's response to the GET request.
+ +
Post(self, data, uri, extra_headers=None, url_params=None, escape_params=True, content_type='application/atom+xml')
Insert data into an APP server at the given URI.

+Args:
+  data: string, ElementTree._Element, or something with a __str__ method 
+        The XML to be sent to the uri. 
+  uri: string The location (feed) to which the data should be inserted. 
+       Example: '/base/feeds/items'. 
+  extra_headers: dict (optional) HTTP headers which are to be included. 
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.

+Returns:
+  httplib.HTTPResponse Server's response to the POST request.
+ +
Put(self, data, uri, extra_headers=None, url_params=None, escape_params=True, content_type='application/atom+xml')
Updates an entry at the given URI.

+Args:
+  data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The 
+        XML containing the updated data.
+  uri: string A URI indicating entry to which the update will be applied.
+       Example: '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.

+Returns:
+  httplib.HTTPResponse Server's response to the PUT request.
+ +
UseBasicAuth(self, username, password, for_proxy=False)
Sets an Authenticaiton: Basic HTTP header containing plaintext.

+Deprecated, use use_basic_auth instead.

+The username and password are base64 encoded and added to an HTTP header
+which will be included in each request. Note that your username and 
+password are sent in plaintext.

+Args:
+  username: str
+  password: str
+ +
__init__(self, server=None, additional_headers=None, application_name='', http_client=None, token_store=None)
Creates a new AtomService client.

+Args:
+  server: string (optional) The start of a URL for the server
+          to which all operations should be directed. Example: 
+          'www.google.com'
+  additional_headers: dict (optional) Any additional HTTP headers which
+                      should be included with CRUD operations.
+  http_client: An object responsible for making HTTP requests using a
+               request method. If none is provided, a new instance of
+               atom.http.ProxiedHttpClient will be used.
+  token_store: Keeps a collection of authorization tokens which can be
+               applied to requests for a specific URLs. Critical methods are
+               find_token based on a URL (atom.url.Url or a string), add_token,
+               and remove_token.
+ +
request(self, operation, url, data=None, headers=None, url_params=None)
+ +
use_basic_auth(self, username, password, scopes=None)
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
debug
+
If True, HTTP debug information is printed.
+
+
override_token
+
+
+Data and other attributes defined here:
+
auto_set_current_token = True
+ +
auto_store_tokens = True
+ +
current_token = None
+ +
port = 80
+ +
ssl = False
+ +

+ + + + + +
 
+class BasicAuthToken(atom.http_interface.GenericToken)
    
Method resolution order:
+
BasicAuthToken
+
atom.http_interface.GenericToken
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, auth_header, scopes=None)
Creates a token used to add Basic Auth headers to HTTP requests.

+Args:
+  auth_header: str The value for the Authorization header.
+  scopes: list of str or atom.url.Url specifying the beginnings of URLs
+      for which this token can be used. For example, if scopes contains
+      'http://example.com/foo', then this token can be used for a request to
+      'http://example.com/foo/bar' but it cannot be used for a request to
+      'http://example.com/baz'
+ +
__str__(self)
+ +
perform_request(self, http_client, operation, url, data=None, headers=None)
Sets the Authorization header to the basic auth string.
+ +
valid_for_scope(self, url)
Tells the caller if the token authorizes access to the desired URL.
+ +
+Data descriptors inherited from atom.http_interface.GenericToken:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Functions
       
BuildUri(uri, url_params=None, escape_params=True)
Converts a uri string and a collection of parameters into a URI.

+This function is deprcated, use atom.url.Url instead.

+Args:
+  uri: string
+  url_params: dict (optional)
+  escape_params: boolean (optional)
+  uri: string The start of the desired URI. This string can alrady contain
+       URL parameters. Examples: '/base/feeds/snippets', 
+       '/base/feeds/snippets?bq=digital+camera'
+  url_parameters: dict (optional) Additional URL parameters to be included
+                  in the query. These are translated into query arguments
+                  in the form '&dict_key=value&...'.
+                  Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.

+Returns:
+  string The URI consisting of the escaped URL parameters appended to the
+  initial uri string.
+
CalculateDataLength(data)
Attempts to determine the length of the data to send. 

+This method will respond with a length only if the data is a string or
+and ElementTree element.

+Args:
+  data: object If this is not a string or ElementTree element this funtion
+      will return None.
+
DictionaryToParamList(url_parameters, escape_params=True)
Convert a dictionary of URL arguments into a URL parameter string.

+This function is deprcated, use atom.url.Url instead.

+Args:
+  url_parameters: The dictionaty of key-value pairs which will be converted
+                  into URL parameters. For example,
+                  {'dry-run': 'true', 'foo': 'bar'}
+                  will become ['dry-run=true', 'foo=bar'].

+Returns:
+  A list which contains a string for each key-value pair. The strings are
+  ready to be incorporated into a URL by using '&'.join([] + parameter_list)
+
HttpRequest(service, operation, data, uri, extra_headers=None, url_params=None, escape_params=True, content_type='application/atom+xml')
Performs an HTTP call to the server, supports GET, POST, PUT, and DELETE.

+This method is deprecated, use atom.http.HttpClient.request instead.

+Usage example, perform and HTTP GET on http://www.google.com/:
+  import atom.service
+  client = atom.service.AtomService()
+  http_response = client.Get('http://www.google.com/')
+or you could set the client.server to 'www.google.com' and use the 
+following:
+  client.server = 'www.google.com'
+  http_response = client.Get('/')

+Args:
+  service: atom.AtomService object which contains some of the parameters 
+      needed to make the request. The following members are used to 
+      construct the HTTP call: server (str), additional_headers (dict), 
+      port (int), and ssl (bool).
+  operation: str The HTTP operation to be performed. This is usually one of
+      'GET', 'POST', 'PUT', or 'DELETE'
+  data: ElementTree, filestream, list of parts, or other object which can be 
+      converted to a string. 
+      Should be set to None when performing a GET or PUT.
+      If data is a file-like object which can be read, this method will read
+      a chunk of 100K bytes at a time and send them. 
+      If the data is a list of parts to be sent, each part will be evaluated
+      and sent.
+  uri: The beginning of the URL to which the request should be sent. 
+      Examples: '/', '/base/feeds/snippets', 
+      '/m8/feeds/contacts/default/base'
+  extra_headers: dict of strings. HTTP headers which should be sent
+      in the request. These headers are in addition to those stored in 
+      service.additional_headers.
+  url_params: dict of strings. Key value pairs to be added to the URL as
+      URL parameters. For example {'foo':'bar', 'test':'param'} will 
+      become ?foo=bar&test=param.
+  escape_params: bool default True. If true, the keys and values in 
+      url_params will be URL escaped when the form is constructed 
+      (Special characters converted to %XX form.)
+  content_type: str The MIME type for the data being sent. Defaults to
+      'application/atom+xml', this is only used if data is set.
+
PrepareConnection(service, full_uri)
Opens a connection to the server based on the full URI.

+This method is deprecated, instead use atom.http.HttpClient.request.

+Examines the target URI and the proxy settings, which are set as
+environment variables, to open a connection with the server. This
+connection is used to make an HTTP request.

+Args:
+  service: atom.AtomService or a subclass. It must have a server string which
+    represents the server host to which the request should be made. It may also
+    have a dictionary of additional_headers to send in the HTTP request.
+  full_uri: str Which is the target relative (lacks protocol and host) or
+  absolute URL to be opened. Example:
+  'https://www.google.com/accounts/ClientLogin' or
+  'base/feeds/snippets' where the server is set to www.google.com.

+Returns:
+  A tuple containing the httplib.HTTPConnection and the full_uri for the
+  request.
+
ProcessUrl(service, url, for_proxy=False)
Processes a passed URL.  If the URL does not begin with https?, then
+the default value for server is used

+This method is deprecated, use atom.url.parse_url instead.
+
UseBasicAuth(service, username, password, for_proxy=False)
Sets an Authentication: Basic HTTP header containing plaintext.

+Deprecated, use AtomService.use_basic_auth instead.

+The username and password are base64 encoded and added to an HTTP header
+which will be included in each request. Note that your username and 
+password are sent in plaintext. The auth header is added to the 
+additional_headers dictionary in the service object.

+Args:
+  service: atom.AtomService or a subclass which has an 
+      additional_headers dict as a member.
+  username: str
+  password: str
+
deprecation(message)
+

+ + + + + +
 
+Data
       __author__ = 'api.jscudder (Jeff Scudder)'

+ + + + + +
 
+Author
       api.jscudder (Jeff Scudder)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/atom.token_store.html b/gdata.py-1.2.3/pydocs/atom.token_store.html new file mode 100644 index 0000000..305ec20 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/atom.token_store.html @@ -0,0 +1,119 @@ + + +Python: module atom.token_store + + + + +
 
+ 
atom.token_store
index
/usr/local/svn/gdata-python-client/src/atom/token_store.py
+

This module provides a TokenStore class which is designed to manage
+auth tokens required for different services.

+Each token is valid for a set of scopes which is the start of a URL. An HTTP
+client will use a token store to find a valid Authorization header to send
+in requests to the specified URL. If the HTTP client determines that a token
+has expired or been revoked, it can remove the token from the store so that
+it will not be used in future requests.

+

+ + + + + +
 
+Modules
       
atom
+

+ + + + + +
 
+Classes
       
+
__builtin__.object +
+
+
TokenStore +
+
+
+

+ + + + + + + +
 
+class TokenStore(__builtin__.object)
   Manages Authorization tokens which will be sent in HTTP headers.
 
 Methods defined here:
+
__init__(self, scoped_tokens=None)
+ +
add_token(self, token)
Adds a new token to the store (replaces tokens with the same scope).

+Args:
+  token: A subclass of http_interface.GenericToken. The token object is 
+      responsible for adding the Authorization header to the HTTP request.
+      The scopes defined in the token are used to determine if the token
+      is valid for a requested scope when find_token is called.

+Returns:
+  True if the token was added, False if the token was not added because
+  no scopes were provided.
+ +
find_token(self, url)
Selects an Authorization header token which can be used for the URL.

+Args:
+  url: str or atom.url.Url or a list containing the same.
+      The URL which is going to be requested. All
+      tokens are examined to see if any scopes match the beginning
+      of the URL. The first match found is returned.

+Returns:
+  The token object which should execute the HTTP request. If there was
+  no token for the url (the url did not begin with any of the token
+  scopes available), then the atom.http_interface.GenericToken will be 
+  returned because the GenericToken calls through to the http client
+  without adding an Authorization header.
+ +
remove_all_tokens(self)
+ +
remove_token(self, token)
Removes the token from the token_store.

+This method is used when a token is determined to be invalid. If the
+token was found by find_token, but resulted in a 401 or 403 error stating
+that the token was invalid, then the token should be removed to prevent
+future use.

+Returns:
+  True if a token was found and then removed from the token
+  store. False if the token was not in the TokenStore.
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Data
       SCOPE_ALL = 'http'
+__author__ = 'api.jscudder (Jeff Scudder)'

+ + + + + +
 
+Author
       api.jscudder (Jeff Scudder)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/atom.url.html b/gdata.py-1.2.3/pydocs/atom.url.html new file mode 100644 index 0000000..61d4203 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/atom.url.html @@ -0,0 +1,112 @@ + + +Python: module atom.url + + + + +
 
+ 
atom.url
index
/usr/local/svn/gdata-python-client/src/atom/url.py
+

# Copyright (C) 2008 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+

+ + + + + +
 
+Modules
       
urllib
+
urlparse
+

+ + + + + +
 
+Classes
       
+
__builtin__.object +
+
+
Url +
+
+
+

+ + + + + + + +
 
+class Url(__builtin__.object)
   Represents a URL and implements comparison logic.

+URL strings which are not identical can still be equivalent, so this object
+provides a better interface for comparing and manipulating URLs than 
+strings. URL parameters are represented as a dictionary of strings, and
+defaults are used for the protocol (http) and port (80) if not provided.
 
 Methods defined here:
+
__cmp__(self, other)
+ +
__init__(self, protocol=None, host=None, port=None, path=None, params=None)
+ +
__str__(self)
+ +
get_param_string(self)
+ +
get_request_uri(self)
Returns the path with the parameters escaped and appended.
+ +
to_string(self)
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Functions
       
parse_url(url_string)
Creates a Url object which corresponds to the URL string.

+This method can accept partial URLs, but it will leave missing
+members of the Url unset.
+

+ + + + + +
 
+Data
       DEFAULT_PORT = 80
+DEFAULT_PROTOCOL = 'http'
+__author__ = 'api.jscudder (Jeff Scudder)'

+ + + + + +
 
+Author
       api.jscudder (Jeff Scudder)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.alt.appengine.html b/gdata.py-1.2.3/pydocs/gdata.alt.appengine.html new file mode 100644 index 0000000..d114e21 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.alt.appengine.html @@ -0,0 +1,513 @@ + + +Python: module gdata.alt.appengine + + + + +
 
+ 
gdata.alt.appengine
index
/usr/local/svn/gdata-python-client-release/src/gdata/alt/appengine.py
+

Provides HTTP functions for gdata.service to use on Google App Engine

+AppEngineHttpClient: Provides an HTTP request method which uses App Engine's
+   urlfetch API. Set the http_client member of a GDataService object to an
+   instance of an AppEngineHttpClient to allow the gdata library to run on
+   Google App Engine.

+run_on_appengine: Function which will modify an existing GDataService object
+   to allow it to run on App Engine. It works by creating a new instance of
+   the AppEngineHttpClient and replacing the GDataService object's
+   http_client.

+

+ + + + + +
 
+Modules
       
StringIO
+atom
+
google.appengine.ext.db
+google.appengine.api.memcache
+
pickle
+google.appengine.api.urlfetch
+
google.appengine.api.users
+

+ + + + + +
 
+Classes
       
+
__builtin__.object +
+
+
HttpResponse +
+
+
atom.http_interface.GenericHttpClient(__builtin__.object) +
+
+
AppEngineHttpClient +
+
+
atom.token_store.TokenStore(__builtin__.object) +
+
+
AppEngineTokenStore +
+
+
google.appengine.ext.db.Model(__builtin__.object) +
+
+
TokenCollection +
+
+
+

+ + + + + +
 
+class AppEngineHttpClient(atom.http_interface.GenericHttpClient)
    
Method resolution order:
+
AppEngineHttpClient
+
atom.http_interface.GenericHttpClient
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, headers=None)
+ +
request(self, operation, url, data=None, headers=None)
Performs an HTTP call to the server, supports GET, POST, PUT, and
+DELETE.

+Usage example, perform an HTTP GET on http://www.google.com/:
+  import atom.http
+  client = atom.http.HttpClient()
+  http_response = client.request('GET', 'http://www.google.com/')

+Args:
+  operation: str The HTTP operation to be performed. This is usually one
+      of 'GET', 'POST', 'PUT', or 'DELETE'
+  data: filestream, list of parts, or other object which can be converted
+      to a string. Should be set to None when performing a GET or DELETE.
+      If data is a file-like object which can be read, this method will
+      read a chunk of 100K bytes at a time and send them.
+      If the data is a list of parts to be sent, each part will be
+      evaluated and sent.
+  url: The full URL to which the request should be sent. Can be a string
+      or atom.url.Url.
+  headers: dict of strings. HTTP headers which should be sent
+      in the request.
+ +
+Methods inherited from atom.http_interface.GenericHttpClient:
+
delete(self, url, headers=None)
+ +
get(self, url, headers=None)
+ +
post(self, url, data, headers=None)
+ +
put(self, url, data, headers=None)
+ +
+Data and other attributes inherited from atom.http_interface.GenericHttpClient:
+
__dict__ = <dictproxy object>
dictionary for instance variables (if defined)
+ +
__weakref__ = <attribute '__weakref__' of 'GenericHttpClient' objects>
list of weak references to the object (if defined)
+ +
debug = False
+ +

+ + + + + + + +
 
+class AppEngineTokenStore(atom.token_store.TokenStore)
   Stores the user's auth tokens in the App Engine datastore.

+Tokens are only written to the datastore if a user is signed in (if 
+users.get_current_user() returns a user object).
 
 
Method resolution order:
+
AppEngineTokenStore
+
atom.token_store.TokenStore
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self)
+ +
add_token(self, token)
Associates the token with the current user and stores it.

+If there is no current user, the token will not be stored.

+Returns:
+  False if the token was not stored.
+ +
find_token(self, url)
Searches the current user's collection of token for a token which can
+be used for a request to the url.

+Returns:
+  The stored token which belongs to the current user and is valid for the
+  desired URL. If there is no current user, or there is no valid user 
+  token in the datastore, a atom.http_interface.GenericToken is returned.
+ +
remove_all_tokens(self)
Removes all of the current user's tokens from the datastore.
+ +
remove_token(self, token)
Removes the token from the current user's collection in the datastore.

+Returns:
+  False if the token was not removed, this could be because the token was
+  not in the datastore, or because there is no current user.
+ +
+Data and other attributes inherited from atom.token_store.TokenStore:
+
__dict__ = <dictproxy object>
dictionary for instance variables (if defined)
+ +
__weakref__ = <attribute '__weakref__' of 'TokenStore' objects>
list of weak references to the object (if defined)
+ +

+ + + + + + + +
 
+class HttpResponse(__builtin__.object)
   Translates a urlfetch response to look like an httplib response.

+Used to allow the response from HttpRequest to be usable by gdata.service
+methods.
 
 Methods defined here:
+
__init__(self, urlfetch_response)
+ +
getheader(self, name)
+ +
read(self, length=None)
+ +
+Data and other attributes defined here:
+
__dict__ = <dictproxy object>
dictionary for instance variables (if defined)
+ +
__weakref__ = <attribute '__weakref__' of 'HttpResponse' objects>
list of weak references to the object (if defined)
+ +

+ + + + + + + +
 
+class TokenCollection(google.appengine.ext.db.Model)
   Datastore Model which associates auth tokens with the current user.
 
 
Method resolution order:
+
TokenCollection
+
google.appengine.ext.db.Model
+
__builtin__.object
+
+
+Data and other attributes defined here:
+
pickled_tokens = <google.appengine.ext.db.BlobProperty object>
A string that can be longer than 500 bytes.

+This type should be used for large binary values to make sure the datastore
+has good performance for queries.
+ +
user = <google.appengine.ext.db.UserProperty object>
A user property.
+ +
+Methods inherited from google.appengine.ext.db.Model:
+
__init__(self, parent=None, key_name=None, _app=None, **kwds)
Creates a new instance of this model.

+To create a new entity, you instantiate a model and then call save(),
+which saves the entity to the datastore:

+   person = Person()
+   person.name = 'Bret'
+   person.save()

+You can initialize properties in the model in the constructor with keyword
+arguments:

+   person = Person(name='Bret')

+We initialize all other properties to the default value (as defined by the
+properties in the model definition) if they are not provided in the
+constructor.

+Args:
+  parent: Parent instance for this instance or None, indicating a top-
+    level instance.
+  key_name: Name for new model instance.
+  _app: Intentionally undocumented.
+  args: Keyword arguments mapping to properties of model.
+ +
delete(self)
Deletes this entity from the datastore.

+Raises:
+  TransactionFailedError if the data could not be committed.
+ +
dynamic_properties(self)
Returns a list of all dynamic properties defined for instance.
+ +
instance_properties(self)
Alias for dynamic_properties.
+ +
is_saved(self)
Determine if entity is persisted in the datastore.

+New instances of Model do not start out saved in the data.  Objects which
+are saved to or loaded from the Datastore will have a True saved state.

+Returns:
+  True if object has been persisted to the datastore, otherwise False.
+ +
key(self)
Unique key for this entity.

+This property is only available if this entity is already stored in the
+datastore, so it is available if this entity was fetched returned from a
+query, or after save() is called the first time for new entities.

+Returns:
+  Datastore key of persisted entity.

+Raises:
+  NotSavedError when entity is not persistent.
+ +
parent(self)
Get the parent of the model instance.

+Returns:
+  Parent of contained entity or parent provided in constructor, None if
+  instance has no parent.
+ +
parent_key(self)
Get the parent's key.

+This method is useful for avoiding a potential fetch from the datastore
+but still get information about the instances parent.

+Returns:
+  Parent key of entity, None if there is no parent.
+ +
put(self)
Writes this model instance to the datastore.

+If this instance is new, we add an entity to the datastore.
+Otherwise, we update this instance, and the key will remain the
+same.

+Returns:
+  The key of the instance (either the existing key or a new key).

+Raises:
+  TransactionFailedError if the data could not be committed.
+ +
save = put(self)
Writes this model instance to the datastore.

+If this instance is new, we add an entity to the datastore.
+Otherwise, we update this instance, and the key will remain the
+same.

+Returns:
+  The key of the instance (either the existing key or a new key).

+Raises:
+  TransactionFailedError if the data could not be committed.
+ +
to_xml(self, _entity_class=<class 'google.appengine.api.datastore.Entity'>)
Generate an XML representation of this model instance.

+atom and gd:namespace properties are converted to XML according to their
+respective schemas. For more information, see:

+  http://www.atomenabled.org/developers/syndication/
+  http://code.google.com/apis/gdata/common-elements.html
+ +
+Class methods inherited from google.appengine.ext.db.Model:
+
all(cls) from google.appengine.ext.db.PropertiedClass
Returns a query over all instances of this model from the datastore.

+Returns:
+  Query that will retrieve all instances from entity collection.
+ +
entity_type(cls) from google.appengine.ext.db.PropertiedClass
Soon to be removed alias for kind.
+ +
fields(cls) from google.appengine.ext.db.PropertiedClass
Soon to be removed alias for properties.
+ +
from_entity(cls, entity) from google.appengine.ext.db.PropertiedClass
Converts the entity representation of this model to an instance.

+Converts datastore.Entity instance to an instance of cls.

+Args:
+  entity: Entity loaded directly from datastore.

+Raises:
+  KindError when cls is incorrect model for entity.
+ +
get(cls, keys) from google.appengine.ext.db.PropertiedClass
Fetch instance from the datastore of a specific Model type using key.

+We support Key objects and string keys (we convert them to Key objects
+automatically).

+Useful for ensuring that specific instance types are retrieved from the
+datastore.  It also helps that the source code clearly indicates what
+kind of object is being retrieved.  Example:

+  story = Story.get(story_key)

+Args:
+  keys: Key within datastore entity collection to find; or string key;
+    or list of Keys or string keys.

+Returns:
+  If a single key was given: a Model instance associated with key
+  for provided class if it exists in the datastore, otherwise
+  None; if a list of keys was given: a list whose items are either
+  a Model instance or None.

+Raises:
+  KindError if any of the retrieved objects are not instances of the
+  type associated with call to 'get'.
+ +
get_by_id(cls, ids, parent=None) from google.appengine.ext.db.PropertiedClass
Get instance of Model class by id.

+Args:
+  key_names: A single id or a list of ids.
+  parent: Parent of instances to get.  Can be a model or key.
+ +
get_by_key_name(cls, key_names, parent=None) from google.appengine.ext.db.PropertiedClass
Get instance of Model class by its key's name.

+Args:
+  key_names: A single key-name or a list of key-names.
+  parent: Parent of instances to get.  Can be a model or key.
+ +
get_or_insert(cls, key_name, **kwds) from google.appengine.ext.db.PropertiedClass
Transactionally retrieve or create an instance of Model class.

+This acts much like the Python dictionary setdefault() method, where we
+first try to retrieve a Model instance with the given key name and parent.
+If it's not present, then we create a new instance (using the *kwds
+supplied) and insert that with the supplied key name.

+Subsequent calls to this method with the same key_name and parent will
+always yield the same entity (though not the same actual object instance),
+regardless of the *kwds supplied. If the specified entity has somehow
+been deleted separately, then the next call will create a new entity and
+return it.

+If the 'parent' keyword argument is supplied, it must be a Model instance.
+It will be used as the parent of the new instance of this Model class if
+one is created.

+This method is especially useful for having just one unique entity for
+a specific identifier. Insertion/retrieval is done transactionally, which
+guarantees uniqueness.

+Example usage:

+  class WikiTopic(db.Model):
+    creation_date = db.DatetimeProperty(auto_now_add=True)
+    body = db.TextProperty(required=True)

+  # The first time through we'll create the new topic.
+  wiki_word = 'CommonIdioms'
+  topic = WikiTopic.get_or_insert(wiki_word,
+                                  body='This topic is totally new!')
+  assert topic.key().name() == 'CommonIdioms'
+  assert topic.body == 'This topic is totally new!'

+  # The second time through will just retrieve the entity.
+  overwrite_topic = WikiTopic.get_or_insert(wiki_word,
+                                  body='A totally different message!')
+  assert topic.key().name() == 'CommonIdioms'
+  assert topic.body == 'This topic is totally new!'

+Args:
+  key_name: Key name to retrieve or create.
+  **kwds: Keyword arguments to pass to the constructor of the model class
+    if an instance for the specified key name does not already exist. If
+    an instance with the supplied key_name and parent already exists, the
+    rest of these arguments will be discarded.

+Returns:
+  Existing instance of Model class with the specified key_name and parent
+  or a new one that has just been created.

+Raises:
+  TransactionFailedError if the specified Model instance could not be
+  retrieved or created transactionally (due to high contention, etc).
+ +
gql(cls, query_string, *args, **kwds) from google.appengine.ext.db.PropertiedClass
Returns a query using GQL query string.

+See appengine/ext/gql for more information about GQL.

+Args:
+  query_string: properly formatted GQL query string with the
+    'SELECT * FROM <entity>' part omitted
+  *args: rest of the positional arguments used to bind numeric references
+    in the query.
+  **kwds: dictionary-based arguments (for named parameters).
+ +
kind(cls) from google.appengine.ext.db.PropertiedClass
Returns the datastore kind we use for this model.

+We just use the name of the model for now, ignoring potential collisions.
+ +
properties(cls) from google.appengine.ext.db.PropertiedClass
Returns a dictionary of all the properties defined for this model.
+ +
+Data and other attributes inherited from google.appengine.ext.db.Model:
+
__dict__ = <dictproxy object>
dictionary for instance variables (if defined)
+ +
__metaclass__ = <class 'google.appengine.ext.db.PropertiedClass'>
Meta-class for initializing Model classes properties.

+Used for initializing Properties defined in the context of a model.
+By using a meta-class much of the configuration of a Property
+descriptor becomes implicit.  By using this meta-class, descriptors
+that are of class Model are notified about which class they
+belong to and what attribute they are associated with and can
+do appropriate initialization via __property_config__.

+Duplicate properties are not permitted.
+ +
__weakref__ = <attribute '__weakref__' of 'Model' objects>
list of weak references to the object (if defined)
+ +

+ + + + + +
 
+Functions
       
load_auth_tokens()
Reads a dictionary of the current user's tokens from the datastore.

+If there is no current user (a user is not signed in to the app) or the user
+does not have any tokens, an empty dictionary is returned.
+
run_on_appengine(gdata_service)
Modifies a GDataService object to allow it to run on App Engine.

+Args:
+  gdata_service: An instance of AtomService, GDataService, or any
+      of their subclasses which has an http_client member and a 
+      token_store member.
+
save_auth_tokens(token_dict)
Associates the tokens with the current user and writes to the datastore.

+If there is no current user, the tokens are not written and this function
+returns None.

+Returns:
+  The key of the datastore entity containing the user's tokens, or None if
+  there was no current user.
+

+ + + + + +
 
+Data
       __author__ = 'api.jscudder (Jeff Scudder)'

+ + + + + +
 
+Author
       api.jscudder (Jeff Scudder)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.apps.emailsettings.html b/gdata.py-1.2.3/pydocs/gdata.apps.emailsettings.html new file mode 100644 index 0000000..a4a91b7 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.apps.emailsettings.html @@ -0,0 +1,34 @@ + + +Python: package gdata.apps.emailsettings + + + + +
 
+ 
gdata.apps.emailsettings
index
/usr/local/svn/gdata-python-client/src/gdata/apps/emailsettings/__init__.py
+

# Copyright (C) 2008 Google
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+

+ + + + + +
 
+Package Contents
       
service
+
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.apps.emailsettings.service.html b/gdata.py-1.2.3/pydocs/gdata.apps.emailsettings.service.html new file mode 100644 index 0000000..e3314cd --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.apps.emailsettings.service.html @@ -0,0 +1,712 @@ + + +Python: module gdata.apps.emailsettings.service + + + + +
 
+ 
gdata.apps.emailsettings.service
index
/usr/local/svn/gdata-python-client/src/gdata/apps/emailsettings/service.py
+

Allow Google Apps domain administrators to set users' email settings.

+EmailSettingsService: Set various email settings.

+

+ + + + + +
 
+Modules
       
gdata
+

+ + + + + +
 
+Classes
       
+
gdata.apps.service.PropertyService(gdata.service.GDataService) +
+
+
EmailSettingsService +
+
+
+

+ + + + + + + +
 
+class EmailSettingsService(gdata.apps.service.PropertyService)
   Client for the Google Apps Email Settings service.
 
 
Method resolution order:
+
EmailSettingsService
+
gdata.apps.service.PropertyService
+
gdata.service.GDataService
+
atom.service.AtomService
+
__builtin__.object
+
+
+Methods defined here:
+
CreateFilter(self, username, from_=None, to=None, subject=None, has_the_word=None, does_not_have_the_word=None, has_attachment=None, label=None, should_mark_as_read=None, should_archive=None)
Create a filter.

+Args:
+  username: User to create filter for.
+  from_: Filter from string.
+  to: Filter to string.
+  subject: Filter subject.
+  has_the_word: Words to filter in.
+  does_not_have_the_word: Words to filter out.
+  has_attachment:  Boolean for message having attachment.
+  label: Label to apply.
+  should_mark_as_read: Boolean for marking message as read.
+  should_archive: Boolean for archiving message.

+Returns:
+  A dict containing the result of the create operation.
+ +
CreateLabel(self, username, label)
Create a label.

+Args:
+  username: User to create label for.
+  label: Label to create.

+Returns:
+  A dict containing the result of the create operation.
+ +
CreateSendAsAlias(self, username, name, address, reply_to=None, make_default=None)
Create alias to send mail as.

+Args:
+  username: User to create alias for.
+  name: Name of alias.
+  address: Email address to send from.
+  reply_to: Email address to reply to.
+  make_default: Boolean for whether this is the new default sending alias.

+Returns:
+  A dict containing the result of the create operation.
+ +
UpdateForwarding(self, username, enable, forward_to=None, action=None)
Update forwarding settings.

+Args:
+  username: User to update forwarding for.
+  enable: Boolean whether to enable this forwarding rule.
+  forward_to: Email address to forward to.
+  action: Action to take after forwarding.

+Returns:
+  A dict containing the result of the update operation.
+ +
UpdateGeneral(self, username, page_size=None, shortcuts=None, arrows=None, snippets=None, unicode=None)
Update general settings.

+Args:
+  username: User to update general settings for.
+  page_size: Number of messages to show.
+  shortcuts: Boolean whether shortcuts are enabled.
+  arrows: Boolean whether arrows are enabled.
+  snippets: Boolean whether snippets are enabled.
+  unicode: Whether unicode is enabled.

+Returns:
+  A dict containing the result of the update operation.
+ +
UpdateImap(self, username, enable)
Update IMAP settings.

+Args:
+  username: User to update IMAP settings for.
+  enable: Boolean whether to enable IMAP.

+Returns:
+  A dict containing the result of the update operation.
+ +
UpdateLanguage(self, username, language)
Update user interface language.

+Args:
+  username: User to update language for.
+  language: Language code.

+Returns:
+  A dict containing the result of the update operation.
+ +
UpdatePop(self, username, enable, enable_for=None, action=None)
Update POP3 settings.

+Args:
+  username: User to update POP3 settings for.
+  enable: Boolean whether to enable POP3.
+  enable_for: Which messages to make available via POP3.
+  action: Action to take after user retrieves email via POP3.

+Returns:
+  A dict containing the result of the update operation.
+ +
UpdateSignature(self, username, signature)
Update signature.

+Args:
+  username: User to update signature for.
+  signature: Signature string.

+Returns:
+  A dict containing the result of the update operation.
+ +
UpdateVacation(self, username, enable, subject=None, message=None, contacts_only=None)
Update vacation settings.

+Args:
+  username: User to update vacation settings for.
+  enable: Boolean whether to enable vacation responses.
+  subject: Vacation message subject.
+  message: Vacation message body.
+  contacts_only: Boolean whether to send message only to contacts.

+Returns:
+  A dict containing the result of the update operation.
+ +
+Methods inherited from gdata.apps.service.PropertyService:
+
__init__(self, email=None, password=None, domain=None, source=None, server='apps-apis.google.com', additional_headers=None)
+ +
+Methods inherited from gdata.service.GDataService:
+
AuthSubTokenInfo(self)
Fetches the AuthSub token's metadata from the server.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
ClientLogin(self, username, password, account_type=None, service=None, auth_service_url=None, source=None, captcha_token=None, captcha_response=None)
Convenience method for authenticating using ProgrammaticLogin. 

+Sets values for email, password, and other optional members.

+Args:
+  username:
+  password:
+  account_type: string (optional)
+  service: string (optional)
+  auth_service_url: string (optional)
+  captcha_token: string (optional)
+  captcha_response: string (optional)
+ +
Delete(self, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4)
Deletes the entry at the given URI.

+Args:
+  uri: string The URI of the entry to be deleted. Example: 
+       '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.

+Returns:
+  True if the entry was deleted.
+ +
FetchOAuthRequestToken(self, scopes=None, extra_parameters=None)
Fetches OAuth request token and returns it.

+Args:
+  scopes: string or list of string base URL(s) of the service(s) to be
+      accessed. If None, then this method tries to determine the
+      scope(s) from the current service.
+  extra_parameters: dict (optional) key-value pairs as any additional
+      parameters to be included in the URL and signature while making a
+      request for fetching an OAuth request token. All the OAuth parameters
+      are added by default. But if provided through this argument, any
+      default parameters will be overwritten. For e.g. a default parameter
+      oauth_version 1.0 can be overwritten if
+      extra_parameters = {'oauth_version': '2.0'}
+  
+Returns:
+  The fetched request token as a gdata.auth.OAuthToken object.
+  
+Raises:
+  FetchingOAuthRequestTokenFailed if the server responded to the request
+  with an error.
+ +
GenerateAuthSubURL(self, next, scope, secure=False, session=True, domain='default')
Generate a URL at which the user will login and be redirected back.

+Users enter their credentials on a Google login page and a token is sent
+to the URL specified in next. See documentation for AuthSub login at:
+http://code.google.com/apis/accounts/docs/AuthSub.html

+Args:
+  next: string The URL user will be sent to after logging in.
+  scope: string or list of strings. The URLs of the services to be 
+         accessed.
+  secure: boolean (optional) Determines whether or not the issued token
+          is a secure token.
+  session: boolean (optional) Determines whether or not the issued token
+           can be upgraded to a session token.
+ +
GenerateOAuthAuthorizationURL(self, request_token=None, callback_url=None, extra_params=None, include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope')
Generates URL at which user will login to authorize the request token.

+Args:
+  request_token: gdata.auth.OAuthToken (optional) OAuth request token.
+      If not specified, then the current token will be used if it is of
+      type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.    
+  callback_url: string (optional) The URL user will be sent to after
+      logging in and granting access.
+  extra_params: dict (optional) Additional parameters to be sent.
+  include_scopes_in_callback: Boolean (default=False) if set to True, and
+      if 'callback_url' is present, the 'callback_url' will be modified to
+      include the scope(s) from the request token as a URL parameter. The
+      key for the 'callback' URL's scope parameter will be
+      OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as
+      a parameter to the 'callback' URL, is that the page which receives
+      the OAuth token will be able to tell which URLs the token grants
+      access to.
+  scopes_param_prefix: string (default='oauth_token_scope') The URL
+      parameter key which maps to the list of valid scopes for the token.
+      This URL parameter will be included in the callback URL along with
+      the scopes of the token as value if include_scopes_in_callback=True.
+      
+Returns:
+  A string URL at which the user is required to login.

+Raises:
+  NonOAuthToken if the user's request token is not an OAuth token or if a
+  request token was not available.
+ +
Get(self, uri, extra_headers=None, redirects_remaining=4, encoding='UTF-8', converter=None)
Query the GData API with the given URI

+The uri is the portion of the URI after the server value 
+(ex: www.google.com).

+To perform a query against Google Base, set the server to 
+'base.google.com' and set the uri to '/base/feeds/...', where ... is 
+your query. For example, to find snippets for all digital cameras uri 
+should be set to: '/base/feeds/snippets?bq=digital+camera'

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to 
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and 
+                 Authorization headers.
+  redirects_remaining: int (optional) Tracks the number of additional
+      redirects this method will allow. If the service object receives
+      a redirect and remaining is 0, it will not follow the redirect. 
+      This was added to avoid infinite redirect loops.
+  encoding: string (optional) The character encoding for the server's
+      response. Default is UTF-8
+  converter: func (optional) A function which will transform
+      the server's results before it is returned. Example: use 
+      GDataFeedFromString to parse the server response as if it
+      were a GDataFeed.

+Returns:
+  If there is no ResultsTransformer specified in the call, a GDataFeed 
+  or GDataEntry depending on which is sent from the server. If the 
+  response is niether a feed or entry and there is no ResultsTransformer,
+  return a string. If there is a ResultsTransformer, the returned value 
+  will be that of the ResultsTransformer function.
+ +
GetAuthSubToken(self)
Returns the AuthSub token as a string.

+If the token is an gdta.auth.AuthSubToken, the Authorization Label
+("AuthSub token") is removed.

+This method examines the current_token to see if it is an AuthSubToken
+or SecureAuthSubToken. If not, it searches the token_store for a token
+which matches the current scope.

+The current scope is determined by the service name string member.

+Returns:
+  If the current_token is set to an AuthSubToken/SecureAuthSubToken,
+  return the token string. If there is no current_token, a token string
+  for a token which matches the service object's default scope is returned.
+  If there are no tokens valid for the scope, returns None.
+ +
GetClientLoginToken(self)
Returns the token string for the current token or a token matching the 
+service scope.

+If the current_token is a ClientLoginToken, the token string for 
+the current token is returned. If the current_token is not set, this method
+searches for a token in the token_store which is valid for the service 
+object's current scope.

+The current scope is determined by the service name string member.
+The token string is the end of the Authorization header, it doesn not
+include the ClientLogin label.
+ +
GetEntry(self, uri, extra_headers=None)
Query the GData API with the given URI and receive an Entry.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataEntry built from the XML in the server's response.
+ +
GetFeed(self, uri, extra_headers=None, converter=<function GDataFeedFromString at 0x7f4e2d52eb18>)
Query the GData API with the given URI and receive a Feed.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataFeed built from the XML in the server's response.
+ +
GetMedia(self, uri, extra_headers=None)
Returns a MediaSource containing media and its metadata from the given
+URI string.
+ +
GetNext(self, feed)
Requests the next 'page' of results in the feed.

+This method uses the feed's next link to request an additional feed
+and uses the class of the feed to convert the results of the GET request.

+Args:
+  feed: atom.Feed or a subclass. The feed should contain a next link and
+      the type of the feed will be applied to the results from the 
+      server. The new feed which is returned will be of the same class
+      as this feed which was passed in.

+Returns:
+  A new feed representing the next set of results in the server's feed.
+  The type of this feed will match that of the feed argument.
+ +
Post(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert or update  data into a GData service at the given URI.

+Args:
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri.
+  uri: string The location (feed) to which the data should be inserted.
+       Example: '/base/feeds/items'.
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the
+      server's response. Often this is a function like
+      GDataEntryFromString which will parse the body of the server's
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert data into a GData service at the given URI.

+Args:
+  verb: string, either 'POST' or 'PUT'
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri. 
+  uri: string The location (feed) to which the data should be inserted. 
+       Example: '/base/feeds/items'. 
+  extra_headers: dict (optional) HTTP headers which are to be included. 
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
ProgrammaticLogin(self, captcha_token=None, captcha_response=None)
Authenticates the user and sets the GData Auth token.

+Login retreives a temporary auth token which must be used with all
+requests to GData services. The auth token is stored in the GData client
+object.

+Login is also used to respond to a captcha challenge. If the user's login
+attempt failed with a CaptchaRequired error, the user can respond by
+calling Login with the captcha token and the answer to the challenge.

+Args:
+  captcha_token: string (optional) The identifier for the captcha challenge
+                 which was presented to the user.
+  captcha_response: string (optional) The user's answer to the captch 
+                    challenge.

+Raises:
+  CaptchaRequired if the login service will require a captcha response
+  BadAuthentication if the login service rejected the username or password
+  Error if the login service responded with a 403 different from the above
+ +
Put(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=3, media_source=None, converter=None)
Updates an entry at the given URI.

+Args:
+  data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The 
+        XML containing the updated data.
+  uri: string A URI indicating entry to which the update will be applied.
+       Example: '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the put succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
RevokeAuthSubToken(self)
Revokes an existing AuthSub token.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
RevokeOAuthToken(self)
Revokes an existing OAuth token.

+Raises:
+  NonOAuthToken if the user's auth token is not an OAuth token.
+  RevokingOAuthTokenFailed if request for revoking an OAuth token failed.
+ +
SetAuthSubToken(self, token, scopes=None, rsa_key=None)
Sets the token sent in requests to an AuthSub token.

+Sets the current_token and attempts to add the token to the token_store.

+Only use this method if you have received a token from the AuthSub
+service. The auth token is set automatically when UpgradeToSessionToken()
+is used. See documentation for Google AuthSub here:
+http://code.google.com/apis/accounts/AuthForWebApps.html 

+Args:
+ token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string
+        The token returned by the AuthSub service. If the token is an
+        AuthSubToken or SecureAuthSubToken, the scope information stored in
+        the token is used. If the token is a string, the scopes parameter is
+        used to determine the valid scopes.
+ scopes: list of URLs for which the token is valid. This is only used
+         if the token parameter is a string.
+ rsa_key: string (optional) Private key required for RSA_SHA1 signature
+          method.  This parameter is necessary if the token is a string
+          representing a secure token.
+ +
SetClientLoginToken(self, token, scopes=None)
Sets the token sent in requests to a ClientLogin token.

+This method sets the current_token to a new ClientLoginToken and it 
+also attempts to add the ClientLoginToken to the token_store.

+Only use this method if you have received a token from the ClientLogin
+service. The auth_token is set automatically when ProgrammaticLogin()
+is used. See documentation for Google ClientLogin here:
+http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html

+Args:
+  token: string or instance of a ClientLoginToken.
+ +
SetOAuthInputParameters(self, signature_method, consumer_key, consumer_secret=None, rsa_key=None, two_legged_oauth=False)
Sets parameters required for using OAuth authentication mechanism.

+NOTE: Though consumer_secret and rsa_key are optional, either of the two
+is required depending on the value of the signature_method.

+Args:
+  signature_method: class which provides implementation for strategy class
+      oauth.oauth.OAuthSignatureMethod. Signature method to be used for
+      signing each request. Valid implementations are provided as the
+      constants defined by gdata.auth.OAuthSignatureMethod. Currently
+      they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
+      gdata.auth.OAuthSignatureMethod.HMAC_SHA1
+  consumer_key: string Domain identifying third_party web application.
+  consumer_secret: string (optional) Secret generated during registration.
+      Required only for HMAC_SHA1 signature method.
+  rsa_key: string (optional) Private key required for RSA_SHA1 signature
+      method.
+  two_legged_oauth: string (default=False) Enables two-legged OAuth process.
+ +
SetOAuthToken(self, oauth_token)
Attempts to set the current token and add it to the token store.

+The oauth_token can be any OAuth token i.e. unauthorized request token,
+authorized request token or access token.
+This method also attempts to add the token to the token store.
+Use this method any time you want the current token to point to the
+oauth_token passed. For e.g. call this method with the request token
+you receive from FetchOAuthRequestToken.

+Args:
+  request_token: gdata.auth.OAuthToken OAuth request token.
+ +
UpgradeToOAuthAccessToken(self, authorized_request_token=None, oauth_version='1.0')
Upgrades the authorized request token to an access token.

+Args:
+  authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request
+      token. If not specified, then the current token will be used if it is
+      of type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.
+  oauth_version: str (default='1.0') oauth_version parameter. All other
+      'oauth_' parameters are added by default. This parameter too, is
+      added by default but here you can override it's value.
+      
+Raises:
+  NonOAuthToken if the user's authorized request token is not an OAuth
+  token or if an authorized request token was not available.
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
UpgradeToSessionToken(self, token=None)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         (optional) which is good for a single use but can be upgraded
+         to a session token. If no token is passed in, the token
+         is found by looking in the token_store by looking for a token
+         for the current scope.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
upgrade_to_session_token(self, token)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         which is good for a single use but can be upgraded to a
+         session token.

+Returns:
+  The upgraded token as a gdata.auth.AuthSubToken object.

+Raises:
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
+Data descriptors inherited from gdata.service.GDataService:
+
captcha_token
+
Get the captcha token for a login request.
+
+
captcha_url
+
Get the captcha URL for a login request.
+
+
source
+
The source is the name of the application making the request. 
+It should be in the form company_id-app_name-app_version
+
+
+Data and other attributes inherited from gdata.service.GDataService:
+
auth_token = None
+ +
handler = None
+ +
tokens = None
+ +
+Methods inherited from atom.service.AtomService:
+
UseBasicAuth(self, username, password, for_proxy=False)
Sets an Authenticaiton: Basic HTTP header containing plaintext.

+Deprecated, use use_basic_auth instead.

+The username and password are base64 encoded and added to an HTTP header
+which will be included in each request. Note that your username and 
+password are sent in plaintext.

+Args:
+  username: str
+  password: str
+ +
request(self, operation, url, data=None, headers=None, url_params=None)
+ +
use_basic_auth(self, username, password, scopes=None)
+ +
+Data descriptors inherited from atom.service.AtomService:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
debug
+
If True, HTTP debug information is printed.
+
+
override_token
+
+
+Data and other attributes inherited from atom.service.AtomService:
+
auto_set_current_token = True
+ +
auto_store_tokens = True
+ +
current_token = None
+ +
port = 80
+ +
ssl = False
+ +

+ + + + + +
 
+Data
       ALL_MAIL = 'ALL_MAIL'
+API_VER = '2.0'
+ARCHIVE = 'ARCHIVE'
+DELETE = 'DELETE'
+KEEP = 'KEEP'
+MAIL_FROM_NOW_ON = 'MAIL_FROM_NOW_ON'
+__author__ = 'google-apps-apis@googlegroups.com'

+ + + + + +
 
+Author
       google-apps-apis@googlegroups.com
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.apps.html b/gdata.py-1.2.3/pydocs/gdata.apps.html new file mode 100644 index 0000000..f09e4f6 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.apps.html @@ -0,0 +1,1398 @@ + + +Python: package gdata.apps + + + + +
 
+ 
gdata.apps
index
/usr/local/svn/gdata-python-client/src/gdata/apps/__init__.py
+

Contains objects used with Google Apps.

+

+ + + + + +
 
+Package Contents
       
emailsettings (package)
+
migration (package)
+
service
+

+ + + + + +
 
+Classes
       
+
atom.AtomBase(atom.ExtensionContainer) +
+
+
EmailList +
Login +
Name +
Nickname +
Property +
Quota +
Who +
+
+
gdata.GDataEntry(atom.Entry, gdata.LinkFinder) +
+
+
EmailListEntry +
EmailListRecipientEntry +
NicknameEntry +
PropertyEntry +
UserEntry +
+
+
gdata.GDataFeed(atom.Feed, gdata.LinkFinder) +
+
+
EmailListFeed(gdata.GDataFeed, gdata.LinkFinder) +
EmailListRecipientFeed(gdata.GDataFeed, gdata.LinkFinder) +
NicknameFeed(gdata.GDataFeed, gdata.LinkFinder) +
UserFeed(gdata.GDataFeed, gdata.LinkFinder) +
+
+
gdata.LinkFinder(atom.LinkFinder) +
+
+
EmailListFeed(gdata.GDataFeed, gdata.LinkFinder) +
EmailListRecipientFeed(gdata.GDataFeed, gdata.LinkFinder) +
NicknameFeed(gdata.GDataFeed, gdata.LinkFinder) +
UserFeed(gdata.GDataFeed, gdata.LinkFinder) +
+
+
+

+ + + + + + + +
 
+class EmailList(atom.AtomBase)
   The Google Apps EmailList element
 
 
Method resolution order:
+
EmailList
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class EmailListEntry(gdata.GDataEntry)
   A Google Apps EmailList flavor of an Atom Entry
 
 
Method resolution order:
+
EmailListEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, email_list=None, feed_link=None, extended_property=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class EmailListFeed(gdata.GDataFeed, gdata.LinkFinder)
   A Google Apps EmailList feed flavor of an Atom Feed
 
 
Method resolution order:
+
EmailListFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class EmailListRecipientEntry(gdata.GDataEntry)
   A Google Apps EmailListRecipient flavor of an Atom Entry
 
 
Method resolution order:
+
EmailListRecipientEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, who=None, extended_property=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class EmailListRecipientFeed(gdata.GDataFeed, gdata.LinkFinder)
   A Google Apps EmailListRecipient feed flavor of an Atom Feed
 
 
Method resolution order:
+
EmailListRecipientFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class Login(atom.AtomBase)
   The Google Apps Login element
 
 
Method resolution order:
+
Login
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, user_name=None, password=None, suspended=None, ip_whitelisted=None, hash_function_name=None, admin=None, change_password=None, agreed_to_terms=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Name(atom.AtomBase)
   The Google Apps Name element
 
 
Method resolution order:
+
Name
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, family_name=None, given_name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Nickname(atom.AtomBase)
   The Google Apps Nickname element
 
 
Method resolution order:
+
Nickname
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class NicknameEntry(gdata.GDataEntry)
   A Google Apps flavor of an Atom Entry for Nickname
 
 
Method resolution order:
+
NicknameEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, login=None, nickname=None, extended_property=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class NicknameFeed(gdata.GDataFeed, gdata.LinkFinder)
   A Google Apps Nickname feed flavor of an Atom Feed
 
 
Method resolution order:
+
NicknameFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class Property(atom.AtomBase)
   The Google Apps Property element
 
 
Method resolution order:
+
Property
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, name=None, value=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class PropertyEntry(gdata.GDataEntry)
   A Google Apps Property flavor of an Atom Entry
 
 
Method resolution order:
+
PropertyEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, property=None, extended_property=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class Quota(atom.AtomBase)
   The Google Apps Quota element
 
 
Method resolution order:
+
Quota
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, limit=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class UserEntry(gdata.GDataEntry)
   A Google Apps flavor of an Atom Entry
 
 
Method resolution order:
+
UserEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, login=None, name=None, quota=None, who=None, feed_link=None, extended_property=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class UserFeed(gdata.GDataFeed, gdata.LinkFinder)
   A Google Apps User feed flavor of an Atom Feed
 
 
Method resolution order:
+
UserFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class Who(atom.AtomBase)
   The Google Apps Who element
 
 
Method resolution order:
+
Who
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, rel=None, email=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Functions
       
EmailListEntryFromString(xml_string)
+
EmailListFeedFromString(xml_string)
+
EmailListFromString(xml_string)
+
EmailListRecipientEntryFromString(xml_string)
+
EmailListRecipientFeedFromString(xml_string)
+
LoginFromString(xml_string)
+
NameFromString(xml_string)
+
NicknameEntryFromString(xml_string)
+
NicknameFeedFromString(xml_string)
+
NicknameFromString(xml_string)
+
PropertyEntryFromString(xml_string)
+
PropertyFromString(xml_string)
+
QuotaFromString(xml_string)
+
UserEntryFromString(xml_string)
+
UserFeedFromString(xml_string)
+
WhoFromString(xml_string)
+

+ + + + + +
 
+Data
       APPS_NAMESPACE = 'http://schemas.google.com/apps/2006'
+APPS_TEMPLATE = '{http://schemas.google.com/apps/2006}%s'
+__author__ = 'tmatsuo@sios.com (Takashi MATSUO)'

+ + + + + +
 
+Author
       tmatsuo@sios.com (Takashi MATSUO)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.apps.migration.html b/gdata.py-1.2.3/pydocs/gdata.apps.migration.html new file mode 100644 index 0000000..a21d673 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.apps.migration.html @@ -0,0 +1,658 @@ + + +Python: package gdata.apps.migration + + + + +
 
+ 
gdata.apps.migration
index
/usr/local/svn/gdata-python-client/src/gdata/apps/migration/__init__.py
+

Contains objects used with Google Apps.

+

+ + + + + +
 
+Package Contents
       
service
+

+ + + + + +
 
+Classes
       
+
atom.AtomBase(atom.ExtensionContainer) +
+
+
Label +
MailItemProperty +
Rfc822Msg +
+
+
gdata.BatchEntry(gdata.GDataEntry) +
+
+
BatchMailEntry +
+
+
gdata.BatchFeed(gdata.GDataFeed) +
+
+
BatchMailEventFeed +
+
+
gdata.GDataEntry(atom.Entry, gdata.LinkFinder) +
+
+
MailEntry +
+
+
+

+ + + + + + + +
 
+class BatchMailEntry(gdata.BatchEntry)
   A Google Migration flavor of an Atom Entry.
 
 
Method resolution order:
+
BatchMailEntry
+
gdata.BatchEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, rfc822_msg=None, mail_item_property=None, label=None, batch_operation=None, batch_id=None, batch_status=None, extended_property=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class BatchMailEventFeed(gdata.BatchFeed)
   A Migration event feed flavor of an Atom Feed.
 
 
Method resolution order:
+
BatchMailEventFeed
+
gdata.BatchFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, interrupted=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from gdata.BatchFeed:
+
AddBatchEntry(self, entry=None, id_url_string=None, batch_id_string=None, operation_string=None)
Logic for populating members of a BatchEntry and adding to the feed.


+If the entry is not a BatchEntry, it is converted to a BatchEntry so
+that the batch specific members will be present. 

+The id_url_string can be used in place of an entry if the batch operation
+applies to a URL. For example query and delete operations require just
+the URL of an entry, no body is sent in the HTTP request. If an
+id_url_string is sent instead of an entry, a BatchEntry is created and
+added to the feed.

+This method also assigns the desired batch id to the entry so that it 
+can be referenced in the server's response. If the batch_id_string is
+None, this method will assign a batch_id to be the index at which this
+entry will be in the feed's entry list.

+Args:
+  entry: BatchEntry, atom.Entry, or another Entry flavor (optional) The
+      entry which will be sent to the server as part of the batch request.
+      The item must have a valid atom id so that the server knows which 
+      entry this request references.
+  id_url_string: str (optional) The URL of the entry to be acted on. You
+      can find this URL in the text member of the atom id for an entry.
+      If an entry is not sent, this id will be used to construct a new
+      BatchEntry which will be added to the request feed.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. Note that batch_ids should either always be specified or
+      never, mixing could potentially result in duplicate batch ids.
+  operation_string: str (optional) The desired batch operation which will
+      set the batch_operation.type member of the entry. Options are
+      'insert', 'update', 'delete', and 'query'

+Raises:
+  MissingRequiredParameters: Raised if neither an id_ url_string nor an
+      entry are provided in the request.

+Returns:
+  The added entry.
+ +
AddDelete(self, url_string=None, entry=None, batch_id_string=None)
Adds a delete request to the batch request feed.

+This method takes either the url_string which is the atom id of the item
+to be deleted, or the entry itself. The atom id of the entry must be 
+present so that the server knows which entry should be deleted.

+Args:
+  url_string: str (optional) The URL of the entry to be deleted. You can
+     find this URL in the text member of the atom id for an entry. 
+  entry: BatchEntry (optional) The entry to be deleted.
+  batch_id_string: str (optional)

+Raises:
+  MissingRequiredParameters: Raised if neither a url_string nor an entry 
+      are provided in the request.
+ +
AddInsert(self, entry, batch_id_string=None)
Add an insert request to the operations in this batch request feed.

+If the entry doesn't yet have an operation or a batch id, these will
+be set to the insert operation and a batch_id specified as a parameter.

+Args:
+  entry: BatchEntry The entry which will be sent in the batch feed as an
+      insert request.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. Note that batch_ids should either always be specified or
+      never, mixing could potentially result in duplicate batch ids.
+ +
AddQuery(self, url_string=None, entry=None, batch_id_string=None)
Adds a query request to the batch request feed.

+This method takes either the url_string which is the query URL 
+whose results will be added to the result feed. The query URL will
+be encapsulated in a BatchEntry, and you may pass in the BatchEntry
+with a query URL instead of sending a url_string.

+Args:
+  url_string: str (optional)
+  entry: BatchEntry (optional)
+  batch_id_string: str (optional)

+Raises:
+  MissingRequiredParameters
+ +
AddUpdate(self, entry, batch_id_string=None)
Add an update request to the list of batch operations in this feed.

+Sets the operation type of the entry to update if it is not already set
+and assigns the desired batch id to the entry so that it can be 
+referenced in the server's response.

+Args:
+  entry: BatchEntry The entry which will be sent to the server as an
+      update (HTTP PUT) request. The item must have a valid atom id
+      so that the server knows which entry to replace.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. See also comments for AddInsert.
+ +
GetBatchLink(self)
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class Label(atom.AtomBase)
   The Migration label element.
 
 
Method resolution order:
+
Label
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, label_name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class MailEntry(gdata.GDataEntry)
   A Google Migration flavor of an Atom Entry.
 
 
Method resolution order:
+
MailEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, rfc822_msg=None, mail_item_property=None, label=None, extended_property=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class MailItemProperty(atom.AtomBase)
   The Migration mailItemProperty element.
 
 
Method resolution order:
+
MailItemProperty
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, value=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Rfc822Msg(atom.AtomBase)
   The Migration rfc822Msg element.
 
 
Method resolution order:
+
Rfc822Msg
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Functions
       
BatchMailEntryFromString(xml_string)
Parse in the BatchMailEntry from the XML definition.
+
BatchMailEventFeedFromString(xml_string)
Parse in the BatchMailEventFeed from the XML definition.
+
LabelFromString(xml_string)
Parse in the Label from the XML definition.
+
MailEntryFromString(xml_string)
Parse in the MailEntry from the XML definition.
+
MailItemPropertyFromString(xml_string)
Parse in the MailItemProperty from the XML definition.
+
Rfc822MsgFromString(xml_string)
Parse in the Rfc822 message from the XML definition.
+

+ + + + + +
 
+Data
       APPS_NAMESPACE = 'http://schemas.google.com/apps/2006'
+APPS_TEMPLATE = '{http://schemas.google.com/apps/2006}%s'
+__author__ = 'google-apps-apis@googlegroups.com'

+ + + + + +
 
+Author
       google-apps-apis@googlegroups.com
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.apps.migration.service.html b/gdata.py-1.2.3/pydocs/gdata.apps.migration.service.html new file mode 100644 index 0000000..0835e74 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.apps.migration.service.html @@ -0,0 +1,700 @@ + + +Python: module gdata.apps.migration.service + + + + +
 
+ 
gdata.apps.migration.service
index
/usr/local/svn/gdata-python-client/src/gdata/apps/migration/service.py
+

Contains the methods to import mail via Google Apps Email Migration API.

+MigrationService: Provides methods to import mail.

+

+ + + + + +
 
+Modules
       
base64
+
gdata
+
gdata.apps.migration
+

+ + + + + +
 
+Classes
       
+
gdata.apps.service.AppsService(gdata.service.GDataService) +
+
+
MigrationService +
+
+
+

+ + + + + + + +
 
+class MigrationService(gdata.apps.service.AppsService)
   Client for the EMAPI migration service.  Use either ImportMail to import
+one message at a time, or AddBatchEntry and SubmitBatch to import a batch of
+messages at a time.
 
 
Method resolution order:
+
MigrationService
+
gdata.apps.service.AppsService
+
gdata.service.GDataService
+
atom.service.AtomService
+
__builtin__.object
+
+
+Methods defined here:
+
AddBatchEntry(self, mail_message, mail_item_properties, mail_labels)
Add a message to the current batch that you later will submit.

+Args:
+  mail_message: An RFC822 format email message.
+  mail_item_properties: A list of Gmail properties to apply to the message.
+  mail_labels: A list of labels to apply to the message.

+Returns:
+  The length of the MailEntry representing the message.
+ +
ImportMail(self, user_name, mail_message, mail_item_properties, mail_labels)
Import a single mail message.

+Args:
+  user_name: The username to import messages to.
+  mail_message: An RFC822 format email message.
+  mail_item_properties: A list of Gmail properties to apply to the message.
+  mail_labels: A list of labels to apply to the message.

+Returns:
+  A MailEntry representing the successfully imported message.

+Raises:
+  AppsForYourDomainException: An error occurred importing the message.
+ +
SubmitBatch(self, user_name)
Send all the mail items you have added to the batch to the server.

+Args:
+  user_name: The username to import messages to.

+Returns:
+  A HTTPResponse from the web service call.

+Raises:
+  AppsForYourDomainException: An error occurred importing the batch.
+ +
__init__(self, email=None, password=None, domain=None, source=None, server='apps-apis.google.com', additional_headers=None)
+ +
+Methods inherited from gdata.apps.service.AppsService:
+
AddAllElementsFromAllPages(self, link_finder, func)
retrieve all pages and add all elements
+ +
AddRecipientToEmailList(self, recipient, list_name)
Add a recipient to a email list.
+ +
CreateEmailList(self, list_name)
Create a email list.
+ +
CreateNickname(self, user_name, nickname)
Create a nickname
+ +
CreateUser(self, user_name, family_name, given_name, password, suspended='false', quota_limit=None, password_hash_function=None)
Create a user account.
+ +
DeleteEmailList(self, list_name)
Delete a email list
+ +
DeleteNickname(self, nickname)
Delete a nickname
+ +
DeleteUser(self, user_name)
Delete a user account
+ +
GetGeneratorForAllUsers(self)
Retrieve a generator for all users in this domain.
+ +
GetGeneratorFromLinkFinder(self, link_finder, func)
returns a generator for pagination
+ +
RemoveRecipientFromEmailList(self, recipient, list_name)
Remove recipient from email list.
+ +
RestoreUser(self, user_name)
+ +
RetrieveAllEmailLists(self)
Retrieve all email list of a domain.
+ +
RetrieveAllNicknames(self)
Retrieve all nicknames in the domain
+ +
RetrieveAllRecipients(self, list_name)
Retrieve all recipient of an email list.
+ +
RetrieveAllUsers(self)
Retrieve all users in this domain. OBSOLETE
+ +
RetrieveEmailList(self, list_name)
Retrieve a single email list by the list's name.
+ +
RetrieveEmailLists(self, recipient)
Retrieve All Email List Subscriptions for an Email Address.
+ +
RetrieveNickname(self, nickname)
Retrieve a nickname.

+Args:
+  nickname: string The nickname to retrieve

+Returns:
+  gdata.apps.NicknameEntry
+ +
RetrieveNicknames(self, user_name)
Retrieve nicknames of the user
+ +
RetrievePageOfEmailLists(self, start_email_list_name=None)
Retrieve one page of email list
+ +
RetrievePageOfNicknames(self, start_nickname=None)
Retrieve one page of nicknames in the domain
+ +
RetrievePageOfRecipients(self, list_name, start_recipient=None)
Retrieve one page of recipient of an email list.
+ +
RetrievePageOfUsers(self, start_username=None)
Retrieve one page of users in this domain.
+ +
RetrieveUser(self, user_name)
Retrieve an user account.

+Args:
+  user_name: string The user name to retrieve

+Returns:
+  gdata.apps.UserEntry
+ +
SuspendUser(self, user_name)
+ +
UpdateUser(self, user_name, user_entry)
Update a user account.
+ +
+Methods inherited from gdata.service.GDataService:
+
AuthSubTokenInfo(self)
Fetches the AuthSub token's metadata from the server.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
ClientLogin(self, username, password, account_type=None, service=None, auth_service_url=None, source=None, captcha_token=None, captcha_response=None)
Convenience method for authenticating using ProgrammaticLogin. 

+Sets values for email, password, and other optional members.

+Args:
+  username:
+  password:
+  account_type: string (optional)
+  service: string (optional)
+  auth_service_url: string (optional)
+  captcha_token: string (optional)
+  captcha_response: string (optional)
+ +
Delete(self, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4)
Deletes the entry at the given URI.

+Args:
+  uri: string The URI of the entry to be deleted. Example: 
+       '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.

+Returns:
+  True if the entry was deleted.
+ +
FetchOAuthRequestToken(self, scopes=None, extra_parameters=None)
Fetches OAuth request token and returns it.

+Args:
+  scopes: string or list of string base URL(s) of the service(s) to be
+      accessed. If None, then this method tries to determine the
+      scope(s) from the current service.
+  extra_parameters: dict (optional) key-value pairs as any additional
+      parameters to be included in the URL and signature while making a
+      request for fetching an OAuth request token. All the OAuth parameters
+      are added by default. But if provided through this argument, any
+      default parameters will be overwritten. For e.g. a default parameter
+      oauth_version 1.0 can be overwritten if
+      extra_parameters = {'oauth_version': '2.0'}
+  
+Returns:
+  The fetched request token as a gdata.auth.OAuthToken object.
+  
+Raises:
+  FetchingOAuthRequestTokenFailed if the server responded to the request
+  with an error.
+ +
GenerateAuthSubURL(self, next, scope, secure=False, session=True, domain='default')
Generate a URL at which the user will login and be redirected back.

+Users enter their credentials on a Google login page and a token is sent
+to the URL specified in next. See documentation for AuthSub login at:
+http://code.google.com/apis/accounts/docs/AuthSub.html

+Args:
+  next: string The URL user will be sent to after logging in.
+  scope: string or list of strings. The URLs of the services to be 
+         accessed.
+  secure: boolean (optional) Determines whether or not the issued token
+          is a secure token.
+  session: boolean (optional) Determines whether or not the issued token
+           can be upgraded to a session token.
+ +
GenerateOAuthAuthorizationURL(self, request_token=None, callback_url=None, extra_params=None, include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope')
Generates URL at which user will login to authorize the request token.

+Args:
+  request_token: gdata.auth.OAuthToken (optional) OAuth request token.
+      If not specified, then the current token will be used if it is of
+      type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.    
+  callback_url: string (optional) The URL user will be sent to after
+      logging in and granting access.
+  extra_params: dict (optional) Additional parameters to be sent.
+  include_scopes_in_callback: Boolean (default=False) if set to True, and
+      if 'callback_url' is present, the 'callback_url' will be modified to
+      include the scope(s) from the request token as a URL parameter. The
+      key for the 'callback' URL's scope parameter will be
+      OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as
+      a parameter to the 'callback' URL, is that the page which receives
+      the OAuth token will be able to tell which URLs the token grants
+      access to.
+  scopes_param_prefix: string (default='oauth_token_scope') The URL
+      parameter key which maps to the list of valid scopes for the token.
+      This URL parameter will be included in the callback URL along with
+      the scopes of the token as value if include_scopes_in_callback=True.
+      
+Returns:
+  A string URL at which the user is required to login.

+Raises:
+  NonOAuthToken if the user's request token is not an OAuth token or if a
+  request token was not available.
+ +
Get(self, uri, extra_headers=None, redirects_remaining=4, encoding='UTF-8', converter=None)
Query the GData API with the given URI

+The uri is the portion of the URI after the server value 
+(ex: www.google.com).

+To perform a query against Google Base, set the server to 
+'base.google.com' and set the uri to '/base/feeds/...', where ... is 
+your query. For example, to find snippets for all digital cameras uri 
+should be set to: '/base/feeds/snippets?bq=digital+camera'

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to 
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and 
+                 Authorization headers.
+  redirects_remaining: int (optional) Tracks the number of additional
+      redirects this method will allow. If the service object receives
+      a redirect and remaining is 0, it will not follow the redirect. 
+      This was added to avoid infinite redirect loops.
+  encoding: string (optional) The character encoding for the server's
+      response. Default is UTF-8
+  converter: func (optional) A function which will transform
+      the server's results before it is returned. Example: use 
+      GDataFeedFromString to parse the server response as if it
+      were a GDataFeed.

+Returns:
+  If there is no ResultsTransformer specified in the call, a GDataFeed 
+  or GDataEntry depending on which is sent from the server. If the 
+  response is neither a feed nor an entry and there is no ResultsTransformer,
+  return a string. If there is a ResultsTransformer, the returned value 
+  will be that of the ResultsTransformer function.
+ +
GetAuthSubToken(self)
Returns the AuthSub token as a string.

+If the token is a gdata.auth.AuthSubToken, the Authorization Label
+("AuthSub token") is removed.

+This method examines the current_token to see if it is an AuthSubToken
+or SecureAuthSubToken. If not, it searches the token_store for a token
+which matches the current scope.

+The current scope is determined by the service name string member.

+Returns:
+  If the current_token is set to an AuthSubToken/SecureAuthSubToken,
+  return the token string. If there is no current_token, a token string
+  for a token which matches the service object's default scope is returned.
+  If there are no tokens valid for the scope, returns None.
+ +
GetClientLoginToken(self)
Returns the token string for the current token or a token matching the 
+service scope.

+If the current_token is a ClientLoginToken, the token string for 
+the current token is returned. If the current_token is not set, this method
+searches for a token in the token_store which is valid for the service 
+object's current scope.

+The current scope is determined by the service name string member.
+The token string is the end of the Authorization header, it does not
+include the ClientLogin label.
+ +
GetEntry(self, uri, extra_headers=None)
Query the GData API with the given URI and receive an Entry.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataEntry built from the XML in the server's response.
+ +
GetFeed(self, uri, extra_headers=None, converter=<function GDataFeedFromString at 0x7f4c2c3acb18>)
Query the GData API with the given URI and receive a Feed.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataFeed built from the XML in the server's response.
+ +
GetMedia(self, uri, extra_headers=None)
Returns a MediaSource containing media and its metadata from the given
+URI string.
+ +
GetNext(self, feed)
Requests the next 'page' of results in the feed.

+This method uses the feed's next link to request an additional feed
+and uses the class of the feed to convert the results of the GET request.

+Args:
+  feed: atom.Feed or a subclass. The feed should contain a next link and
+      the type of the feed will be applied to the results from the 
+      server. The new feed which is returned will be of the same class
+      as this feed which was passed in.

+Returns:
+  A new feed representing the next set of results in the server's feed.
+  The type of this feed will match that of the feed argument.
+ +
Post(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert or update  data into a GData service at the given URI.

+Args:
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri.
+  uri: string The location (feed) to which the data should be inserted.
+       Example: '/base/feeds/items'.
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the
+      server's response. Often this is a function like
+      GDataEntryFromString which will parse the body of the server's
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert data into a GData service at the given URI.

+Args:
+  verb: string, either 'POST' or 'PUT'
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri. 
+  uri: string The location (feed) to which the data should be inserted. 
+       Example: '/base/feeds/items'. 
+  extra_headers: dict (optional) HTTP headers which are to be included. 
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
ProgrammaticLogin(self, captcha_token=None, captcha_response=None)
Authenticates the user and sets the GData Auth token.

+Login retrieves a temporary auth token which must be used with all
+requests to GData services. The auth token is stored in the GData client
+object.

+Login is also used to respond to a captcha challenge. If the user's login
+attempt failed with a CaptchaRequired error, the user can respond by
+calling Login with the captcha token and the answer to the challenge.

+Args:
+  captcha_token: string (optional) The identifier for the captcha challenge
+                 which was presented to the user.
+  captcha_response: string (optional) The user's answer to the captcha
+                    challenge.

+Raises:
+  CaptchaRequired if the login service will require a captcha response
+  BadAuthentication if the login service rejected the username or password
+  Error if the login service responded with a 403 different from the above
+ +
Put(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=3, media_source=None, converter=None)
Updates an entry at the given URI.

+Args:
+  data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The 
+        XML containing the updated data.
+  uri: string A URI indicating entry to which the update will be applied.
+       Example: '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the put succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
RevokeAuthSubToken(self)
Revokes an existing AuthSub token.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
RevokeOAuthToken(self)
Revokes an existing OAuth token.

+Raises:
+  NonOAuthToken if the user's auth token is not an OAuth token.
+  RevokingOAuthTokenFailed if request for revoking an OAuth token failed.
+ +
SetAuthSubToken(self, token, scopes=None, rsa_key=None)
Sets the token sent in requests to an AuthSub token.

+Sets the current_token and attempts to add the token to the token_store.

+Only use this method if you have received a token from the AuthSub
+service. The auth token is set automatically when UpgradeToSessionToken()
+is used. See documentation for Google AuthSub here:
+http://code.google.com/apis/accounts/AuthForWebApps.html 

+Args:
+ token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string
+        The token returned by the AuthSub service. If the token is an
+        AuthSubToken or SecureAuthSubToken, the scope information stored in
+        the token is used. If the token is a string, the scopes parameter is
+        used to determine the valid scopes.
+ scopes: list of URLs for which the token is valid. This is only used
+         if the token parameter is a string.
+ rsa_key: string (optional) Private key required for RSA_SHA1 signature
+          method.  This parameter is necessary if the token is a string
+          representing a secure token.
+ +
SetClientLoginToken(self, token, scopes=None)
Sets the token sent in requests to a ClientLogin token.

+This method sets the current_token to a new ClientLoginToken and it 
+also attempts to add the ClientLoginToken to the token_store.

+Only use this method if you have received a token from the ClientLogin
+service. The auth_token is set automatically when ProgrammaticLogin()
+is used. See documentation for Google ClientLogin here:
+http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html

+Args:
+  token: string or instance of a ClientLoginToken.
+ +
SetOAuthInputParameters(self, signature_method, consumer_key, consumer_secret=None, rsa_key=None, two_legged_oauth=False)
Sets parameters required for using OAuth authentication mechanism.

+NOTE: Though consumer_secret and rsa_key are optional, either of the two
+is required depending on the value of the signature_method.

+Args:
+  signature_method: class which provides implementation for strategy class
+      oauth.oauth.OAuthSignatureMethod. Signature method to be used for
+      signing each request. Valid implementations are provided as the
+      constants defined by gdata.auth.OAuthSignatureMethod. Currently
+      they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
+      gdata.auth.OAuthSignatureMethod.HMAC_SHA1
+  consumer_key: string Domain identifying third_party web application.
+  consumer_secret: string (optional) Secret generated during registration.
+      Required only for HMAC_SHA1 signature method.
+  rsa_key: string (optional) Private key required for RSA_SHA1 signature
+      method.
+  two_legged_oauth: string (default=False) Enables two-legged OAuth process.
+ +
SetOAuthToken(self, oauth_token)
Attempts to set the current token and add it to the token store.

+The oauth_token can be any OAuth token i.e. unauthorized request token,
+authorized request token or access token.
+This method also attempts to add the token to the token store.
+Use this method any time you want the current token to point to the
+oauth_token passed. For e.g. call this method with the request token
+you receive from FetchOAuthRequestToken.

+Args:
+  request_token: gdata.auth.OAuthToken OAuth request token.
+ +
UpgradeToOAuthAccessToken(self, authorized_request_token=None, oauth_version='1.0')
Upgrades the authorized request token to an access token.

+Args:
+  authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request
+      token. If not specified, then the current token will be used if it is
+      of type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.
+  oauth_version: str (default='1.0') oauth_version parameter. All other
+      'oauth_' parameters are added by default. This parameter too, is
+      added by default but here you can override its value.
+      
+Raises:
+  NonOAuthToken if the user's authorized request token is not an OAuth
+  token or if an authorized request token was not available.
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
UpgradeToSessionToken(self, token=None)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         (optional) which is good for a single use but can be upgraded
+         to a session token. If no token is passed in, the token
+         is found by looking in the token_store by looking for a token
+         for the current scope.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
upgrade_to_session_token(self, token)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         which is good for a single use but can be upgraded to a
+         session token.

+Returns:
+  The upgraded token as a gdata.auth.AuthSubToken object.

+Raises:
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
+Data descriptors inherited from gdata.service.GDataService:
+
captcha_token
+
Get the captcha token for a login request.
+
+
captcha_url
+
Get the captcha URL for a login request.
+
+
source
+
The source is the name of the application making the request. 
+It should be in the form company_id-app_name-app_version
+
+
+Data and other attributes inherited from gdata.service.GDataService:
+
auth_token = None
+ +
handler = None
+ +
tokens = None
+ +
+Methods inherited from atom.service.AtomService:
+
UseBasicAuth(self, username, password, for_proxy=False)
Sets an Authentication: Basic HTTP header containing plaintext.

+Deprecated, use use_basic_auth instead.

+The username and password are base64 encoded and added to an HTTP header
+which will be included in each request. Note that your username and 
+password are sent in plaintext.

+Args:
+  username: str
+  password: str
+ +
request(self, operation, url, data=None, headers=None, url_params=None)
+ +
use_basic_auth(self, username, password, scopes=None)
+ +
+Data descriptors inherited from atom.service.AtomService:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
debug
+
If True, HTTP debug information is printed.
+
+
override_token
+
+
+Data and other attributes inherited from atom.service.AtomService:
+
auto_set_current_token = True
+ +
auto_store_tokens = True
+ +
current_token = None
+ +
port = 80
+ +
ssl = False
+ +

+ + + + + +
 
+Data
       API_VER = '2.0'
+__author__ = 'google-apps-apis@googlegroups.com'

+ + + + + +
 
+Author
       google-apps-apis@googlegroups.com
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.apps.service.html b/gdata.py-1.2.3/pydocs/gdata.apps.service.html new file mode 100644 index 0000000..080aeb7 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.apps.service.html @@ -0,0 +1,1358 @@ + + +Python: module gdata.apps.service + + + + +
 
+ 
gdata.apps.service
index
/usr/local/svn/gdata-python-client/src/gdata/apps/service.py
+

# Copyright (C) 2007 SIOS Technology, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+

+ + + + + +
 
+Modules
       
xml.etree.cElementTree
+
atom
+
gdata
+
urllib
+

+ + + + + +
 
+Classes
       
+
exceptions.Exception(exceptions.BaseException) +
+
+
Error +
+
+
AppsForYourDomainException +
+
+
+
+
gdata.service.GDataService(atom.service.AtomService) +
+
+
AppsService +
PropertyService +
+
+
+

+ + + + + +
 
+class AppsForYourDomainException(Error)
    
Method resolution order:
+
AppsForYourDomainException
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, response)
+ +
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + + + +
 
+class AppsService(gdata.service.GDataService)
   Client for the Google Apps Provisioning service.
 
 
Method resolution order:
+
AppsService
+
gdata.service.GDataService
+
atom.service.AtomService
+
__builtin__.object
+
+
+Methods defined here:
+
AddAllElementsFromAllPages(self, link_finder, func)
retrieve all pages and add all elements
+ +
AddRecipientToEmailList(self, recipient, list_name)
Add a recipient to a email list.
+ +
CreateEmailList(self, list_name)
Create a email list.
+ +
CreateNickname(self, user_name, nickname)
Create a nickname
+ +
CreateUser(self, user_name, family_name, given_name, password, suspended='false', quota_limit=None, password_hash_function=None)
Create a user account.
+ +
DeleteEmailList(self, list_name)
Delete a email list
+ +
DeleteNickname(self, nickname)
Delete a nickname
+ +
DeleteUser(self, user_name)
Delete a user account
+ +
GetGeneratorForAllUsers(self)
Retrieve a generator for all users in this domain.
+ +
GetGeneratorFromLinkFinder(self, link_finder, func)
returns a generator for pagination
+ +
RemoveRecipientFromEmailList(self, recipient, list_name)
Remove recipient from email list.
+ +
RestoreUser(self, user_name)
+ +
RetrieveAllEmailLists(self)
Retrieve all email list of a domain.
+ +
RetrieveAllNicknames(self)
Retrieve all nicknames in the domain
+ +
RetrieveAllRecipients(self, list_name)
Retrieve all recipient of an email list.
+ +
RetrieveAllUsers(self)
Retrieve all users in this domain. OBSOLETE
+ +
RetrieveEmailList(self, list_name)
Retrieve a single email list by the list's name.
+ +
RetrieveEmailLists(self, recipient)
Retrieve All Email List Subscriptions for an Email Address.
+ +
RetrieveNickname(self, nickname)
Retrieve a nickname.

+Args:
+  nickname: string The nickname to retrieve

+Returns:
+  gdata.apps.NicknameEntry
+ +
RetrieveNicknames(self, user_name)
Retrieve nicknames of the user
+ +
RetrievePageOfEmailLists(self, start_email_list_name=None)
Retrieve one page of email list
+ +
RetrievePageOfNicknames(self, start_nickname=None)
Retrieve one page of nicknames in the domain
+ +
RetrievePageOfRecipients(self, list_name, start_recipient=None)
Retrieve one page of recipient of an email list.
+ +
RetrievePageOfUsers(self, start_username=None)
Retrieve one page of users in this domain.
+ +
RetrieveUser(self, user_name)
Retrieve a user account.

+Args:
+  user_name: string The user name to retrieve

+Returns:
+  gdata.apps.UserEntry
+ +
SuspendUser(self, user_name)
+ +
UpdateUser(self, user_name, user_entry)
Update a user account.
+ +
__init__(self, email=None, password=None, domain=None, source=None, server='www.google.com', additional_headers=None)
+ +
+Methods inherited from gdata.service.GDataService:
+
AuthSubTokenInfo(self)
Fetches the AuthSub token's metadata from the server.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
ClientLogin(self, username, password, account_type=None, service=None, auth_service_url=None, source=None, captcha_token=None, captcha_response=None)
Convenience method for authenticating using ProgrammaticLogin. 

+Sets values for email, password, and other optional members.

+Args:
+  username:
+  password:
+  account_type: string (optional)
+  service: string (optional)
+  auth_service_url: string (optional)
+  captcha_token: string (optional)
+  captcha_response: string (optional)
+ +
Delete(self, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4)
Deletes the entry at the given URI.

+Args:
+  uri: string The URI of the entry to be deleted. Example: 
+       '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.

+Returns:
+  True if the entry was deleted.
+ +
FetchOAuthRequestToken(self, scopes=None, extra_parameters=None)
Fetches OAuth request token and returns it.

+Args:
+  scopes: string or list of string base URL(s) of the service(s) to be
+      accessed. If None, then this method tries to determine the
+      scope(s) from the current service.
+  extra_parameters: dict (optional) key-value pairs as any additional
+      parameters to be included in the URL and signature while making a
+      request for fetching an OAuth request token. All the OAuth parameters
+      are added by default. But if provided through this argument, any
+      default parameters will be overwritten. For e.g. a default parameter
+      oauth_version 1.0 can be overwritten if
+      extra_parameters = {'oauth_version': '2.0'}
+  
+Returns:
+  The fetched request token as a gdata.auth.OAuthToken object.
+  
+Raises:
+  FetchingOAuthRequestTokenFailed if the server responded to the request
+  with an error.
+ +
GenerateAuthSubURL(self, next, scope, secure=False, session=True, domain='default')
Generate a URL at which the user will login and be redirected back.

+Users enter their credentials on a Google login page and a token is sent
+to the URL specified in next. See documentation for AuthSub login at:
+http://code.google.com/apis/accounts/docs/AuthSub.html

+Args:
+  next: string The URL user will be sent to after logging in.
+  scope: string or list of strings. The URLs of the services to be 
+         accessed.
+  secure: boolean (optional) Determines whether or not the issued token
+          is a secure token.
+  session: boolean (optional) Determines whether or not the issued token
+           can be upgraded to a session token.
+ +
GenerateOAuthAuthorizationURL(self, request_token=None, callback_url=None, extra_params=None, include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope')
Generates URL at which user will login to authorize the request token.

+Args:
+  request_token: gdata.auth.OAuthToken (optional) OAuth request token.
+      If not specified, then the current token will be used if it is of
+      type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.    
+  callback_url: string (optional) The URL user will be sent to after
+      logging in and granting access.
+  extra_params: dict (optional) Additional parameters to be sent.
+  include_scopes_in_callback: Boolean (default=False) if set to True, and
+      if 'callback_url' is present, the 'callback_url' will be modified to
+      include the scope(s) from the request token as a URL parameter. The
+      key for the 'callback' URL's scope parameter will be
+      OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as
+      a parameter to the 'callback' URL, is that the page which receives
+      the OAuth token will be able to tell which URLs the token grants
+      access to.
+  scopes_param_prefix: string (default='oauth_token_scope') The URL
+      parameter key which maps to the list of valid scopes for the token.
+      This URL parameter will be included in the callback URL along with
+      the scopes of the token as value if include_scopes_in_callback=True.
+      
+Returns:
+  A string URL at which the user is required to login.

+Raises:
+  NonOAuthToken if the user's request token is not an OAuth token or if a
+  request token was not available.
+ +
Get(self, uri, extra_headers=None, redirects_remaining=4, encoding='UTF-8', converter=None)
Query the GData API with the given URI

+The uri is the portion of the URI after the server value 
+(ex: www.google.com).

+To perform a query against Google Base, set the server to 
+'base.google.com' and set the uri to '/base/feeds/...', where ... is 
+your query. For example, to find snippets for all digital cameras uri 
+should be set to: '/base/feeds/snippets?bq=digital+camera'

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to 
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and 
+                 Authorization headers.
+  redirects_remaining: int (optional) Tracks the number of additional
+      redirects this method will allow. If the service object receives
+      a redirect and remaining is 0, it will not follow the redirect. 
+      This was added to avoid infinite redirect loops.
+  encoding: string (optional) The character encoding for the server's
+      response. Default is UTF-8
+  converter: func (optional) A function which will transform
+      the server's results before it is returned. Example: use 
+      GDataFeedFromString to parse the server response as if it
+      were a GDataFeed.

+Returns:
+  If there is no ResultsTransformer specified in the call, a GDataFeed 
+  or GDataEntry depending on which is sent from the server. If the 
+  response is neither a feed nor an entry and there is no ResultsTransformer,
+  return a string. If there is a ResultsTransformer, the returned value 
+  will be that of the ResultsTransformer function.
+ +
GetAuthSubToken(self)
Returns the AuthSub token as a string.

+If the token is a gdata.auth.AuthSubToken, the Authorization Label
+("AuthSub token") is removed.

+This method examines the current_token to see if it is an AuthSubToken
+or SecureAuthSubToken. If not, it searches the token_store for a token
+which matches the current scope.

+The current scope is determined by the service name string member.

+Returns:
+  If the current_token is set to an AuthSubToken/SecureAuthSubToken,
+  return the token string. If there is no current_token, a token string
+  for a token which matches the service object's default scope is returned.
+  If there are no tokens valid for the scope, returns None.
+ +
GetClientLoginToken(self)
Returns the token string for the current token or a token matching the 
+service scope.

+If the current_token is a ClientLoginToken, the token string for 
+the current token is returned. If the current_token is not set, this method
+searches for a token in the token_store which is valid for the service 
+object's current scope.

+The current scope is determined by the service name string member.
+The token string is the end of the Authorization header, it does not
+include the ClientLogin label.
+ +
GetEntry(self, uri, extra_headers=None)
Query the GData API with the given URI and receive an Entry.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataEntry built from the XML in the server's response.
+ +
GetFeed(self, uri, extra_headers=None, converter=<function GDataFeedFromString at 0x7f62cda81b18>)
Query the GData API with the given URI and receive a Feed.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataFeed built from the XML in the server's response.
+ +
GetMedia(self, uri, extra_headers=None)
Returns a MediaSource containing media and its metadata from the given
+URI string.
+ +
GetNext(self, feed)
Requests the next 'page' of results in the feed.

+This method uses the feed's next link to request an additional feed
+and uses the class of the feed to convert the results of the GET request.

+Args:
+  feed: atom.Feed or a subclass. The feed should contain a next link and
+      the type of the feed will be applied to the results from the 
+      server. The new feed which is returned will be of the same class
+      as this feed which was passed in.

+Returns:
+  A new feed representing the next set of results in the server's feed.
+  The type of this feed will match that of the feed argument.
+ +
Post(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert or update data into a GData service at the given URI.

+Args:
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri.
+  uri: string The location (feed) to which the data should be inserted.
+       Example: '/base/feeds/items'.
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the
+      server's response. Often this is a function like
+      GDataEntryFromString which will parse the body of the server's
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert data into a GData service at the given URI.

+Args:
+  verb: string, either 'POST' or 'PUT'
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri. 
+  uri: string The location (feed) to which the data should be inserted. 
+       Example: '/base/feeds/items'. 
+  extra_headers: dict (optional) HTTP headers which are to be included. 
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
ProgrammaticLogin(self, captcha_token=None, captcha_response=None)
Authenticates the user and sets the GData Auth token.

+Login retrieves a temporary auth token which must be used with all
+requests to GData services. The auth token is stored in the GData client
+object.

+Login is also used to respond to a captcha challenge. If the user's login
+attempt failed with a CaptchaRequired error, the user can respond by
+calling Login with the captcha token and the answer to the challenge.

+Args:
+  captcha_token: string (optional) The identifier for the captcha challenge
+                 which was presented to the user.
+  captcha_response: string (optional) The user's answer to the captcha
+                    challenge.

+Raises:
+  CaptchaRequired if the login service will require a captcha response
+  BadAuthentication if the login service rejected the username or password
+  Error if the login service responded with a 403 different from the above
+ +
Put(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=3, media_source=None, converter=None)
Updates an entry at the given URI.

+Args:
+  data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The 
+        XML containing the updated data.
+  uri: string A URI indicating entry to which the update will be applied.
+       Example: '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the put succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
RevokeAuthSubToken(self)
Revokes an existing AuthSub token.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
RevokeOAuthToken(self)
Revokes an existing OAuth token.

+Raises:
+  NonOAuthToken if the user's auth token is not an OAuth token.
+  RevokingOAuthTokenFailed if request for revoking an OAuth token failed.
+ +
SetAuthSubToken(self, token, scopes=None, rsa_key=None)
Sets the token sent in requests to an AuthSub token.

+Sets the current_token and attempts to add the token to the token_store.

+Only use this method if you have received a token from the AuthSub
+service. The auth token is set automatically when UpgradeToSessionToken()
+is used. See documentation for Google AuthSub here:
+http://code.google.com/apis/accounts/AuthForWebApps.html 

+Args:
+ token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string
+        The token returned by the AuthSub service. If the token is an
+        AuthSubToken or SecureAuthSubToken, the scope information stored in
+        the token is used. If the token is a string, the scopes parameter is
+        used to determine the valid scopes.
+ scopes: list of URLs for which the token is valid. This is only used
+         if the token parameter is a string.
+ rsa_key: string (optional) Private key required for RSA_SHA1 signature
+          method.  This parameter is necessary if the token is a string
+          representing a secure token.
+ +
SetClientLoginToken(self, token, scopes=None)
Sets the token sent in requests to a ClientLogin token.

+This method sets the current_token to a new ClientLoginToken and it 
+also attempts to add the ClientLoginToken to the token_store.

+Only use this method if you have received a token from the ClientLogin
+service. The auth_token is set automatically when ProgrammaticLogin()
+is used. See documentation for Google ClientLogin here:
+http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html

+Args:
+  token: string or instance of a ClientLoginToken.
+ +
SetOAuthInputParameters(self, signature_method, consumer_key, consumer_secret=None, rsa_key=None, two_legged_oauth=False)
Sets parameters required for using OAuth authentication mechanism.

+NOTE: Though consumer_secret and rsa_key are optional, either of the two
+is required depending on the value of the signature_method.

+Args:
+  signature_method: class which provides implementation for strategy class
+      oauth.oauth.OAuthSignatureMethod. Signature method to be used for
+      signing each request. Valid implementations are provided as the
+      constants defined by gdata.auth.OAuthSignatureMethod. Currently
+      they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
+      gdata.auth.OAuthSignatureMethod.HMAC_SHA1
+  consumer_key: string Domain identifying third_party web application.
+  consumer_secret: string (optional) Secret generated during registration.
+      Required only for HMAC_SHA1 signature method.
+  rsa_key: string (optional) Private key required for RSA_SHA1 signature
+      method.
+  two_legged_oauth: string (default=False) Enables two-legged OAuth process.
+ +
SetOAuthToken(self, oauth_token)
Attempts to set the current token and add it to the token store.

+The oauth_token can be any OAuth token i.e. unauthorized request token,
+authorized request token or access token.
+This method also attempts to add the token to the token store.
+Use this method any time you want the current token to point to the
+oauth_token passed. For e.g. call this method with the request token
+you receive from FetchOAuthRequestToken.

+Args:
+  request_token: gdata.auth.OAuthToken OAuth request token.
+ +
UpgradeToOAuthAccessToken(self, authorized_request_token=None, oauth_version='1.0')
Upgrades the authorized request token to an access token.

+Args:
+  authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request
+      token. If not specified, then the current token will be used if it is
+      of type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.
+  oauth_version: str (default='1.0') oauth_version parameter. All other
+      'oauth_' parameters are added by default. This parameter too, is
+      added by default but here you can override it's value.
+      
+Raises:
+  NonOAuthToken if the user's authorized request token is not an OAuth
+  token or if an authorized request token was not available.
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
UpgradeToSessionToken(self, token=None)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         (optional) which is good for a single use but can be upgraded
+         to a session token. If no token is passed in, the token
+         is found by looking in the token_store by looking for a token
+         for the current scope.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
upgrade_to_session_token(self, token)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         which is good for a single use but can be upgraded to a
+         session token.

+Returns:
+  The upgraded token as a gdata.auth.AuthSubToken object.

+Raises:
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
+Data descriptors inherited from gdata.service.GDataService:
+
captcha_token
+
Get the captcha token for a login request.
+
+
captcha_url
+
Get the captcha URL for a login request.
+
+
source
+
The source is the name of the application making the request. 
+It should be in the form company_id-app_name-app_version
+
+
+Data and other attributes inherited from gdata.service.GDataService:
+
auth_token = None
+ +
handler = None
+ +
tokens = None
+ +
+Methods inherited from atom.service.AtomService:
+
UseBasicAuth(self, username, password, for_proxy=False)
Sets an Authenticaiton: Basic HTTP header containing plaintext.

+Deprecated, use use_basic_auth instead.

+The username and password are base64 encoded and added to an HTTP header
+which will be included in each request. Note that your username and 
+password are sent in plaintext.

+Args:
+  username: str
+  password: str
+ +
request(self, operation, url, data=None, headers=None, url_params=None)
+ +
use_basic_auth(self, username, password, scopes=None)
+ +
+Data descriptors inherited from atom.service.AtomService:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
debug
+
If True, HTTP debug information is printed.
+
+
override_token
+
+
+Data and other attributes inherited from atom.service.AtomService:
+
auto_set_current_token = True
+ +
auto_store_tokens = True
+ +
current_token = None
+ +
port = 80
+ +
ssl = False
+ +

+ + + + + +
 
+class Error(exceptions.Exception)
    
Method resolution order:
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors defined here:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + + + +
 
+class PropertyService(gdata.service.GDataService)
   Client for the Google Apps Property service.
 
 
Method resolution order:
+
PropertyService
+
gdata.service.GDataService
+
atom.service.AtomService
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, email=None, password=None, domain=None, source=None, server='apps-apis.google.com', additional_headers=None)
+ +
+Methods inherited from gdata.service.GDataService:
+
AuthSubTokenInfo(self)
Fetches the AuthSub token's metadata from the server.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
ClientLogin(self, username, password, account_type=None, service=None, auth_service_url=None, source=None, captcha_token=None, captcha_response=None)
Convenience method for authenticating using ProgrammaticLogin. 

+Sets values for email, password, and other optional members.

+Args:
+  username:
+  password:
+  account_type: string (optional)
+  service: string (optional)
+  auth_service_url: string (optional)
+  captcha_token: string (optional)
+  captcha_response: string (optional)
+ +
Delete(self, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4)
Deletes the entry at the given URI.

+Args:
+  uri: string The URI of the entry to be deleted. Example: 
+       '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.

+Returns:
+  True if the entry was deleted.
+ +
FetchOAuthRequestToken(self, scopes=None, extra_parameters=None)
Fetches OAuth request token and returns it.

+Args:
+  scopes: string or list of string base URL(s) of the service(s) to be
+      accessed. If None, then this method tries to determine the
+      scope(s) from the current service.
+  extra_parameters: dict (optional) key-value pairs as any additional
+      parameters to be included in the URL and signature while making a
+      request for fetching an OAuth request token. All the OAuth parameters
+      are added by default. But if provided through this argument, any
+      default parameters will be overwritten. For e.g. a default parameter
+      oauth_version 1.0 can be overwritten if
+      extra_parameters = {'oauth_version': '2.0'}
+  
+Returns:
+  The fetched request token as a gdata.auth.OAuthToken object.
+  
+Raises:
+  FetchingOAuthRequestTokenFailed if the server responded to the request
+  with an error.
+ +
GenerateAuthSubURL(self, next, scope, secure=False, session=True, domain='default')
Generate a URL at which the user will login and be redirected back.

+Users enter their credentials on a Google login page and a token is sent
+to the URL specified in next. See documentation for AuthSub login at:
+http://code.google.com/apis/accounts/docs/AuthSub.html

+Args:
+  next: string The URL user will be sent to after logging in.
+  scope: string or list of strings. The URLs of the services to be 
+         accessed.
+  secure: boolean (optional) Determines whether or not the issued token
+          is a secure token.
+  session: boolean (optional) Determines whether or not the issued token
+           can be upgraded to a session token.
+ +
GenerateOAuthAuthorizationURL(self, request_token=None, callback_url=None, extra_params=None, include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope')
Generates URL at which user will login to authorize the request token.

+Args:
+  request_token: gdata.auth.OAuthToken (optional) OAuth request token.
+      If not specified, then the current token will be used if it is of
+      type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.    
+  callback_url: string (optional) The URL user will be sent to after
+      logging in and granting access.
+  extra_params: dict (optional) Additional parameters to be sent.
+  include_scopes_in_callback: Boolean (default=False) if set to True, and
+      if 'callback_url' is present, the 'callback_url' will be modified to
+      include the scope(s) from the request token as a URL parameter. The
+      key for the 'callback' URL's scope parameter will be
+      OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as
+      a parameter to the 'callback' URL, is that the page which receives
+      the OAuth token will be able to tell which URLs the token grants
+      access to.
+  scopes_param_prefix: string (default='oauth_token_scope') The URL
+      parameter key which maps to the list of valid scopes for the token.
+      This URL parameter will be included in the callback URL along with
+      the scopes of the token as value if include_scopes_in_callback=True.
+      
+Returns:
+  A string URL at which the user is required to login.

+Raises:
+  NonOAuthToken if the user's request token is not an OAuth token or if a
+  request token was not available.
+ +
Get(self, uri, extra_headers=None, redirects_remaining=4, encoding='UTF-8', converter=None)
Query the GData API with the given URI

+The uri is the portion of the URI after the server value 
+(ex: www.google.com).

+To perform a query against Google Base, set the server to 
+'base.google.com' and set the uri to '/base/feeds/...', where ... is 
+your query. For example, to find snippets for all digital cameras uri 
+should be set to: '/base/feeds/snippets?bq=digital+camera'

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to 
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and 
+                 Authorization headers.
+  redirects_remaining: int (optional) Tracks the number of additional
+      redirects this method will allow. If the service object receives
+      a redirect and remaining is 0, it will not follow the redirect. 
+      This was added to avoid infinite redirect loops.
+  encoding: string (optional) The character encoding for the server's
+      response. Default is UTF-8
+  converter: func (optional) A function which will transform
+      the server's results before it is returned. Example: use 
+      GDataFeedFromString to parse the server response as if it
+      were a GDataFeed.

+Returns:
+  If there is no ResultsTransformer specified in the call, a GDataFeed 
+  or GDataEntry depending on which is sent from the server. If the 
+  response is niether a feed or entry and there is no ResultsTransformer,
+  return a string. If there is a ResultsTransformer, the returned value 
+  will be that of the ResultsTransformer function.
+ +
GetAuthSubToken(self)
Returns the AuthSub token as a string.

+If the token is an gdta.auth.AuthSubToken, the Authorization Label
+("AuthSub token") is removed.

+This method examines the current_token to see if it is an AuthSubToken
+or SecureAuthSubToken. If not, it searches the token_store for a token
+which matches the current scope.

+The current scope is determined by the service name string member.

+Returns:
+  If the current_token is set to an AuthSubToken/SecureAuthSubToken,
+  return the token string. If there is no current_token, a token string
+  for a token which matches the service object's default scope is returned.
+  If there are no tokens valid for the scope, returns None.
+ +
GetClientLoginToken(self)
Returns the token string for the current token or a token matching the 
+service scope.

+If the current_token is a ClientLoginToken, the token string for 
+the current token is returned. If the current_token is not set, this method
+searches for a token in the token_store which is valid for the service 
+object's current scope.

+The current scope is determined by the service name string member.
+The token string is the end of the Authorization header, it doesn not
+include the ClientLogin label.
+ +
GetEntry(self, uri, extra_headers=None)
Query the GData API with the given URI and receive an Entry.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataEntry built from the XML in the server's response.
+ +
GetFeed(self, uri, extra_headers=None, converter=<function GDataFeedFromString at 0x7f62cda81b18>)
Query the GData API with the given URI and receive a Feed.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataFeed built from the XML in the server's response.
+ +
GetMedia(self, uri, extra_headers=None)
Returns a MediaSource containing media and its metadata from the given
+URI string.
+ +
GetNext(self, feed)
Requests the next 'page' of results in the feed.

+This method uses the feed's next link to request an additional feed
+and uses the class of the feed to convert the results of the GET request.

+Args:
+  feed: atom.Feed or a subclass. The feed should contain a next link and
+      the type of the feed will be applied to the results from the 
+      server. The new feed which is returned will be of the same class
+      as this feed which was passed in.

+Returns:
+  A new feed representing the next set of results in the server's feed.
+  The type of this feed will match that of the feed argument.
+ +
Post(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert or update  data into a GData service at the given URI.

+Args:
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri.
+  uri: string The location (feed) to which the data should be inserted.
+       Example: '/base/feeds/items'.
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the
+      server's response. Often this is a function like
+      GDataEntryFromString which will parse the body of the server's
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert data into a GData service at the given URI.

+Args:
+  verb: string, either 'POST' or 'PUT'
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri. 
+  uri: string The location (feed) to which the data should be inserted. 
+       Example: '/base/feeds/items'. 
+  extra_headers: dict (optional) HTTP headers which are to be included. 
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
ProgrammaticLogin(self, captcha_token=None, captcha_response=None)
Authenticates the user and sets the GData Auth token.

+Login retreives a temporary auth token which must be used with all
+requests to GData services. The auth token is stored in the GData client
+object.

+Login is also used to respond to a captcha challenge. If the user's login
+attempt failed with a CaptchaRequired error, the user can respond by
+calling Login with the captcha token and the answer to the challenge.

+Args:
+  captcha_token: string (optional) The identifier for the captcha challenge
+                 which was presented to the user.
+  captcha_response: string (optional) The user's answer to the captch 
+                    challenge.

+Raises:
+  CaptchaRequired if the login service will require a captcha response
+  BadAuthentication if the login service rejected the username or password
+  Error if the login service responded with a 403 different from the above
+ +
Put(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=3, media_source=None, converter=None)
Updates an entry at the given URI.

+Args:
+  data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The 
+        XML containing the updated data.
+  uri: string A URI indicating entry to which the update will be applied.
+       Example: '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the put succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
RevokeAuthSubToken(self)
Revokes an existing AuthSub token.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
RevokeOAuthToken(self)
Revokes an existing OAuth token.

+Raises:
+  NonOAuthToken if the user's auth token is not an OAuth token.
+  RevokingOAuthTokenFailed if request for revoking an OAuth token failed.
+ +
SetAuthSubToken(self, token, scopes=None, rsa_key=None)
Sets the token sent in requests to an AuthSub token.

+Sets the current_token and attempts to add the token to the token_store.

+Only use this method if you have received a token from the AuthSub
+service. The auth token is set automatically when UpgradeToSessionToken()
+is used. See documentation for Google AuthSub here:
+http://code.google.com/apis/accounts/AuthForWebApps.html 

+Args:
+ token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string
+        The token returned by the AuthSub service. If the token is an
+        AuthSubToken or SecureAuthSubToken, the scope information stored in
+        the token is used. If the token is a string, the scopes parameter is
+        used to determine the valid scopes.
+ scopes: list of URLs for which the token is valid. This is only used
+         if the token parameter is a string.
+ rsa_key: string (optional) Private key required for RSA_SHA1 signature
+          method.  This parameter is necessary if the token is a string
+          representing a secure token.
+ +
SetClientLoginToken(self, token, scopes=None)
Sets the token sent in requests to a ClientLogin token.

+This method sets the current_token to a new ClientLoginToken and it 
+also attempts to add the ClientLoginToken to the token_store.

+Only use this method if you have received a token from the ClientLogin
+service. The auth_token is set automatically when ProgrammaticLogin()
+is used. See documentation for Google ClientLogin here:
+http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html

+Args:
+  token: string or instance of a ClientLoginToken.
+ +
SetOAuthInputParameters(self, signature_method, consumer_key, consumer_secret=None, rsa_key=None, two_legged_oauth=False)
Sets parameters required for using OAuth authentication mechanism.

+NOTE: Though consumer_secret and rsa_key are optional, either of the two
+is required depending on the value of the signature_method.

+Args:
+  signature_method: class which provides implementation for strategy class
+      oauth.oauth.OAuthSignatureMethod. Signature method to be used for
+      signing each request. Valid implementations are provided as the
+      constants defined by gdata.auth.OAuthSignatureMethod. Currently
+      they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
+      gdata.auth.OAuthSignatureMethod.HMAC_SHA1
+  consumer_key: string Domain identifying third_party web application.
+  consumer_secret: string (optional) Secret generated during registration.
+      Required only for HMAC_SHA1 signature method.
+  rsa_key: string (optional) Private key required for RSA_SHA1 signature
+      method.
+  two_legged_oauth: string (default=False) Enables two-legged OAuth process.
+ +
SetOAuthToken(self, oauth_token)
Attempts to set the current token and add it to the token store.

+The oauth_token can be any OAuth token i.e. unauthorized request token,
+authorized request token or access token.
+This method also attempts to add the token to the token store.
+Use this method any time you want the current token to point to the
+oauth_token passed. For e.g. call this method with the request token
+you receive from FetchOAuthRequestToken.

+Args:
+  request_token: gdata.auth.OAuthToken OAuth request token.
+ +
UpgradeToOAuthAccessToken(self, authorized_request_token=None, oauth_version='1.0')
Upgrades the authorized request token to an access token.

+Args:
+  authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request
+      token. If not specified, then the current token will be used if it is
+      of type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.
+  oauth_version: str (default='1.0') oauth_version parameter. All other
+      'oauth_' parameters are added by default. This parameter too, is
+      added by default but here you can override it's value.
+      
+Raises:
+  NonOAuthToken if the user's authorized request token is not an OAuth
+  token or if an authorized request token was not available.
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
UpgradeToSessionToken(self, token=None)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         (optional) which is good for a single use but can be upgraded
+         to a session token. If no token is passed in, the token
+         is found by looking in the token_store by looking for a token
+         for the current scope.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
upgrade_to_session_token(self, token)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         which is good for a single use but can be upgraded to a
+         session token.

+Returns:
+  The upgraded token as a gdata.auth.AuthSubToken object.

+Raises:
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
+Data descriptors inherited from gdata.service.GDataService:
+
captcha_token
+
Get the captcha token for a login request.
+
+
captcha_url
+
Get the captcha URL for a login request.
+
+
source
+
The source is the name of the application making the request. 
+It should be in the form company_id-app_name-app_version
+
+
+Data and other attributes inherited from gdata.service.GDataService:
+
auth_token = None
+ +
handler = None
+ +
tokens = None
+ +
+Methods inherited from atom.service.AtomService:
+
UseBasicAuth(self, username, password, for_proxy=False)
Sets an Authenticaiton: Basic HTTP header containing plaintext.

+Deprecated, use use_basic_auth instead.

+The username and password are base64 encoded and added to an HTTP header
+which will be included in each request. Note that your username and 
+password are sent in plaintext.

+Args:
+  username: str
+  password: str
+ +
request(self, operation, url, data=None, headers=None, url_params=None)
+ +
use_basic_auth(self, username, password, scopes=None)
+ +
+Data descriptors inherited from atom.service.AtomService:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
debug
+
If True, HTTP debug information is printed.
+
+
override_token
+
+
+Data and other attributes inherited from atom.service.AtomService:
+
auto_set_current_token = True
+ +
auto_store_tokens = True
+ +
current_token = None
+ +
port = 80
+ +
ssl = False
+ +

+ + + + + +
 
+Data
       API_VER = '2.0'
+DEFAULT_QUOTA_LIMIT = '2048'
+DOMAIN_ALIAS_LIMIT_EXCEEDED = 1201
+DOMAIN_FEATURE_UNAVAILABLE = 1203
+DOMAIN_SUSPENDED = 1202
+DOMAIN_USER_LIMIT_EXCEEDED = 1200
+ENTITY_DOES_NOT_EXIST = 1301
+ENTITY_EXISTS = 1300
+ENTITY_NAME_IS_RESERVED = 1302
+ENTITY_NAME_NOT_VALID = 1303
+HTTP_OK = 200
+INVALID_EMAIL_ADDRESS = 1406
+INVALID_FAMILY_NAME = 1401
+INVALID_GIVEN_NAME = 1400
+INVALID_HASH_DIGGEST_LENGTH = 1405
+INVALID_HASH_FUNCTION_NAME = 1404
+INVALID_PASSWORD = 1402
+INVALID_QUERY_PARAMETER_VALUE = 1407
+INVALID_USERNAME = 1403
+TOO_MANY_RECIPIENTS_ON_EMAIL_LIST = 1500
+UNKOWN_ERROR = 1000
+USER_DELETED_RECENTLY = 1100
+USER_SUSPENDED = 1101
+__author__ = 'tmatsuo@sios.com (Takashi MATSUO)'

+ + + + + +
 
+Author
       tmatsuo@sios.com (Takashi MATSUO)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.auth.html b/gdata.py-1.2.3/pydocs/gdata.auth.html new file mode 100644 index 0000000..24d2110 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.auth.html @@ -0,0 +1,731 @@ + + +Python: module gdata.auth + + + + +
 
+ 
gdata.auth
index
/usr/local/svn/gdata-python-client/src/gdata/auth.py
+

#/usr/bin/python
+#
+# Copyright (C) 2007, 2008 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+

+ + + + + +
 
+Modules
       
atom
+cgi
+gdata.tlslite.utils.cryptomath
+
gdata.tlslite.utils.keyfactory
+math
+gdata.oauth
+
gdata.oauth.rsa
+random
+re
+
time
+types
+urllib
+

+ + + + + +
 
+Classes
       
+
__builtin__.object +
+
+
OAuthInputParams +
OAuthSignatureMethod +
+
+
atom.http_interface.GenericToken(__builtin__.object) +
+
+
ClientLoginToken +
+
+
AuthSubToken +
+
+
SecureAuthSubToken +
+
+
+
+
OAuthToken +
+
+
+

+ + + + + +
 
+class AuthSubToken(ClientLoginToken)
    
Method resolution order:
+
AuthSubToken
+
ClientLoginToken
+
atom.http_interface.GenericToken
+
__builtin__.object
+
+
+Methods defined here:
+
get_token_string(self)
Removes AUTHSUB_AUTH_LABEL to give just the token value.
+ +
set_token_string(self, token_string)
+ +
+Methods inherited from ClientLoginToken:
+
__init__(self, auth_header=None, scopes=None)
+ +
__str__(self)
+ +
perform_request(self, http_client, operation, url, data=None, headers=None)
Sets the Authorization header and makes the HTTP request.
+ +
valid_for_scope(self, url)
Tells the caller if the token authorizes access to the desired URL.
+ +
+Data descriptors inherited from atom.http_interface.GenericToken:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class ClientLoginToken(atom.http_interface.GenericToken)
   Stores the Authorization header in auth_header and adds to requests.

+This token will add it's Authorization header to an HTTP request
+as it is made. Ths token class is simple but
+some Token classes must calculate portions of the Authorization header
+based on the request being made, which is why the token is responsible
+for making requests via an http_client parameter.

+Args:
+  auth_header: str The value for the Authorization header.
+  scopes: list of str or atom.url.Url specifying the beginnings of URLs
+      for which this token can be used. For example, if scopes contains
+      'http://example.com/foo', then this token can be used for a request to
+      'http://example.com/foo/bar' but it cannot be used for a request to
+      'http://example.com/baz'
 
 
Method resolution order:
+
ClientLoginToken
+
atom.http_interface.GenericToken
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, auth_header=None, scopes=None)
+ +
__str__(self)
+ +
get_token_string(self)
Removes PROGRAMMATIC_AUTH_LABEL to give just the token value.
+ +
perform_request(self, http_client, operation, url, data=None, headers=None)
Sets the Authorization header and makes the HTTP request.
+ +
set_token_string(self, token_string)
+ +
valid_for_scope(self, url)
Tells the caller if the token authorizes access to the desired URL.
+ +
+Data descriptors inherited from atom.http_interface.GenericToken:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class OAuthInputParams(__builtin__.object)
   Stores OAuth input parameters.

+This class is a store for OAuth input parameters viz. consumer key and secret,
+signature method and RSA key.
 
 Methods defined here:
+
GetConsumer(self)
Gets the OAuth consumer.

+Returns:
+  object of type <oauth.oauth.Consumer>
+ +
GetSignatureMethod(self)
Gets the OAuth signature method.

+Returns:
+  object of supertype <oauth.oauth.OAuthSignatureMethod>
+ +
__init__(self, signature_method, consumer_key, consumer_secret=None, rsa_key=None)
Initializes object with parameters required for using OAuth mechanism.

+NOTE: Though consumer_secret and rsa_key are optional, either of the two
+is required depending on the value of the signature_method.

+Args:
+  signature_method: class which provides implementation for strategy class
+      oauth.oauth.OAuthSignatureMethod. Signature method to be used for
+      signing each request. Valid implementations are provided as the
+      constants defined by gdata.auth.OAuthSignatureMethod. Currently
+      they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
+      gdata.auth.OAuthSignatureMethod.HMAC_SHA1
+  consumer_key: string Domain identifying third_party web application.
+  consumer_secret: string (optional) Secret generated during registration.
+      Required only for HMAC_SHA1 signature method.
+  rsa_key: string (optional) Private key required for RSA_SHA1 signature
+      method.
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class OAuthSignatureMethod(__builtin__.object)
   Holds valid OAuth signature methods.

+RSA_SHA1: Class to build signature according to RSA-SHA1 algorithm.
+HMAC_SHA1: Class to build signature according to HMAC-SHA1 algorithm.
 
 Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Data and other attributes defined here:
+
HMAC_SHA1 = <class 'gdata.oauth.OAuthSignatureMethod_HMAC_SHA1'>
+ +
RSA_SHA1 = <class 'gdata.auth.RSA_SHA1'>
Provides implementation for abstract methods to return RSA certs.
+ +

+ + + + + + + +
 
+class OAuthToken(atom.http_interface.GenericToken)
   Stores the token key, token secret and scopes for which token is valid.

+This token adds the authorization header to each request made. It
+re-calculates authorization header for every request since the OAuth
+signature to be added to the authorization header is dependent on the
+request parameters.

+Attributes:
+  key: str The value for the OAuth token i.e. token key.
+  secret: str The value for the OAuth token secret.
+  scopes: list of str or atom.url.Url specifying the beginnings of URLs
+      for which this token can be used. For example, if scopes contains
+      'http://example.com/foo', then this token can be used for a request to
+      'http://example.com/foo/bar' but it cannot be used for a request to
+      'http://example.com/baz'
+  oauth_input_params: OAuthInputParams OAuth input parameters.
 
 
Method resolution order:
+
OAuthToken
+
atom.http_interface.GenericToken
+
__builtin__.object
+
+
+Methods defined here:
+
GetAuthHeader(self, http_method, http_url, realm='')
Get the authentication header.

+Args:
+  http_method: string HTTP method i.e. operation e.g. GET, POST, PUT, etc.
+  http_url: string or atom.url.Url HTTP URL to which request is made.
+  realm: string (default='') realm parameter to be included in the
+      authorization header.

+Returns:
+  dict Header to be sent with every subsequent request after
+  authentication.
+ +
__init__(self, key=None, secret=None, scopes=None, oauth_input_params=None)
+ +
__str__(self)
+ +
get_token_string(self)
Returns the token string.

+The token string returned is of format
+oauth_token=[0]&oauth_token_secret=[1], where [0] and [1] are some strings.

+Returns:
+  A token string of format oauth_token=[0]&oauth_token_secret=[1],
+  where [0] and [1] are some strings. If self.secret is absent, it just
+  returns oauth_token=[0]. If self.key is absent, it just returns
+  oauth_token_secret=[1]. If both are absent, it returns None.
+ +
perform_request(self, http_client, operation, url, data=None, headers=None)
Sets the Authorization header and makes the HTTP request.
+ +
set_token_string(self, token_string)
Sets the token key and secret from the token string.

+Args:
+  token_string: str Token string of form
+      oauth_token=[0]&oauth_token_secret=[1]. If oauth_token is not present,
+      self.key will be None. If oauth_token_secret is not present,
+      self.secret will be None.
+ +
valid_for_scope(self, url)
+ +
+Data descriptors inherited from atom.http_interface.GenericToken:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class SecureAuthSubToken(AuthSubToken)
   Stores the rsa private key, token, and scopes for the secure AuthSub token.

+This token adds the authorization header to each request made. It
+re-calculates authorization header for every request since the secure AuthSub
+signature to be added to the authorization header is dependent on the
+request parameters.

+Attributes:
+  rsa_key: string The RSA private key in PEM format that the token will
+           use to sign requests
+  token_string: string (optional) The value for the AuthSub token.
+  scopes: list of str or atom.url.Url specifying the beginnings of URLs
+      for which this token can be used. For example, if scopes contains
+      'http://example.com/foo', then this token can be used for a request to
+      'http://example.com/foo/bar' but it cannot be used for a request to
+      'http://example.com/baz'
 
 
Method resolution order:
+
SecureAuthSubToken
+
AuthSubToken
+
ClientLoginToken
+
atom.http_interface.GenericToken
+
__builtin__.object
+
+
+Methods defined here:
+
GetAuthHeader(self, http_method, http_url)
Generates the Authorization header.

+The form of the secure AuthSub Authorization header is
+Authorization: AuthSub token="token" sigalg="sigalg" data="data" sig="sig"
+and  data represents a string in the form
+data = http_method http_url timestamp nonce

+Args:
+  http_method: string HTTP method i.e. operation e.g. GET, POST, PUT, etc.
+  http_url: string or atom.url.Url HTTP URL to which request is made.
+  
+Returns:
+  dict Header to be sent with every subsequent request after authentication.
+ +
__init__(self, rsa_key, token_string=None, scopes=None)
+ +
__str__(self)
+ +
get_token_string(self)
+ +
perform_request(self, http_client, operation, url, data=None, headers=None)
Sets the Authorization header and makes the HTTP request.
+ +
set_token_string(self, token_string)
+ +
+Methods inherited from ClientLoginToken:
+
valid_for_scope(self, url)
Tells the caller if the token authorizes access to the desired URL.
+ +
+Data descriptors inherited from atom.http_interface.GenericToken:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Functions
       
AuthSubTokenFromHttpBody(http_body)
Extracts the AuthSub token from an HTTP body string.

+Used to find the new session token after making a request to upgrade a
+single use AuthSub token.

+Args:
+  http_body: str The repsonse from the server which contains the AuthSub
+      key. For example, this function would find the new session token
+      from the server's response to an upgrade token request.

+Returns:
+  The header value to use for Authorization which contains the AuthSub
+  token.
+
AuthSubTokenFromUrl(url)
Extracts the AuthSub token from the URL. 

+Used after the AuthSub redirect has sent the user to the 'next' page and
+appended the token to the URL. This function returns the value to be used
+in the Authorization header. 

+Args:
+  url: str The URL of the current page which contains the AuthSub token as
+      a URL parameter.
+
GenerateAuthSubUrl(next, scope, secure=False, session=True, request_url='https://www.google.com/accounts/AuthSubRequest', domain='default')
Generate a URL at which the user will login and be redirected back.

+Users enter their credentials on a Google login page and a token is sent
+to the URL specified in next. See documentation for AuthSub login at:
+http://code.google.com/apis/accounts/AuthForWebApps.html

+Args:
+  request_url: str The beginning of the request URL. This is normally
+      'http://www.google.com/accounts/AuthSubRequest' or 
+      '/accounts/AuthSubRequest'
+  next: string The URL user will be sent to after logging in.
+  scope: string The URL of the service to be accessed.
+  secure: boolean (optional) Determines whether or not the issued token
+          is a secure token.
+  session: boolean (optional) Determines whether or not the issued token
+           can be upgraded to a session token.
+  domain: str (optional) The Google Apps domain for this account. If this
+          is not a Google Apps account, use 'default' which is the default
+          value.
+
GenerateClientLoginAuthToken(http_body)
Returns the token value to use in Authorization headers.

+Reads the token from the server's response to a Client Login request and
+creates header value to use in requests.

+Args:
+  http_body: str The body of the server's HTTP response to a Client Login
+      request

+Returns:
+  The value half of an Authorization header.
+
GenerateClientLoginRequestBody = generate_client_login_request_body(email, password, service, source, account_type='HOSTED_OR_GOOGLE', captcha_token=None, captcha_response=None)
Creates the body of the autentication request

+See http://code.google.com/apis/accounts/AuthForInstalledApps.html#Request
+for more details.

+Args:
+  email: str
+  password: str
+  service: str
+  source: str
+  account_type: str (optional) Defaul is 'HOSTED_OR_GOOGLE', other valid
+      values are 'GOOGLE' and 'HOSTED'
+  captcha_token: str (optional)
+  captcha_response: str (optional)

+Returns:
+  The HTTP body to send in a request for a client login token.
+
GenerateOAuthAccessTokenUrl(authorized_request_token, oauth_input_params, access_token_url='https://www.google.com/accounts/OAuthGetAccessToken', oauth_version='1.0')
Generates URL at which user will login to authorize the request token.

+Args:
+  authorized_request_token: gdata.auth.OAuthToken OAuth authorized request
+      token.
+  oauth_input_params: OAuthInputParams OAuth input parameters.    
+  access_token_url: string The beginning of the authorization URL. This is
+      normally 'https://www.google.com/accounts/OAuthGetAccessToken' or
+      '/accounts/OAuthGetAccessToken'
+  oauth_version: str (default='1.0') oauth_version parameter.

+Returns:
+  atom.url.Url OAuth access token URL.
+
GenerateOAuthAuthorizationUrl(request_token, authorization_url='https://www.google.com/accounts/OAuthAuthorizeToken', callback_url=None, extra_params=None, include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope')
Generates URL at which user will login to authorize the request token.

+Args:
+  request_token: gdata.auth.OAuthToken OAuth request token.
+  authorization_url: string The beginning of the authorization URL. This is
+      normally 'https://www.google.com/accounts/OAuthAuthorizeToken' or
+      '/accounts/OAuthAuthorizeToken'
+  callback_url: string (optional) The URL user will be sent to after
+      logging in and granting access.
+  extra_params: dict (optional) Additional parameters to be sent.
+  include_scopes_in_callback: Boolean (default=False) if set to True, and
+      if 'callback_url' is present, the 'callback_url' will be modified to
+      include the scope(s) from the request token as a URL parameter. The
+      key for the 'callback' URL's scope parameter will be
+      OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as
+      a parameter to the 'callback' URL, is that the page which receives
+      the OAuth token will be able to tell which URLs the token grants
+      access to.
+  scopes_param_prefix: string (default='oauth_token_scope') The URL
+      parameter key which maps to the list of valid scopes for the token.
+      This URL parameter will be included in the callback URL along with
+      the scopes of the token as value if include_scopes_in_callback=True.

+Returns:
+  atom.url.Url OAuth authorization URL.
+
GenerateOAuthRequestTokenUrl(oauth_input_params, scopes, request_token_url='https://www.google.com/accounts/OAuthGetRequestToken', extra_parameters=None)
Generate a URL at which a request for OAuth request token is to be sent.

+Args:
+  oauth_input_params: OAuthInputParams OAuth input parameters.
+  scopes: list of strings The URLs of the services to be accessed.
+  request_token_url: string The beginning of the request token URL. This is
+      normally 'https://www.google.com/accounts/OAuthGetRequestToken' or
+      '/accounts/OAuthGetRequestToken'
+  extra_parameters: dict (optional) key-value pairs as any additional
+      parameters to be included in the URL and signature while making a
+      request for fetching an OAuth request token. All the OAuth parameters
+      are added by default. But if provided through this argument, any
+      default parameters will be overwritten. For e.g. a default parameter
+      oauth_version 1.0 can be overwritten if
+      extra_parameters = {'oauth_version': '2.0'}

+Returns:
+  atom.url.Url OAuth request token URL.
+
GetCaptchaChallenge = get_captcha_challenge(http_body, captcha_base_url='http://www.google.com/accounts/')
Returns the URL and token for a CAPTCHA challenge issued by the server.

+Args:
+  http_body: str The body of the HTTP response from the server which 
+      contains the CAPTCHA challenge.
+  captcha_base_url: str This function returns a full URL for viewing the 
+      challenge image which is built from the server's response. This
+      base_url is used as the beginning of the URL because the server
+      only provides the end of the URL. For example the server provides
+      'Captcha?ctoken=Hi...N' and the URL for the image is
+      'http://www.google.com/accounts/Captcha?ctoken=Hi...N'

+Returns:
+  A dictionary containing the information needed to repond to the CAPTCHA
+  challenge, the image URL and the ID token of the challenge. The 
+  dictionary is in the form:
+  {'token': string identifying the CAPTCHA image,
+   'url': string containing the URL of the image}
+  Returns None if there was no CAPTCHA challenge in the response.
+
OAuthTokenFromHttpBody(http_body)
Parses the HTTP response body and returns an OAuth token.

+The returned OAuth token will just have key and secret parameters set.
+It won't have any knowledge about the scopes or oauth_input_params. It is
+your responsibility to make it aware of the remaining parameters.

+Returns:
+  OAuthToken OAuth token.
+
OAuthTokenFromUrl(url, scopes_param_prefix='oauth_token_scope')
Creates an OAuthToken and sets token key and scopes (if present) from URL.

+After the Google Accounts OAuth pages redirect the user's broswer back to 
+the web application (using the 'callback' URL from the request) the web app
+can extract the token from the current page's URL. The token is same as the
+request token, but it is either authorized (if user grants access) or
+unauthorized (if user denies access). The token is provided as a 
+URL parameter named 'oauth_token' and if it was chosen to use
+GenerateOAuthAuthorizationUrl with include_scopes_in_param=True, the token's
+valid scopes are included in a URL parameter whose name is specified in
+scopes_param_prefix.

+Args:
+  url: atom.url.Url or str representing the current URL. The token value
+      and valid scopes should be included as URL parameters.
+  scopes_param_prefix: str (optional) The URL parameter key which maps to
+      the list of valid scopes for the token.

+Returns:
+  An OAuthToken with the token key from the URL and set to be valid for
+  the scopes passed in on the URL. If no scopes were included in the URL,
+  the OAuthToken defaults to being valid for no scopes. If there was no
+  'oauth_token' parameter in the URL, this function returns None.
+
TokenFromHttpBody = token_from_http_body(http_body)
Extracts the AuthSub token from an HTTP body string.

+Used to find the new session token after making a request to upgrade a 
+single use AuthSub token.

+Args:
+  http_body: str The repsonse from the server which contains the AuthSub 
+      key. For example, this function would find the new session token
+      from the server's response to an upgrade token request.

+Returns:
+  The raw token value to use in an AuthSubToken object.
+
TokenFromUrl(url)
Extracts the AuthSub token from the URL.

+Returns the raw token value.

+Args:
+  url: str The URL or the query portion of the URL string (after the ?) of
+      the current page which contains the AuthSub token as a URL parameter.
+
extract_auth_sub_token_from_url(url, scopes_param_prefix='auth_sub_scopes', rsa_key=None)
Creates an AuthSubToken and sets the token value and scopes from the URL.

+After the Google Accounts AuthSub pages redirect the user's broswer back to 
+the web application (using the 'next' URL from the request) the web app must
+extract the token from the current page's URL. The token is provided as a 
+URL parameter named 'token' and if generate_auth_sub_url was used to create
+the request, the token's valid scopes are included in a URL parameter whose
+name is specified in scopes_param_prefix.

+Args:
+  url: atom.url.Url or str representing the current URL. The token value
+       and valid scopes should be included as URL parameters.
+  scopes_param_prefix: str (optional) The URL parameter key which maps to
+                       the list of valid scopes for the token.

+Returns:
+  An AuthSubToken with the token value from the URL and set to be valid for
+  the scopes passed in on the URL. If no scopes were included in the URL,
+  the AuthSubToken defaults to being valid for no scopes. If there was no
+  'token' parameter in the URL, this function returns None.
+
extract_client_login_token(http_body, scopes)
Parses the server's response and returns a ClientLoginToken.

+Args:
+  http_body: str The body of the server's HTTP response to a Client Login
+             request. It is assumed that the login request was successful.
+  scopes: list containing atom.url.Urls or strs. The scopes list contains
+          all of the partial URLs under which the client login token is
+          valid. For example, if scopes contains ['http://example.com/foo']
+          then the client login token would be valid for 
+          http://example.com/foo/bar/baz

+Returns:
+  A ClientLoginToken which is valid for the specified scopes.
+
generate_auth_sub_url(next, scopes, secure=False, session=True, request_url='https://www.google.com/accounts/AuthSubRequest', domain='default', scopes_param_prefix='auth_sub_scopes')
Constructs a URL string for requesting a multiscope AuthSub token.

+The generated token will contain a URL parameter to pass along the 
+requested scopes to the next URL. When the Google Accounts page 
+redirects the broswser to the 'next' URL, it appends the single use
+AuthSub token value to the URL as a URL parameter with the key 'token'.
+However, the information about which scopes were requested is not
+included by Google Accounts. This method adds the scopes to the next
+URL before making the request so that the redirect will be sent to 
+a page, and both the token value and the list of scopes can be 
+extracted from the request URL. 

+Args:
+  next: atom.url.URL or string The URL user will be sent to after
+        authorizing this web application to access their data.
+  scopes: list containint strings The URLs of the services to be accessed.
+  secure: boolean (optional) Determines whether or not the issued token
+          is a secure token.
+  session: boolean (optional) Determines whether or not the issued token
+           can be upgraded to a session token.
+  request_url: atom.url.Url or str The beginning of the request URL. This
+      is normally 'http://www.google.com/accounts/AuthSubRequest' or 
+      '/accounts/AuthSubRequest'
+  domain: The domain which the account is part of. This is used for Google
+      Apps accounts, the default value is 'default' which means that the
+      requested account is a Google Account (@gmail.com for example)
+  scopes_param_prefix: str (optional) The requested scopes are added as a 
+      URL parameter to the next URL so that the page at the 'next' URL can
+      extract the token value and the valid scopes from the URL. The key
+      for the URL parameter defaults to 'auth_sub_scopes'

+Returns:
+  An atom.url.Url which the user's browser should be directed to in order
+  to authorize this application to access their information.
+
generate_client_login_request_body(email, password, service, source, account_type='HOSTED_OR_GOOGLE', captcha_token=None, captcha_response=None)
Creates the body of the autentication request

+See http://code.google.com/apis/accounts/AuthForInstalledApps.html#Request
+for more details.

+Args:
+  email: str
+  password: str
+  service: str
+  source: str
+  account_type: str (optional) Defaul is 'HOSTED_OR_GOOGLE', other valid
+      values are 'GOOGLE' and 'HOSTED'
+  captcha_token: str (optional)
+  captcha_response: str (optional)

+Returns:
+  The HTTP body to send in a request for a client login token.
+
get_captcha_challenge(http_body, captcha_base_url='http://www.google.com/accounts/')
Returns the URL and token for a CAPTCHA challenge issued by the server.

+Args:
+  http_body: str The body of the HTTP response from the server which 
+      contains the CAPTCHA challenge.
+  captcha_base_url: str This function returns a full URL for viewing the 
+      challenge image which is built from the server's response. This
+      base_url is used as the beginning of the URL because the server
+      only provides the end of the URL. For example the server provides
+      'Captcha?ctoken=Hi...N' and the URL for the image is
+      'http://www.google.com/accounts/Captcha?ctoken=Hi...N'

+Returns:
+  A dictionary containing the information needed to repond to the CAPTCHA
+  challenge, the image URL and the ID token of the challenge. The 
+  dictionary is in the form:
+  {'token': string identifying the CAPTCHA image,
+   'url': string containing the URL of the image}
+  Returns None if there was no CAPTCHA challenge in the response.
+
get_client_login_token(http_body)
Returns the token value for a ClientLoginToken.

+Reads the token from the server's response to a Client Login request and
+creates the token value string to use in requests.

+Args:
+  http_body: str The body of the server's HTTP response to a Client Login
+      request

+Returns:
+  The token value string for a ClientLoginToken.
+
token_from_http_body(http_body)
Extracts the AuthSub token from an HTTP body string.

+Used to find the new session token after making a request to upgrade a 
+single use AuthSub token.

+Args:
+  http_body: str The repsonse from the server which contains the AuthSub 
+      key. For example, this function would find the new session token
+      from the server's response to an upgrade token request.

+Returns:
+  The raw token value to use in an AuthSubToken object.
+

+ + + + + +
 
+Data
       AUTHSUB_AUTH_LABEL = 'AuthSub token='
+PROGRAMMATIC_AUTH_LABEL = 'GoogleLogin auth='
+__author__ = 'api.jscudder (Jeff Scudder)'

+ + + + + +
 
+Author
       api.jscudder (Jeff Scudder)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.base.html b/gdata.py-1.2.3/pydocs/gdata.base.html new file mode 100644 index 0000000..c2c01b8 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.base.html @@ -0,0 +1,1957 @@ + + +Python: package gdata.base + + + + +
 
+ 
gdata.base
index
/usr/local/svn/gdata-python-client/src/gdata/base/__init__.py
+

Contains extensions to Atom objects used with Google Base.

+

+ + + + + +
 
+Package Contents
       
service
+

+ + + + + +
 
+Classes
       
+
__builtin__.object +
+
+
ItemAttributeContainer +
+
+
GBaseItem(ItemAttributeContainer, gdata.BatchEntry) +
+
+
GBaseSnippet +
+
+
+
+
+
+
atom.AtomBase(atom.ExtensionContainer) +
+
+
Attributes +
Label +
Thumbnail +
Value +
+
+
atom.Text(atom.AtomBase) +
+
+
Attribute +
ImageLink +
ItemAttribute +
ItemType +
+
+
MetaItemType +
+
+
+
+
gdata.BatchFeed(gdata.GDataFeed) +
+
+
GBaseItemFeed +
+
+
gdata.GDataEntry(atom.Entry, gdata.LinkFinder) +
+
+
GBaseAttributeEntry +
GBaseItemTypeEntry +
+
+
gdata.GDataFeed(atom.Feed, gdata.LinkFinder) +
+
+
GBaseAttributesFeed +
GBaseItemTypesFeed +
GBaseLocalesFeed +
GBaseSnippetFeed +
+
+
+

+ + + + + + + +
 
+class Attribute(atom.Text)
   Metadata about an attribute from the attributes feed

+An entry from the attributes feed contains a list of attributes. Each 
+attribute describes the attribute's type and count of the items which
+use the attribute.
 
 
Method resolution order:
+
Attribute
+
atom.Text
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, name=None, attribute_type=None, count=None, value=None, text=None, extension_elements=None, extension_attributes=None)
Constructor for Attribute metadata element

+Args:
+  name: str (optional) The name of the attribute
+  attribute_type: str (optional) The type for the attribute. Examples:
+      test, float, etc.
+  count: str (optional) The number of times this attribute appears in
+      the query results.
+  value: list (optional) The values which are often used for this 
+      attirbute.
+  text: str (optional) The text contents of the XML for this attribute.
+  extension_elements: list (optional) A  list of ExtensionElement 
+      instances
+  extension_attributes: dict (optional) A dictionary of attribute value 
+      string pairs
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Attributes(atom.AtomBase)
   A collection of Google Base metadata attributes
 
 
Method resolution order:
+
Attributes
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, attribute=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class GBaseAttributeEntry(gdata.GDataEntry)
   An Atom Entry from the attributes feed
 
 
Method resolution order:
+
GBaseAttributeEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, contributor=None, atom_id=None, link=None, published=None, rights=None, source=None, summary=None, title=None, updated=None, label=None, attribute=None, control=None, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class GBaseAttributesFeed(gdata.GDataFeed)
   A feed containing Google Base Attributes

+A query sent to the attributes feed will return a feed of
+attributes which are present in the items that match the
+query.
 
 
Method resolution order:
+
GBaseAttributesFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class GBaseItem(ItemAttributeContainer, gdata.BatchEntry)
   An Google Base flavor of an Atom Entry.

+Google Base items have required attributes, recommended attributes, and user
+defined attributes. The required attributes are stored in this class as 
+members, and other attributes are stored as extension elements. You can 
+access the recommended and user defined attributes by using 
+AddItemAttribute, SetItemAttribute, FindItemAttribute, and 
+RemoveItemAttribute.

+The Base Item
 
 
Method resolution order:
+
GBaseItem
+
ItemAttributeContainer
+
gdata.BatchEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, contributor=None, atom_id=None, link=None, published=None, rights=None, source=None, summary=None, title=None, updated=None, control=None, label=None, item_type=None, item_attributes=None, batch_operation=None, batch_id=None, batch_status=None, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from ItemAttributeContainer:
+
AddItemAttribute(self, name, value, value_type=None, access=None)
Adds a new item attribute tag containing the value.

+Creates a new extension element in the GBase namespace to represent a
+Google Base item attribute.

+Args:
+  name: str The tag name for the new attribute. This must be a valid xml
+    tag name. The tag will be placed in the GBase namespace.
+  value: str Contents for the item attribute
+  value_type: str (optional) The type of data in the vlaue, Examples: text
+      float
+  access: str (optional) Used to hide attributes. The attribute is not 
+      exposed in the snippets feed if access is set to 'private'.
+ +
FindItemAttribute(self, name)
Get the contents of the first Base item attribute which matches name.

+This method is deprecated, please use GetItemAttributes instead.

+Args: 
+  name: str The tag of the desired base attribute. For example, calling
+      this method with name = 'rating' would search for a tag rating
+      in the GBase namespace in the item attributes. 

+Returns:
+  The text contents of the item attribute, or none if the attribute was
+  not found.
+ +
GetItemAttributes(self, name)
Returns a list of all item attributes which have the desired name.

+Args:
+  name: str The tag of the desired base attributes. For example, calling
+      this method with 'rating' would return a list of ItemAttributes
+      represented by a 'g:rating' tag.

+Returns:
+  A list of matching ItemAttribute objects.
+ +
RemoveItemAttribute(self, name)
Deletes the first extension element which matches name.

+Deletes the first extension element which matches name.
+ +
SetItemAttribute(self, name, value)
Changes an existing item attribute's value.
+ +
+Data descriptors inherited from ItemAttributeContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class GBaseItemFeed(gdata.BatchFeed)
   A feed containing Google Base Items
 
 
Method resolution order:
+
GBaseItemFeed
+
gdata.BatchFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.BatchFeed:
+
AddBatchEntry(self, entry=None, id_url_string=None, batch_id_string=None, operation_string=None)
Logic for populating members of a BatchEntry and adding to the feed.


+If the entry is not a BatchEntry, it is converted to a BatchEntry so
+that the batch specific members will be present. 

+The id_url_string can be used in place of an entry if the batch operation
+applies to a URL. For example query and delete operations require just
+the URL of an entry, no body is sent in the HTTP request. If an
+id_url_string is sent instead of an entry, a BatchEntry is created and
+added to the feed.

+This method also assigns the desired batch id to the entry so that it 
+can be referenced in the server's response. If the batch_id_string is
+None, this method will assign a batch_id to be the index at which this
+entry will be in the feed's entry list.

+Args:
+  entry: BatchEntry, atom.Entry, or another Entry flavor (optional) The
+      entry which will be sent to the server as part of the batch request.
+      The item must have a valid atom id so that the server knows which 
+      entry this request references.
+  id_url_string: str (optional) The URL of the entry to be acted on. You
+      can find this URL in the text member of the atom id for an entry.
+      If an entry is not sent, this id will be used to construct a new
+      BatchEntry which will be added to the request feed.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. Note that batch_ids should either always be specified or
+      never, mixing could potentially result in duplicate batch ids.
+  operation_string: str (optional) The desired batch operation which will
+      set the batch_operation.type member of the entry. Options are
+      'insert', 'update', 'delete', and 'query'

+Raises:
+  MissingRequiredParameters: Raised if neither an id_ url_string nor an
+      entry are provided in the request.

+Returns:
+  The added entry.
+ +
AddDelete(self, url_string=None, entry=None, batch_id_string=None)
Adds a delete request to the batch request feed.

+This method takes either the url_string which is the atom id of the item
+to be deleted, or the entry itself. The atom id of the entry must be 
+present so that the server knows which entry should be deleted.

+Args:
+  url_string: str (optional) The URL of the entry to be deleted. You can
+     find this URL in the text member of the atom id for an entry. 
+  entry: BatchEntry (optional) The entry to be deleted.
+  batch_id_string: str (optional)

+Raises:
+  MissingRequiredParameters: Raised if neither a url_string nor an entry 
+      are provided in the request.
+ +
AddInsert(self, entry, batch_id_string=None)
Add an insert request to the operations in this batch request feed.

+If the entry doesn't yet have an operation or a batch id, these will
+be set to the insert operation and a batch_id specified as a parameter.

+Args:
+  entry: BatchEntry The entry which will be sent in the batch feed as an
+      insert request.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. Note that batch_ids should either always be specified or
+      never, mixing could potentially result in duplicate batch ids.
+ +
AddQuery(self, url_string=None, entry=None, batch_id_string=None)
Adds a query request to the batch request feed.

+This method takes either the url_string which is the query URL 
+whose results will be added to the result feed. The query URL will
+be encapsulated in a BatchEntry, and you may pass in the BatchEntry
+with a query URL instead of sending a url_string.

+Args:
+  url_string: str (optional)
+  entry: BatchEntry (optional)
+  batch_id_string: str (optional)

+Raises:
+  MissingRequiredParameters
+ +
AddUpdate(self, entry, batch_id_string=None)
Add an update request to the list of batch operations in this feed.

+Sets the operation type of the entry to insert if it is not already set
+and assigns the desired batch id to the entry so that it can be 
+referenced in the server's response.

+Args:
+  entry: BatchEntry The entry which will be sent to the server as an
+      update (HTTP PUT) request. The item must have a valid atom id
+      so that the server knows which entry to replace.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. See also comments for AddInsert.
+ +
GetBatchLink(self)
+ +
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, interrupted=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class GBaseItemTypeEntry(gdata.GDataEntry)
   An Atom entry from the item types feed

+These entries contain a list of attributes which are stored in one
+XML node called attributes. This class simplifies the data structure
+by treating attributes as a list of attribute instances. 

+Note that the item_type for an item type entry is in the Google Base meta
+namespace as opposed to item_types encountered in other feeds.
 
 
Method resolution order:
+
GBaseItemTypeEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, contributor=None, atom_id=None, link=None, published=None, rights=None, source=None, summary=None, title=None, updated=None, label=None, item_type=None, control=None, attribute=None, attributes=None, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class GBaseItemTypesFeed(gdata.GDataFeed)
   A feed from the Google Base item types feed
 
 
Method resolution order:
+
GBaseItemTypesFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class GBaseLocalesFeed(gdata.GDataFeed)
   The locales feed from Google Base.

+This read-only feed defines the permitted locales for Google Base. The 
+locale value identifies the language, currency, and date formats used in a
+feed.
 
 
Method resolution order:
+
GBaseLocalesFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + +
 
+class GBaseSnippet(GBaseItem)
    
Method resolution order:
+
GBaseSnippet
+
GBaseItem
+
ItemAttributeContainer
+
gdata.BatchEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from GBaseItem:
+
__init__(self, author=None, category=None, content=None, contributor=None, atom_id=None, link=None, published=None, rights=None, source=None, summary=None, title=None, updated=None, control=None, label=None, item_type=None, item_attributes=None, batch_operation=None, batch_id=None, batch_status=None, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from ItemAttributeContainer:
+
AddItemAttribute(self, name, value, value_type=None, access=None)
Adds a new item attribute tag containing the value.

+Creates a new extension element in the GBase namespace to represent a
+Google Base item attribute.

+Args:
+  name: str The tag name for the new attribute. This must be a valid xml
+    tag name. The tag will be placed in the GBase namespace.
+  value: str Contents for the item attribute
+  value_type: str (optional) The type of data in the vlaue, Examples: text
+      float
+  access: str (optional) Used to hide attributes. The attribute is not 
+      exposed in the snippets feed if access is set to 'private'.
+ +
FindItemAttribute(self, name)
Get the contents of the first Base item attribute which matches name.

+This method is deprecated, please use GetItemAttributes instead.

+Args: 
+  name: str The tag of the desired base attribute. For example, calling
+      this method with name = 'rating' would search for a tag rating
+      in the GBase namespace in the item attributes. 

+Returns:
+  The text contents of the item attribute, or none if the attribute was
+  not found.
+ +
GetItemAttributes(self, name)
Returns a list of all item attributes which have the desired name.

+Args:
+  name: str The tag of the desired base attributes. For example, calling
+      this method with 'rating' would return a list of ItemAttributes
+      represented by a 'g:rating' tag.

+Returns:
+  A list of matching ItemAttribute objects.
+ +
RemoveItemAttribute(self, name)
Deletes the first extension element which matches name.

+Deletes the first extension element which matches name.
+ +
SetItemAttribute(self, name, value)
Changes an existing item attribute's value.
+ +
+Data descriptors inherited from ItemAttributeContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class GBaseSnippetFeed(gdata.GDataFeed)
   A feed containing Google Base Snippets
 
 
Method resolution order:
+
GBaseSnippetFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class ImageLink(atom.Text)
   The Google Base image_link element
 
 
Method resolution order:
+
ImageLink
+
atom.Text
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, thumbnail=None, text=None, extension_elements=None, text_type=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class ItemAttribute(atom.Text)
   An optional or user defined attribute for a GBase item.

+Google Base allows items to have custom attribute child nodes. These nodes
+have contents and a type attribute which tells Google Base whether the
+contents are text, a float value with units, etc. The Atom text class has 
+the same structure, so this class inherits from Text.
 
 
Method resolution order:
+
ItemAttribute
+
atom.Text
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, name, text_type=None, access=None, text=None, extension_elements=None, extension_attributes=None)
Constructor for a GBase item attribute

+Args:
+  name: str The name of the attribute. Examples include
+      price, color, make, model, pages, salary, etc.
+  text_type: str (optional) The type associated with the text contents
+  access: str (optional) If the access attribute is set to 'private', the
+      attribute will not be included in the item's description in the 
+      snippets feed
+  text: str (optional) The text data in the this element
+  extension_elements: list (optional) A  list of ExtensionElement 
+      instances
+  extension_attributes: dict (optional) A dictionary of attribute 
+      value string pairs
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class ItemAttributeContainer(__builtin__.object)
   Provides methods for finding Google Base Item attributes.

+Google Base item attributes are child nodes in the gbase namespace. Google
+Base allows you to define your own item attributes and this class provides
+methods to interact with the custom attributes.
 
 Methods defined here:
+
AddItemAttribute(self, name, value, value_type=None, access=None)
Adds a new item attribute tag containing the value.

+Creates a new extension element in the GBase namespace to represent a
+Google Base item attribute.

+Args:
+  name: str The tag name for the new attribute. This must be a valid xml
+    tag name. The tag will be placed in the GBase namespace.
+  value: str Contents for the item attribute
+  value_type: str (optional) The type of data in the vlaue, Examples: text
+      float
+  access: str (optional) Used to hide attributes. The attribute is not 
+      exposed in the snippets feed if access is set to 'private'.
+ +
FindItemAttribute(self, name)
Get the contents of the first Base item attribute which matches name.

+This method is deprecated, please use GetItemAttributes instead.

+Args: 
+  name: str The tag of the desired base attribute. For example, calling
+      this method with name = 'rating' would search for a tag rating
+      in the GBase namespace in the item attributes. 

+Returns:
+  The text contents of the item attribute, or none if the attribute was
+  not found.
+ +
GetItemAttributes(self, name)
Returns a list of all item attributes which have the desired name.

+Args:
+  name: str The tag of the desired base attributes. For example, calling
+      this method with 'rating' would return a list of ItemAttributes
+      represented by a 'g:rating' tag.

+Returns:
+  A list of matching ItemAttribute objects.
+ +
RemoveItemAttribute(self, name)
Deletes the first extension element which matches name.

+Deletes the first extension element which matches name.
+ +
SetItemAttribute(self, name, value)
Changes an existing item attribute's value.
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class ItemType(atom.Text)
   The Google Base item_type element
 
 
Method resolution order:
+
ItemType
+
atom.Text
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text=None, extension_elements=None, text_type=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Label(atom.AtomBase)
   The Google Base label element
 
 
Method resolution order:
+
Label
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class MetaItemType(ItemType)
   The Google Base item_type element
 
 
Method resolution order:
+
MetaItemType
+
ItemType
+
atom.Text
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from ItemType:
+
__init__(self, text=None, extension_elements=None, text_type=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Thumbnail(atom.AtomBase)
   The Google Base thumbnail element
 
 
Method resolution order:
+
Thumbnail
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, width=None, height=None, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Value(atom.AtomBase)
   Metadata about common values for a given attribute

+A value is a child of an attribute which comes from the attributes feed.
+The value's text is a commonly used value paired with an attribute name
+and the value's count tells how often this value appears for the given
+attribute in the search results.
 
 
Method resolution order:
+
Value
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, count=None, text=None, extension_elements=None, extension_attributes=None)
Constructor for Attribute metadata element

+Args:
+  count: str (optional) The number of times the value in text is given
+      for the parent attribute.
+  text: str (optional) The value which appears in the search results.
+  extension_elements: list (optional) A  list of ExtensionElement
+      instances
+  extension_attributes: dict (optional) A dictionary of attribute value
+      string pairs
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Functions
       
AttributeFromString(xml_string)
+
GBaseAttributeEntryFromString(xml_string)
+
GBaseAttributesFeedFromString(xml_string)
+
GBaseItemFeedFromString(xml_string)
+
GBaseItemFromString(xml_string)
+
GBaseItemTypeEntryFromString(xml_string)
+
GBaseItemTypesFeedFromString(xml_string)
+
GBaseLocalesFeedFromString(xml_string)
+
GBaseSnippetFeedFromString(xml_string)
+
GBaseSnippetFromString(xml_string)
+
ImageLinkFromString(xml_string)
+
ItemAttributeFromString(xml_string)
+
ItemTypeFromString(xml_string)
+
LabelFromString(xml_string)
+
MetaItemTypeFromString(xml_string)
+
ThumbnailFromString(xml_string)
+
ValueFromString(xml_string)
+

+ + + + + +
 
+Data
       GBASE_NAMESPACE = 'http://base.google.com/ns/1.0'
+GBASE_TEMPLATE = '{http://base.google.com/ns/1.0}%s'
+GMETA_NAMESPACE = 'http://base.google.com/ns-metadata/1.0'
+GMETA_TEMPLATE = '{http://base.google.com/ns-metadata/1.0}%s'
+__author__ = 'api.jscudder (Jeffrey Scudder)'

+ + + + + +
 
+Author
       api.jscudder (Jeffrey Scudder)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.base.service.html b/gdata.py-1.2.3/pydocs/gdata.base.service.html new file mode 100644 index 0000000..2d4b5b9 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.base.service.html @@ -0,0 +1,988 @@ + + +Python: module gdata.base.service + + + + +
 
+ 
gdata.base.service
index
/usr/local/svn/gdata-python-client/src/gdata/base/service.py
+

GBaseService extends the GDataService to streamline Google Base operations.

+GBaseService: Provides methods to query feeds and manipulate items. Extends 
+              GDataService.

+DictionaryToParamList: Function which converts a dictionary into a list of 
+                       URL arguments (represented as strings). This is a 
+                       utility function used in CRUD operations.

+

+ + + + + +
 
+Modules
       
atom
+
gdata
+
urllib
+

+ + + + + +
 
+Classes
       
+
exceptions.Exception(exceptions.BaseException) +
+
+
Error +
+
+
RequestError +
+
+
+
+
gdata.service.GDataService(atom.service.AtomService) +
+
+
GBaseService +
+
+
gdata.service.Query(__builtin__.dict) +
+
+
BaseQuery +
+
+
+

+ + + + + +
 
+class BaseQuery(gdata.service.Query)
    
Method resolution order:
+
BaseQuery
+
gdata.service.Query
+
__builtin__.dict
+
__builtin__.object
+
+
+Data descriptors defined here:
+
bq
+
The bq query parameter
+
+
+Methods inherited from gdata.service.Query:
+
ToUri(self)
+ +
__init__(self, feed=None, text_query=None, params=None, categories=None)
Constructor for Query

+Args:
+  feed: str (optional) The path for the feed (Examples: 
+      '/base/feeds/snippets' or 'calendar/feeds/jo@gmail.com/private/full'
+  text_query: str (optional) The contents of the q query parameter. The
+      contents of the text_query are URL escaped upon conversion to a URI.
+  params: dict (optional) Parameter value string pairs which become URL
+      params when translated to a URI. These parameters are added to the
+      query's items (key-value pairs).
+  categories: list (optional) List of category strings which should be
+      included as query categories. See 
+      http://code.google.com/apis/gdata/reference.html#Queries for 
+      details. If you want to get results from category A or B (both 
+      categories), specify a single list item 'A|B'.
+ +
__str__(self)
+ +
+Data descriptors inherited from gdata.service.Query:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
alt
+
The feed query's alt parameter
+
+
author
+
The feed query's author parameter
+
+
max_results
+
The feed query's max-results parameter
+
+
orderby
+
The feed query's orderby parameter
+
+
published_max
+
The feed query's published-max parameter
+
+
published_min
+
The feed query's published-min parameter
+
+
start_index
+
The feed query's start-index parameter
+
+
text_query
+
The feed query's q parameter
+
+
updated_max
+
The feed query's updated-max parameter
+
+
updated_min
+
The feed query's updated-min parameter
+
+
+Methods inherited from __builtin__.dict:
+
__cmp__(...)
x.__cmp__(y) <==> cmp(x,y)
+ +
__contains__(...)
D.__contains__(k) -> True if D has a key k, else False
+ +
__delitem__(...)
x.__delitem__(y) <==> del x[y]
+ +
__eq__(...)
x.__eq__(y) <==> x==y
+ +
__ge__(...)
x.__ge__(y) <==> x>=y
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__gt__(...)
x.__gt__(y) <==> x>y
+ +
__hash__(...)
x.__hash__() <==> hash(x)
+ +
__iter__(...)
x.__iter__() <==> iter(x)
+ +
__le__(...)
x.__le__(y) <==> x<=y
+ +
__len__(...)
x.__len__() <==> len(x)
+ +
__lt__(...)
x.__lt__(y) <==> x<y
+ +
__ne__(...)
x.__ne__(y) <==> x!=y
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setitem__(...)
x.__setitem__(i, y) <==> x[i]=y
+ +
clear(...)
D.clear() -> None.  Remove all items from D.
+ +
copy(...)
D.copy() -> a shallow copy of D
+ +
get(...)
D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.
+ +
has_key(...)
D.has_key(k) -> True if D has a key k, else False
+ +
items(...)
D.items() -> list of D's (key, value) pairs, as 2-tuples
+ +
iteritems(...)
D.iteritems() -> an iterator over the (key, value) items of D
+ +
iterkeys(...)
D.iterkeys() -> an iterator over the keys of D
+ +
itervalues(...)
D.itervalues() -> an iterator over the values of D
+ +
keys(...)
D.keys() -> list of D's keys
+ +
pop(...)
D.pop(k[,d]) -> v, remove specified key and return the corresponding value
+If key is not found, d is returned if given, otherwise KeyError is raised
+ +
popitem(...)
D.popitem() -> (k, v), remove and return some (key, value) pair as a
+2-tuple; but raise KeyError if D is empty
+ +
setdefault(...)
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+ +
update(...)
D.update(E, **F) -> None.  Update D from E and F: for k in E: D[k] = E[k]
+(if E has keys else: for (k, v) in E: D[k] = v) then: for k in F: D[k] = F[k]
+ +
values(...)
D.values() -> list of D's values
+ +
+Data and other attributes inherited from __builtin__.dict:
+
__new__ = <built-in method __new__ of type object at 0x72b260>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
fromkeys = <built-in method fromkeys of type object at 0xc57f40>
dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
+v defaults to None.
+ +

+ + + + + +
 
+class Error(exceptions.Exception)
    
Method resolution order:
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors defined here:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + + + +
 
+class GBaseService(gdata.service.GDataService)
   Client for the Google Base service.
 
 
Method resolution order:
+
GBaseService
+
gdata.service.GDataService
+
atom.service.AtomService
+
__builtin__.object
+
+
+Methods defined here:
+
DeleteItem(self, item_id, url_params=None, escape_params=True)
Removes an item with the specified ID from Google Base.

+Args:
+  item_id: string The ID of the item to be deleted. Example:
+           'http://www.google.com/base/feeds/items/13185446517496042648'
+  url_params: dict (optional) Additional URL parameters to be included
+              in the deletion request.
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.

+Returns:
+  True if the delete succeeded.
+ +
ExecuteBatch(self, batch_feed, converter=<function GBaseItemFeedFromString at 0x9df668>)
Sends a batch request feed to the server.

+Args: 
+  batch_feed: gdata.BatchFeed A feed containing BatchEntry elements which
+      contain the desired CRUD operation and any necessary entry data.
+  converter: Function (optional) Function to be executed on the server's
+      response. This function should take one string as a parameter. The
+      default value is GBaseItemFeedFromString which will turn the result 
+      into a gdata.base.GBaseItem object.

+Returns:
+  A gdata.BatchFeed containing the results.
+ +
GetAttribute(self, uri)
+ +
GetItem(self, uri)
+ +
GetItemType(self, uri)
+ +
GetLocale(self, uri)
+ +
GetSnippet(self, uri)
+ +
InsertItem(self, new_item, url_params=None, escape_params=True, converter=None)
Adds an item to Google Base.

+Args: 
+  new_item: atom.Entry or subclass A new item which is to be added to 
+            Google Base.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the insertion request. 
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.
+  converter: func (optional) Function which is executed on the server's
+      response before it is returned. Usually this is a function like
+      GBaseItemFromString which will parse the response and turn it into
+      an object.

+Returns:
+  If converter is defined, the results of running converter on the server's
+  response. Otherwise, it will be a GBaseItem.
+ +
Query(self, uri, converter=None)
Performs a style query and returns a resulting feed or entry.

+Args:
+  uri: string The full URI which be queried. Examples include
+      '/base/feeds/snippets?bq=digital+camera', 
+      'http://www.google.com/base/feeds/snippets?bq=digital+camera'
+      '/base/feeds/items'
+      I recommend creating a URI using a query class.
+  converter: func (optional) A function which will be executed on the
+      server's response. Examples include GBaseItemFromString, etc. 

+Returns:
+  If converter was specified, returns the results of calling converter on
+  the server's response. If converter was not specified, and the result
+  was an Atom Entry, returns a GBaseItem, by default, the method returns
+  the result of calling gdata.service's Get method.
+ +
QueryAttributesFeed(self, uri)
+ +
QueryItemTypesFeed(self, uri)
+ +
QueryItemsFeed(self, uri)
+ +
QueryLocalesFeed(self, uri)
+ +
QuerySnippetsFeed(self, uri)
+ +
UpdateItem(self, item_id, updated_item, url_params=None, escape_params=True, converter=<function GBaseItemFromString at 0x9df2a8>)
Updates an existing item.

+Args:
+  item_id: string The ID of the item to be updated.  Example:
+           'http://www.google.com/base/feeds/items/13185446517496042648'
+  updated_item: atom.Entry, subclass, or string, containing
+                the Atom Entry which will replace the base item which is 
+                stored at the item_id.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the update request.
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.
+  converter: func (optional) Function which is executed on the server's
+      response before it is returned. Usually this is a function like
+      GBaseItemFromString which will parse the response and turn it into
+      an object.

+Returns:
+  If converter is defined, the results of running converter on the server's
+  response. Otherwise, it will be a GBaseItem.
+ +
__init__(self, email=None, password=None, source=None, server='base.google.com', api_key=None, additional_headers=None, handler=None)
+ +
+Data descriptors defined here:
+
api_key
+
Get or set the API key to be included in all requests.
+
+
+Methods inherited from gdata.service.GDataService:
+
AuthSubTokenInfo(self)
Fetches the AuthSub token's metadata from the server.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
ClientLogin(self, username, password, account_type=None, service=None, auth_service_url=None, source=None, captcha_token=None, captcha_response=None)
Convenience method for authenticating using ProgrammaticLogin. 

+Sets values for email, password, and other optional members.

+Args:
+  username:
+  password:
+  account_type: string (optional)
+  service: string (optional)
+  auth_service_url: string (optional)
+  captcha_token: string (optional)
+  captcha_response: string (optional)
+ +
Delete(self, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4)
Deletes the entry at the given URI.

+Args:
+  uri: string The URI of the entry to be deleted. Example: 
+       '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.

+Returns:
+  True if the entry was deleted.
+ +
FetchOAuthRequestToken(self, scopes=None, extra_parameters=None)
Fetches OAuth request token and returns it.

+Args:
+  scopes: string or list of string base URL(s) of the service(s) to be
+      accessed. If None, then this method tries to determine the
+      scope(s) from the current service.
+  extra_parameters: dict (optional) key-value pairs as any additional
+      parameters to be included in the URL and signature while making a
+      request for fetching an OAuth request token. All the OAuth parameters
+      are added by default. But if provided through this argument, any
+      default parameters will be overwritten. For e.g. a default parameter
+      oauth_version 1.0 can be overwritten if
+      extra_parameters = {'oauth_version': '2.0'}
+  
+Returns:
+  The fetched request token as a gdata.auth.OAuthToken object.
+  
+Raises:
+  FetchingOAuthRequestTokenFailed if the server responded to the request
+  with an error.
+ +
GenerateAuthSubURL(self, next, scope, secure=False, session=True, domain='default')
Generate a URL at which the user will login and be redirected back.

+Users enter their credentials on a Google login page and a token is sent
+to the URL specified in next. See documentation for AuthSub login at:
+http://code.google.com/apis/accounts/docs/AuthSub.html

+Args:
+  next: string The URL user will be sent to after logging in.
+  scope: string or list of strings. The URLs of the services to be 
+         accessed.
+  secure: boolean (optional) Determines whether or not the issued token
+          is a secure token.
+  session: boolean (optional) Determines whether or not the issued token
+           can be upgraded to a session token.
+ +
GenerateOAuthAuthorizationURL(self, request_token=None, callback_url=None, extra_params=None, include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope')
Generates URL at which user will login to authorize the request token.

+Args:
+  request_token: gdata.auth.OAuthToken (optional) OAuth request token.
+      If not specified, then the current token will be used if it is of
+      type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.    
+  callback_url: string (optional) The URL user will be sent to after
+      logging in and granting access.
+  extra_params: dict (optional) Additional parameters to be sent.
+  include_scopes_in_callback: Boolean (default=False) if set to True, and
+      if 'callback_url' is present, the 'callback_url' will be modified to
+      include the scope(s) from the request token as a URL parameter. The
+      key for the 'callback' URL's scope parameter will be
+      OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as
+      a parameter to the 'callback' URL, is that the page which receives
+      the OAuth token will be able to tell which URLs the token grants
+      access to.
+  scopes_param_prefix: string (default='oauth_token_scope') The URL
+      parameter key which maps to the list of valid scopes for the token.
+      This URL parameter will be included in the callback URL along with
+      the scopes of the token as value if include_scopes_in_callback=True.
+      
+Returns:
+  A string URL at which the user is required to login.

+Raises:
+  NonOAuthToken if the user's request token is not an OAuth token or if a
+  request token was not available.
+ +
Get(self, uri, extra_headers=None, redirects_remaining=4, encoding='UTF-8', converter=None)
Query the GData API with the given URI

+The uri is the portion of the URI after the server value 
+(ex: www.google.com).

+To perform a query against Google Base, set the server to 
+'base.google.com' and set the uri to '/base/feeds/...', where ... is 
+your query. For example, to find snippets for all digital cameras uri 
+should be set to: '/base/feeds/snippets?bq=digital+camera'

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to 
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and 
+                 Authorization headers.
+  redirects_remaining: int (optional) Tracks the number of additional
+      redirects this method will allow. If the service object receives
+      a redirect and remaining is 0, it will not follow the redirect. 
+      This was added to avoid infinite redirect loops.
+  encoding: string (optional) The character encoding for the server's
+      response. Default is UTF-8
+  converter: func (optional) A function which will transform
+      the server's results before it is returned. Example: use 
+      GDataFeedFromString to parse the server response as if it
+      were a GDataFeed.

+Returns:
+  If there is no ResultsTransformer specified in the call, a GDataFeed 
+  or GDataEntry depending on which is sent from the server. If the 
+  response is niether a feed or entry and there is no ResultsTransformer,
+  return a string. If there is a ResultsTransformer, the returned value 
+  will be that of the ResultsTransformer function.
+ +
GetAuthSubToken(self)
Returns the AuthSub token as a string.

+If the token is an gdta.auth.AuthSubToken, the Authorization Label
+("AuthSub token") is removed.

+This method examines the current_token to see if it is an AuthSubToken
+or SecureAuthSubToken. If not, it searches the token_store for a token
+which matches the current scope.

+The current scope is determined by the service name string member.

+Returns:
+  If the current_token is set to an AuthSubToken/SecureAuthSubToken,
+  return the token string. If there is no current_token, a token string
+  for a token which matches the service object's default scope is returned.
+  If there are no tokens valid for the scope, returns None.
+ +
GetClientLoginToken(self)
Returns the token string for the current token or a token matching the 
+service scope.

+If the current_token is a ClientLoginToken, the token string for 
+the current token is returned. If the current_token is not set, this method
+searches for a token in the token_store which is valid for the service 
+object's current scope.

+The current scope is determined by the service name string member.
+The token string is the end of the Authorization header, it doesn not
+include the ClientLogin label.
+ +
GetEntry(self, uri, extra_headers=None)
Query the GData API with the given URI and receive an Entry.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataEntry built from the XML in the server's response.
+ +
GetFeed(self, uri, extra_headers=None, converter=<function GDataFeedFromString at 0x7fb40b7e3b18>)
Query the GData API with the given URI and receive a Feed.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataFeed built from the XML in the server's response.
+ +
GetMedia(self, uri, extra_headers=None)
Returns a MediaSource containing media and its metadata from the given
+URI string.
+ +
GetNext(self, feed)
Requests the next 'page' of results in the feed.

+This method uses the feed's next link to request an additional feed
+and uses the class of the feed to convert the results of the GET request.

+Args:
+  feed: atom.Feed or a subclass. The feed should contain a next link and
+      the type of the feed will be applied to the results from the 
+      server. The new feed which is returned will be of the same class
+      as this feed which was passed in.

+Returns:
+  A new feed representing the next set of results in the server's feed.
+  The type of this feed will match that of the feed argument.
+ +
Post(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert or update  data into a GData service at the given URI.

+Args:
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri.
+  uri: string The location (feed) to which the data should be inserted.
+       Example: '/base/feeds/items'.
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the
+      server's response. Often this is a function like
+      GDataEntryFromString which will parse the body of the server's
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert data into a GData service at the given URI.

+Args:
+  verb: string, either 'POST' or 'PUT'
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri. 
+  uri: string The location (feed) to which the data should be inserted. 
+       Example: '/base/feeds/items'. 
+  extra_headers: dict (optional) HTTP headers which are to be included. 
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
ProgrammaticLogin(self, captcha_token=None, captcha_response=None)
Authenticates the user and sets the GData Auth token.

+Login retreives a temporary auth token which must be used with all
+requests to GData services. The auth token is stored in the GData client
+object.

+Login is also used to respond to a captcha challenge. If the user's login
+attempt failed with a CaptchaRequired error, the user can respond by
+calling Login with the captcha token and the answer to the challenge.

+Args:
+  captcha_token: string (optional) The identifier for the captcha challenge
+                 which was presented to the user.
+  captcha_response: string (optional) The user's answer to the captch 
+                    challenge.

+Raises:
+  CaptchaRequired if the login service will require a captcha response
+  BadAuthentication if the login service rejected the username or password
+  Error if the login service responded with a 403 different from the above
+ +
Put(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=3, media_source=None, converter=None)
Updates an entry at the given URI.

+Args:
+  data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The 
+        XML containing the updated data.
+  uri: string A URI indicating entry to which the update will be applied.
+       Example: '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the put succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
RevokeAuthSubToken(self)
Revokes an existing AuthSub token.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
RevokeOAuthToken(self)
Revokes an existing OAuth token.

+Raises:
+  NonOAuthToken if the user's auth token is not an OAuth token.
+  RevokingOAuthTokenFailed if request for revoking an OAuth token failed.
+ +
SetAuthSubToken(self, token, scopes=None, rsa_key=None)
Sets the token sent in requests to an AuthSub token.

+Sets the current_token and attempts to add the token to the token_store.

+Only use this method if you have received a token from the AuthSub
+service. The auth token is set automatically when UpgradeToSessionToken()
+is used. See documentation for Google AuthSub here:
+http://code.google.com/apis/accounts/AuthForWebApps.html 

+Args:
+ token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string
+        The token returned by the AuthSub service. If the token is an
+        AuthSubToken or SecureAuthSubToken, the scope information stored in
+        the token is used. If the token is a string, the scopes parameter is
+        used to determine the valid scopes.
+ scopes: list of URLs for which the token is valid. This is only used
+         if the token parameter is a string.
+ rsa_key: string (optional) Private key required for RSA_SHA1 signature
+          method.  This parameter is necessary if the token is a string
+          representing a secure token.
+ +
SetClientLoginToken(self, token, scopes=None)
Sets the token sent in requests to a ClientLogin token.

+This method sets the current_token to a new ClientLoginToken and it 
+also attempts to add the ClientLoginToken to the token_store.

+Only use this method if you have received a token from the ClientLogin
+service. The auth_token is set automatically when ProgrammaticLogin()
+is used. See documentation for Google ClientLogin here:
+http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html

+Args:
+  token: string or instance of a ClientLoginToken.
+ +
SetOAuthInputParameters(self, signature_method, consumer_key, consumer_secret=None, rsa_key=None, two_legged_oauth=False)
Sets parameters required for using OAuth authentication mechanism.

+NOTE: Though consumer_secret and rsa_key are optional, either of the two
+is required depending on the value of the signature_method.

+Args:
+  signature_method: class which provides implementation for strategy class
+      oauth.oauth.OAuthSignatureMethod. Signature method to be used for
+      signing each request. Valid implementations are provided as the
+      constants defined by gdata.auth.OAuthSignatureMethod. Currently
+      they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
+      gdata.auth.OAuthSignatureMethod.HMAC_SHA1
+  consumer_key: string Domain identifying third_party web application.
+  consumer_secret: string (optional) Secret generated during registration.
+      Required only for HMAC_SHA1 signature method.
+  rsa_key: string (optional) Private key required for RSA_SHA1 signature
+      method.
+  two_legged_oauth: string (default=False) Enables two-legged OAuth process.
+ +
SetOAuthToken(self, oauth_token)
Attempts to set the current token and add it to the token store.

+The oauth_token can be any OAuth token i.e. unauthorized request token,
+authorized request token or access token.
+This method also attempts to add the token to the token store.
+Use this method any time you want the current token to point to the
+oauth_token passed. For e.g. call this method with the request token
+you receive from FetchOAuthRequestToken.

+Args:
+  request_token: gdata.auth.OAuthToken OAuth request token.
+ +
UpgradeToOAuthAccessToken(self, authorized_request_token=None, oauth_version='1.0')
Upgrades the authorized request token to an access token.

+Args:
+  authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request
+      token. If not specified, then the current token will be used if it is
+      of type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.
+  oauth_version: str (default='1.0') oauth_version parameter. All other
+      'oauth_' parameters are added by default. This parameter too, is
+      added by default but here you can override it's value.
+      
+Raises:
+  NonOAuthToken if the user's authorized request token is not an OAuth
+  token or if an authorized request token was not available.
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
UpgradeToSessionToken(self, token=None)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         (optional) which is good for a single use but can be upgraded
+         to a session token. If no token is passed in, the token
+         is found by looking in the token_store by looking for a token
+         for the current scope.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
upgrade_to_session_token(self, token)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         which is good for a single use but can be upgraded to a
+         session token.

+Returns:
+  The upgraded token as a gdata.auth.AuthSubToken object.

+Raises:
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
+Data descriptors inherited from gdata.service.GDataService:
+
captcha_token
+
Get the captcha token for a login request.
+
+
captcha_url
+
Get the captcha URL for a login request.
+
+
source
+
The source is the name of the application making the request. 
+It should be in the form company_id-app_name-app_version
+
+
+Data and other attributes inherited from gdata.service.GDataService:
+
auth_token = None
+ +
handler = None
+ +
tokens = None
+ +
+Methods inherited from atom.service.AtomService:
+
UseBasicAuth(self, username, password, for_proxy=False)
Sets an Authenticaiton: Basic HTTP header containing plaintext.

+Deprecated, use use_basic_auth instead.

+The username and password are base64 encoded and added to an HTTP header
+which will be included in each request. Note that your username and 
+password are sent in plaintext.

+Args:
+  username: str
+  password: str
+ +
request(self, operation, url, data=None, headers=None, url_params=None)
+ +
use_basic_auth(self, username, password, scopes=None)
+ +
+Data descriptors inherited from atom.service.AtomService:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
debug
+
If True, HTTP debug information is printed.
+
+
override_token
+
+
+Data and other attributes inherited from atom.service.AtomService:
+
auto_set_current_token = True
+ +
auto_store_tokens = True
+ +
current_token = None
+ +
port = 80
+ +
ssl = False
+ +

+ + + + + +
 
+class RequestError(Error)
    
Method resolution order:
+
RequestError
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+Data
       BASE_BATCH_URL = 'http://www.google.com/base/feeds/items/batch'
+__author__ = 'api.jscudder (Jeffrey Scudder)'

+ + + + + +
 
+Author
       api.jscudder (Jeffrey Scudder)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.blogger.html b/gdata.py-1.2.3/pydocs/gdata.blogger.html new file mode 100644 index 0000000..2721797 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.blogger.html @@ -0,0 +1,1132 @@ + + +Python: package gdata.blogger + + + + +
 
+ 
gdata.blogger
index
/usr/local/svn/gdata-python-client/src/gdata/blogger/__init__.py
+

Contains extensions to Atom objects used with Blogger.

+

+ + + + + +
 
+Package Contents
       
service
+

+ + + + + +
 
+Classes
       
+
atom.AtomBase(atom.ExtensionContainer) +
+
+
InReplyTo +
+
+
gdata.GDataEntry(atom.Entry, gdata.LinkFinder) +
+
+
BloggerEntry +
+
+
BlogEntry +
BlogPostEntry +
CommentEntry +
+
+
+
+
gdata.GDataFeed(atom.Feed, gdata.LinkFinder) +
+
+
BlogFeed +
BlogPostFeed +
CommentFeed +
+
+
+

+ + + + + + + +
 
+class BlogEntry(BloggerEntry)
   Describes a blog entry in the feed listing a user's blogs.
 
 
Method resolution order:
+
BlogEntry
+
BloggerEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from BloggerEntry:
+
GetBlogId(self)
Extracts the Blogger id of this blog.
+This method is useful when contructing URLs by hand. The blog id is
+often used in blogger operation URLs. This should not be confused with
+the id member of a BloggerBlog. The id element is the Atom id XML element.
+The blog id which this method returns is a part of the Atom id.

+Returns:
+  The blog's unique id as a string.
+ +
GetBlogName(self)
Finds the name of this blog as used in the 'alternate' URL.
+An alternate URL is in the form 'http://blogName.blogspot.com/'. For an
+entry representing the above example, this method would return 'blogName'.

+Returns:
+  The blog's URL name component as a string.
+ +
+Data and other attributes inherited from BloggerEntry:
+
blog_id2_pattern = <_sre.SRE_Pattern object at 0x8c0840>
+ +
blog_id_pattern = <_sre.SRE_Pattern object at 0x8bf2b0>
+ +
blog_name_pattern = <_sre.SRE_Pattern object at 0x88e030>
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.Entry:
+
__init__(self, author=None, category=None, content=None, contributor=None, atom_id=None, link=None, published=None, rights=None, source=None, summary=None, control=None, title=None, updated=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for atom:entry

+Args:
+  author: list A list of Author instances which belong to this class.
+  category: list A list of Category instances
+  content: Content The entry's Content
+  contributor: list A list on Contributor instances
+  id: Id The entry's Id element
+  link: list A list of Link instances
+  published: Published The entry's Published element
+  rights: Rights The entry's Rights element
+  source: Source the entry's source element
+  summary: Summary the entry's summary element
+  title: Title the entry's title element
+  updated: Updated the entry's updated element
+  control: The entry's app:control element which can be used to mark an 
+      entry as a draft which should not be publicly viewable.
+  text: String The text contents of the element. This is the contents
+      of the Entry's XML text node. (Example: <foo>This is the text</foo>)
+  extension_elements: list A list of ExtensionElement instances which are
+      children of this element.
+  extension_attributes: dict A dictionary of strings which are the values
+      for additional XML attributes of this element.
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class BlogFeed(gdata.GDataFeed)
   Describes a feed of a user's blogs.
 
 
Method resolution order:
+
BlogFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class BlogPostEntry(BloggerEntry)
   Describes a blog post entry in the feed of a blog's posts.
 
 
Method resolution order:
+
BlogPostEntry
+
BloggerEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
AddLabel(self, label)
Adds a label to the blog post. 

+The label is represented by an Atom category element, so this method
+is shorthand for appending a new atom.Category object.

+Args:
+  label: str
+ +
GetPostId(self)
Extracts the postID string from the entry's Atom id.

+Returns: A string of digits which identify this post within the blog.
+ +
+Data and other attributes defined here:
+
post_id_pattern = <_sre.SRE_Pattern object at 0xaaad40>
+ +
+Methods inherited from BloggerEntry:
+
GetBlogId(self)
Extracts the Blogger id of this blog.
+This method is useful when contructing URLs by hand. The blog id is
+often used in blogger operation URLs. This should not be confused with
+the id member of a BloggerBlog. The id element is the Atom id XML element.
+The blog id which this method returns is a part of the Atom id.

+Returns:
+  The blog's unique id as a string.
+ +
GetBlogName(self)
Finds the name of this blog as used in the 'alternate' URL.
+An alternate URL is in the form 'http://blogName.blogspot.com/'. For an
+entry representing the above example, this method would return 'blogName'.

+Returns:
+  The blog's URL name component as a string.
+ +
+Data and other attributes inherited from BloggerEntry:
+
blog_id2_pattern = <_sre.SRE_Pattern object at 0x8c0840>
+ +
blog_id_pattern = <_sre.SRE_Pattern object at 0x8bf2b0>
+ +
blog_name_pattern = <_sre.SRE_Pattern object at 0x88e030>
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.Entry:
+
__init__(self, author=None, category=None, content=None, contributor=None, atom_id=None, link=None, published=None, rights=None, source=None, summary=None, control=None, title=None, updated=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for atom:entry

+Args:
+  author: list A list of Author instances which belong to this class.
+  category: list A list of Category instances
+  content: Content The entry's Content
+  contributor: list A list on Contributor instances
+  id: Id The entry's Id element
+  link: list A list of Link instances
+  published: Published The entry's Published element
+  rights: Rights The entry's Rights element
+  source: Source the entry's source element
+  summary: Summary the entry's summary element
+  title: Title the entry's title element
+  updated: Updated the entry's updated element
+  control: The entry's app:control element which can be used to mark an 
+      entry as a draft which should not be publicly viewable.
+  text: String The text contents of the element. This is the contents
+      of the Entry's XML text node. (Example: <foo>This is the text</foo>)
+  extension_elements: list A list of ExtensionElement instances which are
+      children of this element.
+  extension_attributes: dict A dictionary of strings which are the values
+      for additional XML attributes of this element.
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class BlogPostFeed(gdata.GDataFeed)
   Describes a feed of a blog's posts.
 
 
Method resolution order:
+
BlogPostFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class BloggerEntry(gdata.GDataEntry)
   Adds convenience methods inherited by all Blogger entries.
 
 
Method resolution order:
+
BloggerEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
GetBlogId(self)
Extracts the Blogger id of this blog.
+This method is useful when contructing URLs by hand. The blog id is
+often used in blogger operation URLs. This should not be confused with
+the id member of a BloggerBlog. The id element is the Atom id XML element.
+The blog id which this method returns is a part of the Atom id.

+Returns:
+  The blog's unique id as a string.
+ +
GetBlogName(self)
Finds the name of this blog as used in the 'alternate' URL.
+An alternate URL is in the form 'http://blogName.blogspot.com/'. For an
+entry representing the above example, this method would return 'blogName'.

+Returns:
+  The blog's URL name component as a string.
+ +
+Data and other attributes defined here:
+
blog_id2_pattern = <_sre.SRE_Pattern object at 0x8c0840>
+ +
blog_id_pattern = <_sre.SRE_Pattern object at 0x8bf2b0>
+ +
blog_name_pattern = <_sre.SRE_Pattern object at 0x88e030>
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.Entry:
+
__init__(self, author=None, category=None, content=None, contributor=None, atom_id=None, link=None, published=None, rights=None, source=None, summary=None, control=None, title=None, updated=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for atom:entry

+Args:
+  author: list A list of Author instances which belong to this class.
+  category: list A list of Category instances
+  content: Content The entry's Content
+  contributor: list A list on Contributor instances
+  id: Id The entry's Id element
+  link: list A list of Link instances
+  published: Published The entry's Published element
+  rights: Rights The entry's Rights element
+  source: Source the entry's source element
+  summary: Summary the entry's summary element
+  title: Title the entry's title element
+  updated: Updated the entry's updated element
+  control: The entry's app:control element which can be used to mark an 
+      entry as a draft which should not be publicly viewable.
+  text: String The text contents of the element. This is the contents
+      of the Entry's XML text node. (Example: <foo>This is the text</foo>)
+  extension_elements: list A list of ExtensionElement instances which are
+      children of this element.
+  extension_attributes: dict A dictionary of strings which are the values
+      for additional XML attributes of this element.
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class CommentEntry(BloggerEntry)
   Describes a blog post comment entry in the feed of a blog post's 
+comments.
 
 
Method resolution order:
+
CommentEntry
+
BloggerEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
GetCommentId(self)
Extracts the commentID string from the entry's Atom id.

+Returns: A string of digits which identify this post within the blog.
+ +
__init__(self, author=None, category=None, content=None, contributor=None, atom_id=None, link=None, published=None, rights=None, source=None, summary=None, control=None, title=None, updated=None, in_reply_to=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Data and other attributes defined here:
+
comment_id_pattern = <_sre.SRE_Pattern object at 0x81d6b0>
+ +
+Methods inherited from BloggerEntry:
+
GetBlogId(self)
Extracts the Blogger id of this blog.
+This method is useful when contructing URLs by hand. The blog id is
+often used in blogger operation URLs. This should not be confused with
+the id member of a BloggerBlog. The id element is the Atom id XML element.
+The blog id which this method returns is a part of the Atom id.

+Returns:
+  The blog's unique id as a string.
+ +
GetBlogName(self)
Finds the name of this blog as used in the 'alternate' URL.
+An alternate URL is in the form 'http://blogName.blogspot.com/'. For an
+entry representing the above example, this method would return 'blogName'.

+Returns:
+  The blog's URL name component as a string.
+ +
+Data and other attributes inherited from BloggerEntry:
+
blog_id2_pattern = <_sre.SRE_Pattern object at 0x8c0840>
+ +
blog_id_pattern = <_sre.SRE_Pattern object at 0x8bf2b0>
+ +
blog_name_pattern = <_sre.SRE_Pattern object at 0x88e030>
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class CommentFeed(gdata.GDataFeed)
   Describes a feed of a blog post's comments.
 
 
Method resolution order:
+
CommentFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + +
 
+class InReplyTo(atom.AtomBase)
    
Method resolution order:
+
InReplyTo
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, href=None, ref=None, source=None, type=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Functions
       
BlogEntryFromString(xml_string)
+
BlogFeedFromString(xml_string)
+
BlogPostEntryFromString(xml_string)
+
BlogPostFeedFromString(xml_string)
+
CommentEntryFromString(xml_string)
+
CommentFeedFromString(xml_string)
+
InReplyToFromString(xml_string)
+

+ + + + + +
 
+Data
       LABEL_SCHEME = 'http://www.blogger.com/atom/ns#'
+THR_NAMESPACE = 'http://purl.org/syndication/thread/1.0'
+__author__ = 'api.jscudder (Jeffrey Scudder)'

+ + + + + +
 
+Author
       api.jscudder (Jeffrey Scudder)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.blogger.service.html b/gdata.py-1.2.3/pydocs/gdata.blogger.service.html new file mode 100644 index 0000000..ba8f8e0 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.blogger.service.html @@ -0,0 +1,1020 @@ + + +Python: module gdata.blogger.service + + + + +
 
+ 
gdata.blogger.service
index
/usr/local/svn/gdata-python-client/src/gdata/blogger/service.py
+

Classes to interact with the Blogger server.

+

+ + + + + +
 
+Modules
       
gdata
+

+ + + + + +
 
+Classes
       
+
gdata.service.GDataService(atom.service.AtomService) +
+
+
BloggerService +
+
+
gdata.service.Query(__builtin__.dict) +
+
+
BlogCommentQuery +
BlogPostQuery +
BlogQuery +
+
+
+

+ + + + + +
 
+class BlogCommentQuery(gdata.service.Query)
    
Method resolution order:
+
BlogCommentQuery
+
gdata.service.Query
+
__builtin__.dict
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, feed=None, params=None, categories=None, blog_id=None, post_id=None, comment_id=None)
+ +
+Methods inherited from gdata.service.Query:
+
ToUri(self)
+ +
__str__(self)
+ +
+Data descriptors inherited from gdata.service.Query:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
alt
+
The feed query's alt parameter
+
+
author
+
The feed query's author parameter
+
+
max_results
+
The feed query's max-results parameter
+
+
orderby
+
The feed query's orderby parameter
+
+
published_max
+
The feed query's published-max parameter
+
+
published_min
+
The feed query's published-min parameter
+
+
start_index
+
The feed query's start-index parameter
+
+
text_query
+
The feed query's q parameter
+
+
updated_max
+
The feed query's updated-max parameter
+
+
updated_min
+
The feed query's updated-min parameter
+
+
+Methods inherited from __builtin__.dict:
+
__cmp__(...)
x.__cmp__(y) <==> cmp(x,y)
+ +
__contains__(...)
D.__contains__(k) -> True if D has a key k, else False
+ +
__delitem__(...)
x.__delitem__(y) <==> del x[y]
+ +
__eq__(...)
x.__eq__(y) <==> x==y
+ +
__ge__(...)
x.__ge__(y) <==> x>=y
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__gt__(...)
x.__gt__(y) <==> x>y
+ +
__hash__(...)
x.__hash__() <==> hash(x)
+ +
__iter__(...)
x.__iter__() <==> iter(x)
+ +
__le__(...)
x.__le__(y) <==> x<=y
+ +
__len__(...)
x.__len__() <==> len(x)
+ +
__lt__(...)
x.__lt__(y) <==> x<y
+ +
__ne__(...)
x.__ne__(y) <==> x!=y
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setitem__(...)
x.__setitem__(i, y) <==> x[i]=y
+ +
clear(...)
D.clear() -> None.  Remove all items from D.
+ +
copy(...)
D.copy() -> a shallow copy of D
+ +
get(...)
D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.
+ +
has_key(...)
D.has_key(k) -> True if D has a key k, else False
+ +
items(...)
D.items() -> list of D's (key, value) pairs, as 2-tuples
+ +
iteritems(...)
D.iteritems() -> an iterator over the (key, value) items of D
+ +
iterkeys(...)
D.iterkeys() -> an iterator over the keys of D
+ +
itervalues(...)
D.itervalues() -> an iterator over the values of D
+ +
keys(...)
D.keys() -> list of D's keys
+ +
pop(...)
D.pop(k[,d]) -> v, remove specified key and return the corresponding value
+If key is not found, d is returned if given, otherwise KeyError is raised
+ +
popitem(...)
D.popitem() -> (k, v), remove and return some (key, value) pair as a
+2-tuple; but raise KeyError if D is empty
+ +
setdefault(...)
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+ +
update(...)
D.update(E, **F) -> None.  Update D from E and F: for k in E: D[k] = E[k]
+(if E has keys else: for (k, v) in E: D[k] = v) then: for k in F: D[k] = F[k]
+ +
values(...)
D.values() -> list of D's values
+ +
+Data and other attributes inherited from __builtin__.dict:
+
__new__ = <built-in method __new__ of type object at 0x72b260>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
fromkeys = <built-in method fromkeys of type object at 0xc4c840>
dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
+v defaults to None.
+ +

+ + + + + +
 
+class BlogPostQuery(gdata.service.Query)
    
Method resolution order:
+
BlogPostQuery
+
gdata.service.Query
+
__builtin__.dict
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, feed=None, params=None, categories=None, blog_id=None, post_id=None)
+ +
+Methods inherited from gdata.service.Query:
+
ToUri(self)
+ +
__str__(self)
+ +
+Data descriptors inherited from gdata.service.Query:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
alt
+
The feed query's alt parameter
+
+
author
+
The feed query's author parameter
+
+
max_results
+
The feed query's max-results parameter
+
+
orderby
+
The feed query's orderby parameter
+
+
published_max
+
The feed query's published-max parameter
+
+
published_min
+
The feed query's published-min parameter
+
+
start_index
+
The feed query's start-index parameter
+
+
text_query
+
The feed query's q parameter
+
+
updated_max
+
The feed query's updated-max parameter
+
+
updated_min
+
The feed query's updated-min parameter
+
+
+Methods inherited from __builtin__.dict:
+
__cmp__(...)
x.__cmp__(y) <==> cmp(x,y)
+ +
__contains__(...)
D.__contains__(k) -> True if D has a key k, else False
+ +
__delitem__(...)
x.__delitem__(y) <==> del x[y]
+ +
__eq__(...)
x.__eq__(y) <==> x==y
+ +
__ge__(...)
x.__ge__(y) <==> x>=y
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__gt__(...)
x.__gt__(y) <==> x>y
+ +
__hash__(...)
x.__hash__() <==> hash(x)
+ +
__iter__(...)
x.__iter__() <==> iter(x)
+ +
__le__(...)
x.__le__(y) <==> x<=y
+ +
__len__(...)
x.__len__() <==> len(x)
+ +
__lt__(...)
x.__lt__(y) <==> x<y
+ +
__ne__(...)
x.__ne__(y) <==> x!=y
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setitem__(...)
x.__setitem__(i, y) <==> x[i]=y
+ +
clear(...)
D.clear() -> None.  Remove all items from D.
+ +
copy(...)
D.copy() -> a shallow copy of D
+ +
get(...)
D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.
+ +
has_key(...)
D.has_key(k) -> True if D has a key k, else False
+ +
items(...)
D.items() -> list of D's (key, value) pairs, as 2-tuples
+ +
iteritems(...)
D.iteritems() -> an iterator over the (key, value) items of D
+ +
iterkeys(...)
D.iterkeys() -> an iterator over the keys of D
+ +
itervalues(...)
D.itervalues() -> an iterator over the values of D
+ +
keys(...)
D.keys() -> list of D's keys
+ +
pop(...)
D.pop(k[,d]) -> v, remove specified key and return the corresponding value
+If key is not found, d is returned if given, otherwise KeyError is raised
+ +
popitem(...)
D.popitem() -> (k, v), remove and return some (key, value) pair as a
+2-tuple; but raise KeyError if D is empty
+ +
setdefault(...)
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+ +
update(...)
D.update(E, **F) -> None.  Update D from E and F: for k in E: D[k] = E[k]
+(if E has keys else: for (k, v) in E: D[k] = v) then: for k in F: D[k] = F[k]
+ +
values(...)
D.values() -> list of D's values
+ +
+Data and other attributes inherited from __builtin__.dict:
+
__new__ = <built-in method __new__ of type object at 0x72b260>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
fromkeys = <built-in method fromkeys of type object at 0xc4c170>
dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
+v defaults to None.
+ +

+ + + + + +
 
+class BlogQuery(gdata.service.Query)
    
Method resolution order:
+
BlogQuery
+
gdata.service.Query
+
__builtin__.dict
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, feed=None, params=None, categories=None, blog_id=None)
Constructs a query object for the list of a user's Blogger blogs.

+Args:
+  feed: str (optional) The beginning of the URL to be queried. If the
+      feed is not set, and there is no blog_id passed in, the default
+      value is used ('http://www.blogger.com/feeds/default/blogs').
+  params: dict (optional)
+  categories: list (optional)
+  blog_id: str (optional)
+ +
+Methods inherited from gdata.service.Query:
+
ToUri(self)
+ +
__str__(self)
+ +
+Data descriptors inherited from gdata.service.Query:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
alt
+
The feed query's alt parameter
+
+
author
+
The feed query's author parameter
+
+
max_results
+
The feed query's max-results parameter
+
+
orderby
+
The feed query's orderby parameter
+
+
published_max
+
The feed query's published-max parameter
+
+
published_min
+
The feed query's published-min parameter
+
+
start_index
+
The feed query's start-index parameter
+
+
text_query
+
The feed query's q parameter
+
+
updated_max
+
The feed query's updated-max parameter
+
+
updated_min
+
The feed query's updated-min parameter
+
+
+Methods inherited from __builtin__.dict:
+
__cmp__(...)
x.__cmp__(y) <==> cmp(x,y)
+ +
__contains__(...)
D.__contains__(k) -> True if D has a key k, else False
+ +
__delitem__(...)
x.__delitem__(y) <==> del x[y]
+ +
__eq__(...)
x.__eq__(y) <==> x==y
+ +
__ge__(...)
x.__ge__(y) <==> x>=y
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__gt__(...)
x.__gt__(y) <==> x>y
+ +
__hash__(...)
x.__hash__() <==> hash(x)
+ +
__iter__(...)
x.__iter__() <==> iter(x)
+ +
__le__(...)
x.__le__(y) <==> x<=y
+ +
__len__(...)
x.__len__() <==> len(x)
+ +
__lt__(...)
x.__lt__(y) <==> x<y
+ +
__ne__(...)
x.__ne__(y) <==> x!=y
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setitem__(...)
x.__setitem__(i, y) <==> x[i]=y
+ +
clear(...)
D.clear() -> None.  Remove all items from D.
+ +
copy(...)
D.copy() -> a shallow copy of D
+ +
get(...)
D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.
+ +
has_key(...)
D.has_key(k) -> True if D has a key k, else False
+ +
items(...)
D.items() -> list of D's (key, value) pairs, as 2-tuples
+ +
iteritems(...)
D.iteritems() -> an iterator over the (key, value) items of D
+ +
iterkeys(...)
D.iterkeys() -> an iterator over the keys of D
+ +
itervalues(...)
D.itervalues() -> an iterator over the values of D
+ +
keys(...)
D.keys() -> list of D's keys
+ +
pop(...)
D.pop(k[,d]) -> v, remove specified key and return the corresponding value
+If key is not found, d is returned if given, otherwise KeyError is raised
+ +
popitem(...)
D.popitem() -> (k, v), remove and return some (key, value) pair as a
+2-tuple; but raise KeyError if D is empty
+ +
setdefault(...)
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+ +
update(...)
D.update(E, **F) -> None.  Update D from E and F: for k in E: D[k] = E[k]
+(if E has keys else: for (k, v) in E: D[k] = v) then: for k in F: D[k] = F[k]
+ +
values(...)
D.values() -> list of D's values
+ +
+Data and other attributes inherited from __builtin__.dict:
+
__new__ = <built-in method __new__ of type object at 0x72b260>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
fromkeys = <built-in method fromkeys of type object at 0xc4acc0>
dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
+v defaults to None.
+ +

+ + + + + +
 
+class BloggerService(gdata.service.GDataService)
    
Method resolution order:
+
BloggerService
+
gdata.service.GDataService
+
atom.service.AtomService
+
__builtin__.object
+
+
+Methods defined here:
+
AddComment(self, comment_entry, blog_id=None, post_id=None, uri=None)
Adds a new comment to the specified blog post.
+ +
AddPost(self, entry, blog_id=None, uri=None)
+ +
DeleteComment(self, entry=None, uri=None)
+ +
DeletePost(self, entry=None, uri=None)
+ +
GetBlogCommentFeed(self, blog_id=None, uri=None)
Retrieve a list of the comments for this blog.
+ +
GetBlogFeed(self, uri=None)
Retrieve a list of the blogs to which the current user may manage.
+ +
GetBlogPostFeed(self, blog_id=None, uri=None)
+ +
GetPostCommentFeed(self, blog_id=None, post_id=None, uri=None)
Retrieve a list of the comments for this particular blog post.
+ +
UpdatePost(self, entry, uri=None)
+ +
__init__(self, email=None, password=None, source=None, server=None, api_key=None, additional_headers=None)
+ +
+Methods inherited from gdata.service.GDataService:
+
AuthSubTokenInfo(self)
Fetches the AuthSub token's metadata from the server.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
ClientLogin(self, username, password, account_type=None, service=None, auth_service_url=None, source=None, captcha_token=None, captcha_response=None)
Convenience method for authenticating using ProgrammaticLogin. 

+Sets values for email, password, and other optional members.

+Args:
+  username:
+  password:
+  account_type: string (optional)
+  service: string (optional)
+  auth_service_url: string (optional)
+  captcha_token: string (optional)
+  captcha_response: string (optional)
+ +
Delete(self, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4)
Deletes the entry at the given URI.

+Args:
+  uri: string The URI of the entry to be deleted. Example: 
+       '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.

+Returns:
+  True if the entry was deleted.
+ +
FetchOAuthRequestToken(self, scopes=None, extra_parameters=None)
Fetches OAuth request token and returns it.

+Args:
+  scopes: string or list of string base URL(s) of the service(s) to be
+      accessed. If None, then this method tries to determine the
+      scope(s) from the current service.
+  extra_parameters: dict (optional) key-value pairs as any additional
+      parameters to be included in the URL and signature while making a
+      request for fetching an OAuth request token. All the OAuth parameters
+      are added by default. But if provided through this argument, any
+      default parameters will be overwritten. For e.g. a default parameter
+      oauth_version 1.0 can be overwritten if
+      extra_parameters = {'oauth_version': '2.0'}
+  
+Returns:
+  The fetched request token as a gdata.auth.OAuthToken object.
+  
+Raises:
+  FetchingOAuthRequestTokenFailed if the server responded to the request
+  with an error.
+ +
GenerateAuthSubURL(self, next, scope, secure=False, session=True, domain='default')
Generate a URL at which the user will login and be redirected back.

+Users enter their credentials on a Google login page and a token is sent
+to the URL specified in next. See documentation for AuthSub login at:
+http://code.google.com/apis/accounts/docs/AuthSub.html

+Args:
+  next: string The URL user will be sent to after logging in.
+  scope: string or list of strings. The URLs of the services to be 
+         accessed.
+  secure: boolean (optional) Determines whether or not the issued token
+          is a secure token.
+  session: boolean (optional) Determines whether or not the issued token
+           can be upgraded to a session token.
+ +
GenerateOAuthAuthorizationURL(self, request_token=None, callback_url=None, extra_params=None, include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope')
Generates URL at which user will login to authorize the request token.

+Args:
+  request_token: gdata.auth.OAuthToken (optional) OAuth request token.
+      If not specified, then the current token will be used if it is of
+      type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.    
+  callback_url: string (optional) The URL user will be sent to after
+      logging in and granting access.
+  extra_params: dict (optional) Additional parameters to be sent.
+  include_scopes_in_callback: Boolean (default=False) if set to True, and
+      if 'callback_url' is present, the 'callback_url' will be modified to
+      include the scope(s) from the request token as a URL parameter. The
+      key for the 'callback' URL's scope parameter will be
+      OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as
+      a parameter to the 'callback' URL, is that the page which receives
+      the OAuth token will be able to tell which URLs the token grants
+      access to.
+  scopes_param_prefix: string (default='oauth_token_scope') The URL
+      parameter key which maps to the list of valid scopes for the token.
+      This URL parameter will be included in the callback URL along with
+      the scopes of the token as value if include_scopes_in_callback=True.
+      
+Returns:
+  A string URL at which the user is required to login.

+Raises:
+  NonOAuthToken if the user's request token is not an OAuth token or if a
+  request token was not available.
+ +
Get(self, uri, extra_headers=None, redirects_remaining=4, encoding='UTF-8', converter=None)
Query the GData API with the given URI

+The uri is the portion of the URI after the server value 
+(ex: www.google.com).

+To perform a query against Google Base, set the server to 
+'base.google.com' and set the uri to '/base/feeds/...', where ... is 
+your query. For example, to find snippets for all digital cameras uri 
+should be set to: '/base/feeds/snippets?bq=digital+camera'

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to 
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and 
+                 Authorization headers.
+  redirects_remaining: int (optional) Tracks the number of additional
+      redirects this method will allow. If the service object receives
+      a redirect and remaining is 0, it will not follow the redirect. 
+      This was added to avoid infinite redirect loops.
+  encoding: string (optional) The character encoding for the server's
+      response. Default is UTF-8
+  converter: func (optional) A function which will transform
+      the server's results before it is returned. Example: use 
+      GDataFeedFromString to parse the server response as if it
+      were a GDataFeed.

+Returns:
+  If there is no ResultsTransformer specified in the call, a GDataFeed 
+  or GDataEntry depending on which is sent from the server. If the 
+  response is niether a feed or entry and there is no ResultsTransformer,
+  return a string. If there is a ResultsTransformer, the returned value 
+  will be that of the ResultsTransformer function.
+ +
GetAuthSubToken(self)
Returns the AuthSub token as a string.

+If the token is an gdta.auth.AuthSubToken, the Authorization Label
+("AuthSub token") is removed.

+This method examines the current_token to see if it is an AuthSubToken
+or SecureAuthSubToken. If not, it searches the token_store for a token
+which matches the current scope.

+The current scope is determined by the service name string member.

+Returns:
+  If the current_token is set to an AuthSubToken/SecureAuthSubToken,
+  return the token string. If there is no current_token, a token string
+  for a token which matches the service object's default scope is returned.
+  If there are no tokens valid for the scope, returns None.
+ +
GetClientLoginToken(self)
Returns the token string for the current token or a token matching the 
+service scope.

+If the current_token is a ClientLoginToken, the token string for 
+the current token is returned. If the current_token is not set, this method
+searches for a token in the token_store which is valid for the service 
+object's current scope.

+The current scope is determined by the service name string member.
+The token string is the end of the Authorization header, it doesn not
+include the ClientLogin label.
+ +
GetEntry(self, uri, extra_headers=None)
Query the GData API with the given URI and receive an Entry.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataEntry built from the XML in the server's response.
+ +
GetFeed(self, uri, extra_headers=None, converter=<function GDataFeedFromString at 0x7f0f271b9b18>)
Query the GData API with the given URI and receive a Feed.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataFeed built from the XML in the server's response.
+ +
GetMedia(self, uri, extra_headers=None)
Returns a MediaSource containing media and its metadata from the given
+URI string.
+ +
GetNext(self, feed)
Requests the next 'page' of results in the feed.

+This method uses the feed's next link to request an additional feed
+and uses the class of the feed to convert the results of the GET request.

+Args:
+  feed: atom.Feed or a subclass. The feed should contain a next link and
+      the type of the feed will be applied to the results from the 
+      server. The new feed which is returned will be of the same class
+      as this feed which was passed in.

+Returns:
+  A new feed representing the next set of results in the server's feed.
+  The type of this feed will match that of the feed argument.
+ +
Post(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert or update  data into a GData service at the given URI.

+Args:
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri.
+  uri: string The location (feed) to which the data should be inserted.
+       Example: '/base/feeds/items'.
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the
+      server's response. Often this is a function like
+      GDataEntryFromString which will parse the body of the server's
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert data into a GData service at the given URI.

+Args:
+  verb: string, either 'POST' or 'PUT'
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri. 
+  uri: string The location (feed) to which the data should be inserted. 
+       Example: '/base/feeds/items'. 
+  extra_headers: dict (optional) HTTP headers which are to be included. 
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
ProgrammaticLogin(self, captcha_token=None, captcha_response=None)
Authenticates the user and sets the GData Auth token.

+Login retreives a temporary auth token which must be used with all
+requests to GData services. The auth token is stored in the GData client
+object.

+Login is also used to respond to a captcha challenge. If the user's login
+attempt failed with a CaptchaRequired error, the user can respond by
+calling Login with the captcha token and the answer to the challenge.

+Args:
+  captcha_token: string (optional) The identifier for the captcha challenge
+                 which was presented to the user.
+  captcha_response: string (optional) The user's answer to the captch 
+                    challenge.

+Raises:
+  CaptchaRequired if the login service will require a captcha response
+  BadAuthentication if the login service rejected the username or password
+  Error if the login service responded with a 403 different from the above
+ +
Put(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=3, media_source=None, converter=None)
Updates an entry at the given URI.

+Args:
+  data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The 
+        XML containing the updated data.
+  uri: string A URI indicating entry to which the update will be applied.
+       Example: '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the put succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
RevokeAuthSubToken(self)
Revokes an existing AuthSub token.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
RevokeOAuthToken(self)
Revokes an existing OAuth token.

+Raises:
+  NonOAuthToken if the user's auth token is not an OAuth token.
+  RevokingOAuthTokenFailed if request for revoking an OAuth token failed.
+ +
SetAuthSubToken(self, token, scopes=None, rsa_key=None)
Sets the token sent in requests to an AuthSub token.

+Sets the current_token and attempts to add the token to the token_store.

+Only use this method if you have received a token from the AuthSub
+service. The auth token is set automatically when UpgradeToSessionToken()
+is used. See documentation for Google AuthSub here:
+http://code.google.com/apis/accounts/AuthForWebApps.html 

+Args:
+ token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string
+        The token returned by the AuthSub service. If the token is an
+        AuthSubToken or SecureAuthSubToken, the scope information stored in
+        the token is used. If the token is a string, the scopes parameter is
+        used to determine the valid scopes.
+ scopes: list of URLs for which the token is valid. This is only used
+         if the token parameter is a string.
+ rsa_key: string (optional) Private key required for RSA_SHA1 signature
+          method.  This parameter is necessary if the token is a string
+          representing a secure token.
+ +
SetClientLoginToken(self, token, scopes=None)
Sets the token sent in requests to a ClientLogin token.

+This method sets the current_token to a new ClientLoginToken and it 
+also attempts to add the ClientLoginToken to the token_store.

+Only use this method if you have received a token from the ClientLogin
+service. The auth_token is set automatically when ProgrammaticLogin()
+is used. See documentation for Google ClientLogin here:
+http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html

+Args:
+  token: string or instance of a ClientLoginToken.
+ +
SetOAuthInputParameters(self, signature_method, consumer_key, consumer_secret=None, rsa_key=None, two_legged_oauth=False)
Sets parameters required for using OAuth authentication mechanism.

+NOTE: Though consumer_secret and rsa_key are optional, either of the two
+is required depending on the value of the signature_method.

+Args:
+  signature_method: class which provides implementation for strategy class
+      oauth.oauth.OAuthSignatureMethod. Signature method to be used for
+      signing each request. Valid implementations are provided as the
+      constants defined by gdata.auth.OAuthSignatureMethod. Currently
+      they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
+      gdata.auth.OAuthSignatureMethod.HMAC_SHA1
+  consumer_key: string Domain identifying third_party web application.
+  consumer_secret: string (optional) Secret generated during registration.
+      Required only for HMAC_SHA1 signature method.
+  rsa_key: string (optional) Private key required for RSA_SHA1 signature
+      method.
+  two_legged_oauth: string (default=False) Enables two-legged OAuth process.
+ +
SetOAuthToken(self, oauth_token)
Attempts to set the current token and add it to the token store.

+The oauth_token can be any OAuth token i.e. unauthorized request token,
+authorized request token or access token.
+This method also attempts to add the token to the token store.
+Use this method any time you want the current token to point to the
+oauth_token passed. For e.g. call this method with the request token
+you receive from FetchOAuthRequestToken.

+Args:
+  request_token: gdata.auth.OAuthToken OAuth request token.
+ +
UpgradeToOAuthAccessToken(self, authorized_request_token=None, oauth_version='1.0')
Upgrades the authorized request token to an access token.

+Args:
+  authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request
+      token. If not specified, then the current token will be used if it is
+      of type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.
+  oauth_version: str (default='1.0') oauth_version parameter. All other
+      'oauth_' parameters are added by default. This parameter too, is
+      added by default but here you can override it's value.
+      
+Raises:
+  NonOAuthToken if the user's authorized request token is not an OAuth
+  token or if an authorized request token was not available.
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
UpgradeToSessionToken(self, token=None)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         (optional) which is good for a single use but can be upgraded
+         to a session token. If no token is passed in, the token
+         is found by looking in the token_store by looking for a token
+         for the current scope.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
upgrade_to_session_token(self, token)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         which is good for a single use but can be upgraded to a
+         session token.

+Returns:
+  The upgraded token as a gdata.auth.AuthSubToken object.

+Raises:
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
+Data descriptors inherited from gdata.service.GDataService:
+
captcha_token
+
Get the captcha token for a login request.
+
+
captcha_url
+
Get the captcha URL for a login request.
+
+
source
+
The source is the name of the application making the request. 
+It should be in the form company_id-app_name-app_version
+
+
+Data and other attributes inherited from gdata.service.GDataService:
+
auth_token = None
+ +
handler = None
+ +
tokens = None
+ +
+Methods inherited from atom.service.AtomService:
+
UseBasicAuth(self, username, password, for_proxy=False)
Sets an Authenticaiton: Basic HTTP header containing plaintext.

+Deprecated, use use_basic_auth instead.

+The username and password are base64 encoded and added to an HTTP header
+which will be included in each request. Note that your username and 
+password are sent in plaintext.

+Args:
+  username: str
+  password: str
+ +
request(self, operation, url, data=None, headers=None, url_params=None)
+ +
use_basic_auth(self, username, password, scopes=None)
+ +
+Data descriptors inherited from atom.service.AtomService:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
debug
+
If True, HTTP debug information is printed.
+
+
override_token
+
+
+Data and other attributes inherited from atom.service.AtomService:
+
auto_set_current_token = True
+ +
auto_store_tokens = True
+ +
current_token = None
+ +
port = 80
+ +
ssl = False
+ +

+ + + + + +
 
+Data
       __author__ = 'api.jscudder (Jeffrey Scudder)'

+ + + + + +
 
+Author
       api.jscudder (Jeffrey Scudder)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.calendar.html b/gdata.py-1.2.3/pydocs/gdata.calendar.html new file mode 100644 index 0000000..d104c04 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.calendar.html @@ -0,0 +1,2758 @@ + + +Python: package gdata.calendar + + + + +
 
+ 
gdata.calendar
index
/usr/local/svn/gdata-python-client/src/gdata/calendar/__init__.py
+

Contains extensions to ElementWrapper objects used with Google Calendar.

+

+ + + + + +
 
+Package Contents
       
service
+

+ + + + + +
 
+Classes
       
+
atom.AtomBase(atom.ExtensionContainer) +
+
+
Comments +
OriginalEvent +
QuickAdd +
Recurrence +
RecurrenceException +
Reminder +
Scope +
SendEventNotifications +
UriEnumElement +
+
+
AttendeeStatus +
AttendeeType +
EventStatus +
Transparency +
Visibility +
Who +
+
+
ValueAttributeContainer +
+
+
AccessLevel +
Color +
Hidden +
Role +
Selected +
Timezone +
+
+
WebContent +
WebContentGadgetPref +
When +
Where +
+
+
atom.Link(atom.AtomBase) +
+
+
WebContentLink +
+
+
gdata.BatchEntry(gdata.GDataEntry) +
+
+
CalendarEventEntry +
+
+
gdata.BatchFeed(gdata.GDataFeed) +
+
+
CalendarEventFeed(gdata.BatchFeed, gdata.LinkFinder) +
+
+
gdata.EntryLink(atom.AtomBase) +
+
+
CalendarEventEntryLink +
+
+
gdata.ExtendedProperty(atom.AtomBase) +
+
+
ExtendedProperty +
+
+
gdata.GDataEntry(atom.Entry, gdata.LinkFinder) +
+
+
CalendarAclEntry(gdata.GDataEntry, gdata.LinkFinder) +
CalendarEventCommentEntry(gdata.GDataEntry, gdata.LinkFinder) +
CalendarListEntry(gdata.GDataEntry, gdata.LinkFinder) +
+
+
gdata.GDataFeed(atom.Feed, gdata.LinkFinder) +
+
+
CalendarAclFeed(gdata.GDataFeed, gdata.LinkFinder) +
CalendarEventCommentFeed(gdata.GDataFeed, gdata.LinkFinder) +
CalendarListFeed(gdata.GDataFeed, gdata.LinkFinder) +
+
+
gdata.LinkFinder(atom.LinkFinder) +
+
+
CalendarAclEntry(gdata.GDataEntry, gdata.LinkFinder) +
CalendarAclFeed(gdata.GDataFeed, gdata.LinkFinder) +
CalendarEventCommentEntry(gdata.GDataEntry, gdata.LinkFinder) +
CalendarEventCommentFeed(gdata.GDataFeed, gdata.LinkFinder) +
CalendarEventFeed(gdata.BatchFeed, gdata.LinkFinder) +
CalendarListEntry(gdata.GDataEntry, gdata.LinkFinder) +
CalendarListFeed(gdata.GDataFeed, gdata.LinkFinder) +
+
+
+

+ + + + + + + +
 
+class AccessLevel(ValueAttributeContainer)
   The Google Calendar accesslevel element
 
 
Method resolution order:
+
AccessLevel
+
ValueAttributeContainer
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from ValueAttributeContainer:
+
__init__(self, value=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class AttendeeStatus(UriEnumElement)
   The Google Calendar attendeeStatus element
 
 
Method resolution order:
+
AttendeeStatus
+
UriEnumElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
+Data and other attributes defined here:
+
attendee_enum = {'http://schemas.google.com/g/2005#event.accepted': 'ACCEPTED', 'http://schemas.google.com/g/2005#event.declined': 'DECLINED', 'http://schemas.google.com/g/2005#event.invited': 'INVITED', 'http://schemas.google.com/g/2005#event.tentative': 'TENTATIVE'}
+ +
+Methods inherited from UriEnumElement:
+
findKey(self, value)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class AttendeeType(UriEnumElement)
   The Google Calendar attendeeType element
 
 
Method resolution order:
+
AttendeeType
+
UriEnumElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
+Data and other attributes defined here:
+
attendee_type_enum = {'http://schemas.google.com/g/2005#event.optional': 'OPTIONAL', 'http://schemas.google.com/g/2005#event.required': 'REQUIRED'}
+ +
+Methods inherited from UriEnumElement:
+
findKey(self, value)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class CalendarAclEntry(gdata.GDataEntry, gdata.LinkFinder)
   A Google Calendar ACL Entry flavor of an Atom Entry
 
 
Method resolution order:
+
CalendarAclEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, scope=None, role=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class CalendarAclFeed(gdata.GDataFeed, gdata.LinkFinder)
   A Google Calendar ACL feed flavor of an Atom Feed
 
 
Method resolution order:
+
CalendarAclFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class CalendarEventCommentEntry(gdata.GDataEntry, gdata.LinkFinder)
   A Google Calendar event comments entry flavor of an Atom Entry
 
 
Method resolution order:
+
CalendarEventCommentEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.Entry:
+
__init__(self, author=None, category=None, content=None, contributor=None, atom_id=None, link=None, published=None, rights=None, source=None, summary=None, control=None, title=None, updated=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for atom:entry

+Args:
+  author: list A list of Author instances which belong to this class.
+  category: list A list of Category instances
+  content: Content The entry's Content
+  contributor: list A list on Contributor instances
+  id: Id The entry's Id element
+  link: list A list of Link instances
+  published: Published The entry's Published element
+  rights: Rights The entry's Rights element
+  source: Source the entry's source element
+  summary: Summary the entry's summary element
+  title: Title the entry's title element
+  updated: Updated the entry's updated element
+  control: The entry's app:control element which can be used to mark an 
+      entry as a draft which should not be publicly viewable.
+  text: String The text contents of the element. This is the contents
+      of the Entry's XML text node. (Example: <foo>This is the text</foo>)
+  extension_elements: list A list of ExtensionElement instances which are
+      children of this element.
+  extension_attributes: dict A dictionary of strings which are the values
+      for additional XML attributes of this element.
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class CalendarEventCommentFeed(gdata.GDataFeed, gdata.LinkFinder)
   A Google Calendar event comments feed flavor of an Atom Feed
 
 
Method resolution order:
+
CalendarEventCommentFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class CalendarEventEntry(gdata.BatchEntry)
   A Google Calendar flavor of an Atom Entry
 
 
Method resolution order:
+
CalendarEventEntry
+
gdata.BatchEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
GetWebContentLink(self)
Finds the first link with rel set to WEB_CONTENT_REL

+Returns:
+  A gdata.calendar.WebContentLink or none if none of the links had rel 
+  equal to WEB_CONTENT_REL
+ +
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, transparency=None, comments=None, event_status=None, send_event_notifications=None, visibility=None, recurrence=None, recurrence_exception=None, where=None, when=None, who=None, quick_add=None, extended_property=None, original_event=None, batch_operation=None, batch_id=None, batch_status=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class CalendarEventEntryLink(gdata.EntryLink)
   An entryLink which contains a calendar event entry

+Within an event's recurranceExceptions, an entry link
+points to a calendar event entry. This class exists
+to capture the calendar specific extensions in the entry.
 
 
Method resolution order:
+
CalendarEventEntryLink
+
gdata.EntryLink
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from gdata.EntryLink:
+
__init__(self, href=None, read_only=None, rel=None, entry=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class CalendarEventFeed(gdata.BatchFeed, gdata.LinkFinder)
   A Google Calendar event feed flavor of an Atom Feed
 
 
Method resolution order:
+
CalendarEventFeed
+
gdata.BatchFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, interrupted=None, timezone=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from gdata.BatchFeed:
+
AddBatchEntry(self, entry=None, id_url_string=None, batch_id_string=None, operation_string=None)
Logic for populating members of a BatchEntry and adding to the feed.


+If the entry is not a BatchEntry, it is converted to a BatchEntry so
+that the batch specific members will be present. 

+The id_url_string can be used in place of an entry if the batch operation
+applies to a URL. For example query and delete operations require just
+the URL of an entry, no body is sent in the HTTP request. If an
+id_url_string is sent instead of an entry, a BatchEntry is created and
+added to the feed.

+This method also assigns the desired batch id to the entry so that it 
+can be referenced in the server's response. If the batch_id_string is
+None, this method will assign a batch_id to be the index at which this
+entry will be in the feed's entry list.

+Args:
+  entry: BatchEntry, atom.Entry, or another Entry flavor (optional) The
+      entry which will be sent to the server as part of the batch request.
+      The item must have a valid atom id so that the server knows which 
+      entry this request references.
+  id_url_string: str (optional) The URL of the entry to be acted on. You
+      can find this URL in the text member of the atom id for an entry.
+      If an entry is not sent, this id will be used to construct a new
+      BatchEntry which will be added to the request feed.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. Note that batch_ids should either always be specified or
+      never, mixing could potentially result in duplicate batch ids.
+  operation_string: str (optional) The desired batch operation which will
+      set the batch_operation.type member of the entry. Options are
+      'insert', 'update', 'delete', and 'query'

+Raises:
+  MissingRequiredParameters: Raised if neither an id_ url_string nor an
+      entry are provided in the request.

+Returns:
+  The added entry.
+ +
AddDelete(self, url_string=None, entry=None, batch_id_string=None)
Adds a delete request to the batch request feed.

+This method takes either the url_string which is the atom id of the item
+to be deleted, or the entry itself. The atom id of the entry must be 
+present so that the server knows which entry should be deleted.

+Args:
+  url_string: str (optional) The URL of the entry to be deleted. You can
+     find this URL in the text member of the atom id for an entry. 
+  entry: BatchEntry (optional) The entry to be deleted.
+  batch_id_string: str (optional)

+Raises:
+  MissingRequiredParameters: Raised if neither a url_string nor an entry 
+      are provided in the request.
+ +
AddInsert(self, entry, batch_id_string=None)
Add an insert request to the operations in this batch request feed.

+If the entry doesn't yet have an operation or a batch id, these will
+be set to the insert operation and a batch_id specified as a parameter.

+Args:
+  entry: BatchEntry The entry which will be sent in the batch feed as an
+      insert request.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. Note that batch_ids should either always be specified or
+      never, mixing could potentially result in duplicate batch ids.
+ +
AddQuery(self, url_string=None, entry=None, batch_id_string=None)
Adds a query request to the batch request feed.

+This method takes either the url_string which is the query URL 
+whose results will be added to the result feed. The query URL will
+be encapsulated in a BatchEntry, and you may pass in the BatchEntry
+with a query URL instead of sending a url_string.

+Args:
+  url_string: str (optional)
+  entry: BatchEntry (optional)
+  batch_id_string: str (optional)

+Raises:
+  MissingRequiredParameters
+ +
AddUpdate(self, entry, batch_id_string=None)
Add an update request to the list of batch operations in this feed.

+Sets the operation type of the entry to insert if it is not already set
+and assigns the desired batch id to the entry so that it can be 
+referenced in the server's response.

+Args:
+  entry: BatchEntry The entry which will be sent to the server as an
+      update (HTTP PUT) request. The item must have a valid atom id
+      so that the server knows which entry to replace.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. See also comments for AddInsert.
+ +
GetBatchLink(self)
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class CalendarListEntry(gdata.GDataEntry, gdata.LinkFinder)
   A Google Calendar meta Entry flavor of an Atom Entry
 
 
Method resolution order:
+
CalendarListEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, color=None, access_level=None, hidden=None, timezone=None, selected=None, where=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class CalendarListFeed(gdata.GDataFeed, gdata.LinkFinder)
   A Google Calendar meta feed flavor of an Atom Feed
 
 
Method resolution order:
+
CalendarListFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class Color(ValueAttributeContainer)
   The Google Calendar color element
 
 
Method resolution order:
+
Color
+
ValueAttributeContainer
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from ValueAttributeContainer:
+
__init__(self, value=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Comments(atom.AtomBase)
   The Google Calendar comments element
 
 
Method resolution order:
+
Comments
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, rel=None, feed_link=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class EventStatus(UriEnumElement)
   The Google Calendar eventStatus element
 
 
Method resolution order:
+
EventStatus
+
UriEnumElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
+Data and other attributes defined here:
+
status_enum = {'http://schemas.google.com/g/2005#event.canceled': 'CANCELED', 'http://schemas.google.com/g/2005#event.confirmed': 'CONFIRMED', 'http://schemas.google.com/g/2005#event.tentative': 'TENTATIVE'}
+ +
+Methods inherited from UriEnumElement:
+
findKey(self, value)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class ExtendedProperty(gdata.ExtendedProperty)
   A transparent subclass of gdata.ExtendedProperty added to this module
+for backwards compatibility.
 
 
Method resolution order:
+
ExtendedProperty
+
gdata.ExtendedProperty
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from gdata.ExtendedProperty:
+
GetXmlBlobExtensionElement(self)
Returns the XML blob as an atom.ExtensionElement.

+Returns:
+  An atom.ExtensionElement representing the blob's XML, or None if no
+  blob was set.
+ +
GetXmlBlobString(self)
Returns the XML blob as a string.

+Returns:
+  A string containing the blob's XML, or None if no blob was set.
+ +
SetXmlBlob(self, blob)
Sets the contents of the extendedProperty to XML as a child node.

+Since the extendedProperty is only allowed one child element as an XML
+blob, setting the XML blob will erase any preexisting extension elements
+in this object.

+Args:
+  blob: str, ElementTree Element or atom.ExtensionElement representing
+        the XML blob stored in the extendedProperty.
+ +
__init__(self, name=None, value=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Hidden(ValueAttributeContainer)
   The Google Calendar hidden element
 
 
Method resolution order:
+
Hidden
+
ValueAttributeContainer
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from ValueAttributeContainer:
+
__init__(self, value=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class OriginalEvent(atom.AtomBase)
   The Google Calendar OriginalEvent element
 
 
Method resolution order:
+
OriginalEvent
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, id=None, href=None, when=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class QuickAdd(atom.AtomBase)
   The Google Calendar quickadd element
 
 
Method resolution order:
+
QuickAdd
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, extension_elements=None, value=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Recurrence(atom.AtomBase)
   The Google Calendar Recurrence element
 
 
Method resolution order:
+
Recurrence
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class RecurrenceException(atom.AtomBase)
   The Google Calendar RecurrenceException element
 
 
Method resolution order:
+
RecurrenceException
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, specialized=None, entry_link=None, original_event=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Reminder(atom.AtomBase)
   The Google Calendar reminder element
 
 
Method resolution order:
+
Reminder
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, absolute_time=None, days=None, hours=None, minutes=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Role(ValueAttributeContainer)
   The Google Calendar timezone element
 
 
Method resolution order:
+
Role
+
ValueAttributeContainer
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from ValueAttributeContainer:
+
__init__(self, value=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Scope(atom.AtomBase)
   The Google ACL scope element
 
 
Method resolution order:
+
Scope
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, extension_elements=None, value=None, scope_type=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Selected(ValueAttributeContainer)
   The Google Calendar selected element
 
 
Method resolution order:
+
Selected
+
ValueAttributeContainer
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from ValueAttributeContainer:
+
__init__(self, value=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class SendEventNotifications(atom.AtomBase)
   The Google Calendar sendEventNotifications element
 
 
Method resolution order:
+
SendEventNotifications
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, extension_elements=None, value=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Timezone(ValueAttributeContainer)
   The Google Calendar timezone element
 
 
Method resolution order:
+
Timezone
+
ValueAttributeContainer
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from ValueAttributeContainer:
+
__init__(self, value=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Transparency(UriEnumElement)
   The Google Calendar Transparency element
 
 
Method resolution order:
+
Transparency
+
UriEnumElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
+Data and other attributes defined here:
+
transparency_enum = {'http://schemas.google.com/g/2005#event.opaque': 'OPAQUE', 'http://schemas.google.com/g/2005#event.transparent': 'TRANSPARENT'}
+ +
+Methods inherited from UriEnumElement:
+
findKey(self, value)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class UriEnumElement(atom.AtomBase)
    
Method resolution order:
+
UriEnumElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, tag, enum_map, attrib_name='value', extension_elements=None, extension_attributes=None, text=None)
+ +
findKey(self, value)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class ValueAttributeContainer(atom.AtomBase)
   A parent class for all Calendar classes which have a value attribute.

+Children include ColorAccessLevelHidden
 
 
Method resolution order:
+
ValueAttributeContainer
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, value=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Visibility(UriEnumElement)
   The Google Calendar Visibility element
 
 
Method resolution order:
+
Visibility
+
UriEnumElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
+Data and other attributes defined here:
+
visibility_enum = {'http://schemas.google.com/g/2005#event.confidential': 'CONFIDENTIAL', 'http://schemas.google.com/g/2005#event.default': 'DEFAULT', 'http://schemas.google.com/g/2005#event.private': 'PRIVATE', 'http://schemas.google.com/g/2005#event.public': 'PUBLIC'}
+ +
+Methods inherited from UriEnumElement:
+
findKey(self, value)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class WebContent(atom.AtomBase)
    
Method resolution order:
+
WebContent
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, url=None, width=None, height=None, text=None, gadget_pref=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class WebContentGadgetPref(atom.AtomBase)
    
Method resolution order:
+
WebContentGadgetPref
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, name=None, value=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class WebContentLink(atom.Link)
    
Method resolution order:
+
WebContentLink
+
atom.Link
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, title=None, href=None, link_type=None, web_content=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class When(atom.AtomBase)
   The Google Calendar When element
 
 
Method resolution order:
+
When
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, start_time=None, end_time=None, reminder=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Where(atom.AtomBase)
   The Google Calendar Where element
 
 
Method resolution order:
+
Where
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, value_string=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Who(UriEnumElement)
   The Google Calendar Who element
 
 
Method resolution order:
+
Who
+
UriEnumElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, name=None, email=None, attendee_status=None, attendee_type=None, rel=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Data and other attributes defined here:
+
relEnum = {'http://schemas.google.com/g/2005#event.attendee': 'ATTENDEE', 'http://schemas.google.com/g/2005#event.organizer': 'ORGANIZER', 'http://schemas.google.com/g/2005#event.performer': 'PERFORMER', 'http://schemas.google.com/g/2005#event.speaker': 'SPEAKER', 'http://schemas.google.com/g/2005#message.bcc': 'BCC', 'http://schemas.google.com/g/2005#message.cc': 'CC', 'http://schemas.google.com/g/2005#message.from': 'FROM', 'http://schemas.google.com/g/2005#message.reply-to': 'REPLY_TO', 'http://schemas.google.com/g/2005#message.to': 'TO'}
+ +
+Methods inherited from UriEnumElement:
+
findKey(self, value)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Functions
       
CalendarAclEntryFromString(xml_string)
+
CalendarAclFeedFromString(xml_string)
+
CalendarEventCommentEntryFromString(xml_string)
+
CalendarEventCommentFeedFromString(xml_string)
+
CalendarEventEntryFromString(xml_string)
+
CalendarEventEntryLinkFromString(xml_string)
+
CalendarEventFeedFromString(xml_string)
+
CalendarListEntryFromString(xml_string)
+
CalendarListFeedFromString(xml_string)
+
GetCalendarEventEntryClass()
+

+ + + + + +
 
+Data
       GACL_NAMESPACE = 'http://schemas.google.com/acl/2007'
+GACL_TEMPLATE = '{http://schemas.google.com/acl/2007}%s'
+GCAL_NAMESPACE = 'http://schemas.google.com/gCal/2005'
+GCAL_TEMPLATE = '{http://schemas.google.com/gCal/2005}%s'
+WEB_CONTENT_LINK_REL = 'http://schemas.google.com/gCal/2005/webContent'
+__author__ = 'api.vli (Vivian Li), api.rboyd (Ryan Boyd)'

+ + + + + +
 
+Author
       api.vli (Vivian Li), api.rboyd (Ryan Boyd)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.calendar.service.html b/gdata.py-1.2.3/pydocs/gdata.calendar.service.html new file mode 100644 index 0000000..b361515 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.calendar.service.html @@ -0,0 +1,1414 @@ + + +Python: module gdata.calendar.service + + + + +
 
+ 
gdata.calendar.service
index
/usr/local/svn/gdata-python-client/src/gdata/calendar/service.py
+

CalendarService extends the GDataService to streamline Google Calendar operations.

+CalendarService: Provides methods to query feeds and manipulate items. Extends 
+              GDataService.

+DictionaryToParamList: Function which converts a dictionary into a list of 
+                       URL arguments (represented as strings). This is a 
+                       utility function used in CRUD operations.

+

+ + + + + +
 
+Modules
       
atom
+
gdata
+
urllib
+

+ + + + + +
 
+Classes
       
+
exceptions.Exception(exceptions.BaseException) +
+
+
Error +
+
+
RequestError +
+
+
+
+
gdata.service.GDataService(atom.service.AtomService) +
+
+
CalendarService +
+
+
gdata.service.Query(__builtin__.dict) +
+
+
CalendarEventCommentQuery +
CalendarEventQuery +
CalendarListQuery +
+
+
+

+ + + + + + + +
 
+class CalendarEventCommentQuery(gdata.service.Query)
   Queries the Google Calendar event comments feed
 
 
Method resolution order:
+
CalendarEventCommentQuery
+
gdata.service.Query
+
__builtin__.dict
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, feed=None)
+ +
+Methods inherited from gdata.service.Query:
+
ToUri(self)
+ +
__str__(self)
+ +
+Data descriptors inherited from gdata.service.Query:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
alt
+
The feed query's alt parameter
+
+
author
+
The feed query's author parameter
+
+
max_results
+
The feed query's max-results parameter
+
+
orderby
+
The feed query's orderby parameter
+
+
published_max
+
The feed query's published-max parameter
+
+
published_min
+
The feed query's published-min parameter
+
+
start_index
+
The feed query's start-index parameter
+
+
text_query
+
The feed query's q parameter
+
+
updated_max
+
The feed query's updated-max parameter
+
+
updated_min
+
The feed query's updated-min parameter
+
+
+Methods inherited from __builtin__.dict:
+
__cmp__(...)
x.__cmp__(y) <==> cmp(x,y)
+ +
__contains__(...)
D.__contains__(k) -> True if D has a key k, else False
+ +
__delitem__(...)
x.__delitem__(y) <==> del x[y]
+ +
__eq__(...)
x.__eq__(y) <==> x==y
+ +
__ge__(...)
x.__ge__(y) <==> x>=y
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__gt__(...)
x.__gt__(y) <==> x>y
+ +
__hash__(...)
x.__hash__() <==> hash(x)
+ +
__iter__(...)
x.__iter__() <==> iter(x)
+ +
__le__(...)
x.__le__(y) <==> x<=y
+ +
__len__(...)
x.__len__() <==> len(x)
+ +
__lt__(...)
x.__lt__(y) <==> x<y
+ +
__ne__(...)
x.__ne__(y) <==> x!=y
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setitem__(...)
x.__setitem__(i, y) <==> x[i]=y
+ +
clear(...)
D.clear() -> None.  Remove all items from D.
+ +
copy(...)
D.copy() -> a shallow copy of D
+ +
get(...)
D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.
+ +
has_key(...)
D.has_key(k) -> True if D has a key k, else False
+ +
items(...)
D.items() -> list of D's (key, value) pairs, as 2-tuples
+ +
iteritems(...)
D.iteritems() -> an iterator over the (key, value) items of D
+ +
iterkeys(...)
D.iterkeys() -> an iterator over the keys of D
+ +
itervalues(...)
D.itervalues() -> an iterator over the values of D
+ +
keys(...)
D.keys() -> list of D's keys
+ +
pop(...)
D.pop(k[,d]) -> v, remove specified key and return the corresponding value
+If key is not found, d is returned if given, otherwise KeyError is raised
+ +
popitem(...)
D.popitem() -> (k, v), remove and return some (key, value) pair as a
+2-tuple; but raise KeyError if D is empty
+ +
setdefault(...)
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+ +
update(...)
D.update(E, **F) -> None.  Update D from E and F: for k in E: D[k] = E[k]
+(if E has keys else: for (k, v) in E: D[k] = v) then: for k in F: D[k] = F[k]
+ +
values(...)
D.values() -> list of D's values
+ +
+Data and other attributes inherited from __builtin__.dict:
+
__new__ = <built-in method __new__ of type object at 0x72b260>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
fromkeys = <built-in method fromkeys of type object at 0xc62200>
dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
+v defaults to None.
+ +

+ + + + + +
 
+class CalendarEventQuery(gdata.service.Query)
    
Method resolution order:
+
CalendarEventQuery
+
gdata.service.Query
+
__builtin__.dict
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, user='default', visibility='private', projection='full', text_query=None, params=None, categories=None)
+ +
+Data descriptors defined here:
+
ctz
+
The ctz query parameter which sets report time on the server.
+
+
futureevents
+
The futureevents query parameter
+
+
orderby
+
The orderby query parameter
+
+
recurrence_expansion_end
+
The recurrence-expansion-end query parameter
+
+
recurrence_expansion_start
+
The recurrence-expansion-start query parameter
+
+
singleevents
+
The singleevents query parameter
+
+
sortorder
+
The sortorder query parameter
+
+
start_max
+
The start-max query parameter
+
+
start_min
+
The start-min query parameter
+
+
+Methods inherited from gdata.service.Query:
+
ToUri(self)
+ +
__str__(self)
+ +
+Data descriptors inherited from gdata.service.Query:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
alt
+
The feed query's alt parameter
+
+
author
+
The feed query's author parameter
+
+
max_results
+
The feed query's max-results parameter
+
+
published_max
+
The feed query's published-max parameter
+
+
published_min
+
The feed query's published-min parameter
+
+
start_index
+
The feed query's start-index parameter
+
+
text_query
+
The feed query's q parameter
+
+
updated_max
+
The feed query's updated-max parameter
+
+
updated_min
+
The feed query's updated-min parameter
+
+
+Methods inherited from __builtin__.dict:
+
__cmp__(...)
x.__cmp__(y) <==> cmp(x,y)
+ +
__contains__(...)
D.__contains__(k) -> True if D has a key k, else False
+ +
__delitem__(...)
x.__delitem__(y) <==> del x[y]
+ +
__eq__(...)
x.__eq__(y) <==> x==y
+ +
__ge__(...)
x.__ge__(y) <==> x>=y
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__gt__(...)
x.__gt__(y) <==> x>y
+ +
__hash__(...)
x.__hash__() <==> hash(x)
+ +
__iter__(...)
x.__iter__() <==> iter(x)
+ +
__le__(...)
x.__le__(y) <==> x<=y
+ +
__len__(...)
x.__len__() <==> len(x)
+ +
__lt__(...)
x.__lt__(y) <==> x<y
+ +
__ne__(...)
x.__ne__(y) <==> x!=y
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setitem__(...)
x.__setitem__(i, y) <==> x[i]=y
+ +
clear(...)
D.clear() -> None.  Remove all items from D.
+ +
copy(...)
D.copy() -> a shallow copy of D
+ +
get(...)
D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.
+ +
has_key(...)
D.has_key(k) -> True if D has a key k, else False
+ +
items(...)
D.items() -> list of D's (key, value) pairs, as 2-tuples
+ +
iteritems(...)
D.iteritems() -> an iterator over the (key, value) items of D
+ +
iterkeys(...)
D.iterkeys() -> an iterator over the keys of D
+ +
itervalues(...)
D.itervalues() -> an iterator over the values of D
+ +
keys(...)
D.keys() -> list of D's keys
+ +
pop(...)
D.pop(k[,d]) -> v, remove specified key and return the corresponding value
+If key is not found, d is returned if given, otherwise KeyError is raised
+ +
popitem(...)
D.popitem() -> (k, v), remove and return some (key, value) pair as a
+2-tuple; but raise KeyError if D is empty
+ +
setdefault(...)
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+ +
update(...)
D.update(E, **F) -> None.  Update D from E and F: for k in E: D[k] = E[k]
+(if E has keys else: for (k, v) in E: D[k] = v) then: for k in F: D[k] = F[k]
+ +
values(...)
D.values() -> list of D's values
+ +
+Data and other attributes inherited from __builtin__.dict:
+
__new__ = <built-in method __new__ of type object at 0x72b260>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
fromkeys = <built-in method fromkeys of type object at 0xc63b10>
dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
+v defaults to None.
+ +

+ + + + + + + +
 
+class CalendarListQuery(gdata.service.Query)
   Queries the Google Calendar meta feed
 
 
Method resolution order:
+
CalendarListQuery
+
gdata.service.Query
+
__builtin__.dict
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, userId=None, text_query=None, params=None, categories=None)
+ +
+Methods inherited from gdata.service.Query:
+
ToUri(self)
+ +
__str__(self)
+ +
+Data descriptors inherited from gdata.service.Query:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
alt
+
The feed query's alt parameter
+
+
author
+
The feed query's author parameter
+
+
max_results
+
The feed query's max-results parameter
+
+
orderby
+
The feed query's orderby parameter
+
+
published_max
+
The feed query's published-max parameter
+
+
published_min
+
The feed query's published-min parameter
+
+
start_index
+
The feed query's start-index parameter
+
+
text_query
+
The feed query's q parameter
+
+
updated_max
+
The feed query's updated-max parameter
+
+
updated_min
+
The feed query's updated-min parameter
+
+
+Methods inherited from __builtin__.dict:
+
__cmp__(...)
x.__cmp__(y) <==> cmp(x,y)
+ +
__contains__(...)
D.__contains__(k) -> True if D has a key k, else False
+ +
__delitem__(...)
x.__delitem__(y) <==> del x[y]
+ +
__eq__(...)
x.__eq__(y) <==> x==y
+ +
__ge__(...)
x.__ge__(y) <==> x>=y
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__gt__(...)
x.__gt__(y) <==> x>y
+ +
__hash__(...)
x.__hash__() <==> hash(x)
+ +
__iter__(...)
x.__iter__() <==> iter(x)
+ +
__le__(...)
x.__le__(y) <==> x<=y
+ +
__len__(...)
x.__len__() <==> len(x)
+ +
__lt__(...)
x.__lt__(y) <==> x<y
+ +
__ne__(...)
x.__ne__(y) <==> x!=y
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setitem__(...)
x.__setitem__(i, y) <==> x[i]=y
+ +
clear(...)
D.clear() -> None.  Remove all items from D.
+ +
copy(...)
D.copy() -> a shallow copy of D
+ +
get(...)
D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.
+ +
has_key(...)
D.has_key(k) -> True if D has a key k, else False
+ +
items(...)
D.items() -> list of D's (key, value) pairs, as 2-tuples
+ +
iteritems(...)
D.iteritems() -> an iterator over the (key, value) items of D
+ +
iterkeys(...)
D.iterkeys() -> an iterator over the keys of D
+ +
itervalues(...)
D.itervalues() -> an iterator over the values of D
+ +
keys(...)
D.keys() -> list of D's keys
+ +
pop(...)
D.pop(k[,d]) -> v, remove specified key and return the corresponding value
+If key is not found, d is returned if given, otherwise KeyError is raised
+ +
popitem(...)
D.popitem() -> (k, v), remove and return some (key, value) pair as a
+2-tuple; but raise KeyError if D is empty
+ +
setdefault(...)
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+ +
update(...)
D.update(E, **F) -> None.  Update D from E and F: for k in E: D[k] = E[k]
+(if E has keys else: for (k, v) in E: D[k] = v) then: for k in F: D[k] = F[k]
+ +
values(...)
D.values() -> list of D's values
+ +
+Data and other attributes inherited from __builtin__.dict:
+
__new__ = <built-in method __new__ of type object at 0x72b260>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
fromkeys = <built-in method fromkeys of type object at 0xc61ce0>
dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
+v defaults to None.
+ +

+ + + + + + + +
 
+class CalendarService(gdata.service.GDataService)
   Client for the Google Calendar service.
 
 
Method resolution order:
+
CalendarService
+
gdata.service.GDataService
+
atom.service.AtomService
+
__builtin__.object
+
+
+Methods defined here:
+
CalendarQuery(self, query)
+ +
DeleteAclEntry(self, edit_uri, extra_headers=None, url_params=None, escape_params=True)
Removes an ACL entry at the given edit_uri from Google Calendar.

+Args:
+  edit_uri: string The edit URL of the entry to be deleted. Example:
+           'http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/default'
+  url_params: dict (optional) Additional URL parameters to be included
+              in the deletion request.
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.

+Returns:
+  On successful delete,  a httplib.HTTPResponse containing the server's
+    response to the DELETE request.
+  On failure, a RequestError is raised of the form:
+    {'status': HTTP status code from server, 
+     'reason': HTTP reason from the server, 
+     'body': HTTP body of the server's response}
+ +
DeleteCalendarEntry(self, edit_uri, extra_headers=None, url_params=None, escape_params=True)
Removes a calendar entry at the given edit_uri from Google Calendar.

+Args:
+  edit_uri: string The edit URL of the entry to be deleted. Example:
+           'http://www.google.com/calendar/feeds/default/allcalendars/abcdef@group.calendar.google.com'
+  url_params: dict (optional) Additional URL parameters to be included
+              in the deletion request.
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.

+Returns:
+  On successful delete, True is returned
+  On failure, a RequestError is raised of the form:
+    {'status': HTTP status code from server, 
+     'reason': HTTP reason from the server, 
+     'body': HTTP body of the server's response}
+ +
DeleteEvent(self, edit_uri, extra_headers=None, url_params=None, escape_params=True)
Removes an event with the specified ID from Google Calendar.

+Args:
+  edit_uri: string The edit URL of the entry to be deleted. Example:
+           'http://www.google.com/calendar/feeds/default/private/full/abx'
+  url_params: dict (optional) Additional URL parameters to be included
+              in the deletion request.
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.

+Returns:
+  On successful delete,  a httplib.HTTPResponse containing the server's
+    response to the DELETE request.
+  On failure, a RequestError is raised of the form:
+    {'status': HTTP status code from server, 
+     'reason': HTTP reason from the server, 
+     'body': HTTP body of the server's response}
+ +
ExecuteBatch(self, batch_feed, url, converter=<function CalendarEventFeedFromString at 0x9e7500>)
Sends a batch request feed to the server.

+The batch request needs to be sent to the batch URL for a particular 
+calendar. You can find the URL by calling GetBatchLink().href on the 
+CalendarEventFeed.

+Args:
+  batch_feed: gdata.calendar.CalendarEventFeed A feed containing batch
+      request entries. Each entry contains the operation to be performed 
+      on the data contained in the entry. For example an entry with an 
+      operation type of insert will be used as if the individual entry 
+      had been inserted.
+  url: str The batch URL for the Calendar to which these operations should
+      be applied.
+  converter: Function (optional) The function used to convert the server's
+      response to an object. The default value is 
+      CalendarEventFeedFromString.

+Returns:
+  The results of the batch request's execution on the server. If the 
+  default converter is used, this is stored in a CalendarEventFeed.
+ +
GetAllCalendarsFeed(self, uri='/calendar/feeds/default/allcalendars/full')
+ +
GetCalendarAclEntry(self, uri)
+ +
GetCalendarAclFeed(self, uri='/calendar/feeds/default/acl/full')
+ +
GetCalendarEventCommentEntry(self, uri)
+ +
GetCalendarEventCommentFeed(self, uri)
+ +
GetCalendarEventEntry(self, uri)
+ +
GetCalendarEventFeed(self, uri='/calendar/feeds/default/private/full')
+ +
GetCalendarListEntry(self, uri)
+ +
GetCalendarListFeed(self, uri='/calendar/feeds/default/allcalendars/full')
+ +
GetOwnCalendarsFeed(self, uri='/calendar/feeds/default/owncalendars/full')
+ +
InsertAclEntry(self, new_entry, insert_uri, url_params=None, escape_params=True)
Adds an ACL entry (rule) to Google Calendar.

+Args: 
+  new_entry: atom.Entry or subclass A new ACL entry which is to be added to 
+            Google Calendar.
+  insert_uri: the URL to post new entries to the ACL feed
+  url_params: dict (optional) Additional URL parameters to be included
+              in the insertion request. 
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.

+Returns:
+  On successful insert,  an entry containing the ACL entry created
+  On failure, a RequestError is raised of the form:
+    {'status': HTTP status code from server, 
+     'reason': HTTP reason from the server, 
+     'body': HTTP body of the server's response}
+ +
InsertCalendar(self, new_calendar, url_params=None, escape_params=True)
Creates a new calendar.

+Args: 
+  new_calendar: The calendar to be created
+  url_params: dict (optional) Additional URL parameters to be included
+              in the insertion request. 
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.

+Returns:
+  On successful insert,  an entry containing the calendar created
+  On failure, a RequestError is raised of the form:
+    {'status': HTTP status code from server, 
+     'reason': HTTP reason from the server, 
+     'body': HTTP body of the server's response}
+ +
InsertCalendarSubscription(self, calendar, url_params=None, escape_params=True)
Subscribes the authenticated user to the provided calendar.

+Args: 
+  calendar: The calendar to which the user should be subscribed.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the insertion request. 
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.

+Returns:
+  On successful insert,  an entry containing the subscription created
+  On failure, a RequestError is raised of the form:
+    {'status': HTTP status code from server, 
+     'reason': HTTP reason from the server, 
+     'body': HTTP body of the server's response}
+ +
InsertEvent(self, new_event, insert_uri, url_params=None, escape_params=True)
Adds an event to Google Calendar.

+Args: 
+  new_event: atom.Entry or subclass A new event which is to be added to 
+            Google Calendar.
+  insert_uri: the URL to post new events to the feed
+  url_params: dict (optional) Additional URL parameters to be included
+              in the insertion request. 
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.

+Returns:
+  On successful insert,  an entry containing the event created
+  On failure, a RequestError is raised of the form:
+    {'status': HTTP status code from server, 
+     'reason': HTTP reason from the server, 
+     'body': HTTP body of the server's response}
+ +
InsertEventComment(self, new_entry, insert_uri, url_params=None, escape_params=True)
Adds an entry to Google Calendar.

+Args:
+  new_entry: atom.Entry or subclass A new entry which is to be added to
+            Google Calendar.
+  insert_uri: the URL to post new entrys to the feed
+  url_params: dict (optional) Additional URL parameters to be included
+              in the insertion request.
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.

+Returns:
+  On successful insert,  an entry containing the comment created
+  On failure, a RequestError is raised of the form:
+    {'status': HTTP status code from server, 
+     'reason': HTTP reason from the server, 
+     'body': HTTP body of the server's response}
+ +
Query(self, uri, converter=None)
Performs a query and returns a resulting feed or entry.

+Args:
+  feed: string The feed which is to be queried

+Returns:
+  On success, a GDataFeed or Entry depending on which is sent from the 
+    server.
+  On failure, a RequestError is raised of the form:
+    {'status': HTTP status code from server, 
+     'reason': HTTP reason from the server, 
+     'body': HTTP body of the server's response}
+ +
UpdateAclEntry(self, edit_uri, updated_rule, url_params=None, escape_params=True)
Updates an existing ACL rule.

+Args:
+  edit_uri: string The edit link URI for the element being updated
+  updated_rule: string, atom.Entry, or subclass containing
+                the Atom Entry which will replace the event which is 
+                stored at the edit_url 
+  url_params: dict (optional) Additional URL parameters to be included
+              in the update request.
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.

+Returns:
+  On successful update,  a httplib.HTTPResponse containing the server's
+    response to the PUT request.
+  On failure, a RequestError is raised of the form:
+    {'status': HTTP status code from server, 
+     'reason': HTTP reason from the server, 
+     'body': HTTP body of the server's response}
+ +
UpdateCalendar(self, calendar, url_params=None, escape_params=True)
Updates a calendar.

+Args: 
+  calendar: The calendar which should be updated
+  url_params: dict (optional) Additional URL parameters to be included
+              in the insertion request. 
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.

+Returns:
+  On successful insert,  an entry containing the calendar created
+  On failure, a RequestError is raised of the form:
+    {'status': HTTP status code from server, 
+     'reason': HTTP reason from the server, 
+     'body': HTTP body of the server's response}
+ +
UpdateEvent(self, edit_uri, updated_event, url_params=None, escape_params=True)
Updates an existing event.

+Args:
+  edit_uri: string The edit link URI for the element being updated
+  updated_event: string, atom.Entry, or subclass containing
+                the Atom Entry which will replace the event which is 
+                stored at the edit_url 
+  url_params: dict (optional) Additional URL parameters to be included
+              in the update request.
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.

+Returns:
+  On successful update,  a httplib.HTTPResponse containing the server's
+    response to the PUT request.
+  On failure, a RequestError is raised of the form:
+    {'status': HTTP status code from server, 
+     'reason': HTTP reason from the server, 
+     'body': HTTP body of the server's response}
+ +
__init__(self, email=None, password=None, source=None, server='www.google.com', additional_headers=None)
+ +
+Methods inherited from gdata.service.GDataService:
+
AuthSubTokenInfo(self)
Fetches the AuthSub token's metadata from the server.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
ClientLogin(self, username, password, account_type=None, service=None, auth_service_url=None, source=None, captcha_token=None, captcha_response=None)
Convenience method for authenticating using ProgrammaticLogin. 

+Sets values for email, password, and other optional members.

+Args:
+  username:
+  password:
+  account_type: string (optional)
+  service: string (optional)
+  auth_service_url: string (optional)
+  captcha_token: string (optional)
+  captcha_response: string (optional)
+ +
Delete(self, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4)
Deletes the entry at the given URI.

+Args:
+  uri: string The URI of the entry to be deleted. Example: 
+       '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.

+Returns:
+  True if the entry was deleted.
+ +
FetchOAuthRequestToken(self, scopes=None, extra_parameters=None)
Fetches OAuth request token and returns it.

+Args:
+  scopes: string or list of string base URL(s) of the service(s) to be
+      accessed. If None, then this method tries to determine the
+      scope(s) from the current service.
+  extra_parameters: dict (optional) key-value pairs as any additional
+      parameters to be included in the URL and signature while making a
+      request for fetching an OAuth request token. All the OAuth parameters
+      are added by default. But if provided through this argument, any
+      default parameters will be overwritten. For e.g. a default parameter
+      oauth_version 1.0 can be overwritten if
+      extra_parameters = {'oauth_version': '2.0'}
+  
+Returns:
+  The fetched request token as a gdata.auth.OAuthToken object.
+  
+Raises:
+  FetchingOAuthRequestTokenFailed if the server responded to the request
+  with an error.
+ +
GenerateAuthSubURL(self, next, scope, secure=False, session=True, domain='default')
Generate a URL at which the user will login and be redirected back.

+Users enter their credentials on a Google login page and a token is sent
+to the URL specified in next. See documentation for AuthSub login at:
+http://code.google.com/apis/accounts/docs/AuthSub.html

+Args:
+  next: string The URL user will be sent to after logging in.
+  scope: string or list of strings. The URLs of the services to be 
+         accessed.
+  secure: boolean (optional) Determines whether or not the issued token
+          is a secure token.
+  session: boolean (optional) Determines whether or not the issued token
+           can be upgraded to a session token.
+ +
GenerateOAuthAuthorizationURL(self, request_token=None, callback_url=None, extra_params=None, include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope')
Generates URL at which user will login to authorize the request token.

+Args:
+  request_token: gdata.auth.OAuthToken (optional) OAuth request token.
+      If not specified, then the current token will be used if it is of
+      type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.    
+  callback_url: string (optional) The URL user will be sent to after
+      logging in and granting access.
+  extra_params: dict (optional) Additional parameters to be sent.
+  include_scopes_in_callback: Boolean (default=False) if set to True, and
+      if 'callback_url' is present, the 'callback_url' will be modified to
+      include the scope(s) from the request token as a URL parameter. The
+      key for the 'callback' URL's scope parameter will be
+      OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as
+      a parameter to the 'callback' URL, is that the page which receives
+      the OAuth token will be able to tell which URLs the token grants
+      access to.
+  scopes_param_prefix: string (default='oauth_token_scope') The URL
+      parameter key which maps to the list of valid scopes for the token.
+      This URL parameter will be included in the callback URL along with
+      the scopes of the token as value if include_scopes_in_callback=True.
+      
+Returns:
+  A string URL at which the user is required to login.

+Raises:
+  NonOAuthToken if the user's request token is not an OAuth token or if a
+  request token was not available.
+ +
Get(self, uri, extra_headers=None, redirects_remaining=4, encoding='UTF-8', converter=None)
Query the GData API with the given URI

+The uri is the portion of the URI after the server value 
+(ex: www.google.com).

+To perform a query against Google Base, set the server to 
+'base.google.com' and set the uri to '/base/feeds/...', where ... is 
+your query. For example, to find snippets for all digital cameras uri 
+should be set to: '/base/feeds/snippets?bq=digital+camera'

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to 
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and 
+                 Authorization headers.
+  redirects_remaining: int (optional) Tracks the number of additional
+      redirects this method will allow. If the service object receives
+      a redirect and remaining is 0, it will not follow the redirect. 
+      This was added to avoid infinite redirect loops.
+  encoding: string (optional) The character encoding for the server's
+      response. Default is UTF-8
+  converter: func (optional) A function which will transform
+      the server's results before it is returned. Example: use 
+      GDataFeedFromString to parse the server response as if it
+      were a GDataFeed.

+Returns:
+  If there is no ResultsTransformer specified in the call, a GDataFeed 
+  or GDataEntry depending on which is sent from the server. If the 
+  response is neither a feed nor an entry and there is no ResultsTransformer,
+  return a string. If there is a ResultsTransformer, the returned value 
+  will be that of the ResultsTransformer function.
+ +
GetAuthSubToken(self)
Returns the AuthSub token as a string.

+If the token is a gdata.auth.AuthSubToken, the Authorization Label
+("AuthSub token") is removed.

+This method examines the current_token to see if it is an AuthSubToken
+or SecureAuthSubToken. If not, it searches the token_store for a token
+which matches the current scope.

+The current scope is determined by the service name string member.

+Returns:
+  If the current_token is set to an AuthSubToken/SecureAuthSubToken,
+  return the token string. If there is no current_token, a token string
+  for a token which matches the service object's default scope is returned.
+  If there are no tokens valid for the scope, returns None.
+ +
GetClientLoginToken(self)
Returns the token string for the current token or a token matching the 
+service scope.

+If the current_token is a ClientLoginToken, the token string for 
+the current token is returned. If the current_token is not set, this method
+searches for a token in the token_store which is valid for the service 
+object's current scope.

+The current scope is determined by the service name string member.
+The token string is the end of the Authorization header; it does not
+include the ClientLogin label.
+ +
GetEntry(self, uri, extra_headers=None)
Query the GData API with the given URI and receive an Entry.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataEntry built from the XML in the server's response.
+ +
GetFeed(self, uri, extra_headers=None, converter=<function GDataFeedFromString at 0x7fba6392cb18>)
Query the GData API with the given URI and receive a Feed.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataFeed built from the XML in the server's response.
+ +
GetMedia(self, uri, extra_headers=None)
Returns a MediaSource containing media and its metadata from the given
+URI string.
+ +
GetNext(self, feed)
Requests the next 'page' of results in the feed.

+This method uses the feed's next link to request an additional feed
+and uses the class of the feed to convert the results of the GET request.

+Args:
+  feed: atom.Feed or a subclass. The feed should contain a next link and
+      the type of the feed will be applied to the results from the 
+      server. The new feed which is returned will be of the same class
+      as this feed which was passed in.

+Returns:
+  A new feed representing the next set of results in the server's feed.
+  The type of this feed will match that of the feed argument.
+ +
Post(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert or update  data into a GData service at the given URI.

+Args:
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri.
+  uri: string The location (feed) to which the data should be inserted.
+       Example: '/base/feeds/items'.
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the
+      server's response. Often this is a function like
+      GDataEntryFromString which will parse the body of the server's
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert data into a GData service at the given URI.

+Args:
+  verb: string, either 'POST' or 'PUT'
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri. 
+  uri: string The location (feed) to which the data should be inserted. 
+       Example: '/base/feeds/items'. 
+  extra_headers: dict (optional) HTTP headers which are to be included. 
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
ProgrammaticLogin(self, captcha_token=None, captcha_response=None)
Authenticates the user and sets the GData Auth token.

+Login retrieves a temporary auth token which must be used with all
+requests to GData services. The auth token is stored in the GData client
+object.

+Login is also used to respond to a captcha challenge. If the user's login
+attempt failed with a CaptchaRequired error, the user can respond by
+calling Login with the captcha token and the answer to the challenge.

+Args:
+  captcha_token: string (optional) The identifier for the captcha challenge
+                 which was presented to the user.
+  captcha_response: string (optional) The user's answer to the captcha
+                    challenge.

+Raises:
+  CaptchaRequired if the login service will require a captcha response
+  BadAuthentication if the login service rejected the username or password
+  Error if the login service responded with a 403 different from the above
+ +
Put(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=3, media_source=None, converter=None)
Updates an entry at the given URI.

+Args:
+  data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The 
+        XML containing the updated data.
+  uri: string A URI indicating entry to which the update will be applied.
+       Example: '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the put succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
RevokeAuthSubToken(self)
Revokes an existing AuthSub token.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
RevokeOAuthToken(self)
Revokes an existing OAuth token.

+Raises:
+  NonOAuthToken if the user's auth token is not an OAuth token.
+  RevokingOAuthTokenFailed if request for revoking an OAuth token failed.
+ +
SetAuthSubToken(self, token, scopes=None, rsa_key=None)
Sets the token sent in requests to an AuthSub token.

+Sets the current_token and attempts to add the token to the token_store.

+Only use this method if you have received a token from the AuthSub
+service. The auth token is set automatically when UpgradeToSessionToken()
+is used. See documentation for Google AuthSub here:
+http://code.google.com/apis/accounts/AuthForWebApps.html 

+Args:
+ token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string
+        The token returned by the AuthSub service. If the token is an
+        AuthSubToken or SecureAuthSubToken, the scope information stored in
+        the token is used. If the token is a string, the scopes parameter is
+        used to determine the valid scopes.
+ scopes: list of URLs for which the token is valid. This is only used
+         if the token parameter is a string.
+ rsa_key: string (optional) Private key required for RSA_SHA1 signature
+          method.  This parameter is necessary if the token is a string
+          representing a secure token.
+ +
SetClientLoginToken(self, token, scopes=None)
Sets the token sent in requests to a ClientLogin token.

+This method sets the current_token to a new ClientLoginToken and it 
+also attempts to add the ClientLoginToken to the token_store.

+Only use this method if you have received a token from the ClientLogin
+service. The auth_token is set automatically when ProgrammaticLogin()
+is used. See documentation for Google ClientLogin here:
+http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html

+Args:
+  token: string or instance of a ClientLoginToken.
+ +
SetOAuthInputParameters(self, signature_method, consumer_key, consumer_secret=None, rsa_key=None, two_legged_oauth=False)
Sets parameters required for using OAuth authentication mechanism.

+NOTE: Though consumer_secret and rsa_key are optional, either of the two
+is required depending on the value of the signature_method.

+Args:
+  signature_method: class which provides implementation for strategy class
+      oauth.oauth.OAuthSignatureMethod. Signature method to be used for
+      signing each request. Valid implementations are provided as the
+      constants defined by gdata.auth.OAuthSignatureMethod. Currently
+      they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
+      gdata.auth.OAuthSignatureMethod.HMAC_SHA1
+  consumer_key: string Domain identifying third_party web application.
+  consumer_secret: string (optional) Secret generated during registration.
+      Required only for HMAC_SHA1 signature method.
+  rsa_key: string (optional) Private key required for RSA_SHA1 signature
+      method.
+  two_legged_oauth: string (default=False) Enables two-legged OAuth process.
+ +
SetOAuthToken(self, oauth_token)
Attempts to set the current token and add it to the token store.

+The oauth_token can be any OAuth token i.e. unauthorized request token,
+authorized request token or access token.
+This method also attempts to add the token to the token store.
+Use this method any time you want the current token to point to the
+oauth_token passed. For e.g. call this method with the request token
+you receive from FetchOAuthRequestToken.

+Args:
+  request_token: gdata.auth.OAuthToken OAuth request token.
+ +
UpgradeToOAuthAccessToken(self, authorized_request_token=None, oauth_version='1.0')
Upgrades the authorized request token to an access token.

+Args:
+  authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request
+      token. If not specified, then the current token will be used if it is
+      of type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.
+  oauth_version: str (default='1.0') oauth_version parameter. All other
+      'oauth_' parameters are added by default. This parameter too, is
+      added by default but here you can override its value.
+      
+Raises:
+  NonOAuthToken if the user's authorized request token is not an OAuth
+  token or if an authorized request token was not available.
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
UpgradeToSessionToken(self, token=None)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         (optional) which is good for a single use but can be upgraded
+         to a session token. If no token is passed in, the token
+         is found by looking in the token_store by looking for a token
+         for the current scope.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
upgrade_to_session_token(self, token)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         which is good for a single use but can be upgraded to a
+         session token.

+Returns:
+  The upgraded token as a gdata.auth.AuthSubToken object.

+Raises:
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
+Data descriptors inherited from gdata.service.GDataService:
+
captcha_token
+
Get the captcha token for a login request.
+
+
captcha_url
+
Get the captcha URL for a login request.
+
+
source
+
The source is the name of the application making the request. 
+It should be in the form company_id-app_name-app_version
+
+
+Data and other attributes inherited from gdata.service.GDataService:
+
auth_token = None
+ +
handler = None
+ +
tokens = None
+ +
+Methods inherited from atom.service.AtomService:
+
UseBasicAuth(self, username, password, for_proxy=False)
Sets an Authentication: Basic HTTP header containing plaintext.

+Deprecated, use use_basic_auth instead.

+The username and password are base64 encoded and added to an HTTP header
+which will be included in each request. Note that your username and 
+password are sent in plaintext.

+Args:
+  username: str
+  password: str
+ +
request(self, operation, url, data=None, headers=None, url_params=None)
+ +
use_basic_auth(self, username, password, scopes=None)
+ +
+Data descriptors inherited from atom.service.AtomService:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
debug
+
If True, HTTP debug information is printed.
+
+
override_token
+
+
+Data and other attributes inherited from atom.service.AtomService:
+
auto_set_current_token = True
+ +
auto_store_tokens = True
+ +
current_token = None
+ +
port = 80
+ +
ssl = False
+ +

+ + + + + +
 
+class Error(exceptions.Exception)
    
Method resolution order:
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors defined here:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+class RequestError(Error)
    
Method resolution order:
+
RequestError
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+Data
       DEFAULT_BATCH_URL = 'http://www.google.com/calendar/feeds/default/private/full/batch'
+__author__ = 'api.vli (Vivian Li)'

+ + + + + +
 
+Author
       api.vli (Vivian Li)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.client.html b/gdata.py-1.2.3/pydocs/gdata.client.html new file mode 100644 index 0000000..f5e2e98 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.client.html @@ -0,0 +1,570 @@ + + +Python: module gdata.client + + + + +
 
+ 
gdata.client
index
/usr/local/svn/gdata-python-client/src/gdata/client.py
+

# Copyright (C) 2008 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+

+ + + + + +
 
+Modules
       
atom
+
gdata
+
urllib
+
urlparse
+

+ + + + + +
 
+Classes
       
+
gdata.service.Error(exceptions.Exception) +
+
+
AuthorizationRequired +
+
+
gdata.service.GDataService(atom.service.AtomService) +
+
+
GDataClient +
+
+
+

+ + + + + +
 
+class AuthorizationRequired(gdata.service.Error)
    
Method resolution order:
+
AuthorizationRequired
+
gdata.service.Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from gdata.service.Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + + + +
 
+class GDataClient(gdata.service.GDataService)
   This class is deprecated. 

+All functionality has been migrated to gdata.service.GDataService.
 
 
Method resolution order:
+
GDataClient
+
gdata.service.GDataService
+
atom.service.AtomService
+
__builtin__.object
+
+
+Methods defined here:
+
ClientLogin(self, username, password, service_name, source=None, account_type=None, auth_url=None, login_token=None, login_captcha=None)
+ +
Delete(self, url)
+ +
Get(self, url, parser)
Simplified interface for Get.

+Requires a parser function which takes the server response's body as
+the only argument.

+Args:
+  url: A string or something that can be converted to a string using str.
+      The URL of the requested resource.
+  parser: A function which takes the HTTP body from the server as its
+      only result. Common values would include str, 
+      gdata.GDataEntryFromString, and gdata.GDataFeedFromString.

+Returns: The result of calling parser(http_response_body).
+ +
Post(self, data, url, parser, media_source=None)
Streamlined version of Post.

+Requires a parser function which takes the server response's body as
+the only argument.
+ +
Put(self, data, url, parser, media_source=None)
Streamlined version of Put.

+Requires a parser function which takes the server response's body as
+the only argument.
+ +
__init__(self, application_name=None, tokens=None)
+ +
+Methods inherited from gdata.service.GDataService:
+
AuthSubTokenInfo(self)
Fetches the AuthSub token's metadata from the server.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
FetchOAuthRequestToken(self, scopes=None, extra_parameters=None)
Fetches OAuth request token and returns it.

+Args:
+  scopes: string or list of string base URL(s) of the service(s) to be
+      accessed. If None, then this method tries to determine the
+      scope(s) from the current service.
+  extra_parameters: dict (optional) key-value pairs as any additional
+      parameters to be included in the URL and signature while making a
+      request for fetching an OAuth request token. All the OAuth parameters
+      are added by default. But if provided through this argument, any
+      default parameters will be overwritten. For e.g. a default parameter
+      oauth_version 1.0 can be overwritten if
+      extra_parameters = {'oauth_version': '2.0'}
+  
+Returns:
+  The fetched request token as a gdata.auth.OAuthToken object.
+  
+Raises:
+  FetchingOAuthRequestTokenFailed if the server responded to the request
+  with an error.
+ +
GenerateAuthSubURL(self, next, scope, secure=False, session=True, domain='default')
Generate a URL at which the user will login and be redirected back.

+Users enter their credentials on a Google login page and a token is sent
+to the URL specified in next. See documentation for AuthSub login at:
+http://code.google.com/apis/accounts/docs/AuthSub.html

+Args:
+  next: string The URL user will be sent to after logging in.
+  scope: string or list of strings. The URLs of the services to be 
+         accessed.
+  secure: boolean (optional) Determines whether or not the issued token
+          is a secure token.
+  session: boolean (optional) Determines whether or not the issued token
+           can be upgraded to a session token.
+ +
GenerateOAuthAuthorizationURL(self, request_token=None, callback_url=None, extra_params=None, include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope')
Generates URL at which user will login to authorize the request token.

+Args:
+  request_token: gdata.auth.OAuthToken (optional) OAuth request token.
+      If not specified, then the current token will be used if it is of
+      type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.    
+  callback_url: string (optional) The URL user will be sent to after
+      logging in and granting access.
+  extra_params: dict (optional) Additional parameters to be sent.
+  include_scopes_in_callback: Boolean (default=False) if set to True, and
+      if 'callback_url' is present, the 'callback_url' will be modified to
+      include the scope(s) from the request token as a URL parameter. The
+      key for the 'callback' URL's scope parameter will be
+      OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as
+      a parameter to the 'callback' URL, is that the page which receives
+      the OAuth token will be able to tell which URLs the token grants
+      access to.
+  scopes_param_prefix: string (default='oauth_token_scope') The URL
+      parameter key which maps to the list of valid scopes for the token.
+      This URL parameter will be included in the callback URL along with
+      the scopes of the token as value if include_scopes_in_callback=True.
+      
+Returns:
+  A string URL at which the user is required to login.

+Raises:
+  NonOAuthToken if the user's request token is not an OAuth token or if a
+  request token was not available.
+ +
GetAuthSubToken(self)
Returns the AuthSub token as a string.

+If the token is an gdta.auth.AuthSubToken, the Authorization Label
+("AuthSub token") is removed.

+This method examines the current_token to see if it is an AuthSubToken
+or SecureAuthSubToken. If not, it searches the token_store for a token
+which matches the current scope.

+The current scope is determined by the service name string member.

+Returns:
+  If the current_token is set to an AuthSubToken/SecureAuthSubToken,
+  return the token string. If there is no current_token, a token string
+  for a token which matches the service object's default scope is returned.
+  If there are no tokens valid for the scope, returns None.
+ +
GetClientLoginToken(self)
Returns the token string for the current token or a token matching the 
+service scope.

+If the current_token is a ClientLoginToken, the token string for 
+the current token is returned. If the current_token is not set, this method
+searches for a token in the token_store which is valid for the service 
+object's current scope.

+The current scope is determined by the service name string member.
+The token string is the end of the Authorization header, it doesn not
+include the ClientLogin label.
+ +
GetEntry(self, uri, extra_headers=None)
Query the GData API with the given URI and receive an Entry.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataEntry built from the XML in the server's response.
+ +
GetFeed(self, uri, extra_headers=None, converter=<function GDataFeedFromString at 0x886b18>)
Query the GData API with the given URI and receive a Feed.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataFeed built from the XML in the server's response.
+ +
GetMedia(self, uri, extra_headers=None)
Returns a MediaSource containing media and its metadata from the given
+URI string.
+ +
GetNext(self, feed)
Requests the next 'page' of results in the feed.

+This method uses the feed's next link to request an additional feed
+and uses the class of the feed to convert the results of the GET request.

+Args:
+  feed: atom.Feed or a subclass. The feed should contain a next link and
+      the type of the feed will be applied to the results from the 
+      server. The new feed which is returned will be of the same class
+      as this feed which was passed in.

+Returns:
+  A new feed representing the next set of results in the server's feed.
+  The type of this feed will match that of the feed argument.
+ +
PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert data into a GData service at the given URI.

+Args:
+  verb: string, either 'POST' or 'PUT'
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri. 
+  uri: string The location (feed) to which the data should be inserted. 
+       Example: '/base/feeds/items'. 
+  extra_headers: dict (optional) HTTP headers which are to be included. 
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
ProgrammaticLogin(self, captcha_token=None, captcha_response=None)
Authenticates the user and sets the GData Auth token.

+Login retreives a temporary auth token which must be used with all
+requests to GData services. The auth token is stored in the GData client
+object.

+Login is also used to respond to a captcha challenge. If the user's login
+attempt failed with a CaptchaRequired error, the user can respond by
+calling Login with the captcha token and the answer to the challenge.

+Args:
+  captcha_token: string (optional) The identifier for the captcha challenge
+                 which was presented to the user.
+  captcha_response: string (optional) The user's answer to the captch 
+                    challenge.

+Raises:
+  CaptchaRequired if the login service will require a captcha response
+  BadAuthentication if the login service rejected the username or password
+  Error if the login service responded with a 403 different from the above
+ +
RevokeAuthSubToken(self)
Revokes an existing AuthSub token.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
RevokeOAuthToken(self)
Revokes an existing OAuth token.

+Raises:
+  NonOAuthToken if the user's auth token is not an OAuth token.
+  RevokingOAuthTokenFailed if request for revoking an OAuth token failed.
+ +
SetAuthSubToken(self, token, scopes=None, rsa_key=None)
Sets the token sent in requests to an AuthSub token.

+Sets the current_token and attempts to add the token to the token_store.

+Only use this method if you have received a token from the AuthSub
+service. The auth token is set automatically when UpgradeToSessionToken()
+is used. See documentation for Google AuthSub here:
+http://code.google.com/apis/accounts/AuthForWebApps.html 

+Args:
+ token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string
+        The token returned by the AuthSub service. If the token is an
+        AuthSubToken or SecureAuthSubToken, the scope information stored in
+        the token is used. If the token is a string, the scopes parameter is
+        used to determine the valid scopes.
+ scopes: list of URLs for which the token is valid. This is only used
+         if the token parameter is a string.
+ rsa_key: string (optional) Private key required for RSA_SHA1 signature
+          method.  This parameter is necessary if the token is a string
+          representing a secure token.
+ +
SetClientLoginToken(self, token, scopes=None)
Sets the token sent in requests to a ClientLogin token.

+This method sets the current_token to a new ClientLoginToken and it 
+also attempts to add the ClientLoginToken to the token_store.

+Only use this method if you have received a token from the ClientLogin
+service. The auth_token is set automatically when ProgrammaticLogin()
+is used. See documentation for Google ClientLogin here:
+http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html

+Args:
+  token: string or instance of a ClientLoginToken.
+ +
SetOAuthInputParameters(self, signature_method, consumer_key, consumer_secret=None, rsa_key=None, two_legged_oauth=False)
Sets parameters required for using OAuth authentication mechanism.

+NOTE: Though consumer_secret and rsa_key are optional, either of the two
+is required depending on the value of the signature_method.

+Args:
+  signature_method: class which provides implementation for strategy class
+      oauth.oauth.OAuthSignatureMethod. Signature method to be used for
+      signing each request. Valid implementations are provided as the
+      constants defined by gdata.auth.OAuthSignatureMethod. Currently
+      they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
+      gdata.auth.OAuthSignatureMethod.HMAC_SHA1
+  consumer_key: string Domain identifying third_party web application.
+  consumer_secret: string (optional) Secret generated during registration.
+      Required only for HMAC_SHA1 signature method.
+  rsa_key: string (optional) Private key required for RSA_SHA1 signature
+      method.
+  two_legged_oauth: string (default=False) Enables two-legged OAuth process.
+ +
SetOAuthToken(self, oauth_token)
Attempts to set the current token and add it to the token store.

+The oauth_token can be any OAuth token i.e. unauthorized request token,
+authorized request token or access token.
+This method also attempts to add the token to the token store.
+Use this method any time you want the current token to point to the
+oauth_token passed. For e.g. call this method with the request token
+you receive from FetchOAuthRequestToken.

+Args:
+  request_token: gdata.auth.OAuthToken OAuth request token.
+ +
UpgradeToOAuthAccessToken(self, authorized_request_token=None, oauth_version='1.0')
Upgrades the authorized request token to an access token.

+Args:
+  authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request
+      token. If not specified, then the current token will be used if it is
+      of type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.
+  oauth_version: str (default='1.0') oauth_version parameter. All other
+      'oauth_' parameters are added by default. This parameter too, is
+      added by default but here you can override it's value.
+      
+Raises:
+  NonOAuthToken if the user's authorized request token is not an OAuth
+  token or if an authorized request token was not available.
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
UpgradeToSessionToken(self, token=None)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         (optional) which is good for a single use but can be upgraded
+         to a session token. If no token is passed in, the token
+         is found by looking in the token_store by looking for a token
+         for the current scope.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
upgrade_to_session_token(self, token)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         which is good for a single use but can be upgraded to a
+         session token.

+Returns:
+  The upgraded token as a gdata.auth.AuthSubToken object.

+Raises:
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
+Data descriptors inherited from gdata.service.GDataService:
+
captcha_token
+
Get the captcha token for a login request.
+
+
captcha_url
+
Get the captcha URL for a login request.
+
+
source
+
The source is the name of the application making the request. 
+It should be in the form company_id-app_name-app_version
+
+
+Data and other attributes inherited from gdata.service.GDataService:
+
auth_token = None
+ +
handler = None
+ +
tokens = None
+ +
+Methods inherited from atom.service.AtomService:
+
UseBasicAuth(self, username, password, for_proxy=False)
Sets an Authenticaiton: Basic HTTP header containing plaintext.

+Deprecated, use use_basic_auth instead.

+The username and password are base64 encoded and added to an HTTP header
+which will be included in each request. Note that your username and 
+password are sent in plaintext.

+Args:
+  username: str
+  password: str
+ +
request(self, operation, url, data=None, headers=None, url_params=None)
+ +
use_basic_auth(self, username, password, scopes=None)
+ +
+Data descriptors inherited from atom.service.AtomService:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
debug
+
If True, HTTP debug information is printed.
+
+
override_token
+
+
+Data and other attributes inherited from atom.service.AtomService:
+
auto_set_current_token = True
+ +
auto_store_tokens = True
+ +
current_token = None
+ +
port = 80
+ +
ssl = False
+ +

+ + + + + +
 
+Data
       CLIENT_LOGIN_SCOPES = {'apps': ['http://www.google.com/a/feeds/', 'https://www.google.com/a/feeds/', 'http://apps-apis.google.com/a/feeds/', 'https://apps-apis.google.com/a/feeds/'], 'blogger': ['http://www.blogger.com/feeds/'], 'cl': ['https://www.google.com/calendar/feeds/', 'http://www.google.com/calendar/feeds/'], 'codesearch': ['http://www.google.com/codesearch/feeds/'], 'cp': ['https://www.google.com/m8/feeds/', 'http://www.google.com/m8/feeds/'], 'finance': ['http://finance.google.com/finance/feeds/'], 'gbase': ['http://base.google.com/base/feeds/', 'http://www.google.com/base/feeds/'], 'health': ['https://www.google.com/health/feeds/'], 'lh2': ['http://picasaweb.google.com/data/'], 'sitemaps': ['https://www.google.com/webmasters/tools/feeds/'], ...}
+SCOPE_URL_PARAM_NAME = 'authsub_token_scope'
+__author__ = 'api.jscudder (Jeff Scudder)'

+ + + + + +
 
+Author
       api.jscudder (Jeff Scudder)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.codesearch.html b/gdata.py-1.2.3/pydocs/gdata.codesearch.html new file mode 100644 index 0000000..7d425e9 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.codesearch.html @@ -0,0 +1,469 @@ + + +Python: package gdata.codesearch + + + + +
 
+ 
gdata.codesearch
index
/usr/local/svn/gdata-python-client/src/gdata/codesearch/__init__.py
+

Contains extensions to Atom objects used by Google Codesearch

+

+ + + + + +
 
+Package Contents
       
service
+

+ + + + + +
 
+Classes
       
+
atom.AtomBase(atom.ExtensionContainer) +
+
+
File +
Match +
Package +
+
+
gdata.GDataEntry(atom.Entry, gdata.LinkFinder) +
+
+
CodesearchEntry +
+
+
gdata.GDataFeed(atom.Feed, gdata.LinkFinder) +
+
+
CodesearchFeed +
+
+
+

+ + + + + + + +
 
+class CodesearchEntry(gdata.GDataEntry)
   Google codesearch atom entry
 
 
Method resolution order:
+
CodesearchEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, match=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class CodesearchFeed(gdata.GDataFeed)
   feed containing list of Google codesearch Items
 
 
Method resolution order:
+
CodesearchFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class File(atom.AtomBase)
   The Google Codesearch file element
 
 
Method resolution order:
+
File
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Match(atom.AtomBase)
   The Google Codesearch match element
 
 
Method resolution order:
+
Match
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, line_number=None, type=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Package(atom.AtomBase)
   The Google Codesearch package element
 
 
Method resolution order:
+
Package
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, name=None, uri=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Functions
       
CodesearchEntryFromString(xml_string)
Converts an XML string into a CodesearchEntry object.

+Args:
+    xml_string: string The XML describing a Codesearch feed entry.

+Returns:
+    A CodesearchEntry object corresponding to the given XML.
+
CodesearchFeedFromString(xml_string)
Converts an XML string into a CodesearchFeed object.
+Args:
+xml_string: string The XML describing a Codesearch feed.
+Returns:
+A CodeseartchFeed object corresponding to the given XML.
+

+ + + + + +
 
+Data
       CODESEARCH_NAMESPACE = 'http://schemas.google.com/codesearch/2006'
+CODESEARCH_TEMPLATE = '{http://shema.google.com/codesearch/2006}%s'
+__author__ = 'Benoit Chesneau'

+ + + + + +
 
+Author
       Benoit Chesneau
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.codesearch.service.html b/gdata.py-1.2.3/pydocs/gdata.codesearch.service.html new file mode 100644 index 0000000..f3419b0 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.codesearch.service.html @@ -0,0 +1,785 @@ + + +Python: module gdata.codesearch.service + + + + +
 
+ 
gdata.codesearch.service
index
/usr/local/svn/gdata-python-client/src/gdata/codesearch/service.py
+

CodesearchService extends GDataService to streamline Google Codesearch 
+operations

+

+ + + + + +
 
+Modules
       
atom
+
gdata
+

+ + + + + +
 
+Classes
       
+
gdata.service.GDataService(atom.service.AtomService) +
+
+
CodesearchService +
+
+
gdata.service.Query(__builtin__.dict) +
+
+
CodesearchQuery +
+
+
+

+ + + + + + + +
 
+class CodesearchQuery(gdata.service.Query)
   Object used to construct the query to the Google Codesearch feed. here only as a shorcut
 
 
Method resolution order:
+
CodesearchQuery
+
gdata.service.Query
+
__builtin__.dict
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, feed='/codesearch/feeds/search', text_query=None, params=None, categories=None)
Constructor for Codesearch Query.

+Args:
+    feed: string (optional) The path for the feed. (e.g. '/codesearch/feeds/search')
+    text_query: string (optional) The contents of the q query parameter. This
+                string is URL escaped upon conversion to a URI.
+    params: dict (optional) Parameter value string pairs which become URL
+            params when translated to a URI. These parameters are added to
+            the query's items.
+    categories: list (optional) List of category strings which should be
+                included as query categories. See gdata.service.Query for
+                additional documentation.

+Yelds:
+    A CodesearchQuery object to construct a URI based on Codesearch feed
+ +
+Methods inherited from gdata.service.Query:
+
ToUri(self)
+ +
__str__(self)
+ +
+Data descriptors inherited from gdata.service.Query:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
alt
+
The feed query's alt parameter
+
+
author
+
The feed query's author parameter
+
+
max_results
+
The feed query's max-results parameter
+
+
orderby
+
The feed query's orderby parameter
+
+
published_max
+
The feed query's published-max parameter
+
+
published_min
+
The feed query's published-min parameter
+
+
start_index
+
The feed query's start-index parameter
+
+
text_query
+
The feed query's q parameter
+
+
updated_max
+
The feed query's updated-max parameter
+
+
updated_min
+
The feed query's updated-min parameter
+
+
+Methods inherited from __builtin__.dict:
+
__cmp__(...)
x.__cmp__(y) <==> cmp(x,y)
+ +
__contains__(...)
D.__contains__(k) -> True if D has a key k, else False
+ +
__delitem__(...)
x.__delitem__(y) <==> del x[y]
+ +
__eq__(...)
x.__eq__(y) <==> x==y
+ +
__ge__(...)
x.__ge__(y) <==> x>=y
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__gt__(...)
x.__gt__(y) <==> x>y
+ +
__hash__(...)
x.__hash__() <==> hash(x)
+ +
__iter__(...)
x.__iter__() <==> iter(x)
+ +
__le__(...)
x.__le__(y) <==> x<=y
+ +
__len__(...)
x.__len__() <==> len(x)
+ +
__lt__(...)
x.__lt__(y) <==> x<y
+ +
__ne__(...)
x.__ne__(y) <==> x!=y
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setitem__(...)
x.__setitem__(i, y) <==> x[i]=y
+ +
clear(...)
D.clear() -> None.  Remove all items from D.
+ +
copy(...)
D.copy() -> a shallow copy of D
+ +
get(...)
D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.
+ +
has_key(...)
D.has_key(k) -> True if D has a key k, else False
+ +
items(...)
D.items() -> list of D's (key, value) pairs, as 2-tuples
+ +
iteritems(...)
D.iteritems() -> an iterator over the (key, value) items of D
+ +
iterkeys(...)
D.iterkeys() -> an iterator over the keys of D
+ +
itervalues(...)
D.itervalues() -> an iterator over the values of D
+ +
keys(...)
D.keys() -> list of D's keys
+ +
pop(...)
D.pop(k[,d]) -> v, remove specified key and return the corresponding value
+If key is not found, d is returned if given, otherwise KeyError is raised
+ +
popitem(...)
D.popitem() -> (k, v), remove and return some (key, value) pair as a
+2-tuple; but raise KeyError if D is empty
+ +
setdefault(...)
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+ +
update(...)
D.update(E, **F) -> None.  Update D from E and F: for k in E: D[k] = E[k]
+(if E has keys else: for (k, v) in E: D[k] = v) then: for k in F: D[k] = F[k]
+ +
values(...)
D.values() -> list of D's values
+ +
+Data and other attributes inherited from __builtin__.dict:
+
__new__ = <built-in method __new__ of type object at 0x72b260>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
fromkeys = <built-in method fromkeys of type object at 0xc489b0>
dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
+v defaults to None.
+ +

+ + + + + + + +
 
+class CodesearchService(gdata.service.GDataService)
   Client extension for Google codesearch service
 
 
Method resolution order:
+
CodesearchService
+
gdata.service.GDataService
+
atom.service.AtomService
+
__builtin__.object
+
+
+Methods defined here:
+
GetSnippetsFeed(self, text_query=None)
Retrieve Codesearch feed for a keyword

+Args:
+    text_query : string (optional) The contents of the q query parameter. This
+                 string is URL escaped upon conversion to a URI.
+Returns:
+    A CodesearchFeed objects representing the feed returned by the server
+ +
Query(self, uri, converter=<function CodesearchFeedFromString at 0x7f88644c0d70>)
Queries the Codesearch feed and returns the resulting feed of
+   entries.

+Args:
+uri: string The full URI to be queried. This can contain query
+     parameters, a hostname, or simply the relative path to a Document
+     List feed. The DocumentQuery object is useful when constructing
+     query parameters.
+converter: func (optional) A function which will be executed on the
+           retrieved item, generally to render it into a Python object.
+           By default the CodesearchFeedFromString function is used to
+           return a CodesearchFeed object. This is because most feed
+           queries will result in a feed and not a single entry.

+Returns :
+    A CodesearchFeed objects representing the feed returned by the server
+ +
__init__(self, email=None, password=None, source=None, server='www.google.com', additional_headers=None)
Constructor for the CodesearchService.

+Args:
+    email: string (optional) The e-mail address of the account to use for
+           authentication.
+    password: string (optional) The password of the account to use for
+              authentication.
+    source: string (optional) The name of the user's application.
+    server: string (optional) The server the feed is hosted on.
+    additional_headers: dict (optional) Any additional HTTP headers to be
+                        transmitted to the service in the form of key-value
+                        pairs.
+Yields:
+    A CodesearchService object used to communicate with the Google Codesearch
+    service.
+ +
+Methods inherited from gdata.service.GDataService:
+
AuthSubTokenInfo(self)
Fetches the AuthSub token's metadata from the server.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
ClientLogin(self, username, password, account_type=None, service=None, auth_service_url=None, source=None, captcha_token=None, captcha_response=None)
Convenience method for authenticating using ProgrammaticLogin. 

+Sets values for email, password, and other optional members.

+Args:
+  username:
+  password:
+  account_type: string (optional)
+  service: string (optional)
+  auth_service_url: string (optional)
+  captcha_token: string (optional)
+  captcha_response: string (optional)
+ +
Delete(self, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4)
Deletes the entry at the given URI.

+Args:
+  uri: string The URI of the entry to be deleted. Example: 
+       '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.

+Returns:
+  True if the entry was deleted.
+ +
FetchOAuthRequestToken(self, scopes=None, extra_parameters=None)
Fetches OAuth request token and returns it.

+Args:
+  scopes: string or list of string base URL(s) of the service(s) to be
+      accessed. If None, then this method tries to determine the
+      scope(s) from the current service.
+  extra_parameters: dict (optional) key-value pairs as any additional
+      parameters to be included in the URL and signature while making a
+      request for fetching an OAuth request token. All the OAuth parameters
+      are added by default. But if provided through this argument, any
+      default parameters will be overwritten. For e.g. a default parameter
+      oauth_version 1.0 can be overwritten if
+      extra_parameters = {'oauth_version': '2.0'}
+  
+Returns:
+  The fetched request token as a gdata.auth.OAuthToken object.
+  
+Raises:
+  FetchingOAuthRequestTokenFailed if the server responded to the request
+  with an error.
+ +
GenerateAuthSubURL(self, next, scope, secure=False, session=True, domain='default')
Generate a URL at which the user will login and be redirected back.

+Users enter their credentials on a Google login page and a token is sent
+to the URL specified in next. See documentation for AuthSub login at:
+http://code.google.com/apis/accounts/docs/AuthSub.html

+Args:
+  next: string The URL user will be sent to after logging in.
+  scope: string or list of strings. The URLs of the services to be 
+         accessed.
+  secure: boolean (optional) Determines whether or not the issued token
+          is a secure token.
+  session: boolean (optional) Determines whether or not the issued token
+           can be upgraded to a session token.
+ +
GenerateOAuthAuthorizationURL(self, request_token=None, callback_url=None, extra_params=None, include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope')
Generates URL at which user will login to authorize the request token.

+Args:
+  request_token: gdata.auth.OAuthToken (optional) OAuth request token.
+      If not specified, then the current token will be used if it is of
+      type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.    
+  callback_url: string (optional) The URL user will be sent to after
+      logging in and granting access.
+  extra_params: dict (optional) Additional parameters to be sent.
+  include_scopes_in_callback: Boolean (default=False) if set to True, and
+      if 'callback_url' is present, the 'callback_url' will be modified to
+      include the scope(s) from the request token as a URL parameter. The
+      key for the 'callback' URL's scope parameter will be
+      OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as
+      a parameter to the 'callback' URL, is that the page which receives
+      the OAuth token will be able to tell which URLs the token grants
+      access to.
+  scopes_param_prefix: string (default='oauth_token_scope') The URL
+      parameter key which maps to the list of valid scopes for the token.
+      This URL parameter will be included in the callback URL along with
+      the scopes of the token as value if include_scopes_in_callback=True.
+      
+Returns:
+  A string URL at which the user is required to login.

+Raises:
+  NonOAuthToken if the user's request token is not an OAuth token or if a
+  request token was not available.
+ +
Get(self, uri, extra_headers=None, redirects_remaining=4, encoding='UTF-8', converter=None)
Query the GData API with the given URI

+The uri is the portion of the URI after the server value 
+(ex: www.google.com).

+To perform a query against Google Base, set the server to 
+'base.google.com' and set the uri to '/base/feeds/...', where ... is 
+your query. For example, to find snippets for all digital cameras uri 
+should be set to: '/base/feeds/snippets?bq=digital+camera'

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to 
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and 
+                 Authorization headers.
+  redirects_remaining: int (optional) Tracks the number of additional
+      redirects this method will allow. If the service object receives
+      a redirect and remaining is 0, it will not follow the redirect. 
+      This was added to avoid infinite redirect loops.
+  encoding: string (optional) The character encoding for the server's
+      response. Default is UTF-8
+  converter: func (optional) A function which will transform
+      the server's results before it is returned. Example: use 
+      GDataFeedFromString to parse the server response as if it
+      were a GDataFeed.

+Returns:
+  If there is no ResultsTransformer specified in the call, a GDataFeed 
+  or GDataEntry depending on which is sent from the server. If the 
+  response is neither a feed nor an entry and there is no ResultsTransformer,
+  return a string. If there is a ResultsTransformer, the returned value 
+  will be that of the ResultsTransformer function.
+ +
GetAuthSubToken(self)
Returns the AuthSub token as a string.

+If the token is a gdata.auth.AuthSubToken, the Authorization Label
+("AuthSub token") is removed.

+This method examines the current_token to see if it is an AuthSubToken
+or SecureAuthSubToken. If not, it searches the token_store for a token
+which matches the current scope.

+The current scope is determined by the service name string member.

+Returns:
+  If the current_token is set to an AuthSubToken/SecureAuthSubToken,
+  return the token string. If there is no current_token, a token string
+  for a token which matches the service object's default scope is returned.
+  If there are no tokens valid for the scope, returns None.
+ +
GetClientLoginToken(self)
Returns the token string for the current token or a token matching the 
+service scope.

+If the current_token is a ClientLoginToken, the token string for 
+the current token is returned. If the current_token is not set, this method
+searches for a token in the token_store which is valid for the service 
+object's current scope.

+The current scope is determined by the service name string member.
+The token string is the end of the Authorization header, it does not
+include the ClientLogin label.
+ +
GetEntry(self, uri, extra_headers=None)
Query the GData API with the given URI and receive an Entry.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataEntry built from the XML in the server's response.
+ +
GetFeed(self, uri, extra_headers=None, converter=<function GDataFeedFromString at 0x7f88644d1b18>)
Query the GData API with the given URI and receive a Feed.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataFeed built from the XML in the server's response.
+ +
GetMedia(self, uri, extra_headers=None)
Returns a MediaSource containing media and its metadata from the given
+URI string.
+ +
GetNext(self, feed)
Requests the next 'page' of results in the feed.

+This method uses the feed's next link to request an additional feed
+and uses the class of the feed to convert the results of the GET request.

+Args:
+  feed: atom.Feed or a subclass. The feed should contain a next link and
+      the type of the feed will be applied to the results from the 
+      server. The new feed which is returned will be of the same class
+      as this feed which was passed in.

+Returns:
+  A new feed representing the next set of results in the server's feed.
+  The type of this feed will match that of the feed argument.
+ +
Post(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert or update  data into a GData service at the given URI.

+Args:
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri.
+  uri: string The location (feed) to which the data should be inserted.
+       Example: '/base/feeds/items'.
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the
+      server's response. Often this is a function like
+      GDataEntryFromString which will parse the body of the server's
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert data into a GData service at the given URI.

+Args:
+  verb: string, either 'POST' or 'PUT'
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri. 
+  uri: string The location (feed) to which the data should be inserted. 
+       Example: '/base/feeds/items'. 
+  extra_headers: dict (optional) HTTP headers which are to be included. 
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
ProgrammaticLogin(self, captcha_token=None, captcha_response=None)
Authenticates the user and sets the GData Auth token.

+Login retrieves a temporary auth token which must be used with all
+requests to GData services. The auth token is stored in the GData client
+object.

+Login is also used to respond to a captcha challenge. If the user's login
+attempt failed with a CaptchaRequired error, the user can respond by
+calling Login with the captcha token and the answer to the challenge.

+Args:
+  captcha_token: string (optional) The identifier for the captcha challenge
+                 which was presented to the user.
+  captcha_response: string (optional) The user's answer to the captcha
+                    challenge.

+Raises:
+  CaptchaRequired if the login service will require a captcha response
+  BadAuthentication if the login service rejected the username or password
+  Error if the login service responded with a 403 different from the above
+ +
Put(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=3, media_source=None, converter=None)
Updates an entry at the given URI.

+Args:
+  data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The 
+        XML containing the updated data.
+  uri: string A URI indicating entry to which the update will be applied.
+       Example: '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the put succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
RevokeAuthSubToken(self)
Revokes an existing AuthSub token.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
RevokeOAuthToken(self)
Revokes an existing OAuth token.

+Raises:
+  NonOAuthToken if the user's auth token is not an OAuth token.
+  RevokingOAuthTokenFailed if request for revoking an OAuth token failed.
+ +
SetAuthSubToken(self, token, scopes=None, rsa_key=None)
Sets the token sent in requests to an AuthSub token.

+Sets the current_token and attempts to add the token to the token_store.

+Only use this method if you have received a token from the AuthSub
+service. The auth token is set automatically when UpgradeToSessionToken()
+is used. See documentation for Google AuthSub here:
+http://code.google.com/apis/accounts/AuthForWebApps.html 

+Args:
+ token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string
+        The token returned by the AuthSub service. If the token is an
+        AuthSubToken or SecureAuthSubToken, the scope information stored in
+        the token is used. If the token is a string, the scopes parameter is
+        used to determine the valid scopes.
+ scopes: list of URLs for which the token is valid. This is only used
+         if the token parameter is a string.
+ rsa_key: string (optional) Private key required for RSA_SHA1 signature
+          method.  This parameter is necessary if the token is a string
+          representing a secure token.
+ +
SetClientLoginToken(self, token, scopes=None)
Sets the token sent in requests to a ClientLogin token.

+This method sets the current_token to a new ClientLoginToken and it 
+also attempts to add the ClientLoginToken to the token_store.

+Only use this method if you have received a token from the ClientLogin
+service. The auth_token is set automatically when ProgrammaticLogin()
+is used. See documentation for Google ClientLogin here:
+http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html

+Args:
+  token: string or instance of a ClientLoginToken.
+ +
SetOAuthInputParameters(self, signature_method, consumer_key, consumer_secret=None, rsa_key=None, two_legged_oauth=False)
Sets parameters required for using OAuth authentication mechanism.

+NOTE: Though consumer_secret and rsa_key are optional, either of the two
+is required depending on the value of the signature_method.

+Args:
+  signature_method: class which provides implementation for strategy class
+      oauth.oauth.OAuthSignatureMethod. Signature method to be used for
+      signing each request. Valid implementations are provided as the
+      constants defined by gdata.auth.OAuthSignatureMethod. Currently
+      they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
+      gdata.auth.OAuthSignatureMethod.HMAC_SHA1
+  consumer_key: string Domain identifying third_party web application.
+  consumer_secret: string (optional) Secret generated during registration.
+      Required only for HMAC_SHA1 signature method.
+  rsa_key: string (optional) Private key required for RSA_SHA1 signature
+      method.
+  two_legged_oauth: string (default=False) Enables two-legged OAuth process.
+ +
SetOAuthToken(self, oauth_token)
Attempts to set the current token and add it to the token store.

+The oauth_token can be any OAuth token i.e. unauthorized request token,
+authorized request token or access token.
+This method also attempts to add the token to the token store.
+Use this method any time you want the current token to point to the
+oauth_token passed. For e.g. call this method with the request token
+you receive from FetchOAuthRequestToken.

+Args:
+  request_token: gdata.auth.OAuthToken OAuth request token.
+ +
UpgradeToOAuthAccessToken(self, authorized_request_token=None, oauth_version='1.0')
Upgrades the authorized request token to an access token.

+Args:
+  authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request
+      token. If not specified, then the current token will be used if it is
+      of type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.
+  oauth_version: str (default='1.0') oauth_version parameter. All other
+      'oauth_' parameters are added by default. This parameter too, is
+      added by default but here you can override it's value.
+      
+Raises:
+  NonOAuthToken if the user's authorized request token is not an OAuth
+  token or if an authorized request token was not available.
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
UpgradeToSessionToken(self, token=None)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         (optional) which is good for a single use but can be upgraded
+         to a session token. If no token is passed in, the token
+         is found by looking in the token_store by looking for a token
+         for the current scope.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
upgrade_to_session_token(self, token)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         which is good for a single use but can be upgraded to a
+         session token.

+Returns:
+  The upgraded token as a gdata.auth.AuthSubToken object.

+Raises:
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
+Data descriptors inherited from gdata.service.GDataService:
+
captcha_token
+
Get the captcha token for a login request.
+
+
captcha_url
+
Get the captcha URL for a login request.
+
+
source
+
The source is the name of the application making the request. 
+It should be in the form company_id-app_name-app_version
+
+
+Data and other attributes inherited from gdata.service.GDataService:
+
auth_token = None
+ +
handler = None
+ +
tokens = None
+ +
+Methods inherited from atom.service.AtomService:
+
UseBasicAuth(self, username, password, for_proxy=False)
Sets an Authentication: Basic HTTP header containing plaintext.

+Deprecated, use use_basic_auth instead.

+The username and password are base64 encoded and added to an HTTP header
+which will be included in each request. Note that your username and 
+password are sent in plaintext.

+Args:
+  username: str
+  password: str
+ +
request(self, operation, url, data=None, headers=None, url_params=None)
+ +
use_basic_auth(self, username, password, scopes=None)
+ +
+Data descriptors inherited from atom.service.AtomService:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
debug
+
If True, HTTP debug information is printed.
+
+
override_token
+
+
+Data and other attributes inherited from atom.service.AtomService:
+
auto_set_current_token = True
+ +
auto_store_tokens = True
+ +
current_token = None
+ +
port = 80
+ +
ssl = False
+ +

+ + + + + +
 
+Data
       __author__ = 'Benoit Chesneau'

+ + + + + +
 
+Author
       Benoit Chesneau
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.contacts.html b/gdata.py-1.2.3/pydocs/gdata.contacts.html new file mode 100644 index 0000000..1f0c94a --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.contacts.html @@ -0,0 +1,1188 @@ + + +Python: package gdata.contacts + + + + +
 
+ 
gdata.contacts
index
/usr/local/svn/gdata-python-client/src/gdata/contacts/__init__.py
+

Contains extensions to ElementWrapper objects used with Google Contacts.

+

+ + + + + +
 
+Package Contents
       
service
+

+ + + + + +
 
+Classes
       
+
atom.AtomBase(atom.ExtensionContainer) +
+
+
Deleted +
Email +
GroupMembershipInfo +
IM +
OrgName +
OrgTitle +
Organization +
PhoneNumber +
PostalAddress +
+
+
gdata.BatchEntry(gdata.GDataEntry) +
+
+
ContactEntry +
GroupEntry +
+
+
gdata.BatchFeed(gdata.GDataFeed) +
+
+
ContactsFeed(gdata.BatchFeed, gdata.LinkFinder) +
GroupsFeed +
+
+
gdata.LinkFinder(atom.LinkFinder) +
+
+
ContactsFeed(gdata.BatchFeed, gdata.LinkFinder) +
+
+
+

+ + + + + + + +
 
+class ContactEntry(gdata.BatchEntry)
   A Google Contact flavor of an Atom Entry
 
 
Method resolution order:
+
ContactEntry
+
gdata.BatchEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
GetPhotoEditLink(self)
+ +
GetPhotoLink(self)
+ +
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, email=None, postal_address=None, deleted=None, organization=None, phone_number=None, im=None, extended_property=None, group_membership_info=None, batch_operation=None, batch_id=None, batch_status=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class ContactsFeed(gdata.BatchFeed, gdata.LinkFinder)
   A Google Contacts feed flavor of an Atom Feed
 
 
Method resolution order:
+
ContactsFeed
+
gdata.BatchFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from gdata.BatchFeed:
+
AddBatchEntry(self, entry=None, id_url_string=None, batch_id_string=None, operation_string=None)
Logic for populating members of a BatchEntry and adding to the feed.


+If the entry is not a BatchEntry, it is converted to a BatchEntry so
+that the batch specific members will be present. 

+The id_url_string can be used in place of an entry if the batch operation
+applies to a URL. For example query and delete operations require just
+the URL of an entry, no body is sent in the HTTP request. If an
+id_url_string is sent instead of an entry, a BatchEntry is created and
+added to the feed.

+This method also assigns the desired batch id to the entry so that it 
+can be referenced in the server's response. If the batch_id_string is
+None, this method will assign a batch_id to be the index at which this
+entry will be in the feed's entry list.

+Args:
+  entry: BatchEntry, atom.Entry, or another Entry flavor (optional) The
+      entry which will be sent to the server as part of the batch request.
+      The item must have a valid atom id so that the server knows which 
+      entry this request references.
+  id_url_string: str (optional) The URL of the entry to be acted on. You
+      can find this URL in the text member of the atom id for an entry.
+      If an entry is not sent, this id will be used to construct a new
+      BatchEntry which will be added to the request feed.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. Note that batch_ids should either always be specified or
+      never, mixing could potentially result in duplicate batch ids.
+  operation_string: str (optional) The desired batch operation which will
+      set the batch_operation.type member of the entry. Options are
+      'insert', 'update', 'delete', and 'query'

+Raises:
+  MissingRequiredParameters: Raised if neither an id_ url_string nor an
+      entry are provided in the request.

+Returns:
+  The added entry.
+ +
AddDelete(self, url_string=None, entry=None, batch_id_string=None)
Adds a delete request to the batch request feed.

+This method takes either the url_string which is the atom id of the item
+to be deleted, or the entry itself. The atom id of the entry must be 
+present so that the server knows which entry should be deleted.

+Args:
+  url_string: str (optional) The URL of the entry to be deleted. You can
+     find this URL in the text member of the atom id for an entry. 
+  entry: BatchEntry (optional) The entry to be deleted.
+  batch_id_string: str (optional)

+Raises:
+  MissingRequiredParameters: Raised if neither a url_string nor an entry 
+      are provided in the request.
+ +
AddInsert(self, entry, batch_id_string=None)
Add an insert request to the operations in this batch request feed.

+If the entry doesn't yet have an operation or a batch id, these will
+be set to the insert operation and a batch_id specified as a parameter.

+Args:
+  entry: BatchEntry The entry which will be sent in the batch feed as an
+      insert request.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. Note that batch_ids should either always be specified or
+      never, mixing could potentially result in duplicate batch ids.
+ +
AddQuery(self, url_string=None, entry=None, batch_id_string=None)
Adds a query request to the batch request feed.

+This method takes either the url_string which is the query URL 
+whose results will be added to the result feed. The query URL will
+be encapsulated in a BatchEntry, and you may pass in the BatchEntry
+with a query URL instead of sending a url_string.

+Args:
+  url_string: str (optional)
+  entry: BatchEntry (optional)
+  batch_id_string: str (optional)

+Raises:
+  MissingRequiredParameters
+ +
AddUpdate(self, entry, batch_id_string=None)
Add an update request to the list of batch operations in this feed.

+Sets the operation type of the entry to insert if it is not already set
+and assigns the desired batch id to the entry so that it can be 
+referenced in the server's response.

+Args:
+  entry: BatchEntry The entry which will be sent to the server as an
+      update (HTTP PUT) request. The item must have a valid atom id
+      so that the server knows which entry to replace.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. See also comments for AddInsert.
+ +
GetBatchLink(self)
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + +
 
+class Deleted(atom.AtomBase)
    
Method resolution order:
+
Deleted
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class Email(atom.AtomBase)
    
Method resolution order:
+
Email
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, primary=None, rel=None, address=None, text=None, label=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class GroupEntry(gdata.BatchEntry)
   Represents a contact group.
 
 
Method resolution order:
+
GroupEntry
+
gdata.BatchEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, contributor=None, atom_id=None, link=None, published=None, rights=None, source=None, summary=None, control=None, title=None, updated=None, extended_property=None, batch_operation=None, batch_id=None, batch_status=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + +
 
+class GroupMembershipInfo(atom.AtomBase)
    
Method resolution order:
+
GroupMembershipInfo
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, deleted=None, href=None, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class GroupsFeed(gdata.BatchFeed)
   A Google contact groups feed flavor of an Atom Feed
 
 
Method resolution order:
+
GroupsFeed
+
gdata.BatchFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.BatchFeed:
+
AddBatchEntry(self, entry=None, id_url_string=None, batch_id_string=None, operation_string=None)
Logic for populating members of a BatchEntry and adding to the feed.


+If the entry is not a BatchEntry, it is converted to a BatchEntry so
+that the batch specific members will be present. 

+The id_url_string can be used in place of an entry if the batch operation
+applies to a URL. For example query and delete operations require just
+the URL of an entry, no body is sent in the HTTP request. If an
+id_url_string is sent instead of an entry, a BatchEntry is created and
+added to the feed.

+This method also assigns the desired batch id to the entry so that it 
+can be referenced in the server's response. If the batch_id_string is
+None, this method will assign a batch_id to be the index at which this
+entry will be in the feed's entry list.

+Args:
+  entry: BatchEntry, atom.Entry, or another Entry flavor (optional) The
+      entry which will be sent to the server as part of the batch request.
+      The item must have a valid atom id so that the server knows which 
+      entry this request references.
+  id_url_string: str (optional) The URL of the entry to be acted on. You
+      can find this URL in the text member of the atom id for an entry.
+      If an entry is not sent, this id will be used to construct a new
+      BatchEntry which will be added to the request feed.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. Note that batch_ids should either always be specified or
+      never, mixing could potentially result in duplicate batch ids.
+  operation_string: str (optional) The desired batch operation which will
+      set the batch_operation.type member of the entry. Options are
+      'insert', 'update', 'delete', and 'query'

+Raises:
+  MissingRequiredParameters: Raised if neither an id_ url_string nor an
+      entry are provided in the request.

+Returns:
+  The added entry.
+ +
AddDelete(self, url_string=None, entry=None, batch_id_string=None)
Adds a delete request to the batch request feed.

+This method takes either the url_string which is the atom id of the item
+to be deleted, or the entry itself. The atom id of the entry must be 
+present so that the server knows which entry should be deleted.

+Args:
+  url_string: str (optional) The URL of the entry to be deleted. You can
+     find this URL in the text member of the atom id for an entry. 
+  entry: BatchEntry (optional) The entry to be deleted.
+  batch_id_string: str (optional)

+Raises:
+  MissingRequiredParameters: Raised if neither a url_string nor an entry 
+      are provided in the request.
+ +
AddInsert(self, entry, batch_id_string=None)
Add an insert request to the operations in this batch request feed.

+If the entry doesn't yet have an operation or a batch id, these will
+be set to the insert operation and a batch_id specified as a parameter.

+Args:
+  entry: BatchEntry The entry which will be sent in the batch feed as an
+      insert request.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. Note that batch_ids should either always be specified or
+      never, mixing could potentially result in duplicate batch ids.
+ +
AddQuery(self, url_string=None, entry=None, batch_id_string=None)
Adds a query request to the batch request feed.

+This method takes either the url_string which is the query URL 
+whose results will be added to the result feed. The query URL will
+be encapsulated in a BatchEntry, and you may pass in the BatchEntry
+with a query URL instead of sending a url_string.

+Args:
+  url_string: str (optional)
+  entry: BatchEntry (optional)
+  batch_id_string: str (optional)

+Raises:
+  MissingRequiredParameters
+ +
AddUpdate(self, entry, batch_id_string=None)
Add an update request to the list of batch operations in this feed.

+Sets the operation type of the entry to insert if it is not already set
+and assigns the desired batch id to the entry so that it can be 
+referenced in the server's response.

+Args:
+  entry: BatchEntry The entry which will be sent to the server as an
+      update (HTTP PUT) request. The item must have a valid atom id
+      so that the server knows which entry to replace.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. See also comments for AddInsert.
+ +
GetBatchLink(self)
+ +
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, interrupted=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + +
 
+class IM(atom.AtomBase)
    
Method resolution order:
+
IM
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, primary=None, rel=None, address=None, protocol=None, label=None, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class OrgName(atom.AtomBase)
    
Method resolution order:
+
OrgName
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class OrgTitle(atom.AtomBase)
    
Method resolution order:
+
OrgTitle
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class Organization(atom.AtomBase)
    
Method resolution order:
+
Organization
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, rel=None, primary='false', org_name=None, org_title=None, label=None, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class PhoneNumber(atom.AtomBase)
    
Method resolution order:
+
PhoneNumber
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, primary=None, rel=None, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class PostalAddress(atom.AtomBase)
    
Method resolution order:
+
PostalAddress
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, primary=None, rel=None, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Functions
       
ContactEntryFromString(xml_string)
+
ContactsFeedFromString(xml_string)
+
GroupEntryFromString(xml_string)
+
GroupsFeedFromString(xml_string)
+

+ + + + + +
 
+Data
       CONTACTS_NAMESPACE = 'http://schemas.google.com/contact/2008'
+IM_AIM = 'http://schemas.google.com/g/2005#AIM'
+IM_GOOGLE_TALK = 'http://schemas.google.com/g/2005#GOOGLE_TALK'
+IM_ICQ = 'http://schemas.google.com/g/2005#ICQ'
+IM_JABBER = 'http://schemas.google.com/g/2005#JABBER'
+IM_MSN = 'http://schemas.google.com/g/2005#MSN'
+IM_QQ = 'http://schemas.google.com/g/2005#QQ'
+IM_SKYPE = 'http://schemas.google.com/g/2005#SKYPE'
+IM_YAHOO = 'http://schemas.google.com/g/2005#YAHOO'
+PHONE_CAR = 'http://schemas.google.com/g/2005#car'
+PHONE_FAX = 'http://schemas.google.com/g/2005#fax'
+PHONE_GENERAL = 'http://schemas.google.com/g/2005#general'
+PHONE_HOME = 'http://schemas.google.com/g/2005#home'
+PHONE_HOME_FAX = 'http://schemas.google.com/g/2005#home_fax'
+PHONE_INTERNAL = 'http://schemas.google.com/g/2005#internal-extension'
+PHONE_MOBILE = 'http://schemas.google.com/g/2005#mobile'
+PHONE_OTHER = 'http://schemas.google.com/g/2005#other'
+PHONE_PAGER = 'http://schemas.google.com/g/2005#pager'
+PHONE_SATELLITE = 'http://schemas.google.com/g/2005#satellite'
+PHONE_VOIP = 'http://schemas.google.com/g/2005#voip'
+PHONE_WORK = 'http://schemas.google.com/g/2005#work'
+PHONE_WORK_FAX = 'http://schemas.google.com/g/2005#work_fax'
+PHOTO_EDIT_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#edit-photo'
+PHOTO_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#photo'
+REL_HOME = 'http://schemas.google.com/g/2005#home'
+REL_OTHER = 'http://schemas.google.com/g/2005#other'
+REL_WORK = 'http://schemas.google.com/g/2005#work'
+__author__ = 'dbrattli (Dag Brattli)'

+ + + + + +
 
+Author
       dbrattli (Dag Brattli)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.contacts.service.html b/gdata.py-1.2.3/pydocs/gdata.contacts.service.html new file mode 100644 index 0000000..6bd8d50 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.contacts.service.html @@ -0,0 +1,1117 @@ + + +Python: module gdata.contacts.service + + + + +
 
+ 
gdata.contacts.service
index
/usr/local/svn/gdata-python-client/src/gdata/contacts/service.py
+

ContactsService extends the GDataService to streamline Google Contacts operations.

+ContactsService: Provides methods to query feeds and manipulate items. Extends 
+              GDataService.

+DictionaryToParamList: Function which converts a dictionary into a list of 
+                       URL arguments (represented as strings). This is a 
+                       utility function used in CRUD operations.

+

+ + + + + +
 
+Modules
       
atom
+
gdata
+

+ + + + + +
 
+Classes
       
+
exceptions.Exception(exceptions.BaseException) +
+
+
Error +
+
+
RequestError +
+
+
+
+
gdata.service.GDataService(atom.service.AtomService) +
+
+
ContactsService +
+
+
gdata.service.Query(__builtin__.dict) +
+
+
ContactsQuery +
GroupsQuery +
+
+
+

+ + + + + +
 
+class ContactsQuery(gdata.service.Query)
    
Method resolution order:
+
ContactsQuery
+
gdata.service.Query
+
__builtin__.dict
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, feed=None, text_query=None, params=None, categories=None, group=None)
+ +
+Data descriptors defined here:
+
group
+
The group query parameter to find only contacts in this group
+
+
+Methods inherited from gdata.service.Query:
+
ToUri(self)
+ +
__str__(self)
+ +
+Data descriptors inherited from gdata.service.Query:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
alt
+
The feed query's alt parameter
+
+
author
+
The feed query's author parameter
+
+
max_results
+
The feed query's max-results parameter
+
+
orderby
+
The feed query's orderby parameter
+
+
published_max
+
The feed query's published-max parameter
+
+
published_min
+
The feed query's published-min parameter
+
+
start_index
+
The feed query's start-index parameter
+
+
text_query
+
The feed query's q parameter
+
+
updated_max
+
The feed query's updated-max parameter
+
+
updated_min
+
The feed query's updated-min parameter
+
+
+Methods inherited from __builtin__.dict:
+
__cmp__(...)
x.__cmp__(y) <==> cmp(x,y)
+ +
__contains__(...)
D.__contains__(k) -> True if D has a key k, else False
+ +
__delitem__(...)
x.__delitem__(y) <==> del x[y]
+ +
__eq__(...)
x.__eq__(y) <==> x==y
+ +
__ge__(...)
x.__ge__(y) <==> x>=y
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__gt__(...)
x.__gt__(y) <==> x>y
+ +
__hash__(...)
x.__hash__() <==> hash(x)
+ +
__iter__(...)
x.__iter__() <==> iter(x)
+ +
__le__(...)
x.__le__(y) <==> x<=y
+ +
__len__(...)
x.__len__() <==> len(x)
+ +
__lt__(...)
x.__lt__(y) <==> x<y
+ +
__ne__(...)
x.__ne__(y) <==> x!=y
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setitem__(...)
x.__setitem__(i, y) <==> x[i]=y
+ +
clear(...)
D.clear() -> None.  Remove all items from D.
+ +
copy(...)
D.copy() -> a shallow copy of D
+ +
get(...)
D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.
+ +
has_key(...)
D.has_key(k) -> True if D has a key k, else False
+ +
items(...)
D.items() -> list of D's (key, value) pairs, as 2-tuples
+ +
iteritems(...)
D.iteritems() -> an iterator over the (key, value) items of D
+ +
iterkeys(...)
D.iterkeys() -> an iterator over the keys of D
+ +
itervalues(...)
D.itervalues() -> an iterator over the values of D
+ +
keys(...)
D.keys() -> list of D's keys
+ +
pop(...)
D.pop(k[,d]) -> v, remove specified key and return the corresponding value
+If key is not found, d is returned if given, otherwise KeyError is raised
+ +
popitem(...)
D.popitem() -> (k, v), remove and return some (key, value) pair as a
+2-tuple; but raise KeyError if D is empty
+ +
setdefault(...)
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+ +
update(...)
D.update(E, **F) -> None.  Update D from E and F: for k in E: D[k] = E[k]
+(if E has keys else: for (k, v) in E: D[k] = v) then: for k in F: D[k] = F[k]
+ +
values(...)
D.values() -> list of D's values
+ +
+Data and other attributes inherited from __builtin__.dict:
+
__new__ = <built-in method __new__ of type object at 0x72b260>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
fromkeys = <built-in method fromkeys of type object at 0xc6d080>
dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
+v defaults to None.
+ +

+ + + + + + + +
 
+class ContactsService(gdata.service.GDataService)
   Client for the Google Contats service.
 
 
Method resolution order:
+
ContactsService
+
gdata.service.GDataService
+
atom.service.AtomService
+
__builtin__.object
+
+
+Methods defined here:
+
ChangePhoto(self, media, contact_entry_or_url, content_type=None, content_length=None)
Change the photo for the contact by uploading a new photo.

+Performs a PUT against the photo edit URL to send the binary data for the
+photo.

+Args:
+  media: filename, file-like-object, or a gdata.MediaSource object to send.
+  contact_entry_or_url: ContactEntry or str If it is a ContactEntry, this
+                        method will search for an edit photo link URL and
+                        perform a PUT to the URL.
+  content_type: str (optional) the mime type for the photo data. This is
+                necessary if media is a file or file name, but if media
+                is a MediaSource object then the media object can contain
+                the mime type. If media_type is set, it will override the
+                mime type in the media object.
+  content_length: int or str (optional) Specifying the content length is
+                  only required if media is a file-like object. If media
+                  is a filename, the length is determined using 
+                  os.path.getsize. If media is a MediaSource object, it is
+                  assumed that it already contains the content length.
+ +
CreateContact(self, new_contact, insert_uri='/m8/feeds/contacts/default/full', url_params=None, escape_params=True)
Adds an new contact to Google Contacts.

+Args: 
+  new_contact: atom.Entry or subclass A new contact which is to be added to
+            Google Contacts.
+  insert_uri: the URL to post new contacts to the feed
+  url_params: dict (optional) Additional URL parameters to be included
+              in the insertion request. 
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.

+Returns:
+  On successful insert,  an entry containing the contact created
+  On failure, a RequestError is raised of the form:
+    {'status': HTTP status code from server, 
+     'reason': HTTP reason from the server, 
+     'body': HTTP body of the server's response}
+ +
CreateGroup(self, new_group, insert_uri='/m8/feeds/groups/default/full', url_params=None, escape_params=True)
+ +
DeleteContact(self, edit_uri, extra_headers=None, url_params=None, escape_params=True)
Removes an contact with the specified ID from Google Contacts.

+Args:
+  edit_uri: string The edit URL of the entry to be deleted. Example:
+           'http://www.google.com/m8/feeds/contacts/default/full/xxx/yyy'
+  url_params: dict (optional) Additional URL parameters to be included
+              in the deletion request.
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.

+Returns:
+  On successful delete,  a httplib.HTTPResponse containing the server's
+    response to the DELETE request.
+  On failure, a RequestError is raised of the form:
+    {'status': HTTP status code from server, 
+     'reason': HTTP reason from the server, 
+     'body': HTTP body of the server's response}
+ +
DeleteGroup(self, edit_uri, extra_headers=None, url_params=None, escape_params=True)
+ +
DeletePhoto(self, contact_entry_or_url)
+ +
ExecuteBatch(self, batch_feed, url, converter=<function ContactsFeedFromString at 0x9dc050>)
Sends a batch request feed to the server.

+Args:
+  batch_feed: gdata.contacts.ContactFeed A feed containing batch
+      request entries. Each entry contains the operation to be performed
+      on the data contained in the entry. For example an entry with an
+      operation type of insert will be used as if the individual entry
+      had been inserted.
+  url: str The batch URL to which these operations should be applied.
+  converter: Function (optional) The function used to convert the server's
+      response to an object. The default value is ContactsFeedFromString.

+Returns:
+  The results of the batch request's execution on the server. If the
+  default converter is used, this is stored in a ContactsFeed.
+ +
GetContact(self, uri)
+ +
GetContactsFeed(self, uri='http://www.google.com/m8/feeds/contacts/default/full')
+ +
GetGroupsFeed(self, uri='http://www.google.com/m8/feeds/groups/default/full')
+ +
GetPhoto(self, contact_entry_or_url)
Retrives the binary data for the contact's profile photo as a string.

+Args:
+  contact_entry_or_url: a gdata.contacts.ContactEntry objecr or a string
+     containing the photo link's URL. If the contact entry does not 
+     contain a photo link, the image will not be fetched and this method
+     will return None.
+ +
UpdateContact(self, edit_uri, updated_contact, url_params=None, escape_params=True)
Updates an existing contact.

+Args:
+  edit_uri: string The edit link URI for the element being updated
+  updated_contact: string, atom.Entry or subclass containing
+                the Atom Entry which will replace the contact which is 
+                stored at the edit_url 
+  url_params: dict (optional) Additional URL parameters to be included
+              in the update request.
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.

+Returns:
+  On successful update,  a httplib.HTTPResponse containing the server's
+    response to the PUT request.
+  On failure, a RequestError is raised of the form:
+    {'status': HTTP status code from server, 
+     'reason': HTTP reason from the server, 
+     'body': HTTP body of the server's response}
+ +
UpdateGroup(self, edit_uri, updated_group, url_params=None, escape_params=True)
+ +
__init__(self, email=None, password=None, source=None, server='www.google.com', additional_headers=None)
+ +
+Methods inherited from gdata.service.GDataService:
+
AuthSubTokenInfo(self)
Fetches the AuthSub token's metadata from the server.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
ClientLogin(self, username, password, account_type=None, service=None, auth_service_url=None, source=None, captcha_token=None, captcha_response=None)
Convenience method for authenticating using ProgrammaticLogin. 

+Sets values for email, password, and other optional members.

+Args:
+  username:
+  password:
+  account_type: string (optional)
+  service: string (optional)
+  auth_service_url: string (optional)
+  captcha_token: string (optional)
+  captcha_response: string (optional)
+ +
Delete(self, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4)
Deletes the entry at the given URI.

+Args:
+  uri: string The URI of the entry to be deleted. Example: 
+       '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.

+Returns:
+  True if the entry was deleted.
+ +
FetchOAuthRequestToken(self, scopes=None, extra_parameters=None)
Fetches OAuth request token and returns it.

+Args:
+  scopes: string or list of string base URL(s) of the service(s) to be
+      accessed. If None, then this method tries to determine the
+      scope(s) from the current service.
+  extra_parameters: dict (optional) key-value pairs as any additional
+      parameters to be included in the URL and signature while making a
+      request for fetching an OAuth request token. All the OAuth parameters
+      are added by default. But if provided through this argument, any
+      default parameters will be overwritten. For e.g. a default parameter
+      oauth_version 1.0 can be overwritten if
+      extra_parameters = {'oauth_version': '2.0'}
+  
+Returns:
+  The fetched request token as a gdata.auth.OAuthToken object.
+  
+Raises:
+  FetchingOAuthRequestTokenFailed if the server responded to the request
+  with an error.
+ +
GenerateAuthSubURL(self, next, scope, secure=False, session=True, domain='default')
Generate a URL at which the user will login and be redirected back.

+Users enter their credentials on a Google login page and a token is sent
+to the URL specified in next. See documentation for AuthSub login at:
+http://code.google.com/apis/accounts/docs/AuthSub.html

+Args:
+  next: string The URL user will be sent to after logging in.
+  scope: string or list of strings. The URLs of the services to be 
+         accessed.
+  secure: boolean (optional) Determines whether or not the issued token
+          is a secure token.
+  session: boolean (optional) Determines whether or not the issued token
+           can be upgraded to a session token.
+ +
GenerateOAuthAuthorizationURL(self, request_token=None, callback_url=None, extra_params=None, include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope')
Generates URL at which user will login to authorize the request token.

+Args:
+  request_token: gdata.auth.OAuthToken (optional) OAuth request token.
+      If not specified, then the current token will be used if it is of
+      type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.    
+  callback_url: string (optional) The URL user will be sent to after
+      logging in and granting access.
+  extra_params: dict (optional) Additional parameters to be sent.
+  include_scopes_in_callback: Boolean (default=False) if set to True, and
+      if 'callback_url' is present, the 'callback_url' will be modified to
+      include the scope(s) from the request token as a URL parameter. The
+      key for the 'callback' URL's scope parameter will be
+      OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as
+      a parameter to the 'callback' URL, is that the page which receives
+      the OAuth token will be able to tell which URLs the token grants
+      access to.
+  scopes_param_prefix: string (default='oauth_token_scope') The URL
+      parameter key which maps to the list of valid scopes for the token.
+      This URL parameter will be included in the callback URL along with
+      the scopes of the token as value if include_scopes_in_callback=True.
+      
+Returns:
+  A string URL at which the user is required to login.

+Raises:
+  NonOAuthToken if the user's request token is not an OAuth token or if a
+  request token was not available.
+ +
Get(self, uri, extra_headers=None, redirects_remaining=4, encoding='UTF-8', converter=None)
Query the GData API with the given URI

+The uri is the portion of the URI after the server value 
+(ex: www.google.com).

+To perform a query against Google Base, set the server to 
+'base.google.com' and set the uri to '/base/feeds/...', where ... is 
+your query. For example, to find snippets for all digital cameras uri 
+should be set to: '/base/feeds/snippets?bq=digital+camera'

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to 
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and 
+                 Authorization headers.
+  redirects_remaining: int (optional) Tracks the number of additional
+      redirects this method will allow. If the service object receives
+      a redirect and remaining is 0, it will not follow the redirect. 
+      This was added to avoid infinite redirect loops.
+  encoding: string (optional) The character encoding for the server's
+      response. Default is UTF-8
+  converter: func (optional) A function which will transform
+      the server's results before it is returned. Example: use 
+      GDataFeedFromString to parse the server response as if it
+      were a GDataFeed.

+Returns:
+  If there is no ResultsTransformer specified in the call, a GDataFeed 
+  or GDataEntry depending on which is sent from the server. If the 
+  response is niether a feed or entry and there is no ResultsTransformer,
+  return a string. If there is a ResultsTransformer, the returned value 
+  will be that of the ResultsTransformer function.
+ +
GetAuthSubToken(self)
Returns the AuthSub token as a string.

+If the token is an gdta.auth.AuthSubToken, the Authorization Label
+("AuthSub token") is removed.

+This method examines the current_token to see if it is an AuthSubToken
+or SecureAuthSubToken. If not, it searches the token_store for a token
+which matches the current scope.

+The current scope is determined by the service name string member.

+Returns:
+  If the current_token is set to an AuthSubToken/SecureAuthSubToken,
+  return the token string. If there is no current_token, a token string
+  for a token which matches the service object's default scope is returned.
+  If there are no tokens valid for the scope, returns None.
+ +
GetClientLoginToken(self)
Returns the token string for the current token or a token matching the 
+service scope.

+If the current_token is a ClientLoginToken, the token string for 
+the current token is returned. If the current_token is not set, this method
+searches for a token in the token_store which is valid for the service 
+object's current scope.

+The current scope is determined by the service name string member.
+The token string is the end of the Authorization header, it doesn not
+include the ClientLogin label.
+ +
GetEntry(self, uri, extra_headers=None)
Query the GData API with the given URI and receive an Entry.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataEntry built from the XML in the server's response.
+ +
GetFeed(self, uri, extra_headers=None, converter=<function GDataFeedFromString at 0x7fb51957ab18>)
Query the GData API with the given URI and receive a Feed.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataFeed built from the XML in the server's response.
+ +
GetMedia(self, uri, extra_headers=None)
Returns a MediaSource containing media and its metadata from the given
+URI string.
+ +
GetNext(self, feed)
Requests the next 'page' of results in the feed.

+This method uses the feed's next link to request an additional feed
+and uses the class of the feed to convert the results of the GET request.

+Args:
+  feed: atom.Feed or a subclass. The feed should contain a next link and
+      the type of the feed will be applied to the results from the 
+      server. The new feed which is returned will be of the same class
+      as this feed which was passed in.

+Returns:
+  A new feed representing the next set of results in the server's feed.
+  The type of this feed will match that of the feed argument.
+ +
Post(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert or update  data into a GData service at the given URI.

+Args:
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri.
+  uri: string The location (feed) to which the data should be inserted.
+       Example: '/base/feeds/items'.
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the
+      server's response. Often this is a function like
+      GDataEntryFromString which will parse the body of the server's
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert data into a GData service at the given URI.

+Args:
+  verb: string, either 'POST' or 'PUT'
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri. 
+  uri: string The location (feed) to which the data should be inserted. 
+       Example: '/base/feeds/items'. 
+  extra_headers: dict (optional) HTTP headers which are to be included. 
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
ProgrammaticLogin(self, captcha_token=None, captcha_response=None)
Authenticates the user and sets the GData Auth token.

+Login retreives a temporary auth token which must be used with all
+requests to GData services. The auth token is stored in the GData client
+object.

+Login is also used to respond to a captcha challenge. If the user's login
+attempt failed with a CaptchaRequired error, the user can respond by
+calling Login with the captcha token and the answer to the challenge.

+Args:
+  captcha_token: string (optional) The identifier for the captcha challenge
+                 which was presented to the user.
+  captcha_response: string (optional) The user's answer to the captch 
+                    challenge.

+Raises:
+  CaptchaRequired if the login service will require a captcha response
+  BadAuthentication if the login service rejected the username or password
+  Error if the login service responded with a 403 different from the above
+ +
Put(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=3, media_source=None, converter=None)
Updates an entry at the given URI.

+Args:
+  data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The 
+        XML containing the updated data.
+  uri: string A URI indicating entry to which the update will be applied.
+       Example: '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the put succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
RevokeAuthSubToken(self)
Revokes an existing AuthSub token.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
RevokeOAuthToken(self)
Revokes an existing OAuth token.

+Raises:
+  NonOAuthToken if the user's auth token is not an OAuth token.
+  RevokingOAuthTokenFailed if request for revoking an OAuth token failed.
+ +
SetAuthSubToken(self, token, scopes=None, rsa_key=None)
Sets the token sent in requests to an AuthSub token.

+Sets the current_token and attempts to add the token to the token_store.

+Only use this method if you have received a token from the AuthSub
+service. The auth token is set automatically when UpgradeToSessionToken()
+is used. See documentation for Google AuthSub here:
+http://code.google.com/apis/accounts/AuthForWebApps.html 

+Args:
+ token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string
+        The token returned by the AuthSub service. If the token is an
+        AuthSubToken or SecureAuthSubToken, the scope information stored in
+        the token is used. If the token is a string, the scopes parameter is
+        used to determine the valid scopes.
+ scopes: list of URLs for which the token is valid. This is only used
+         if the token parameter is a string.
+ rsa_key: string (optional) Private key required for RSA_SHA1 signature
+          method.  This parameter is necessary if the token is a string
+          representing a secure token.
+ +
SetClientLoginToken(self, token, scopes=None)
Sets the token sent in requests to a ClientLogin token.

+This method sets the current_token to a new ClientLoginToken and it 
+also attempts to add the ClientLoginToken to the token_store.

+Only use this method if you have received a token from the ClientLogin
+service. The auth_token is set automatically when ProgrammaticLogin()
+is used. See documentation for Google ClientLogin here:
+http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html

+Args:
+  token: string or instance of a ClientLoginToken.
+ +
SetOAuthInputParameters(self, signature_method, consumer_key, consumer_secret=None, rsa_key=None, two_legged_oauth=False)
Sets parameters required for using OAuth authentication mechanism.

+NOTE: Though consumer_secret and rsa_key are optional, either of the two
+is required depending on the value of the signature_method.

+Args:
+  signature_method: class which provides implementation for strategy class
+      oauth.oauth.OAuthSignatureMethod. Signature method to be used for
+      signing each request. Valid implementations are provided as the
+      constants defined by gdata.auth.OAuthSignatureMethod. Currently
+      they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
+      gdata.auth.OAuthSignatureMethod.HMAC_SHA1
+  consumer_key: string Domain identifying third_party web application.
+  consumer_secret: string (optional) Secret generated during registration.
+      Required only for HMAC_SHA1 signature method.
+  rsa_key: string (optional) Private key required for RSA_SHA1 signature
+      method.
+  two_legged_oauth: string (default=False) Enables two-legged OAuth process.
+ +
SetOAuthToken(self, oauth_token)
Attempts to set the current token and add it to the token store.

+The oauth_token can be any OAuth token i.e. unauthorized request token,
+authorized request token or access token.
+This method also attempts to add the token to the token store.
+Use this method any time you want the current token to point to the
+oauth_token passed. For e.g. call this method with the request token
+you receive from FetchOAuthRequestToken.

+Args:
+  request_token: gdata.auth.OAuthToken OAuth request token.
+ +
UpgradeToOAuthAccessToken(self, authorized_request_token=None, oauth_version='1.0')
Upgrades the authorized request token to an access token.

+Args:
+  authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request
+      token. If not specified, then the current token will be used if it is
+      of type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.
+  oauth_version: str (default='1.0') oauth_version parameter. All other
+      'oauth_' parameters are added by default. This parameter too, is
+      added by default but here you can override it's value.
+      
+Raises:
+  NonOAuthToken if the user's authorized request token is not an OAuth
+  token or if an authorized request token was not available.
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
UpgradeToSessionToken(self, token=None)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         (optional) which is good for a single use but can be upgraded
+         to a session token. If no token is passed in, the token
+         is found by looking in the token_store by looking for a token
+         for the current scope.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
upgrade_to_session_token(self, token)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         which is good for a single use but can be upgraded to a
+         session token.

+Returns:
+  The upgraded token as a gdata.auth.AuthSubToken object.

+Raises:
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
+Data descriptors inherited from gdata.service.GDataService:
+
captcha_token
+
Get the captcha token for a login request.
+
+
captcha_url
+
Get the captcha URL for a login request.
+
+
source
+
The source is the name of the application making the request. 
+It should be in the form company_id-app_name-app_version
+
+
+Data and other attributes inherited from gdata.service.GDataService:
+
auth_token = None
+ +
handler = None
+ +
tokens = None
+ +
+Methods inherited from atom.service.AtomService:
+
UseBasicAuth(self, username, password, for_proxy=False)
Sets an Authenticaiton: Basic HTTP header containing plaintext.

+Deprecated, use use_basic_auth instead.

+The username and password are base64 encoded and added to an HTTP header
+which will be included in each request. Note that your username and 
+password are sent in plaintext.

+Args:
+  username: str
+  password: str
+ +
request(self, operation, url, data=None, headers=None, url_params=None)
+ +
use_basic_auth(self, username, password, scopes=None)
+ +
+Data descriptors inherited from atom.service.AtomService:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
debug
+
If True, HTTP debug information is printed.
+
+
override_token
+
+
+Data and other attributes inherited from atom.service.AtomService:
+
auto_set_current_token = True
+ +
auto_store_tokens = True
+ +
current_token = None
+ +
port = 80
+ +
ssl = False
+ +

+ + + + + +
 
+class Error(exceptions.Exception)
    
Method resolution order:
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors defined here:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+class GroupsQuery(gdata.service.Query)
    
Method resolution order:
+
GroupsQuery
+
gdata.service.Query
+
__builtin__.dict
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, feed=None, text_query=None, params=None, categories=None)
+ +
+Methods inherited from gdata.service.Query:
+
ToUri(self)
+ +
__str__(self)
+ +
+Data descriptors inherited from gdata.service.Query:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
alt
+
The feed query's alt parameter
+
+
author
+
The feed query's author parameter
+
+
max_results
+
The feed query's max-results parameter
+
+
orderby
+
The feed query's orderby parameter
+
+
published_max
+
The feed query's published-max parameter
+
+
published_min
+
The feed query's published-min parameter
+
+
start_index
+
The feed query's start-index parameter
+
+
text_query
+
The feed query's q parameter
+
+
updated_max
+
The feed query's updated-max parameter
+
+
updated_min
+
The feed query's updated-min parameter
+
+
+Methods inherited from __builtin__.dict:
+
__cmp__(...)
x.__cmp__(y) <==> cmp(x,y)
+ +
__contains__(...)
D.__contains__(k) -> True if D has a key k, else False
+ +
__delitem__(...)
x.__delitem__(y) <==> del x[y]
+ +
__eq__(...)
x.__eq__(y) <==> x==y
+ +
__ge__(...)
x.__ge__(y) <==> x>=y
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__gt__(...)
x.__gt__(y) <==> x>y
+ +
__hash__(...)
x.__hash__() <==> hash(x)
+ +
__iter__(...)
x.__iter__() <==> iter(x)
+ +
__le__(...)
x.__le__(y) <==> x<=y
+ +
__len__(...)
x.__len__() <==> len(x)
+ +
__lt__(...)
x.__lt__(y) <==> x<y
+ +
__ne__(...)
x.__ne__(y) <==> x!=y
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setitem__(...)
x.__setitem__(i, y) <==> x[i]=y
+ +
clear(...)
D.clear() -> None.  Remove all items from D.
+ +
copy(...)
D.copy() -> a shallow copy of D
+ +
get(...)
D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.
+ +
has_key(...)
D.has_key(k) -> True if D has a key k, else False
+ +
items(...)
D.items() -> list of D's (key, value) pairs, as 2-tuples
+ +
iteritems(...)
D.iteritems() -> an iterator over the (key, value) items of D
+ +
iterkeys(...)
D.iterkeys() -> an iterator over the keys of D
+ +
itervalues(...)
D.itervalues() -> an iterator over the values of D
+ +
keys(...)
D.keys() -> list of D's keys
+ +
pop(...)
D.pop(k[,d]) -> v, remove specified key and return the corresponding value
+If key is not found, d is returned if given, otherwise KeyError is raised
+ +
popitem(...)
D.popitem() -> (k, v), remove and return some (key, value) pair as a
+2-tuple; but raise KeyError if D is empty
+ +
setdefault(...)
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+ +
update(...)
D.update(E, **F) -> None.  Update D from E and F: for k in E: D[k] = E[k]
+(if E has keys else: for (k, v) in E: D[k] = v) then: for k in F: D[k] = F[k]
+ +
values(...)
D.values() -> list of D's values
+ +
+Data and other attributes inherited from __builtin__.dict:
+
__new__ = <built-in method __new__ of type object at 0x72b260>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
fromkeys = <built-in method fromkeys of type object at 0xc6d420>
dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
+v defaults to None.
+ +

+ + + + + +
 
+class RequestError(Error)
    
Method resolution order:
+
RequestError
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+Data
       DEFAULT_BATCH_URL = 'http://www.google.com/m8/feeds/contacts/default/full/batch'
+__author__ = 'dbrattli (Dag Brattli)'

+ + + + + +
 
+Author
       dbrattli (Dag Brattli)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.docs.html b/gdata.py-1.2.3/pydocs/gdata.docs.html new file mode 100644 index 0000000..1c1818f --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.docs.html @@ -0,0 +1,330 @@ + + +Python: package gdata.docs + + + + +
 
+ 
gdata.docs
index
/usr/local/svn/gdata-python-client/src/gdata/docs/__init__.py
+

Contains extensions to Atom objects used with Google Documents.

+

+ + + + + +
 
+Package Contents
       
service
+

+ + + + + +
 
+Classes
       
+
gdata.GDataEntry(atom.Entry, gdata.LinkFinder) +
+
+
DocumentListEntry +
+
+
gdata.GDataFeed(atom.Feed, gdata.LinkFinder) +
+
+
DocumentListFeed +
+
+
+

+ + + + + + + +
 
+class DocumentListEntry(gdata.GDataEntry)
   The Google Documents version of an Atom Entry
 
 
Method resolution order:
+
DocumentListEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.Entry:
+
__init__(self, author=None, category=None, content=None, contributor=None, atom_id=None, link=None, published=None, rights=None, source=None, summary=None, control=None, title=None, updated=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for atom:entry

+Args:
+  author: list A list of Author instances which belong to this class.
+  category: list A list of Category instances
+  content: Content The entry's Content
+  contributor: list A list on Contributor instances
+  id: Id The entry's Id element
+  link: list A list of Link instances
+  published: Published The entry's Published element
+  rights: Rights The entry's Rights element
+  source: Source the entry's source element
+  summary: Summary the entry's summary element
+  title: Title the entry's title element
+  updated: Updated the entry's updated element
+  control: The entry's app:control element which can be used to mark an 
+      entry as a draft which should not be publicly viewable.
+  text: String The text contents of the element. This is the contents
+      of the Entry's XML text node. (Example: <foo>This is the text</foo>)
+  extension_elements: list A list of ExtensionElement instances which are
+      children of this element.
+  extension_attributes: dict A dictionary of strings which are the values
+      for additional XML attributes of this element.
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class DocumentListFeed(gdata.GDataFeed)
   A feed containing a list of Google Documents Items
 
 
Method resolution order:
+
DocumentListFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + +
 
+Functions
       
DocumentListEntryFromString(xml_string)
Converts an XML string into a DocumentListEntry object.

+Args:
+  xml_string: string The XML describing a Document List feed entry.

+Returns:
+  A DocumentListEntry object corresponding to the given XML.
+
DocumentListFeedFromString(xml_string)
Converts an XML string into a DocumentListFeed object.

+Args:
+  xml_string: string The XML describing a DocumentList feed.

+Returns:
+  A DocumentListFeed object corresponding to the given XML.
+

+ + + + + +
 
+Data
       __author__ = 'api.jfisher (Jeff Fisher)'

+ + + + + +
 
+Author
       api.jfisher (Jeff Fisher)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.docs.service.html b/gdata.py-1.2.3/pydocs/gdata.docs.service.html new file mode 100644 index 0000000..a780a7f --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.docs.service.html @@ -0,0 +1,874 @@ + + +Python: module gdata.docs.service + + + + +
 
+ 
gdata.docs.service
index
/usr/local/svn/gdata-python-client/src/gdata/docs/service.py
+

DocsService extends the GDataService to streamline Google Documents
+operations.

+DocsService: Provides methods to query feeds and manipulate items.
+                  Extends GDataService.

+DocumentQuery: Queries a Google Document list feed.

+

+ + + + + +
 
+Modules
       
atom
+
gdata
+
urllib
+

+ + + + + +
 
+Classes
       
+
gdata.service.GDataService(atom.service.AtomService) +
+
+
DocsService +
+
+
gdata.service.Query(__builtin__.dict) +
+
+
DocumentQuery +
+
+
+

+ + + + + + + +
 
+class DocsService(gdata.service.GDataService)
   Client extension for the Google Documents service Document List feed.
 
 
Method resolution order:
+
DocsService
+
gdata.service.GDataService
+
atom.service.AtomService
+
__builtin__.object
+
+
+Methods defined here:
+
GetDocumentListEntry(self, uri)
Retrieves a particular DocumentListEntry by its unique URI.

+Args:
+  uri: string The unique URI of an entry in a Document List feed.

+Returns:
+  A DocumentListEntry object representing the retrieved entry.
+ +
GetDocumentListFeed(self)
Retrieves a feed containing all of a user's documents.
+ +
Query(self, uri, converter=<function DocumentListFeedFromString at 0x7f8b187a79b0>)
Queries the Document List feed and returns the resulting feed of
+   entries.

+Args:
+  uri: string The full URI to be queried. This can contain query
+       parameters, a hostname, or simply the relative path to a Document
+       List feed. The DocumentQuery object is useful when constructing
+       query parameters.
+  converter: func (optional) A function which will be executed on the
+             retrieved item, generally to render it into a Python object.
+             By default the DocumentListFeedFromString function is used to
+             return a DocumentListFeed object. This is because most feed
+             queries will result in a feed and not a single entry.
+ +
QueryDocumentListFeed(self, uri)
Retrieves a DocumentListFeed by retrieving a URI based off the Document
+   List feed, including any query parameters. A DocumentQuery object can
+   be used to construct these parameters.

+Args:
+  uri: string The URI of the feed being retrieved possibly with query
+       parameters.

+Returns:
+  A DocumentListFeed object representing the feed returned by the server.
+ +
UploadDocument(self, media_source, title)
Uploads a document inside of a MediaSource object to the Document List
+   feed with the given title.

+Args:
+  media_source: MediaSource The gdata.MediaSource object containing a
+                document file to be uploaded.
+  title: string The title of the document on the server after being
+         uploaded.

+Returns:
+  A GDataEntry containing information about the document created on the
+  Google Documents service.
+ +
UploadPresentation(self, media_source, title)
Uploads a presentation inside of a MediaSource object to the Document
+   List feed with the given title.

+Args:
+  media_source: MediaSource The MediaSource object containing a
+      presentation file to be uploaded.
+  title: string The title of the presentation on the server after being
+      uploaded.

+Returns:
+  A GDataEntry containing information about the presentation created on the
+  Google Documents service.
+ +
UploadSpreadsheet(self, media_source, title)
Uploads a spreadsheet inside of a MediaSource object to the Document
+   List feed with the given title.

+Args:
+  media_source: MediaSource The MediaSource object containing a spreadsheet
+                file to be uploaded.
+  title: string The title of the spreadsheet on the server after being
+         uploaded.

+Returns:
+  A GDataEntry containing information about the spreadsheet created on the
+  Google Documents service.
+ +
__init__(self, email=None, password=None, source=None, server='docs.google.com', additional_headers=None)
Constructor for the DocsService.

+Args:
+  email: string (optional) The e-mail address of the account to use for
+         authentication.
+  password: string (optional) The password of the account to use for
+            authentication.
+  source: string (optional) The name of the user's application.
+  server: string (optional) The server the feed is hosted on.
+  additional_headers: dict (optional) Any additional HTTP headers to be
+                      transmitted to the service in the form of key-value
+                      pairs.

+Yields:
+  A DocsService object used to communicate with the Google Documents
+  service.
+ +
+Methods inherited from gdata.service.GDataService:
+
AuthSubTokenInfo(self)
Fetches the AuthSub token's metadata from the server.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
ClientLogin(self, username, password, account_type=None, service=None, auth_service_url=None, source=None, captcha_token=None, captcha_response=None)
Convenience method for authenticating using ProgrammaticLogin. 

+Sets values for email, password, and other optional members.

+Args:
+  username:
+  password:
+  account_type: string (optional)
+  service: string (optional)
+  auth_service_url: string (optional)
+  captcha_token: string (optional)
+  captcha_response: string (optional)
+ +
Delete(self, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4)
Deletes the entry at the given URI.

+Args:
+  uri: string The URI of the entry to be deleted. Example: 
+       '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.

+Returns:
+  True if the entry was deleted.
+ +
FetchOAuthRequestToken(self, scopes=None, extra_parameters=None)
Fetches OAuth request token and returns it.

+Args:
+  scopes: string or list of string base URL(s) of the service(s) to be
+      accessed. If None, then this method tries to determine the
+      scope(s) from the current service.
+  extra_parameters: dict (optional) key-value pairs as any additional
+      parameters to be included in the URL and signature while making a
+      request for fetching an OAuth request token. All the OAuth parameters
+      are added by default. But if provided through this argument, any
+      default parameters will be overwritten. For e.g. a default parameter
+      oauth_version 1.0 can be overwritten if
+      extra_parameters = {'oauth_version': '2.0'}
+  
+Returns:
+  The fetched request token as a gdata.auth.OAuthToken object.
+  
+Raises:
+  FetchingOAuthRequestTokenFailed if the server responded to the request
+  with an error.
+ +
GenerateAuthSubURL(self, next, scope, secure=False, session=True, domain='default')
Generate a URL at which the user will login and be redirected back.

+Users enter their credentials on a Google login page and a token is sent
+to the URL specified in next. See documentation for AuthSub login at:
+http://code.google.com/apis/accounts/docs/AuthSub.html

+Args:
+  next: string The URL user will be sent to after logging in.
+  scope: string or list of strings. The URLs of the services to be 
+         accessed.
+  secure: boolean (optional) Determines whether or not the issued token
+          is a secure token.
+  session: boolean (optional) Determines whether or not the issued token
+           can be upgraded to a session token.
+ +
GenerateOAuthAuthorizationURL(self, request_token=None, callback_url=None, extra_params=None, include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope')
Generates URL at which user will login to authorize the request token.

+Args:
+  request_token: gdata.auth.OAuthToken (optional) OAuth request token.
+      If not specified, then the current token will be used if it is of
+      type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.    
+  callback_url: string (optional) The URL user will be sent to after
+      logging in and granting access.
+  extra_params: dict (optional) Additional parameters to be sent.
+  include_scopes_in_callback: Boolean (default=False) if set to True, and
+      if 'callback_url' is present, the 'callback_url' will be modified to
+      include the scope(s) from the request token as a URL parameter. The
+      key for the 'callback' URL's scope parameter will be
+      OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as
+      a parameter to the 'callback' URL, is that the page which receives
+      the OAuth token will be able to tell which URLs the token grants
+      access to.
+  scopes_param_prefix: string (default='oauth_token_scope') The URL
+      parameter key which maps to the list of valid scopes for the token.
+      This URL parameter will be included in the callback URL along with
+      the scopes of the token as value if include_scopes_in_callback=True.
+      
+Returns:
+  A string URL at which the user is required to login.

+Raises:
+  NonOAuthToken if the user's request token is not an OAuth token or if a
+  request token was not available.
+ +
Get(self, uri, extra_headers=None, redirects_remaining=4, encoding='UTF-8', converter=None)
Query the GData API with the given URI

+The uri is the portion of the URI after the server value 
+(ex: www.google.com).

+To perform a query against Google Base, set the server to 
+'base.google.com' and set the uri to '/base/feeds/...', where ... is 
+your query. For example, to find snippets for all digital cameras uri 
+should be set to: '/base/feeds/snippets?bq=digital+camera'

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to 
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and 
+                 Authorization headers.
+  redirects_remaining: int (optional) Tracks the number of additional
+      redirects this method will allow. If the service object receives
+      a redirect and remaining is 0, it will not follow the redirect. 
+      This was added to avoid infinite redirect loops.
+  encoding: string (optional) The character encoding for the server's
+      response. Default is UTF-8
+  converter: func (optional) A function which will transform
+      the server's results before it is returned. Example: use 
+      GDataFeedFromString to parse the server response as if it
+      were a GDataFeed.

+Returns:
+  If there is no ResultsTransformer specified in the call, a GDataFeed 
+  or GDataEntry depending on which is sent from the server. If the 
+  response is niether a feed or entry and there is no ResultsTransformer,
+  return a string. If there is a ResultsTransformer, the returned value 
+  will be that of the ResultsTransformer function.
+ +
GetAuthSubToken(self)
Returns the AuthSub token as a string.

+If the token is an gdta.auth.AuthSubToken, the Authorization Label
+("AuthSub token") is removed.

+This method examines the current_token to see if it is an AuthSubToken
+or SecureAuthSubToken. If not, it searches the token_store for a token
+which matches the current scope.

+The current scope is determined by the service name string member.

+Returns:
+  If the current_token is set to an AuthSubToken/SecureAuthSubToken,
+  return the token string. If there is no current_token, a token string
+  for a token which matches the service object's default scope is returned.
+  If there are no tokens valid for the scope, returns None.
+ +
GetClientLoginToken(self)
Returns the token string for the current token or a token matching the 
+service scope.

+If the current_token is a ClientLoginToken, the token string for 
+the current token is returned. If the current_token is not set, this method
+searches for a token in the token_store which is valid for the service 
+object's current scope.

+The current scope is determined by the service name string member.
+The token string is the end of the Authorization header, it doesn not
+include the ClientLogin label.
+ +
GetEntry(self, uri, extra_headers=None)
Query the GData API with the given URI and receive an Entry.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataEntry built from the XML in the server's response.
+ +
GetFeed(self, uri, extra_headers=None, converter=<function GDataFeedFromString at 0x7f8b187bbb18>)
Query the GData API with the given URI and receive a Feed.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataFeed built from the XML in the server's response.
+ +
GetMedia(self, uri, extra_headers=None)
Returns a MediaSource containing media and its metadata from the given
+URI string.
+ +
GetNext(self, feed)
Requests the next 'page' of results in the feed.

+This method uses the feed's next link to request an additional feed
+and uses the class of the feed to convert the results of the GET request.

+Args:
+  feed: atom.Feed or a subclass. The feed should contain a next link and
+      the type of the feed will be applied to the results from the 
+      server. The new feed which is returned will be of the same class
+      as this feed which was passed in.

+Returns:
+  A new feed representing the next set of results in the server's feed.
+  The type of this feed will match that of the feed argument.
+ +
Post(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert or update  data into a GData service at the given URI.

+Args:
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri.
+  uri: string The location (feed) to which the data should be inserted.
+       Example: '/base/feeds/items'.
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the
+      server's response. Often this is a function like
+      GDataEntryFromString which will parse the body of the server's
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert data into a GData service at the given URI.

+Args:
+  verb: string, either 'POST' or 'PUT'
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri. 
+  uri: string The location (feed) to which the data should be inserted. 
+       Example: '/base/feeds/items'. 
+  extra_headers: dict (optional) HTTP headers which are to be included. 
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
ProgrammaticLogin(self, captcha_token=None, captcha_response=None)
Authenticates the user and sets the GData Auth token.

+Login retreives a temporary auth token which must be used with all
+requests to GData services. The auth token is stored in the GData client
+object.

+Login is also used to respond to a captcha challenge. If the user's login
+attempt failed with a CaptchaRequired error, the user can respond by
+calling Login with the captcha token and the answer to the challenge.

+Args:
+  captcha_token: string (optional) The identifier for the captcha challenge
+                 which was presented to the user.
+  captcha_response: string (optional) The user's answer to the captch 
+                    challenge.

+Raises:
+  CaptchaRequired if the login service will require a captcha response
+  BadAuthentication if the login service rejected the username or password
+  Error if the login service responded with a 403 different from the above
+ +
Put(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=3, media_source=None, converter=None)
Updates an entry at the given URI.

+Args:
+  data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The 
+        XML containing the updated data.
+  uri: string A URI indicating entry to which the update will be applied.
+       Example: '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the put succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
RevokeAuthSubToken(self)
Revokes an existing AuthSub token.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
RevokeOAuthToken(self)
Revokes an existing OAuth token.

+Raises:
+  NonOAuthToken if the user's auth token is not an OAuth token.
+  RevokingOAuthTokenFailed if request for revoking an OAuth token failed.
+ +
SetAuthSubToken(self, token, scopes=None, rsa_key=None)
Sets the token sent in requests to an AuthSub token.

+Sets the current_token and attempts to add the token to the token_store.

+Only use this method if you have received a token from the AuthSub
+service. The auth token is set automatically when UpgradeToSessionToken()
+is used. See documentation for Google AuthSub here:
+http://code.google.com/apis/accounts/AuthForWebApps.html 

+Args:
+ token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string
+        The token returned by the AuthSub service. If the token is an
+        AuthSubToken or SecureAuthSubToken, the scope information stored in
+        the token is used. If the token is a string, the scopes parameter is
+        used to determine the valid scopes.
+ scopes: list of URLs for which the token is valid. This is only used
+         if the token parameter is a string.
+ rsa_key: string (optional) Private key required for RSA_SHA1 signature
+          method.  This parameter is necessary if the token is a string
+          representing a secure token.
+ +
SetClientLoginToken(self, token, scopes=None)
Sets the token sent in requests to a ClientLogin token.

+This method sets the current_token to a new ClientLoginToken and it 
+also attempts to add the ClientLoginToken to the token_store.

+Only use this method if you have received a token from the ClientLogin
+service. The auth_token is set automatically when ProgrammaticLogin()
+is used. See documentation for Google ClientLogin here:
+http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html

+Args:
+  token: string or instance of a ClientLoginToken.
+ +
SetOAuthInputParameters(self, signature_method, consumer_key, consumer_secret=None, rsa_key=None, two_legged_oauth=False)
Sets parameters required for using OAuth authentication mechanism.

+NOTE: Though consumer_secret and rsa_key are optional, either of the two
+is required depending on the value of the signature_method.

+Args:
+  signature_method: class which provides implementation for strategy class
+      oauth.oauth.OAuthSignatureMethod. Signature method to be used for
+      signing each request. Valid implementations are provided as the
+      constants defined by gdata.auth.OAuthSignatureMethod. Currently
+      they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
+      gdata.auth.OAuthSignatureMethod.HMAC_SHA1
+  consumer_key: string Domain identifying third_party web application.
+  consumer_secret: string (optional) Secret generated during registration.
+      Required only for HMAC_SHA1 signature method.
+  rsa_key: string (optional) Private key required for RSA_SHA1 signature
+      method.
+  two_legged_oauth: string (default=False) Enables two-legged OAuth process.
+ +
SetOAuthToken(self, oauth_token)
Attempts to set the current token and add it to the token store.

+The oauth_token can be any OAuth token i.e. unauthorized request token,
+authorized request token or access token.
+This method also attempts to add the token to the token store.
+Use this method any time you want the current token to point to the
+oauth_token passed. For e.g. call this method with the request token
+you receive from FetchOAuthRequestToken.

+Args:
+  request_token: gdata.auth.OAuthToken OAuth request token.
+ +
UpgradeToOAuthAccessToken(self, authorized_request_token=None, oauth_version='1.0')
Upgrades the authorized request token to an access token.

+Args:
+  authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request
+      token. If not specified, then the current token will be used if it is
+      of type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.
+  oauth_version: str (default='1.0') oauth_version parameter. All other
+      'oauth_' parameters are added by default. This parameter too, is
+      added by default but here you can override it's value.
+      
+Raises:
+  NonOAuthToken if the user's authorized request token is not an OAuth
+  token or if an authorized request token was not available.
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
UpgradeToSessionToken(self, token=None)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         (optional) which is good for a single use but can be upgraded
+         to a session token. If no token is passed in, the token
+         is found by looking in the token_store by looking for a token
+         for the current scope.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
upgrade_to_session_token(self, token)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         which is good for a single use but can be upgraded to a
+         session token.

+Returns:
+  The upgraded token as a gdata.auth.AuthSubToken object.

+Raises:
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
+Data descriptors inherited from gdata.service.GDataService:
+
captcha_token
+
Get the captcha token for a login request.
+
+
captcha_url
+
Get the captcha URL for a login request.
+
+
source
+
The source is the name of the application making the request. 
+It should be in the form company_id-app_name-app_version
+
+
+Data and other attributes inherited from gdata.service.GDataService:
+
auth_token = None
+ +
handler = None
+ +
tokens = None
+ +
+Methods inherited from atom.service.AtomService:
+
UseBasicAuth(self, username, password, for_proxy=False)
Sets an Authenticaiton: Basic HTTP header containing plaintext.

+Deprecated, use use_basic_auth instead.

+The username and password are base64 encoded and added to an HTTP header
+which will be included in each request. Note that your username and 
+password are sent in plaintext.

+Args:
+  username: str
+  password: str
+ +
request(self, operation, url, data=None, headers=None, url_params=None)
+ +
use_basic_auth(self, username, password, scopes=None)
+ +
+Data descriptors inherited from atom.service.AtomService:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
debug
+
If True, HTTP debug information is printed.
+
+
override_token
+
+
+Data and other attributes inherited from atom.service.AtomService:
+
auto_set_current_token = True
+ +
auto_store_tokens = True
+ +
current_token = None
+ +
port = 80
+ +
ssl = False
+ +

+ + + + + + + +
 
+class DocumentQuery(gdata.service.Query)
   Object used to construct a URI to query the Google Document List feed
 
 
Method resolution order:
+
DocumentQuery
+
gdata.service.Query
+
__builtin__.dict
+
__builtin__.object
+
+
+Methods defined here:
+
AddNamedFolder(self, email, folder_name)
Adds a named folder category, qualified by a schema.

+This function lets you query for documents that are contained inside a
+named folder without fear of collision with other categories.

+Args:
+  email: string The email of the user who owns the folder.
+  folder_name: string The name of the folder.

+  Returns:
+    The string of the category that was added to the object.
+ +
RemoveNamedFolder(self, email, folder_name)
Removes a named folder category, qualified by a schema.

+Args:
+  email: string The email of the user who owns the folder.
+  folder_name: string The name of the folder.

+  Returns:
+    The string of the category that was removed to the object.
+ +
ToUri(self)
Generates a URI from the query parameters set in the object.

+Returns:
+  A string containing the URI used to retrieve entries from the Document
+  List feed.
+ +
__init__(self, feed='/feeds/documents', visibility='private', projection='full', text_query=None, params=None, categories=None)
Constructor for Document List Query

+Args:
+  feed: string (optional) The path for the feed. (e.g. '/feeds/documents')
+  visibility: string (optional) The visibility chosen for the current feed.
+  projection: string (optional) The projection chosen for the current feed.
+  text_query: string (optional) The contents of the q query parameter. This
+              string is URL escaped upon conversion to a URI.
+  params: dict (optional) Parameter value string pairs which become URL
+          params when translated to a URI. These parameters are added to
+          the query's items.
+  categories: list (optional) List of category strings which should be
+          included as query categories. See gdata.service.Query for
+          additional documentation.

+Yields:
+  A DocumentQuery object used to construct a URI based on the Document
+  List feed.
+ +
+Methods inherited from gdata.service.Query:
+
__str__(self)
+ +
+Data descriptors inherited from gdata.service.Query:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
alt
+
The feed query's alt parameter
+
+
author
+
The feed query's author parameter
+
+
max_results
+
The feed query's max-results parameter
+
+
orderby
+
The feed query's orderby parameter
+
+
published_max
+
The feed query's published-max parameter
+
+
published_min
+
The feed query's published-min parameter
+
+
start_index
+
The feed query's start-index parameter
+
+
text_query
+
The feed query's q parameter
+
+
updated_max
+
The feed query's updated-max parameter
+
+
updated_min
+
The feed query's updated-min parameter
+
+
+Methods inherited from __builtin__.dict:
+
__cmp__(...)
x.__cmp__(y) <==> cmp(x,y)
+ +
__contains__(...)
D.__contains__(k) -> True if D has a key k, else False
+ +
__delitem__(...)
x.__delitem__(y) <==> del x[y]
+ +
__eq__(...)
x.__eq__(y) <==> x==y
+ +
__ge__(...)
x.__ge__(y) <==> x>=y
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__gt__(...)
x.__gt__(y) <==> x>y
+ +
__hash__(...)
x.__hash__() <==> hash(x)
+ +
__iter__(...)
x.__iter__() <==> iter(x)
+ +
__le__(...)
x.__le__(y) <==> x<=y
+ +
__len__(...)
x.__len__() <==> len(x)
+ +
__lt__(...)
x.__lt__(y) <==> x<y
+ +
__ne__(...)
x.__ne__(y) <==> x!=y
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setitem__(...)
x.__setitem__(i, y) <==> x[i]=y
+ +
clear(...)
D.clear() -> None.  Remove all items from D.
+ +
copy(...)
D.copy() -> a shallow copy of D
+ +
get(...)
D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.
+ +
has_key(...)
D.has_key(k) -> True if D has a key k, else False
+ +
items(...)
D.items() -> list of D's (key, value) pairs, as 2-tuples
+ +
iteritems(...)
D.iteritems() -> an iterator over the (key, value) items of D
+ +
iterkeys(...)
D.iterkeys() -> an iterator over the keys of D
+ +
itervalues(...)
D.itervalues() -> an iterator over the values of D
+ +
keys(...)
D.keys() -> list of D's keys
+ +
pop(...)
D.pop(k[,d]) -> v, remove specified key and return the corresponding value
+If key is not found, d is returned if given, otherwise KeyError is raised
+ +
popitem(...)
D.popitem() -> (k, v), remove and return some (key, value) pair as a
+2-tuple; but raise KeyError if D is empty
+ +
setdefault(...)
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+ +
update(...)
D.update(E, **F) -> None.  Update D from E and F: for k in E: D[k] = E[k]
+(if E has keys else: for (k, v) in E: D[k] = v) then: for k in F: D[k] = F[k]
+ +
values(...)
D.values() -> list of D's values
+ +
+Data and other attributes inherited from __builtin__.dict:
+
__new__ = <built-in method __new__ of type object at 0x72b260>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
fromkeys = <built-in method fromkeys of type object at 0xc486a0>
dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
+v defaults to None.
+ +

+ + + + + +
 
+Data
       DATA_KIND_SCHEME = 'http://schemas.google.com/g/2005#kind'
+DOCUMENT_KIND_TERM = 'http://schemas.google.com/docs/2007#document'
+PRESENTATION_KIND_TERM = 'http://schemas.google.com/docs/2007#presentation'
+SPREADSHEET_KIND_TERM = 'http://schemas.google.com/docs/2007#spreadsheet'
+SUPPORTED_FILETYPES = {'CSV': 'text/csv', 'DOC': 'application/msword', 'HTM': 'text/html', 'HTML': 'text/html', 'ODS': 'application/x-vnd.oasis.opendocument.spreadsheet', 'ODT': 'application/vnd.oasis.opendocument.text', 'PPS': 'application/vnd.ms-powerpoint', 'PPT': 'application/vnd.ms-powerpoint', 'RTF': 'application/rtf', 'SXW': 'application/vnd.sun.xml.writer', ...}
+__author__ = 'api.jfisher (Jeff Fisher)'

+ + + + + +
 
+Author
       api.jfisher (Jeff Fisher)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.exif.html b/gdata.py-1.2.3/pydocs/gdata.exif.html new file mode 100644 index 0000000..30f95a0 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.exif.html @@ -0,0 +1,759 @@ + + +Python: package gdata.exif + + + + +
 
+ 
gdata.exif
index
/usr/local/svn/gdata-python-client/src/gdata/exif/__init__.py
+

This module maps elements from the {EXIF} namespace[1] to GData objects. 
+These elements describe image data, using exif attributes[2].

+Picasa Web Albums uses the exif namespace to represent Exif data encoded 
+in a photo [3].

+Picasa Web Albums uses the following exif elements:
+exif:distance
+exif:exposure
+exif:flash
+exif:focallength
+exif:fstop
+exif:imageUniqueID
+exif:iso
+exif:make
+exif:model
+exif:tags
+exif:time

+[1]: http://schemas.google.com/photos/exif/2007
+[2]: http://en.wikipedia.org/wiki/Exif
+[3]: http://code.google.com/apis/picasaweb/reference.html#exif_reference

+

+ + + + + +
 
+Package Contents
       

+ + + + + +
 
+Classes
       
+
atom.AtomBase(atom.ExtensionContainer) +
+
+
ExifBaseElement +
+
+
Distance +
Exposure +
Flash +
Focallength +
Fstop +
ImageUniqueID +
Iso +
Make +
Model +
Tags +
Time +
+
+
+
+
+

+ + + + + + + +
 
+class Distance(ExifBaseElement)
   (float) The distance to the subject, e.g. 0.0
 
 
Method resolution order:
+
Distance
+
ExifBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from ExifBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class ExifBaseElement(atom.AtomBase)
    
Method resolution order:
+
ExifBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Exposure(ExifBaseElement)
   (float) The exposure time used, e.g. 0.025 or 8.0E4
 
 
Method resolution order:
+
Exposure
+
ExifBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from ExifBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Flash(ExifBaseElement)
   (string) Boolean value indicating whether the flash was used.
+The .text attribute will either be `true' or `false'

+As a convenience, this object's .bool method will return what you want,
+so you can say:

+flash_used = bool(Flash)
 
 
Method resolution order:
+
Flash
+
ExifBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__bool__(self)
+ +
+Methods inherited from ExifBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Focallength(ExifBaseElement)
   (float) The focal length used, e.g. 23.7
 
 
Method resolution order:
+
Focallength
+
ExifBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from ExifBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Fstop(ExifBaseElement)
   (float) The fstop value used, e.g. 5.0
 
 
Method resolution order:
+
Fstop
+
ExifBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from ExifBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class ImageUniqueID(ExifBaseElement)
   (string) The unique image ID for the photo. Generated by Google Photo servers
 
 
Method resolution order:
+
ImageUniqueID
+
ExifBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from ExifBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Iso(ExifBaseElement)
   (int) The iso equivalent value used, e.g. 200
 
 
Method resolution order:
+
Iso
+
ExifBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from ExifBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Make(ExifBaseElement)
   (string) The make of the camera used, e.g. Fictitious Camera Company
 
 
Method resolution order:
+
Make
+
ExifBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from ExifBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Model(ExifBaseElement)
   (string) The model of the camera used,e.g AMAZING-100D
 
 
Method resolution order:
+
Model
+
ExifBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from ExifBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Tags(ExifBaseElement)
   The container for all exif elements.
+The <exif:tags> element can appear as a child of a photo entry.
 
 
Method resolution order:
+
Tags
+
ExifBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Time(ExifBaseElement)
   (int) The date/time the photo was taken, e.g. 1180294337000.
+Represented as the number of milliseconds since January 1st, 1970.

+The value of this element will always be identical to the value
+of the <gphoto:timestamp>.

+Look at this object's .isoformat() for a human friendly datetime string:

+photo_epoch = Time.text # 1180294337000
+photo_isostring = Time.isoformat() # '2007-05-27T19:32:17.000Z'

+Alternatively: 
+photo_datetime = Time.datetime() # (requires python >= 2.3)
 
 
Method resolution order:
+
Time
+
ExifBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
datetime(self)
(datetime.datetime) Return the timestamp as a datetime.datetime object

+Requires python 2.3
+ +
isoformat(self)
(string) Return the timestamp as a ISO 8601 formatted string,
+e.g. '2007-05-27T19:32:17.000Z'
+ +
+Methods inherited from ExifBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Functions
       
DistanceFromString(xml_string)
+
ExposureFromString(xml_string)
+
FlashFromString(xml_string)
+
FocallengthFromString(xml_string)
+
FstopFromString(xml_string)
+
ImageUniqueIDFromString(xml_string)
+
IsoFromString(xml_string)
+
MakeFromString(xml_string)
+
ModelFromString(xml_string)
+
TagsFromString(xml_string)
+
TimeFromString(xml_string)
+

+ + + + + +
 
+Data
       EXIF_NAMESPACE = 'http://schemas.google.com/photos/exif/2007'
+__author__ = u'havard@gulldahl.no'
+__license__ = 'Apache License v2'

+ + + + + +
 
+Author
       havard@gulldahl.no
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.geo.html b/gdata.py-1.2.3/pydocs/gdata.geo.html new file mode 100644 index 0000000..172eee5 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.geo.html @@ -0,0 +1,339 @@ + + +Python: package gdata.geo + + + + +
 
+ 
gdata.geo
index
/usr/local/svn/gdata-python-client/src/gdata/geo/__init__.py
+

Picasa Web Albums uses the georss and gml namespaces for 
+elements defined in the GeoRSS and Geography Markup Language specifications.

+Specifically, Picasa Web Albums uses the following elements:

+georss:where
+gml:Point
+gml:pos

+http://code.google.com/apis/picasaweb/reference.html#georss_reference


+Picasa Web Albums also accepts geographic-location data in two other formats:
+W3C format and plain-GeoRSS (without GML) format.

+

+ + + + + +
 
+Package Contents
       

+ + + + + +
 
+Classes
       
+
atom.AtomBase(atom.ExtensionContainer) +
+
+
GeoBaseElement +
+
+
Point +
Pos +
Where +
+
+
+
+
+

+ + + + + + + +
 
+class GeoBaseElement(atom.AtomBase)
   Base class for elements.

+To add new elements, you only need to add the element tag name to self._tag
+and the namespace to self._namespace
 
 
Method resolution order:
+
GeoBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Point(GeoBaseElement)
   (container)  Specifies a particular geographical point, by means of
+a <gml:pos> element.
 
 
Method resolution order:
+
Point
+
GeoBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, pos=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Pos(GeoBaseElement)
   (string) Specifies a latitude and longitude, separated by a space,
+e.g. `35.669998 139.770004'
 
 
Method resolution order:
+
Pos
+
GeoBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from GeoBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Where(GeoBaseElement)
   (container) Specifies a geographical location or region.
+A container element, containing a single <gml:Point> element.
+(Not to be confused with <gd:where>.) 

+Note that the (only) child attribute, .Point, is title-cased.
+This reflects the names of elements in the xml stream
+(principle of least surprise).

+As a convenience, you can get a tuple of (lat, lon) with Where.location(),
+and set the same data with Where.setLocation( (lat, lon) ).

+Similarly, there are methods to set and get only latitude and longitude.
 
 
Method resolution order:
+
Where
+
GeoBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, point=None, extension_elements=None, extension_attributes=None, text=None)
+ +
latitude(self)
(float) Get the latitude value of the geo-tag. See also .location()
+ +
location(self)
(float, float) Return Where.Point.pos.text as a (lat,lon) tuple
+ +
longitude(self)
(float) Get the longtitude value of the geo-tag. See also .location()
+ +
longtitude = longitude(self)
+ +
set_latitude(self, lat)
(bool) Set the latitude value of the geo-tag.

+Args:
+lat (float): The new latitude value

+See also .set_location()
+ +
set_location(self, latlon)
(bool) Set Where.Point.pos.text from a (lat,lon) tuple.

+Arguments:
+lat (float): The latitude in degrees, from -90.0 to 90.0
+lon (float): The longitude in degrees, from -180.0 to 180.0

+Returns True on success.
+ +
set_longitude(self, lon)
(bool) Set the longtitude value of the geo-tag.

+Args:
+lat (float): The new latitude value

+See also .set_location()
+ +
set_longtitude = set_longitude(self, lon)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Functions
       
PointFromString(xml_string)
+
PosFromString(xml_string)
+
WhereFromString(xml_string)
+

+ + + + + +
 
+Data
       GEORSS_NAMESPACE = 'http://www.georss.org/georss'
+GEO_NAMESPACE = 'http://www.w3.org/2003/01/geo/wgs84_pos#'
+GML_NAMESPACE = 'http://www.opengis.net/gml'
+__author__ = u'havard@gulldahl.no'
+__license__ = 'Apache License v2'

+ + + + + +
 
+Author
       havard@gulldahl.no
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.html b/gdata.py-1.2.3/pydocs/gdata.html new file mode 100644 index 0000000..4341f70 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.html @@ -0,0 +1,1507 @@ + + +Python: package gdata + + + + +
 
+ 
gdata
index
/usr/local/svn/gdata-python-client/src/gdata/__init__.py
+

Contains classes representing Google Data elements.

+Extends Atom classes to add Google Data specific elements.

+

+ + + + + +
 
+Package Contents
       
Crypto (package)
+alt (package)
+apps (package)
+auth
+base (package)
+blogger (package)
+
calendar (package)
+client
+codesearch (package)
+contacts (package)
+docs (package)
+exif (package)
+
geo (package)
+media (package)
+oauth (package)
+photos (package)
+service
+spreadsheet (package)
+
test_data
+tlslite (package)
+urlfetch
+webmastertools (package)
+youtube (package)
+

+ + + + + +
 
+Classes
       
+
__builtin__.object +
+
+
MediaSource +
+
+
atom.AtomBase(atom.ExtensionContainer) +
+
+
BatchId +
BatchInterrupted +
BatchOperation +
BatchStatus +
EntryLink +
ExtendedProperty +
FeedLink +
ItemsPerPage +
StartIndex +
TotalResults +
+
+
atom.Entry(atom.FeedEntryParent) +
+
+
GDataEntry(atom.Entry, LinkFinder) +
+
+
BatchEntry +
+
+
+
+
atom.Feed(atom.Source) +
+
+
GDataFeed(atom.Feed, LinkFinder) +
+
+
BatchFeed +
+
+
+
+
atom.LinkFinder(__builtin__.object) +
+
+
LinkFinder +
+
+
GDataEntry(atom.Entry, LinkFinder) +
+
+
BatchEntry +
+
+
GDataFeed(atom.Feed, LinkFinder) +
+
+
BatchFeed +
+
+
+
+
+
+
exceptions.Exception(exceptions.BaseException) +
+
+
Error +
+
+
MissingRequiredParameters +
+
+
+
+
+

+ + + + + + + +
 
+class BatchEntry(GDataEntry)
   An atom:entry for use in batch requests.

+The BatchEntry contains additional members to specify the operation to be
+performed on this entry and a batch ID so that the server can reference
+individual operations in the response feed. For more information, see:
+http://code.google.com/apis/gdata/batch.html
 
 
Method resolution order:
+
BatchEntry
+
GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, contributor=None, atom_id=None, link=None, published=None, rights=None, source=None, summary=None, control=None, title=None, updated=None, batch_operation=None, batch_id=None, batch_status=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class BatchFeed(GDataFeed)
   A feed containing a list of batch request entries.
 
 
Method resolution order:
+
BatchFeed
+
GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
AddBatchEntry(self, entry=None, id_url_string=None, batch_id_string=None, operation_string=None)
Logic for populating members of a BatchEntry and adding to the feed.


+If the entry is not a BatchEntry, it is converted to a BatchEntry so
+that the batch specific members will be present. 

+The id_url_string can be used in place of an entry if the batch operation
+applies to a URL. For example query and delete operations require just
+the URL of an entry, no body is sent in the HTTP request. If an
+id_url_string is sent instead of an entry, a BatchEntry is created and
+added to the feed.

+This method also assigns the desired batch id to the entry so that it 
+can be referenced in the server's response. If the batch_id_string is
+None, this method will assign a batch_id to be the index at which this
+entry will be in the feed's entry list.

+Args:
+  entry: BatchEntry, atom.Entry, or another Entry flavor (optional) The
+      entry which will be sent to the server as part of the batch request.
+      The item must have a valid atom id so that the server knows which 
+      entry this request references.
+  id_url_string: str (optional) The URL of the entry to be acted on. You
+      can find this URL in the text member of the atom id for an entry.
+      If an entry is not sent, this id will be used to construct a new
+      BatchEntry which will be added to the request feed.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. Note that batch_ids should either always be specified or
+      never, mixing could potentially result in duplicate batch ids.
+  operation_string: str (optional) The desired batch operation which will
+      set the batch_operation.type member of the entry. Options are
+      'insert', 'update', 'delete', and 'query'

+Raises:
+  MissingRequiredParameters: Raised if neither an id_ url_string nor an
+      entry are provided in the request.

+Returns:
+  The added entry.
+ +
AddDelete(self, url_string=None, entry=None, batch_id_string=None)
Adds a delete request to the batch request feed.

+This method takes either the url_string which is the atom id of the item
+to be deleted, or the entry itself. The atom id of the entry must be 
+present so that the server knows which entry should be deleted.

+Args:
+  url_string: str (optional) The URL of the entry to be deleted. You can
+     find this URL in the text member of the atom id for an entry. 
+  entry: BatchEntry (optional) The entry to be deleted.
+  batch_id_string: str (optional)

+Raises:
+  MissingRequiredParameters: Raised if neither a url_string nor an entry 
+      are provided in the request.
+ +
AddInsert(self, entry, batch_id_string=None)
Add an insert request to the operations in this batch request feed.

+If the entry doesn't yet have an operation or a batch id, these will
+be set to the insert operation and a batch_id specified as a parameter.

+Args:
+  entry: BatchEntry The entry which will be sent in the batch feed as an
+      insert request.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. Note that batch_ids should either always be specified or
+      never, mixing could potentially result in duplicate batch ids.
+ +
AddQuery(self, url_string=None, entry=None, batch_id_string=None)
Adds a query request to the batch request feed.

+This method takes either the url_string which is the query URL 
+whose results will be added to the result feed. The query URL will
+be encapsulated in a BatchEntry, and you may pass in the BatchEntry
+with a query URL instead of sending a url_string.

+Args:
+  url_string: str (optional)
+  entry: BatchEntry (optional)
+  batch_id_string: str (optional)

+Raises:
+  MissingRequiredParameters
+ +
AddUpdate(self, entry, batch_id_string=None)
Add an update request to the list of batch operations in this feed.

+Sets the operation type of the entry to insert if it is not already set
+and assigns the desired batch id to the entry so that it can be 
+referenced in the server's response.

+Args:
+  entry: BatchEntry The entry which will be sent to the server as an
+      update (HTTP PUT) request. The item must have a valid atom id
+      so that the server knows which entry to replace.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. See also comments for AddInsert.
+ +
GetBatchLink(self)
+ +
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, interrupted=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Data descriptors inherited from GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + +
 
+class BatchId(atom.AtomBase)
    
Method resolution order:
+
BatchId
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class BatchInterrupted(atom.AtomBase)
   The batch:interrupted element sent if batch request was interrupted.

+Only appears in a feed if some of the batch entries could not be processed.
+See: http://code.google.com/apis/gdata/batch.html#Handling_Errors
 
 
Method resolution order:
+
BatchInterrupted
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, reason=None, success=None, failures=None, parsed=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class BatchOperation(atom.AtomBase)
    
Method resolution order:
+
BatchOperation
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, op_type=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class BatchStatus(atom.AtomBase)
   The batch:status element present in a batch response entry.

+A status element contains the code (HTTP response code) and 
+reason as elements. In a single request these fields would
+be part of the HTTP response, but in a batch request each
+Entry operation has a corresponding Entry in the response
+feed which includes status information.

+See http://code.google.com/apis/gdata/batch.html#Handling_Errors
 
 
Method resolution order:
+
BatchStatus
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, code=None, reason=None, content_type=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class EntryLink(atom.AtomBase)
   The gd:entryLink element
 
 
Method resolution order:
+
EntryLink
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, href=None, read_only=None, rel=None, entry=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class Error(exceptions.Exception)
    
Method resolution order:
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors defined here:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + + + +
 
+class ExtendedProperty(atom.AtomBase)
   The Google Data extendedProperty element.

+Used to store arbitrary key-value information specific to your
+application. The value can either be a text string stored as an XML 
+attribute (.value), or an XML node (XmlBlob) as a child element.

+This element is used in the Google Calendar data API and the Google
+Contacts data API.
 
 
Method resolution order:
+
ExtendedProperty
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
GetXmlBlobExtensionElement(self)
Returns the XML blob as an atom.ExtensionElement.

+Returns:
+  An atom.ExtensionElement representing the blob's XML, or None if no
+  blob was set.
+ +
GetXmlBlobString(self)
Returns the XML blob as a string.

+Returns:
+  A string containing the blob's XML, or None if no blob was set.
+ +
SetXmlBlob(self, blob)
Sets the contents of the extendedProperty to XML as a child node.

+Since the extendedProperty is only allowed one child element as an XML
+blob, setting the XML blob will erase any preexisting extension elements
+in this object.

+Args:
+  blob: str, ElementTree Element or atom.ExtensionElement representing
+        the XML blob stored in the extendedProperty.
+ +
__init__(self, name=None, value=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class FeedLink(atom.AtomBase)
   The gd:feedLink element
 
 
Method resolution order:
+
FeedLink
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, count_hint=None, href=None, read_only=None, rel=None, feed=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class GDataEntry(atom.Entry, LinkFinder)
   Extends Atom Entry to provide data processing
 
 
Method resolution order:
+
GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors defined here:
+
id
+
+
+Methods inherited from atom.Entry:
+
__init__(self, author=None, category=None, content=None, contributor=None, atom_id=None, link=None, published=None, rights=None, source=None, summary=None, control=None, title=None, updated=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for atom:entry

+Args:
+  author: list A list of Author instances which belong to this class.
+  category: list A list of Category instances
+  content: Content The entry's Content
+  contributor: list A list on Contributor instances
+  id: Id The entry's Id element
+  link: list A list of Link instances
+  published: Published The entry's Published element
+  rights: Rights The entry's Rights element
+  source: Source the entry's source element
+  summary: Summary the entry's summary element
+  title: Title the entry's title element
+  updated: Updated the entry's updated element
+  control: The entry's app:control element which can be used to mark an 
+      entry as a draft which should not be publicly viewable.
+  text: String The text contents of the element. This is the contents
+      of the Entry's XML text node. (Example: <foo>This is the text</foo>)
+  extension_elements: list A list of ExtensionElement instances which are
+      children of this element.
+  extension_attributes: dict A dictionary of strings which are the values
+      for additional XML attributes of this element.
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class GDataFeed(atom.Feed, LinkFinder)
   Feed from a GData service
 
 
Method resolution order:
+
GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors defined here:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class ItemsPerPage(atom.AtomBase)
   The opensearch:itemsPerPage element in GData feed
 
 
Method resolution order:
+
ItemsPerPage
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class LinkFinder(atom.LinkFinder)
   An "interface" providing methods to find link elements

+GData Entry elements often contain multiple links which differ in the rel
+attribute or content type. Often, developers are interested in a specific
+type of link so this class provides methods to find specific classes of
+links.

+This class is used as a mixin in GData entries.
 
 
Method resolution order:
+
LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +
+Data descriptors inherited from atom.LinkFinder:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class MediaSource(__builtin__.object)
   GData Entries can refer to media sources, so this class provides a
+place to store references to these objects along with some metadata.
 
 Methods defined here:
+
__init__(self, file_handle=None, content_type=None, content_length=None, file_path=None, file_name=None)
Creates an object of type MediaSource.

+Args:
+  file_handle: A file handle pointing to the file to be encapsulated in the
+               MediaSource
+  content_type: string The MIME type of the file. Required if a file_handle
+                is given.
+  content_length: int The size of the file. Required if a file_handle is
+                  given.
+  file_path: string (optional) A full path name to the file. Used in
+                place of a file_handle.
+  file_name: string The name of the file without any path information.
+             Required if a file_handle is given.
+ +
setFile(self, file_name, content_type)
A helper function which can create a file handle from a given filename
+and set the content type and length all at once.

+Args:
+  file_name: string The path and file name to the file containing the media
+  content_type: string A MIME type representing the type of the media
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class MissingRequiredParameters(Error)
    
Method resolution order:
+
MissingRequiredParameters
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + + + +
 
+class StartIndex(atom.AtomBase)
   The opensearch:startIndex element in GData feed
 
 
Method resolution order:
+
StartIndex
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class TotalResults(atom.AtomBase)
   opensearch:TotalResults for a GData feed
 
 
Method resolution order:
+
TotalResults
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Functions
       
BatchEntryFromString(xml_string)
+
BatchFeedFromString(xml_string)
+
BatchIdFromString(xml_string)
+
BatchInterruptedFromString(xml_string)
+
BatchOperationFromString(xml_string)
+
BatchStatusFromString(xml_string)
+
EntryLinkFromString(xml_string)
+
ExtendedPropertyFromString(xml_string)
+
FeedLinkFromString(xml_string)
+
GDataEntryFromString(xml_string)
Creates a new GDataEntry instance given a string of XML.
+
GDataFeedFromString(xml_string)
+
ItemsPerPageFromString(xml_string)
+
StartIndexFromString(xml_string)
+
TotalResultsFromString(xml_string)
+

+ + + + + +
 
+Data
       BATCH_DELETE = 'delete'
+BATCH_INSERT = 'insert'
+BATCH_NAMESPACE = 'http://schemas.google.com/gdata/batch'
+BATCH_QUERY = 'query'
+BATCH_UPDATE = 'update'
+GACL_NAMESPACE = 'http://schemas.google.com/acl/2007'
+GACL_TEMPLATE = '{http://schemas.google.com/acl/2007}%s'
+GDATA_NAMESPACE = 'http://schemas.google.com/g/2005'
+GDATA_TEMPLATE = '{http://schemas.google.com/g/2005}%s'
+OPENSEARCH_NAMESPACE = 'http://a9.com/-/spec/opensearchrss/1.0/'
+OPENSEARCH_TEMPLATE = '{http://a9.com/-/spec/opensearchrss/1.0/}%s'
+__author__ = 'api.jscudder (Jeffrey Scudder)'

+ + + + + +
 
+Author
       api.jscudder (Jeffrey Scudder)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.media.html b/gdata.py-1.2.3/pydocs/gdata.media.html new file mode 100644 index 0000000..ffb062c --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.media.html @@ -0,0 +1,794 @@ + + +Python: package gdata.media + + + + +
 
+ 
gdata.media
index
/usr/local/svn/gdata-python-client/src/gdata/media/__init__.py
+

Essential attributes of photos in Google Photos/Picasa Web Albums are 
+expressed using elements from the `media' namespace, defined in the 
+MediaRSS specification[1].

+Due to copyright issues, the elements herein are documented sparingly, please 
+consult with the Google Photos API Reference Guide[2], alternatively the 
+official MediaRSS specification[1] for details. 
+(If there is a version conflict between the two sources, stick to the 
+Google Photos API).

+[1]: http://search.yahoo.com/mrss (version 1.1.1)
+[2]: http://code.google.com/apis/picasaweb/reference.html#media_reference

+Keep in mind that Google Photos only uses a subset of the MediaRSS elements 
+(and some of the attributes are trimmed down, too): 

+media:content
+media:credit
+media:description
+media:group
+media:keywords
+media:thumbnail
+media:title

+

+ + + + + +
 
+Package Contents
       

+ + + + + +
 
+Classes
       
+
atom.AtomBase(atom.ExtensionContainer) +
+
+
Duration +
MediaBaseElement +
+
+
Category +
Content +
Credit +
Description +
Group +
Keywords +
Player +
Thumbnail +
Title +
+
+
Private +
+
+
+

+ + + + + + + +
 
+class Category(MediaBaseElement)
   The mediagroup:category element
 
 
Method resolution order:
+
Category
+
MediaBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, term=None, scheme=None, label=None, text=None, extension_elements=None, extension_attributes=None)
Constructor for Category

+Args:
+  term: str
+  scheme: str
+  label: str
+  text: str The text data in the this element
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Content(MediaBaseElement)
   (attribute container) This element describes the original content,
+  e.g. an image or a video. There may be multiple Content elements
+  in a media:Group.

+  For example, a video may have a
+  <media:content medium="image"> element that specifies a JPEG
+  representation of the video, and a <media:content medium="video">
+  element that specifies the URL of the video itself.

+Attributes:
+url: non-ambigous reference to online object
+width: width of the object frame, in pixels
+height: width of the object frame, in pixels
+medium: one of `image' or `video', allowing the api user to quickly
+  determine the object's type
+type: Internet media Type[1] (a.k.a. mime type) of the object -- a more
+  verbose way of determining the media type
+(optional) fileSize: the size of the object, in bytes

+[1]: http://en.wikipedia.org/wiki/Internet_media_type
 
 
Method resolution order:
+
Content
+
MediaBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, url=None, width=None, height=None, medium=None, content_type=None, fileSize=None, format=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Credit(MediaBaseElement)
   (string) Contains the nickname of the user who created the content,
+e.g. `Liz Bennet'.

+This is a user-specified value that should be used when referring to
+the user by name.

+Note that none of the attributes from the MediaRSS spec are supported.
 
 
Method resolution order:
+
Credit
+
MediaBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from MediaBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Description(MediaBaseElement)
   (string) A description of the media object.
+Either plain unicode text, or entity-encoded html (look at the `type'
+attribute).

+E.g `A set of photographs I took while vacationing in Italy.'

+For `api' projections, the description is in plain text;
+for `base' projections, the description is in HTML.

+Attributes:
+type: either `text' or `html'.
 
 
Method resolution order:
+
Description
+
MediaBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, description_type=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Duration(atom.AtomBase)
   The YouTube Duration element
 
 
Method resolution order:
+
Duration
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Group(MediaBaseElement)
   Container element for all media elements.
+The <media:group> element can appear as a child of an album, photo or 
+video entry.
 
 
Method resolution order:
+
Group
+
MediaBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, content=None, credit=None, description=None, keywords=None, thumbnail=None, title=None, duration=None, private=None, category=None, player=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Keywords(MediaBaseElement)
   (string) Lists the tags associated with the entry,
+e.g `italy, vacation, sunset'.

+Contains a comma-separated list of tags that have been added to the photo, or
+all tags that have been added to photos in the album.
 
 
Method resolution order:
+
Keywords
+
MediaBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from MediaBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class MediaBaseElement(atom.AtomBase)
   Base class for elements in the MEDIA_NAMESPACE. 
+To add new elements, you only need to add the element tag name to self._tag
 
 
Method resolution order:
+
MediaBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Player(MediaBaseElement)
   (string) Contains the embeddable player URL for the entry's media content 
+if the media is a video.

+Attributes:
+url: Always set to plain
 
 
Method resolution order:
+
Player
+
MediaBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, player_url=None, extension_attributes=None, extension_elements=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Private(atom.AtomBase)
   The YouTube Private element
 
 
Method resolution order:
+
Private
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Thumbnail(MediaBaseElement)
   (attributes) Contains the URL of a thumbnail of a photo or album cover.

+There can be multiple <media:thumbnail> elements for a given <media:group>; 
+for example, a given item may have multiple thumbnails at different sizes. 
+Photos generally have two thumbnails at different sizes; 
+albums generally have one cropped thumbnail.  
+  
+If the thumbsize parameter is set to the initial query, this element points 
+to thumbnails of the requested sizes; otherwise the thumbnails are the 
+default thumbnail size. 

+This element must not be confused with the <gphoto:thumbnail> element.

+Attributes:
+url:  The URL of the thumbnail image.
+height:  The height of the thumbnail image, in pixels.
+width:  The width of the thumbnail image, in pixels.
 
 
Method resolution order:
+
Thumbnail
+
MediaBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, url=None, width=None, height=None, extension_attributes=None, text=None, extension_elements=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Title(MediaBaseElement)
   (string) Contains the title of the entry's media content, in plain text.

+Attributes:
+type: Always set to plain
 
 
Method resolution order:
+
Title
+
MediaBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, title_type=None, extension_attributes=None, text=None, extension_elements=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Functions
       
ContentFromString(xml_string)
+
CreditFromString(xml_string)
+
DescriptionFromString(xml_string)
+
GroupFromString(xml_string)
+
KeywordsFromString(xml_string)
+
ThumbnailFromString(xml_string)
+
TitleFromString(xml_string)
+

+ + + + + +
 
+Data
       MEDIA_NAMESPACE = 'http://search.yahoo.com/mrss/'
+YOUTUBE_NAMESPACE = 'http://gdata.youtube.com/schemas/2007'
+__author__ = u'havard@gulldahl.no'
+__license__ = 'Apache License v2'

+ + + + + +
 
+Author
       havard@gulldahl.no
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.photos.html b/gdata.py-1.2.3/pydocs/gdata.photos.html new file mode 100644 index 0000000..7214cbd --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.photos.html @@ -0,0 +1,4128 @@ + + +Python: package gdata.photos + + + + +
 
+ 
gdata.photos (version 164)
index
/usr/local/svn/gdata-python-client/src/gdata/photos/__init__.py
+

This module provides a pythonic, gdata-centric interface to Google Photos
+(a.k.a. Picasa Web Services.

+It is modelled after the gdata/* interfaces from the gdata-python-client
+project[1] by Google. 

+You'll find the user-friendly api in photos.service. Please see the
+documentation or live help() system for available methods.

+[1]: http://gdata-python-client.googlecode.com/

+

+ + + + + +
 
+Package Contents
       
service
+

+ + + + + +
 
+Classes
       
+
__builtin__.object +
+
+
AlbumData +
GPhotosBaseData +
+
+
CommentData +
TagData +
UserData +
+
+
PhotoData +
+
+
atom.AtomBase(atom.ExtensionContainer) +
+
+
PhotosBaseElement +
+
+
Access +
Albumid +
BytesUsed +
Checksum +
Client +
CommentCount +
CommentingEnabled +
Height +
Id +
Location +
MaxPhotosPerAlbum +
Name +
Nickname +
Numphotos +
Numphotosremaining +
Photoid +
Position +
Quotacurrent +
Quotalimit +
Rotation +
Size +
Snippet +
Snippettype +
Thumbnail +
Timestamp +
Truncated +
User +
Version +
Weight +
Width +
+
+
+
+
atom.Author(atom.Person) +
+
+
CommentAuthor +
+
+
gdata.GDataEntry(atom.Entry, gdata.LinkFinder) +
+
+
GPhotosBaseEntry(gdata.GDataEntry, gdata.LinkFinder) +
+
+
AlbumEntry(GPhotosBaseEntry, AlbumData) +
CommentEntry(GPhotosBaseEntry, CommentData) +
PhotoEntry(GPhotosBaseEntry, PhotoData) +
PhotosBaseEntry +
TagEntry(GPhotosBaseEntry, TagData) +
UserEntry(GPhotosBaseEntry, UserData) +
+
+
+
+
gdata.GDataFeed(atom.Feed, gdata.LinkFinder) +
+
+
GPhotosBaseFeed(gdata.GDataFeed, gdata.LinkFinder) +
+
+
AlbumFeed(GPhotosBaseFeed, AlbumData) +
CommentFeed(GPhotosBaseFeed, CommentData) +
PhotoFeed(GPhotosBaseFeed, PhotoData) +
PhotosBaseFeed +
TagFeed(GPhotosBaseFeed, TagData) +
UserFeed(GPhotosBaseFeed, UserData) +
+
+
+
+
gdata.LinkFinder(atom.LinkFinder) +
+
+
GPhotosBaseEntry(gdata.GDataEntry, gdata.LinkFinder) +
+
+
AlbumEntry(GPhotosBaseEntry, AlbumData) +
CommentEntry(GPhotosBaseEntry, CommentData) +
PhotoEntry(GPhotosBaseEntry, PhotoData) +
PhotosBaseEntry +
TagEntry(GPhotosBaseEntry, TagData) +
UserEntry(GPhotosBaseEntry, UserData) +
+
+
GPhotosBaseFeed(gdata.GDataFeed, gdata.LinkFinder) +
+
+
AlbumFeed(GPhotosBaseFeed, AlbumData) +
CommentFeed(GPhotosBaseFeed, CommentData) +
PhotoFeed(GPhotosBaseFeed, PhotoData) +
PhotosBaseFeed +
TagFeed(GPhotosBaseFeed, TagData) +
UserFeed(GPhotosBaseFeed, UserData) +
+
+
+
+
+

+ + + + + + + +
 
+class Access(PhotosBaseElement)
   The Google Photo `Access' element.

+The album's access level. Valid values are `public' or `private'.
+In documentation, access level is also referred to as `visibility.'
 
 
Method resolution order:
+
Access
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class AlbumData(__builtin__.object)
    Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Data and other attributes defined here:
+
access = None
+ +
bytesUsed = None
+ +
commentCount = None
+ +
commentingEnabled = None
+ +
gphoto_id = None
+ +
location = None
+ +
name = None
+ +
nickname = None
+ +
numphotos = None
+ +
numphotosremaining = None
+ +
timestamp = None
+ +
user = None
+ +

+ + + + + + + +
 
+class AlbumEntry(GPhotosBaseEntry, AlbumData)
   All metadata for a Google Photos Album

+Take a look at AlbumData for metadata accessible as attributes to this object.

+Notes:
+  To avoid name clashes, and to create a more sensible api, some
+  objects have names that differ from the original elements:

+  o media:group -> self.media,
+  o geo:where -> self.geo,
+  o photo:id -> self.gphoto_id
 
 
Method resolution order:
+
AlbumEntry
+
GPhotosBaseEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
AlbumData
+
__builtin__.object
+
+
+Methods defined here:
+
GetAlbumId(self)
Return the id of this album
+ +
GetCommentsUri(self)
(string) Return the uri to this albums feed of the CommentEntry kind
+ +
GetPhotosUri(self)
(string) Return the uri to this albums feed of the PhotoEntry kind
+ +
GetTagsUri(self)
(string) Return the uri to this albums feed of the TagEntry kind
+ +
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, gphoto_id=None, name=None, location=None, access=None, timestamp=None, numphotos=None, user=None, nickname=None, commentingEnabled=None, commentCount=None, thumbnail=None, media=None, geo=None, extended_property=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Data and other attributes defined here:
+
geo = <gdata.geo.Where object at 0xa78350>
+ +
media = <gdata.media.Group object at 0xa782d0>
+ +
+Methods inherited from GPhotosBaseEntry:
+
kind(self)
(string) Returns the kind
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +
+Data and other attributes inherited from AlbumData:
+
access = None
+ +
bytesUsed = None
+ +
commentCount = None
+ +
commentingEnabled = None
+ +
gphoto_id = None
+ +
location = None
+ +
name = None
+ +
nickname = None
+ +
numphotos = None
+ +
numphotosremaining = None
+ +
timestamp = None
+ +
user = None
+ +

+ + + + + + + +
 
+class AlbumFeed(GPhotosBaseFeed, AlbumData)
   All metadata for a Google Photos Album, including its sub-elements

+This feed represents an album as the container for other objects.

+A Album feed contains entries of
+PhotoEntryCommentEntry or TagEntry,
+depending on the `kind' parameter in the original query.

+Take a look at AlbumData for accessible attributes.
 
 
Method resolution order:
+
AlbumFeed
+
GPhotosBaseFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
AlbumData
+
__builtin__.object
+
+
+Methods defined here:
+
GetCommentsUri(self)
(string) Return the uri to the same feed, but of the CommentEntry kind
+ +
GetPhotosUri(self)
(string) Return the uri to the same feed, but of the PhotoEntry kind
+ +
GetTagsUri(self)
(string) Return the uri to the same feed, but of the TagEntry kind
+ +
+Methods inherited from GPhotosBaseFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
+ +
kind(self)
(string) Returns the kind
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +
+Data and other attributes inherited from AlbumData:
+
access = None
+ +
bytesUsed = None
+ +
commentCount = None
+ +
commentingEnabled = None
+ +
gphoto_id = None
+ +
location = None
+ +
name = None
+ +
nickname = None
+ +
numphotos = None
+ +
numphotosremaining = None
+ +
timestamp = None
+ +
user = None
+ +

+ + + + + + + +
 
+class Albumid(PhotosBaseElement)
   The Google Photo `Albumid' element
 
 
Method resolution order:
+
Albumid
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class BytesUsed(PhotosBaseElement)
   The Google Photo `BytesUsed' element
 
 
Method resolution order:
+
BytesUsed
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Checksum(PhotosBaseElement)
   The Google Photo `Checksum' element
 
 
Method resolution order:
+
Checksum
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Client(PhotosBaseElement)
   The Google Photo `Client' element
 
 
Method resolution order:
+
Client
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class CommentAuthor(atom.Author)
   The Atom `Author' element in CommentEntry entries is augmented to
+contain elements from the PHOTOS_NAMESPACE

+http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38
 
 
Method resolution order:
+
CommentAuthor
+
atom.Author
+
atom.Person
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.Author:
+
__init__(self, name=None, email=None, uri=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Author

+Args:
+  name: Name
+  email: Email
+  uri: Uri
+  extension_elements: list A  list of ExtensionElement instances
+  extension_attributes: dict A dictionary of attribute value string pairs
+  text: str The text data in the this element
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class CommentCount(PhotosBaseElement)
   The Google Photo `CommentCount' element
 
 
Method resolution order:
+
CommentCount
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class CommentData(GPhotosBaseData)
    
Method resolution order:
+
CommentData
+
GPhotosBaseData
+
__builtin__.object
+
+
+Data and other attributes defined here:
+
albumid = None
+ +
author = None
+ +
gphoto_id = None
+ +
photoid = None
+ +
+Data descriptors inherited from GPhotosBaseData:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class CommentEntry(GPhotosBaseEntry, CommentData)
   All metadata for a Google Photos Comment

+The comment is stored in the .content.text attribute,
+with a content type in .content.type.
 
 
Method resolution order:
+
CommentEntry
+
GPhotosBaseEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
CommentData
+
GPhotosBaseData
+
__builtin__.object
+
+
+Methods defined here:
+
GetAlbumUri(self)
Return the uri to the AlbumEntry containing this comment
+ +
GetCommentId(self)
Return the globally unique id of this comment
+ +
GetPhotoUri(self)
Return the uri to the PhotoEntry containing this comment
+ +
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, gphoto_id=None, albumid=None, photoid=None, extended_property=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from GPhotosBaseEntry:
+
kind(self)
(string) Returns the kind
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +
+Data and other attributes inherited from CommentData:
+
albumid = None
+ +
author = None
+ +
gphoto_id = None
+ +
photoid = None
+ +

+ + + + + + + +
 
+class CommentFeed(GPhotosBaseFeed, CommentData)
   All metadata for a Google Photos Comment, including its sub-elements
 
 
Method resolution order:
+
CommentFeed
+
GPhotosBaseFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
CommentData
+
GPhotosBaseData
+
__builtin__.object
+
+
+Methods inherited from GPhotosBaseFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
+ +
kind(self)
(string) Returns the kind
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +
+Data and other attributes inherited from CommentData:
+
albumid = None
+ +
author = None
+ +
gphoto_id = None
+ +
photoid = None
+ +

+ + + + + + + +
 
+class CommentingEnabled(PhotosBaseElement)
   The Google Photo `CommentingEnabled' element
 
 
Method resolution order:
+
CommentingEnabled
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class GPhotosBaseData(__builtin__.object)
    Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class GPhotosBaseEntry(gdata.GDataEntry, gdata.LinkFinder)
   Base class for all Entry elements in gdata.photos
 
 
Method resolution order:
+
GPhotosBaseEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, extended_property=None, extension_elements=None, extension_attributes=None, text=None)
+ +
kind(self)
(string) Returns the kind
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class GPhotosBaseFeed(gdata.GDataFeed, gdata.LinkFinder)
   Base class for all Feeds in gdata.photos
 
 
Method resolution order:
+
GPhotosBaseFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
+ +
kind(self)
(string) Returns the kind
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class Height(PhotosBaseElement)
   The Google Photo `Height' element
 
 
Method resolution order:
+
Height
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Id(PhotosBaseElement)
   The Google Photo `Id' element
 
 
Method resolution order:
+
Id
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Location(PhotosBaseElement)
   The Google Photo `Location' element
 
 
Method resolution order:
+
Location
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class MaxPhotosPerAlbum(PhotosBaseElement)
   The Google Photo `MaxPhotosPerAlbum' element
 
 
Method resolution order:
+
MaxPhotosPerAlbum
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Name(PhotosBaseElement)
   The Google Photo `Name' element
 
 
Method resolution order:
+
Name
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Nickname(PhotosBaseElement)
   The Google Photo `Nickname' element
 
 
Method resolution order:
+
Nickname
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Numphotos(PhotosBaseElement)
   The Google Photo `Numphotos' element
 
 
Method resolution order:
+
Numphotos
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Numphotosremaining(PhotosBaseElement)
   The Google Photo `Numphotosremaining' element
 
 
Method resolution order:
+
Numphotosremaining
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class PhotoData(__builtin__.object)
    Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Data and other attributes defined here:
+
albumid = None
+ +
checksum = None
+ +
client = None
+ +
commentCount = None
+ +
commentingEnabled = None
+ +
geo = <gdata.geo.Where object at 0xa78490>
+ +
gphoto_id = None
+ +
height = None
+ +
media = <gdata.media.Group object at 0xa78450>
+ +
position = None
+ +
rotation = None
+ +
size = None
+ +
snippet = None
+ +
snippettype = None
+ +
tags = <gdata.exif.Tags object at 0xa78590>
+ +
timestamp = None
+ +
truncated = None
+ +
version = None
+ +
width = None
+ +

+ + + + + + + +
 
+class PhotoEntry(GPhotosBaseEntry, PhotoData)
   All metadata for a Google Photos Photo

+Take a look at PhotoData for metadata accessible as attributes to this object.

+Notes:
+  To avoid name clashes, and to create a more sensible api, some
+  objects have names that differ from the original elements:

+  o media:group -> self.media,
+  o exif:tags -> self.exif,
+  o geo:where -> self.geo,
+  o photo:id -> self.gphoto_id
 
 
Method resolution order:
+
PhotoEntry
+
GPhotosBaseEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
PhotoData
+
__builtin__.object
+
+
+Methods defined here:
+
GetAlbumUri(self)
Return the uri to the AlbumEntry containing this photo
+ +
GetCommentsUri(self)
Return the uri to this photo's feed of CommentEntry comments
+ +
GetPostLink(self)
Return the uri to this photo's `POST' link (use it for updates of the object)
+ +
GetTagsUri(self)
Return the uri to this photo's feed of TagEntry tags
+ +
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, text=None, gphoto_id=None, albumid=None, checksum=None, client=None, height=None, position=None, rotation=None, size=None, timestamp=None, version=None, width=None, commentCount=None, commentingEnabled=None, media=None, exif=None, geo=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from GPhotosBaseEntry:
+
kind(self)
(string) Returns the kind
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +
+Data and other attributes inherited from PhotoData:
+
albumid = None
+ +
checksum = None
+ +
client = None
+ +
commentCount = None
+ +
commentingEnabled = None
+ +
geo = <gdata.geo.Where object at 0xa78490>
+ +
gphoto_id = None
+ +
height = None
+ +
media = <gdata.media.Group object at 0xa78450>
+ +
position = None
+ +
rotation = None
+ +
size = None
+ +
snippet = None
+ +
snippettype = None
+ +
tags = <gdata.exif.Tags object at 0xa78590>
+ +
timestamp = None
+ +
truncated = None
+ +
version = None
+ +
width = None
+ +

+ + + + + + + +
 
+class PhotoFeed(GPhotosBaseFeed, PhotoData)
   All metadata for a Google Photos Photo, including its sub-elements

+This feed represents a photo as the container for other objects.

+A Photo feed contains entries of
+CommentEntry or TagEntry,
+depending on the `kind' parameter in the original query.

+Take a look at PhotoData for metadata accessible as attributes to this object.
 
 
Method resolution order:
+
PhotoFeed
+
GPhotosBaseFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
PhotoData
+
__builtin__.object
+
+
+Methods defined here:
+
GetCommentsUri(self)
(string) Return the uri to the same feed, but of the CommentEntry kind
+ +
GetTagsUri(self)
(string) Return the uri to the same feed, but of the TagEntry kind
+ +
+Methods inherited from GPhotosBaseFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
+ +
kind(self)
(string) Returns the kind
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +
+Data and other attributes inherited from PhotoData:
+
albumid = None
+ +
checksum = None
+ +
client = None
+ +
commentCount = None
+ +
commentingEnabled = None
+ +
geo = <gdata.geo.Where object at 0xa78490>
+ +
gphoto_id = None
+ +
height = None
+ +
media = <gdata.media.Group object at 0xa78450>
+ +
position = None
+ +
rotation = None
+ +
size = None
+ +
snippet = None
+ +
snippettype = None
+ +
tags = <gdata.exif.Tags object at 0xa78590>
+ +
timestamp = None
+ +
truncated = None
+ +
version = None
+ +
width = None
+ +

+ + + + + + + +
 
+class Photoid(PhotosBaseElement)
   The Google Photo `Photoid' element
 
 
Method resolution order:
+
Photoid
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class PhotosBaseElement(atom.AtomBase)
   Base class for elements in the PHOTO_NAMESPACE. To add new elements,
+you only need to add the element tag name to self._tag
 
 
Method resolution order:
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class PhotosBaseEntry(GPhotosBaseEntry)
    
Method resolution order:
+
PhotosBaseEntry
+
GPhotosBaseEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from GPhotosBaseEntry:
+
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, extended_property=None, extension_elements=None, extension_attributes=None, text=None)
+ +
kind(self)
(string) Returns the kind
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + +
 
+class PhotosBaseFeed(GPhotosBaseFeed)
    
Method resolution order:
+
PhotosBaseFeed
+
GPhotosBaseFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from GPhotosBaseFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
+ +
kind(self)
(string) Returns the kind
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class Position(PhotosBaseElement)
   The Google Photo `Position' element
 
 
Method resolution order:
+
Position
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Quotacurrent(PhotosBaseElement)
   The Google Photo `Quotacurrent' element
 
 
Method resolution order:
+
Quotacurrent
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Quotalimit(PhotosBaseElement)
   The Google Photo `Quotalimit' element
 
 
Method resolution order:
+
Quotalimit
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Rotation(PhotosBaseElement)
   The Google Photo `Rotation' element
 
 
Method resolution order:
+
Rotation
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Size(PhotosBaseElement)
   The Google Photo `Size' element
 
 
Method resolution order:
+
Size
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Snippet(PhotosBaseElement)
   The Google Photo `snippet' element.

+When searching, the snippet element will contain a 
+string with the word you're looking for, highlighted in html markup
+E.g. when your query is `hafjell', this element may contain:
+`... here at <b>Hafjell</b>.'

+You'll find this element in searches -- that is, feeds that combine the 
+`kind=photo' and `q=yoursearch' parameters in the request.

+See also gphoto:truncated and gphoto:snippettype.
 
 
Method resolution order:
+
Snippet
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Snippettype(PhotosBaseElement)
   The Google Photo `Snippettype' element

+When searching, this element will tell you the type of element that matches.

+You'll find this element in searches -- that is, feeds that combine the 
+`kind=photo' and `q=yoursearch' parameters in the request.

+See also gphoto:snippet and gphoto:truncated.

+Possible values and their interpretation: 
+o ALBUM_TITLE       - The album title matches 
+o PHOTO_TAGS        - The match is a tag/keyword
+o PHOTO_DESCRIPTION - The match is in the photo's description

+If you discover a value not listed here, please submit a patch to update this docstring.
 
 
Method resolution order:
+
Snippettype
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class TagData(GPhotosBaseData)
    
Method resolution order:
+
TagData
+
GPhotosBaseData
+
__builtin__.object
+
+
+Data and other attributes defined here:
+
weight = None
+ +
+Data descriptors inherited from GPhotosBaseData:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class TagEntry(GPhotosBaseEntry, TagData)
   All metadata for a Google Photos Tag

+The actual tag is stored in the .title.text attribute
 
 
Method resolution order:
+
TagEntry
+
GPhotosBaseEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
TagData
+
GPhotosBaseData
+
__builtin__.object
+
+
+Methods defined here:
+
GetAlbumUri(self)
Return the uri to the AlbumEntry containing this tag
+ +
GetPhotoUri(self)
Return the uri to the PhotoEntry containing this tag
+ +
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, weight=None, extended_property=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from GPhotosBaseEntry:
+
kind(self)
(string) Returns the kind
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +
+Data and other attributes inherited from TagData:
+
weight = None
+ +

+ + + + + + + +
 
+class TagFeed(GPhotosBaseFeed, TagData)
   All metadata for a Google Photos Tag, including its sub-elements
 
 
Method resolution order:
+
TagFeed
+
GPhotosBaseFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
TagData
+
GPhotosBaseData
+
__builtin__.object
+
+
+Methods inherited from GPhotosBaseFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
+ +
kind(self)
(string) Returns the kind
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +
+Data and other attributes inherited from TagData:
+
weight = None
+ +

+ + + + + + + +
 
+class Thumbnail(PhotosBaseElement)
   The Google Photo `Thumbnail' element

+Used to display user's photo thumbnail (hackergotchi).

+(Not to be confused with the <media:thumbnail> element, which gives you
+small versions of the photo object.)
 
 
Method resolution order:
+
Thumbnail
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Timestamp(PhotosBaseElement)
   The Google Photo `Timestamp' element
+Represented as the number of milliseconds since January 1st, 1970.


+Take a look at the convenience methods .isoformat() and .datetime():

+photo_epoch     = Time.text        # 1180294337000
+photo_isostring = Time.isoformat() # '2007-05-27T19:32:17.000Z'

+Alternatively: 
+photo_datetime  = Time.datetime()  # (requires python >= 2.3)
 
 
Method resolution order:
+
Timestamp
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
datetime(self)
(datetime.datetime) Return the timestamp as a datetime.datetime object

+Requires python 2.3
+ +
isoformat(self)
(string) Return the timestamp as a ISO 8601 formatted string,
+e.g. '2007-05-27T19:32:17.000Z'
+ +
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Truncated(PhotosBaseElement)
   The Google Photo `Truncated' element

+You'll find this element in searches -- that is, feeds that combine the 
+`kind=photo' and `q=yoursearch' parameters in the request.

+See also gphoto:snippet and gphoto:snippettype.

+Possible values and their interpretation:
+0 -- unknown
 
 
Method resolution order:
+
Truncated
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class User(PhotosBaseElement)
   The Google Photo `User' element
 
 
Method resolution order:
+
User
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class UserData(GPhotosBaseData)
    
Method resolution order:
+
UserData
+
GPhotosBaseData
+
__builtin__.object
+
+
+Data and other attributes defined here:
+
gphoto_id = None
+ +
maxPhotosPerAlbum = None
+ +
nickname = None
+ +
quotacurrent = None
+ +
quotalimit = None
+ +
thumbnail = None
+ +
user = None
+ +
+Data descriptors inherited from GPhotosBaseData:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class UserEntry(GPhotosBaseEntry, UserData)
   All metadata for a Google Photos User

+This entry represents an album owner and all appropriate metadata.

+Take a look at at the attributes of the UserData for metadata available.
 
 
Method resolution order:
+
UserEntry
+
GPhotosBaseEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
UserData
+
GPhotosBaseData
+
__builtin__.object
+
+
+Methods defined here:
+
GetAlbumsUri(self)
(string) Return the uri to this user's feed of the AlbumEntry kind
+ +
GetCommentsUri(self)
(string) Return the uri to this user's feed of the CommentEntry kind
+ +
GetPhotosUri(self)
(string) Return the uri to this user's feed of the PhotoEntry kind
+ +
GetTagsUri(self)
(string) Return the uri to this user's feed of the TagEntry kind
+ +
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, gphoto_id=None, maxPhotosPerAlbum=None, nickname=None, quotalimit=None, quotacurrent=None, thumbnail=None, user=None, extended_property=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from GPhotosBaseEntry:
+
kind(self)
(string) Returns the kind
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +
+Data and other attributes inherited from UserData:
+
gphoto_id = None
+ +
maxPhotosPerAlbum = None
+ +
nickname = None
+ +
quotacurrent = None
+ +
quotalimit = None
+ +
thumbnail = None
+ +
user = None
+ +

+ + + + + + + +
 
+class UserFeed(GPhotosBaseFeed, UserData)
   Feed for a User in the google photos api.

+This feed represents a user as the container for other objects.

+A User feed contains entries of
+AlbumEntryPhotoEntryCommentEntryUserEntry or TagEntry,
+depending on the `kind' parameter in the original query.

+The user feed itself also contains all of the metadata available
+as part of a UserData object.
 
 
Method resolution order:
+
UserFeed
+
GPhotosBaseFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
UserData
+
GPhotosBaseData
+
__builtin__.object
+
+
+Methods defined here:
+
GetAlbumsUri(self)
Get the uri to this feed, but with entries of the AlbumEntry kind.
+ +
GetCommentsUri(self)
Get the uri to this feed, but with entries of the CommentsEntry kind.
+ +
GetPhotosUri(self)
Get the uri to this feed, but with entries of the PhotosEntry kind.
+ +
GetTagsUri(self)
Get the uri to this feed, but with entries of the TagEntry kind.
+ +
+Methods inherited from GPhotosBaseFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
+ +
kind(self)
(string) Returns the kind
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +
+Data and other attributes inherited from UserData:
+
gphoto_id = None
+ +
maxPhotosPerAlbum = None
+ +
nickname = None
+ +
quotacurrent = None
+ +
quotalimit = None
+ +
thumbnail = None
+ +
user = None
+ +

+ + + + + + + +
 
+class Version(PhotosBaseElement)
   The Google Photo `Version' element
 
 
Method resolution order:
+
Version
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Weight(PhotosBaseElement)
   The Google Photo `Weight' element.

+The weight of the tag is the number of times the tag
+appears in the collection of tags currently being viewed.
+The default weight is 1, in which case this tags is omitted.
 
 
Method resolution order:
+
Weight
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Width(PhotosBaseElement)
   The Google Photo `Width' element
 
 
Method resolution order:
+
Width
+
PhotosBaseElement
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from PhotosBaseElement:
+
__init__(self, name=None, extension_elements=None, extension_attributes=None, text=None)
+ +
__int__(self)
+ +
bool(self)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Functions
       
AccessFromString(xml_string)
+
AlbumEntryFromString(xml_string)
+
AlbumFeedFromString(xml_string)
+
AlbumidFromString(xml_string)
+
AnyEntryFromString(xml_string)
Creates an instance of the appropriate entry class from the
+  xml string contents.

+Args:
+  xml_string: str A string which contains valid XML. The root element
+      of the XML string should match the tag and namespace of the desired
+      class.

+Returns:
+  An instance of the target class with members assigned according to the
+  contents of the XML - or a basic gdata.GDataEndry instance if it is
+  impossible to determine the appropriate class (look for extra elements
+  in GDataEntry's .FindExtensions() and extension_elements[] ).
+
AnyFeedFromString(xml_string)
Creates an instance of the appropriate feed class from the
+  xml string contents.

+Args:
+  xml_string: str A string which contains valid XML. The root element
+      of the XML string should match the tag and namespace of the desired
+      class.

+Returns:
+  An instance of the target class with members assigned according to the
+  contents of the XML - or a basic gdata.GDataFeed instance if it is
+  impossible to determine the appropriate class (look for extra elements
+  in GDataFeed's .FindExtensions() and extension_elements[] ).
+
BytesUsedFromString(xml_string)
+
ChecksumFromString(xml_string)
+
ClientFromString(xml_string)
+
CommentAuthorFromString(xml_string)
+
CommentCountFromString(xml_string)
+
CommentEntryFromString(xml_string)
+
CommentFeedFromString(xml_string)
+
CommentingEnabledFromString(xml_string)
+
HeightFromString(xml_string)
+
IdFromString(xml_string)
+
LocationFromString(xml_string)
+
MaxPhotosPerAlbumFromString(xml_string)
+
NameFromString(xml_string)
+
NicknameFromString(xml_string)
+
NumphotosFromString(xml_string)
+
NumphotosremainingFromString(xml_string)
+
PhotoEntryFromString(xml_string)
+
PhotoFeedFromString(xml_string)
+
PhotoidFromString(xml_string)
+
PositionFromString(xml_string)
+
QuotacurrentFromString(xml_string)
+
QuotalimitFromString(xml_string)
+
RotationFromString(xml_string)
+
SizeFromString(xml_string)
+
SnippetFromString(xml_string)
+
SnippettypeFromString(xml_string)
+
TagEntryFromString(xml_string)
+
TagFeedFromString(xml_string)
+
ThumbnailFromString(xml_string)
+
TimestampFromString(xml_string)
+
TruncatedFromString(xml_string)
+
UserEntryFromString(xml_string)
+
UserFeedFromString(xml_string)
+
UserFromString(xml_string)
+
VersionFromString(xml_string)
+
WeightFromString(xml_string)
+
WidthFromString(xml_string)
+

+ + + + + +
 
+Data
       BATCH_NAMESPACE = 'http://schemas.google.com/gdata/batch'
+EXIF_NAMESPACE = 'http://schemas.google.com/photos/exif/2007'
+GEORSS_NAMESPACE = 'http://www.georss.org/georss'
+GEO_NAMESPACE = 'http://www.w3.org/2003/01/geo/wgs84_pos#'
+GML_NAMESPACE = 'http://www.opengis.net/gml'
+MEDIA_NAMESPACE = 'http://search.yahoo.com/mrss/'
+OPENSEARCH_NAMESPACE = 'http://a9.com/-/spec/opensearchrss/1.0/'
+PHEED_NAMESPACE = 'http://www.pheed.com/pheed/'
+PHOTOS_NAMESPACE = 'http://schemas.google.com/photos/2007'
+__author__ = u'havard@gulldahl.no'
+__license__ = 'Apache License v2'
+__version__ = '164'

+ + + + + +
 
+Author
       havard@gulldahl.no
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.photos.service.html b/gdata.py-1.2.3/pydocs/gdata.photos.service.html new file mode 100644 index 0000000..a545716 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.photos.service.html @@ -0,0 +1,989 @@ + + +Python: module gdata.photos.service + + + + +
 
+ 
gdata.photos.service (version 176)
index
/usr/local/svn/gdata-python-client/src/gdata/photos/service.py
+

Google PhotoService provides a human-friendly interface to
+Google Photo (a.k.a Picasa Web) services[1].

+It extends gdata.service.GDataService and as such hides all the
+nasty details about authenticating, parsing and communicating with
+Google Photos. 

+[1]: http://code.google.com/apis/picasaweb/gdata.html

+Example:
+  import gdata.photos, gdata.photos.service
+  pws = gdata.photos.service.PhotosService()
+  pws.ClientLogin(username, password)
+  #Get all albums
+  albums = pws.GetUserFeed().entry
+  # Get all photos in second album
+  photos = pws.GetFeed(albums[1].GetPhotosUri()).entry
+  # Get all tags for photos in second album and print them
+  tags = pws.GetFeed(albums[1].GetTagsUri()).entry
+  print [ tag.summary.text for tag in tags ]
+  # Get all comments for the first photos in list and print them
+  comments = pws.GetCommentFeed(photos[0].GetCommentsUri()).entry
+  print [ c.summary.text for c in comments ]

+  # Get a photo to work with
+  photo = photos[0]
+  # Update metadata

+  # Attributes from the <gphoto:*> namespace
+  photo.summary.text = u'A nice view from my veranda'
+  photo.title.text = u'Verandaview.jpg'

+  # Attributes from the <media:*> namespace
+  photo.media.keywords.text = u'Home, Long-exposure, Sunset' # Comma-separated

+  # Adding attributes to media object

+  # Rotate 90 degrees clockwise
+  photo.rotation = gdata.photos.Rotation(text='90') 

+  # Submit modified photo object
+  photo = pws.UpdatePhotoMetadata(photo)
+  
+  # Make sure you only modify the newly returned object, else you'll get
+  # versioning errors. See Optimistic-concurrency

+  # Add comment to a picture
+  comment = pws.InsertComment(photo, u'I wish the water always was this warm')

+  # Remove comment because it was silly
+  print "*blush*"
+  pws.Delete(comment.GetEditLink().href)

+

+ + + + + +
 
+Modules
       
StringIO
+atom
+
gdata
+os
+
sys
+time
+

+ + + + + +
 
+Classes
       
+
exceptions.Exception(exceptions.BaseException) +
+
+
GooglePhotosException +
+
+
gdata.service.GDataService(atom.service.AtomService) +
+
+
PhotosService +
+
+
+

+ + + + + +
 
+class GooglePhotosException(exceptions.Exception)
    
Method resolution order:
+
GooglePhotosException
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, response)
+ +
+Data descriptors defined here:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+class PhotosService(gdata.service.GDataService)
    
Method resolution order:
+
PhotosService
+
gdata.service.GDataService
+
atom.service.AtomService
+
__builtin__.object
+
+
+Methods defined here:
+
Delete(self, object_or_uri, *args, **kwargs)
Delete an object.

+Re-implementing the GDataService.Delete method, to add some
+convenience.

+Arguments:
+object_or_uri: Any object that has a GetEditLink() method that
+  returns a link, or a uri to that object.

+Returns:
+? or GooglePhotosException on errors
+ +
GetContacts(self, user='default', limit=None)
Retrieve a feed that contains a list of your contacts

+Arguments:
+user: Username of the user whose contacts you want

+Returns
+gdata.photos.UserFeed, with UserEntry entries

+See:
+http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38
+ +
GetEntry(self, uri, limit=None, start_index=None)
Get an Entry.

+Arguments:
+uri: the uri to the entry
+limit (optional): the maximum number of entries to return. Defaults to what
+  the server returns.

+Returns:
+one of gdata.photos.AlbumEntry,
+       gdata.photos.UserEntry,
+       gdata.photos.PhotoEntry,
+       gdata.photos.CommentEntry,
+       gdata.photos.TagEntry,
+  depending on the results of the query.
+Raises:
+GooglePhotosException
+ +
GetFeed(self, uri, limit=None, start_index=None)
Get a feed.

+ The results are ordered by the values of their `updated' elements,
+ with the most recently updated entry appearing first in the feed.

+Arguments:
+uri: the uri to fetch
+limit (optional): the maximum number of entries to return. Defaults to what
+  the server returns.

+Returns:
+one of gdata.photos.AlbumFeed,
+       gdata.photos.UserFeed,
+       gdata.photos.PhotoFeed,
+       gdata.photos.CommentFeed,
+       gdata.photos.TagFeed,
+  depending on the results of the query.
+Raises:
+GooglePhotosException

+See:
+http://code.google.com/apis/picasaweb/gdata.html#Get_Album_Feed_Manual
+ +
GetTaggedPhotos(self, tag, user='default', limit=None)
Get all photos belonging to a specific user, tagged by the given keyword

+Arguments:
+tag: The tag you're looking for, e.g. `dog'
+user (optional): Whose images/videos you want to search, defaults
+  to current user
+limit (optional): the maximum number of entries to return.
+  Defaults to everything the server returns.

+Returns:
+gdata.photos.UserFeed containing PhotoEntry elements
+ +
GetUserFeed(self, kind='album', user='default', limit=None)
Get user-based feed, containing albums, photos, comments or tags;
+  defaults to albums.

+The entries are ordered by the values of their `updated' elements,
+with the most recently updated entry appearing first in the feed.

+Arguments:
+kind: the kind of entries to get, either `album', `photo',
+  `comment' or `tag', or a python list of these. Defaults to `album'.
+user (optional): whose albums we're querying. Defaults to current user.
+limit (optional): the maximum number of entries to return.
+  Defaults to everything the server returns.


+Returns:
+gdata.photos.UserFeed, containing appropriate Entry elements

+See:
+http://code.google.com/apis/picasaweb/gdata.html#Get_Album_Feed_Manual
+http://googledataapis.blogspot.com/2007/07/picasa-web-albums-adds-new-api-features.html
+ +
InsertAlbum(self, title, summary, location=None, access='public', commenting_enabled='true', timestamp=None)
Add an album.

+Needs authentication, see ClientLogin()

+Arguments:
+title: Album title 
+summary: Album summary / description
+access (optional): `private' or `public'. Public albums are searchable
+  by everyone on the internet. Defaults to `public'
+commenting_enabled (optional): `true' or `false'. Defaults to `true'.
+timestamp (optional): A date and time for the album, in milliseconds since
+  Unix epoch[1] UTC. Defaults to now.

+Returns:
+The newly created gdata.photos.AlbumEntry

+See:
+http://code.google.com/apis/picasaweb/gdata.html#Add_Album_Manual_Installed

+[1]: http://en.wikipedia.org/wiki/Unix_epoch
+ +
InsertComment(self, photo_or_uri, comment)
Add a comment to a photo.

+Needs authentication, see ClientLogin()

+Arguments:
+photo_or_uri: a gdata.photos.PhotoEntry that is about to be commented
+  , or a `post' uri pointing to it
+(string) comment: The actual comment

+Returns:
+The new gdata.photos.CommentEntry

+Example:
+p = GetFeed(PhotoUri)
+tag = InsertComment(p, 'OOOH! I would have loved to be there.
+  Who's that in the back?')
+ +
InsertPhoto(self, album_or_uri, photo, filename_or_handle, content_type='image/jpeg')
Add a PhotoEntry

+Needs authentication, see ClientLogin()

+Arguments:
+album_or_uri: AlbumFeed or uri of the album where the photo should go
+photo: PhotoEntry to add
+filename_or_handle: A file-like object or file name where the image/video
+  will be read from
+content_type (optional): Internet media type (a.k.a. mime type) of
+  media object. Currently Google Photos supports these types:
+   o image/bmp
+   o image/gif
+   o image/jpeg
+   o image/png
+   
+  Images will be converted to jpeg on upload. Defaults to `image/jpeg'
+ +
InsertPhotoSimple(self, album_or_uri, title, summary, filename_or_handle, content_type='image/jpeg', keywords=None)
Add a photo without constructing a PhotoEntry.

+Needs authentication, see ClientLogin()

+Arguments:
+album_or_uri: AlbumFeed or uri of the album where the photo should go
+title: Photo title
+summary: Photo summary / description
+filename_or_handle: A file-like object or file name where the image/video
+  will be read from
+content_type (optional): Internet media type (a.k.a. mime type) of
+  media object. Currently Google Photos supports these types:
+   o image/bmp
+   o image/gif
+   o image/jpeg
+   o image/png
+   
+  Images will be converted to jpeg on upload. Defaults to `image/jpeg'
+keywords (optional): a 1) comma separated string or 2) a python list() of
+  keywords (a.k.a. tags) to add to the image.
+  E.g. 1) `dog, vacation, happy' 2) ['dog', 'happy', 'vacation']

+Returns:
+The newly created gdata.photos.PhotoEntry or GooglePhotosException on errors

+See:
+http://code.google.com/apis/picasaweb/gdata.html#Add_Album_Manual_Installed
+[1]: http://en.wikipedia.org/wiki/Unix_epoch
+ +
InsertTag(self, photo_or_uri, tag)
Add a tag (a.k.a. keyword) to a photo.

+Needs authentication, see ClientLogin()

+Arguments:
+photo_or_uri: a gdata.photos.PhotoEntry that will be tagged, or a
+  `post' uri pointing to it
+(string) tag: The tag/keyword

+Returns:
+The new gdata.photos.TagEntry

+Example:
+p = GetFeed(PhotoUri)
+tag = InsertTag(p, 'Beautiful sunsets')
+ +
SearchCommunityPhotos(self, query, limit=100)
Search through all public photos and return a feed.
+This will look for matches in file names and image tags (a.k.a. keywords)

+Arguments:
+query: The string you're looking for, e.g. `vacation'
+limit (optional): Don't return more than `limit' hits, defaults to 100

+Returns:
+gdata.GDataFeed with PhotoEntry elements
+ +
SearchContactsPhotos(self, user='default', search=None, limit=None)
Search over your contacts' photos and return a feed

+Arguments:
+user: Username of the user whose contacts you want
+search (optional): What to search for (photo title, description and keywords)

+Returns
+gdata.photos.UserFeed, with PhotoEntry elements

+See:
+http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38
+ +
SearchUserPhotos(self, query, user='default', limit=100)
Search through all photos for a specific user and return a feed.
+This will look for matches in file names and image tags (a.k.a. keywords)

+Arguments:
+query: The string you're looking for, e.g. `vacation'
+user (optional): The username of whose photos you want to search, defaults
+  to current user.
+limit (optional): Don't return more than `limit' hits, defaults to 100

+Only public photos are searched, unless you are authenticated and
+searching through your own photos.

+Returns:
+gdata.photos.UserFeed with PhotoEntry elements
+ +
UpdatePhotoBlob(self, photo_or_uri, filename_or_handle, content_type='image/jpeg')
Update a photo's binary data.

+Needs authentication, see ClientLogin()

+Arguments:
+photo_or_uri: a gdata.photos.PhotoEntry that will be updated, or a
+  `edit-media' uri pointing to it
+filename_or_handle:  A file-like object or file name where the image/video
+  will be read from
+content_type (optional): Internet media type (a.k.a. mime type) of
+  media object. Currently Google Photos supports these types:
+   o image/bmp
+   o image/gif
+   o image/jpeg
+   o image/png
+Images will be converted to jpeg on upload. Defaults to `image/jpeg'

+Returns:
+The modified gdata.photos.PhotoEntry

+Example:
+p = GetFeed(PhotoUri)
+p = UpdatePhotoBlob(p, '/tmp/newPic.jpg')

+It is important that you don't keep the old object around, once
+it has been updated. See
+http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency
+ +
UpdatePhotoMetadata(self, photo)
Update a photo's metadata. 

+Needs authentication, see ClientLogin()

+You can update any or all of the following metadata properties:
+ * <title>
+ * <media:description>
+ * <gphoto:checksum>
+ * <gphoto:client>
+ * <gphoto:rotation>
+ * <gphoto:timestamp>
+ * <gphoto:commentingEnabled>

+ Arguments:
+ photo: a gdata.photos.PhotoEntry object with updated elements

+ Returns:
+ The modified gdata.photos.PhotoEntry

+ Example:
+ p = GetFeed(uri).entry[0]
+ p.title.text = u'My new text'
+ p.commentingEnabled.text = 'false'
+ p = UpdatePhotoMetadata(p)

+ It is important that you don't keep the old object around, once
+ it has been updated. See
http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency
+ +
__init__(self, email=None, password=None, source=None, server='picasaweb.google.com', additional_headers=None)
GooglePhotosService constructor.
+  
+Arguments:
+email: string (optional) The e-mail address of the account to use for
+       authentication.
+password: string (optional) The password of the account to use for
+          authentication.
+source: string (optional) The name of the user's application.
+server: string (optional) The server the feed is hosted on.
+additional_headers: dict (optional) Any additional HTTP headers to be
+                    transmitted to the service in the form of key-value
+                    pairs.

+Returns:
+A PhotosService object used to communicate with the Google Photos
+service.
+ +
+Data and other attributes defined here:
+
userUri = '/data/feed/api/user/%s'
+ +
+Methods inherited from gdata.service.GDataService:
+
AuthSubTokenInfo(self)
Fetches the AuthSub token's metadata from the server.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
ClientLogin(self, username, password, account_type=None, service=None, auth_service_url=None, source=None, captcha_token=None, captcha_response=None)
Convenience method for authenticating using ProgrammaticLogin. 

+Sets values for email, password, and other optional members.

+Args:
+  username:
+  password:
+  account_type: string (optional)
+  service: string (optional)
+  auth_service_url: string (optional)
+  captcha_token: string (optional)
+  captcha_response: string (optional)
+ +
FetchOAuthRequestToken(self, scopes=None, extra_parameters=None)
Fetches OAuth request token and returns it.

+Args:
+  scopes: string or list of string base URL(s) of the service(s) to be
+      accessed. If None, then this method tries to determine the
+      scope(s) from the current service.
+  extra_parameters: dict (optional) key-value pairs as any additional
+      parameters to be included in the URL and signature while making a
+      request for fetching an OAuth request token. All the OAuth parameters
+      are added by default. But if provided through this argument, any
+      default parameters will be overwritten. For e.g. a default parameter
+      oauth_version 1.0 can be overwritten if
+      extra_parameters = {'oauth_version': '2.0'}
+  
+Returns:
+  The fetched request token as a gdata.auth.OAuthToken object.
+  
+Raises:
+  FetchingOAuthRequestTokenFailed if the server responded to the request
+  with an error.
+ +
GenerateAuthSubURL(self, next, scope, secure=False, session=True, domain='default')
Generate a URL at which the user will login and be redirected back.

+Users enter their credentials on a Google login page and a token is sent
+to the URL specified in next. See documentation for AuthSub login at:
+http://code.google.com/apis/accounts/docs/AuthSub.html

+Args:
+  next: string The URL user will be sent to after logging in.
+  scope: string or list of strings. The URLs of the services to be 
+         accessed.
+  secure: boolean (optional) Determines whether or not the issued token
+          is a secure token.
+  session: boolean (optional) Determines whether or not the issued token
+           can be upgraded to a session token.
+ +
GenerateOAuthAuthorizationURL(self, request_token=None, callback_url=None, extra_params=None, include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope')
Generates URL at which user will login to authorize the request token.

+Args:
+  request_token: gdata.auth.OAuthToken (optional) OAuth request token.
+      If not specified, then the current token will be used if it is of
+      type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.    
+  callback_url: string (optional) The URL user will be sent to after
+      logging in and granting access.
+  extra_params: dict (optional) Additional parameters to be sent.
+  include_scopes_in_callback: Boolean (default=False) if set to True, and
+      if 'callback_url' is present, the 'callback_url' will be modified to
+      include the scope(s) from the request token as a URL parameter. The
+      key for the 'callback' URL's scope parameter will be
+      OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as
+      a parameter to the 'callback' URL, is that the page which receives
+      the OAuth token will be able to tell which URLs the token grants
+      access to.
+  scopes_param_prefix: string (default='oauth_token_scope') The URL
+      parameter key which maps to the list of valid scopes for the token.
+      This URL parameter will be included in the callback URL along with
+      the scopes of the token as value if include_scopes_in_callback=True.
+      
+Returns:
+  A string URL at which the user is required to login.

+Raises:
+  NonOAuthToken if the user's request token is not an OAuth token or if a
+  request token was not available.
+ +
Get(self, uri, extra_headers=None, redirects_remaining=4, encoding='UTF-8', converter=None)
Query the GData API with the given URI

+The uri is the portion of the URI after the server value 
+(ex: www.google.com).

+To perform a query against Google Base, set the server to 
+'base.google.com' and set the uri to '/base/feeds/...', where ... is 
+your query. For example, to find snippets for all digital cameras uri 
+should be set to: '/base/feeds/snippets?bq=digital+camera'

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to 
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and 
+                 Authorization headers.
+  redirects_remaining: int (optional) Tracks the number of additional
+      redirects this method will allow. If the service object receives
+      a redirect and remaining is 0, it will not follow the redirect. 
+      This was added to avoid infinite redirect loops.
+  encoding: string (optional) The character encoding for the server's
+      response. Default is UTF-8
+  converter: func (optional) A function which will transform
+      the server's results before it is returned. Example: use 
+      GDataFeedFromString to parse the server response as if it
+      were a GDataFeed.

+Returns:
+  If there is no ResultsTransformer specified in the call, a GDataFeed 
+  or GDataEntry depending on which is sent from the server. If the 
+  response is neither a feed nor an entry and there is no ResultsTransformer,
+  return a string. If there is a ResultsTransformer, the returned value 
+  will be that of the ResultsTransformer function.
+ +
GetAuthSubToken(self)
Returns the AuthSub token as a string.

+If the token is a gdata.auth.AuthSubToken, the Authorization Label
+("AuthSub token") is removed.

+This method examines the current_token to see if it is an AuthSubToken
+or SecureAuthSubToken. If not, it searches the token_store for a token
+which matches the current scope.

+The current scope is determined by the service name string member.

+Returns:
+  If the current_token is set to an AuthSubToken/SecureAuthSubToken,
+  return the token string. If there is no current_token, a token string
+  for a token which matches the service object's default scope is returned.
+  If there are no tokens valid for the scope, returns None.
+ +
GetClientLoginToken(self)
Returns the token string for the current token or a token matching the 
+service scope.

+If the current_token is a ClientLoginToken, the token string for 
+the current token is returned. If the current_token is not set, this method
+searches for a token in the token_store which is valid for the service 
+object's current scope.

+The current scope is determined by the service name string member.
+The token string is the end of the Authorization header, it does not
+include the ClientLogin label.
+ +
GetMedia(self, uri, extra_headers=None)
Returns a MediaSource containing media and its metadata from the given
+URI string.
+ +
GetNext(self, feed)
Requests the next 'page' of results in the feed.

+This method uses the feed's next link to request an additional feed
+and uses the class of the feed to convert the results of the GET request.

+Args:
+  feed: atom.Feed or a subclass. The feed should contain a next link and
+      the type of the feed will be applied to the results from the 
+      server. The new feed which is returned will be of the same class
+      as this feed which was passed in.

+Returns:
+  A new feed representing the next set of results in the server's feed.
+  The type of this feed will match that of the feed argument.
+ +
Post(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert or update  data into a GData service at the given URI.

+Args:
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri.
+  uri: string The location (feed) to which the data should be inserted.
+       Example: '/base/feeds/items'.
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the
+      server's response. Often this is a function like
+      GDataEntryFromString which will parse the body of the server's
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert data into a GData service at the given URI.

+Args:
+  verb: string, either 'POST' or 'PUT'
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri. 
+  uri: string The location (feed) to which the data should be inserted. 
+       Example: '/base/feeds/items'. 
+  extra_headers: dict (optional) HTTP headers which are to be included. 
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
ProgrammaticLogin(self, captcha_token=None, captcha_response=None)
Authenticates the user and sets the GData Auth token.

+Login retrieves a temporary auth token which must be used with all
+requests to GData services. The auth token is stored in the GData client
+object.

+Login is also used to respond to a captcha challenge. If the user's login
+attempt failed with a CaptchaRequired error, the user can respond by
+calling Login with the captcha token and the answer to the challenge.

+Args:
+  captcha_token: string (optional) The identifier for the captcha challenge
+                 which was presented to the user.
+  captcha_response: string (optional) The user's answer to the captcha 
+                    challenge.

+Raises:
+  CaptchaRequired if the login service will require a captcha response
+  BadAuthentication if the login service rejected the username or password
+  Error if the login service responded with a 403 different from the above
+ +
Put(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=3, media_source=None, converter=None)
Updates an entry at the given URI.

+Args:
+  data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The 
+        XML containing the updated data.
+  uri: string A URI indicating entry to which the update will be applied.
+       Example: '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the put succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
RevokeAuthSubToken(self)
Revokes an existing AuthSub token.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
RevokeOAuthToken(self)
Revokes an existing OAuth token.

+Raises:
+  NonOAuthToken if the user's auth token is not an OAuth token.
+  RevokingOAuthTokenFailed if request for revoking an OAuth token failed.
+ +
SetAuthSubToken(self, token, scopes=None, rsa_key=None)
Sets the token sent in requests to an AuthSub token.

+Sets the current_token and attempts to add the token to the token_store.

+Only use this method if you have received a token from the AuthSub
+service. The auth token is set automatically when UpgradeToSessionToken()
+is used. See documentation for Google AuthSub here:
+http://code.google.com/apis/accounts/AuthForWebApps.html 

+Args:
+ token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string
+        The token returned by the AuthSub service. If the token is an
+        AuthSubToken or SecureAuthSubToken, the scope information stored in
+        the token is used. If the token is a string, the scopes parameter is
+        used to determine the valid scopes.
+ scopes: list of URLs for which the token is valid. This is only used
+         if the token parameter is a string.
+ rsa_key: string (optional) Private key required for RSA_SHA1 signature
+          method.  This parameter is necessary if the token is a string
+          representing a secure token.
+ +
SetClientLoginToken(self, token, scopes=None)
Sets the token sent in requests to a ClientLogin token.

+This method sets the current_token to a new ClientLoginToken and it 
+also attempts to add the ClientLoginToken to the token_store.

+Only use this method if you have received a token from the ClientLogin
+service. The auth_token is set automatically when ProgrammaticLogin()
+is used. See documentation for Google ClientLogin here:
+http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html

+Args:
+  token: string or instance of a ClientLoginToken.
+ +
SetOAuthInputParameters(self, signature_method, consumer_key, consumer_secret=None, rsa_key=None, two_legged_oauth=False)
Sets parameters required for using OAuth authentication mechanism.

+NOTE: Though consumer_secret and rsa_key are optional, either of the two
+is required depending on the value of the signature_method.

+Args:
+  signature_method: class which provides implementation for strategy class
+      oauth.oauth.OAuthSignatureMethod. Signature method to be used for
+      signing each request. Valid implementations are provided as the
+      constants defined by gdata.auth.OAuthSignatureMethod. Currently
+      they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
+      gdata.auth.OAuthSignatureMethod.HMAC_SHA1
+  consumer_key: string Domain identifying third_party web application.
+  consumer_secret: string (optional) Secret generated during registration.
+      Required only for HMAC_SHA1 signature method.
+  rsa_key: string (optional) Private key required for RSA_SHA1 signature
+      method.
+  two_legged_oauth: string (default=False) Enables two-legged OAuth process.
+ +
SetOAuthToken(self, oauth_token)
Attempts to set the current token and add it to the token store.

+The oauth_token can be any OAuth token i.e. unauthorized request token,
+authorized request token or access token.
+This method also attempts to add the token to the token store.
+Use this method any time you want the current token to point to the
+oauth_token passed. For e.g. call this method with the request token
+you receive from FetchOAuthRequestToken.

+Args:
+  request_token: gdata.auth.OAuthToken OAuth request token.
+ +
UpgradeToOAuthAccessToken(self, authorized_request_token=None, oauth_version='1.0')
Upgrades the authorized request token to an access token.

+Args:
+  authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request
+      token. If not specified, then the current token will be used if it is
+      of type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.
+  oauth_version: str (default='1.0') oauth_version parameter. All other
+      'oauth_' parameters are added by default. This parameter too, is
+      added by default but here you can override its value.
+      
+Raises:
+  NonOAuthToken if the user's authorized request token is not an OAuth
+  token or if an authorized request token was not available.
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
UpgradeToSessionToken(self, token=None)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         (optional) which is good for a single use but can be upgraded
+         to a session token. If no token is passed in, the token
+         is found by looking in the token_store by looking for a token
+         for the current scope.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
upgrade_to_session_token(self, token)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         which is good for a single use but can be upgraded to a
+         session token.

+Returns:
+  The upgraded token as a gdata.auth.AuthSubToken object.

+Raises:
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
+Data descriptors inherited from gdata.service.GDataService:
+
captcha_token
+
Get the captcha token for a login request.
+
+
captcha_url
+
Get the captcha URL for a login request.
+
+
source
+
The source is the name of the application making the request. 
+It should be in the form company_id-app_name-app_version
+
+
+Data and other attributes inherited from gdata.service.GDataService:
+
auth_token = None
+ +
handler = None
+ +
tokens = None
+ +
+Methods inherited from atom.service.AtomService:
+
UseBasicAuth(self, username, password, for_proxy=False)
Sets an Authentication: Basic HTTP header containing plaintext.

+Deprecated, use use_basic_auth instead.

+The username and password are base64 encoded and added to an HTTP header
+which will be included in each request. Note that your username and 
+password are sent in plaintext.

+Args:
+  username: str
+  password: str
+ +
request(self, operation, url, data=None, headers=None, url_params=None)
+ +
use_basic_auth(self, username, password, scopes=None)
+ +
+Data descriptors inherited from atom.service.AtomService:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
debug
+
If True, HTTP debug information is printed.
+
+
override_token
+
+
+Data and other attributes inherited from atom.service.AtomService:
+
auto_set_current_token = True
+ +
auto_store_tokens = True
+ +
current_token = None
+ +
port = 80
+ +
ssl = False
+ +

+ + + + + +
 
+Functions
       
ConvertAtomTimestampToEpoch(timestamp)
Helper function to convert a timestamp string, for instance
+from atom:updated or atom:published, to milliseconds since Unix epoch
+(a.k.a. POSIX time).

+`2007-07-22T00:45:10.000Z' ->
+
GetSmallestThumbnail(media_thumbnail_list)
Helper function to get the smallest thumbnail of a list of
+  gdata.media.Thumbnail.
+Returns gdata.media.Thumbnail
+

+ + + + + +
 
+Data
       GPHOTOS_BAD_REQUEST = 400
+GPHOTOS_CONFLICT = 409
+GPHOTOS_INTERNAL_SERVER_ERROR = 500
+GPHOTOS_INVALID_ARGUMENT = 601
+GPHOTOS_INVALID_CONTENT_TYPE = 602
+GPHOTOS_INVALID_KIND = 604
+GPHOTOS_NOT_AN_IMAGE = 603
+SUPPORTED_UPLOAD_TYPES = ('bmp', 'jpeg', 'jpg', 'gif', 'png')
+UNKOWN_ERROR = 1000
+__author__ = u'havard@gulldahl.no'
+__license__ = 'Apache License v2'
+__version__ = '176'

+ + + + + +
 
+Author
       havard@gulldahl.no
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.service.html b/gdata.py-1.2.3/pydocs/gdata.service.html new file mode 100644 index 0000000..3ceb138 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.service.html @@ -0,0 +1,1735 @@ + + +Python: module gdata.service + + + + +
 
+ 
gdata.service
index
/usr/local/svn/gdata-python-client/src/gdata/service.py
+

GDataService provides CRUD ops. and programmatic login for GData services.

+Error: A base exception class for all exceptions in the gdata_client
+       module.

+CaptchaRequired: This exception is thrown when a login attempt results in a
+                 captcha challenge from the ClientLogin service. When this
+                 exception is thrown, the captcha_token and captcha_url are
+                 set to the values provided in the server's response.

+BadAuthentication: Raised when a login attempt is made with an incorrect
+                   username or password.

+NotAuthenticated: Raised if an operation requiring authentication is called
+                  before a user has authenticated.

+NonAuthSubToken: Raised if a method to modify an AuthSub token is used when
+                 the user is either not authenticated or is authenticated
+                 through another authentication mechanism.

+NonOAuthToken: Raised if a method to modify an OAuth token is used when the
+               user is either not authenticated or is authenticated through
+               another authentication mechanism.

+RequestError: Raised if a CRUD request returned a non-success code. 

+UnexpectedReturnType: Raised if the response from the server was not of the
+                      desired type. For example, this would be raised if the
+                      server sent a feed when the client requested an entry.

+GDataService: Encapsulates user credentials needed to perform insert, update
+              and delete operations with the GData API. An instance can
+              perform user authentication, query, insertion, deletion, and 
+              update.

+Query: Eases query URI creation by allowing URI parameters to be set as 
+       dictionary attributes. For example a query with a feed of 
+       '/base/feeds/snippets' and ['bq'] set to 'digital camera' will 
+       produce '/base/feeds/snippets?bq=digital+camera' when .ToUri() is 
+       called on it.

+

+ + + + + +
 
+Modules
       
xml.etree.cElementTree
+atom
+
gdata
+atom.service
+
re
+urllib
+
urlparse
+

+ + + + + +
 
+Classes
       
+
__builtin__.dict(__builtin__.object) +
+
+
Query +
+
+
atom.service.AtomService(__builtin__.object) +
+
+
GDataService +
+
+
exceptions.Exception(exceptions.BaseException) +
+
+
Error +
+
+
AuthorizationRequired +
BadAuthentication +
BadAuthenticationServiceURL +
CaptchaRequired +
NonAuthSubToken +
NonOAuthToken +
NotAuthenticated +
RequestError +
+
+
FetchingOAuthRequestTokenFailed +
RevokingOAuthTokenFailed +
TokenUpgradeFailed +
+
+
TokenHadNoScope +
UnexpectedReturnType +
+
+
+
+
+

+ + + + + +
 
+class AuthorizationRequired(Error)
    
Method resolution order:
+
AuthorizationRequired
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+class BadAuthentication(Error)
    
Method resolution order:
+
BadAuthentication
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+class BadAuthenticationServiceURL(Error)
    
Method resolution order:
+
BadAuthenticationServiceURL
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+class CaptchaRequired(Error)
    
Method resolution order:
+
CaptchaRequired
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+class Error(exceptions.Exception)
    
Method resolution order:
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors defined here:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+class FetchingOAuthRequestTokenFailed(RequestError)
    
Method resolution order:
+
FetchingOAuthRequestTokenFailed
+
RequestError
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + + + +
 
+class GDataService(atom.service.AtomService)
   Contains elements needed for GData login and CRUD request headers.

+Maintains additional headers (tokens for example) needed for the GData 
+services to allow a user to perform inserts, updates, and deletes.
 
 
Method resolution order:
+
GDataService
+
atom.service.AtomService
+
__builtin__.object
+
+
+Methods defined here:
+
AuthSubTokenInfo(self)
Fetches the AuthSub token's metadata from the server.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
ClientLogin(self, username, password, account_type=None, service=None, auth_service_url=None, source=None, captcha_token=None, captcha_response=None)
Convenience method for authenticating using ProgrammaticLogin. 

+Sets values for email, password, and other optional members.

+Args:
+  username:
+  password:
+  account_type: string (optional)
+  service: string (optional)
+  auth_service_url: string (optional)
+  captcha_token: string (optional)
+  captcha_response: string (optional)
+ +
Delete(self, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4)
Deletes the entry at the given URI.

+Args:
+  uri: string The URI of the entry to be deleted. Example: 
+       '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.

+Returns:
+  True if the entry was deleted.
+ +
FetchOAuthRequestToken(self, scopes=None, extra_parameters=None)
Fetches OAuth request token and returns it.

+Args:
+  scopes: string or list of string base URL(s) of the service(s) to be
+      accessed. If None, then this method tries to determine the
+      scope(s) from the current service.
+  extra_parameters: dict (optional) key-value pairs as any additional
+      parameters to be included in the URL and signature while making a
+      request for fetching an OAuth request token. All the OAuth parameters
+      are added by default. But if provided through this argument, any
+      default parameters will be overwritten. For e.g. a default parameter
+      oauth_version 1.0 can be overwritten if
+      extra_parameters = {'oauth_version': '2.0'}
+  
+Returns:
+  The fetched request token as a gdata.auth.OAuthToken object.
+  
+Raises:
+  FetchingOAuthRequestTokenFailed if the server responded to the request
+  with an error.
+ +
GenerateAuthSubURL(self, next, scope, secure=False, session=True, domain='default')
Generate a URL at which the user will login and be redirected back.

+Users enter their credentials on a Google login page and a token is sent
+to the URL specified in next. See documentation for AuthSub login at:
+http://code.google.com/apis/accounts/docs/AuthSub.html

+Args:
+  next: string The URL user will be sent to after logging in.
+  scope: string or list of strings. The URLs of the services to be 
+         accessed.
+  secure: boolean (optional) Determines whether or not the issued token
+          is a secure token.
+  session: boolean (optional) Determines whether or not the issued token
+           can be upgraded to a session token.
+ +
GenerateOAuthAuthorizationURL(self, request_token=None, callback_url=None, extra_params=None, include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope')
Generates URL at which user will login to authorize the request token.

+Args:
+  request_token: gdata.auth.OAuthToken (optional) OAuth request token.
+      If not specified, then the current token will be used if it is of
+      type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.    
+  callback_url: string (optional) The URL user will be sent to after
+      logging in and granting access.
+  extra_params: dict (optional) Additional parameters to be sent.
+  include_scopes_in_callback: Boolean (default=False) if set to True, and
+      if 'callback_url' is present, the 'callback_url' will be modified to
+      include the scope(s) from the request token as a URL parameter. The
+      key for the 'callback' URL's scope parameter will be
+      OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as
+      a parameter to the 'callback' URL, is that the page which receives
+      the OAuth token will be able to tell which URLs the token grants
+      access to.
+  scopes_param_prefix: string (default='oauth_token_scope') The URL
+      parameter key which maps to the list of valid scopes for the token.
+      This URL parameter will be included in the callback URL along with
+      the scopes of the token as value if include_scopes_in_callback=True.
+      
+Returns:
+  A string URL at which the user is required to login.

+Raises:
+  NonOAuthToken if the user's request token is not an OAuth token or if a
+  request token was not available.
+ +
Get(self, uri, extra_headers=None, redirects_remaining=4, encoding='UTF-8', converter=None)
Query the GData API with the given URI

+The uri is the portion of the URI after the server value 
+(ex: www.google.com).

+To perform a query against Google Base, set the server to 
+'base.google.com' and set the uri to '/base/feeds/...', where ... is 
+your query. For example, to find snippets for all digital cameras uri 
+should be set to: '/base/feeds/snippets?bq=digital+camera'

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to 
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and 
+                 Authorization headers.
+  redirects_remaining: int (optional) Tracks the number of additional
+      redirects this method will allow. If the service object receives
+      a redirect and remaining is 0, it will not follow the redirect. 
+      This was added to avoid infinite redirect loops.
+  encoding: string (optional) The character encoding for the server's
+      response. Default is UTF-8
+  converter: func (optional) A function which will transform
+      the server's results before it is returned. Example: use 
+      GDataFeedFromString to parse the server response as if it
+      were a GDataFeed.

+Returns:
+  If there is no ResultsTransformer specified in the call, a GDataFeed 
+  or GDataEntry depending on which is sent from the server. If the 
+  response is niether a feed or entry and there is no ResultsTransformer,
+  return a string. If there is a ResultsTransformer, the returned value 
+  will be that of the ResultsTransformer function.
+ +
GetAuthSubToken(self)
Returns the AuthSub token as a string.

+If the token is an gdta.auth.AuthSubToken, the Authorization Label
+("AuthSub token") is removed.

+This method examines the current_token to see if it is an AuthSubToken
+or SecureAuthSubToken. If not, it searches the token_store for a token
+which matches the current scope.

+The current scope is determined by the service name string member.

+Returns:
+  If the current_token is set to an AuthSubToken/SecureAuthSubToken,
+  return the token string. If there is no current_token, a token string
+  for a token which matches the service object's default scope is returned.
+  If there are no tokens valid for the scope, returns None.
+ +
GetClientLoginToken(self)
Returns the token string for the current token or a token matching the 
+service scope.

+If the current_token is a ClientLoginToken, the token string for 
+the current token is returned. If the current_token is not set, this method
+searches for a token in the token_store which is valid for the service 
+object's current scope.

+The current scope is determined by the service name string member.
+The token string is the end of the Authorization header, it doesn not
+include the ClientLogin label.
+ +
GetEntry(self, uri, extra_headers=None)
Query the GData API with the given URI and receive an Entry.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataEntry built from the XML in the server's response.
+ +
GetFeed(self, uri, extra_headers=None, converter=<function GDataFeedFromString at 0x886b18>)
Query the GData API with the given URI and receive a Feed.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataFeed built from the XML in the server's response.
+ +
GetMedia(self, uri, extra_headers=None)
Returns a MediaSource containing media and its metadata from the given
+URI string.
+ +
GetNext(self, feed)
Requests the next 'page' of results in the feed.

+This method uses the feed's next link to request an additional feed
+and uses the class of the feed to convert the results of the GET request.

+Args:
+  feed: atom.Feed or a subclass. The feed should contain a next link and
+      the type of the feed will be applied to the results from the 
+      server. The new feed which is returned will be of the same class
+      as this feed which was passed in.

+Returns:
+  A new feed representing the next set of results in the server's feed.
+  The type of this feed will match that of the feed argument.
+ +
Post(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert or update  data into a GData service at the given URI.

+Args:
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri.
+  uri: string The location (feed) to which the data should be inserted.
+       Example: '/base/feeds/items'.
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the
+      server's response. Often this is a function like
+      GDataEntryFromString which will parse the body of the server's
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert data into a GData service at the given URI.

+Args:
+  verb: string, either 'POST' or 'PUT'
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri. 
+  uri: string The location (feed) to which the data should be inserted. 
+       Example: '/base/feeds/items'. 
+  extra_headers: dict (optional) HTTP headers which are to be included. 
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
ProgrammaticLogin(self, captcha_token=None, captcha_response=None)
Authenticates the user and sets the GData Auth token.

+Login retreives a temporary auth token which must be used with all
+requests to GData services. The auth token is stored in the GData client
+object.

+Login is also used to respond to a captcha challenge. If the user's login
+attempt failed with a CaptchaRequired error, the user can respond by
+calling Login with the captcha token and the answer to the challenge.

+Args:
+  captcha_token: string (optional) The identifier for the captcha challenge
+                 which was presented to the user.
+  captcha_response: string (optional) The user's answer to the captch 
+                    challenge.

+Raises:
+  CaptchaRequired if the login service will require a captcha response
+  BadAuthentication if the login service rejected the username or password
+  Error if the login service responded with a 403 different from the above
+ +
Put(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=3, media_source=None, converter=None)
Updates an entry at the given URI.

+Args:
+  data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The 
+        XML containing the updated data.
+  uri: string A URI indicating entry to which the update will be applied.
+       Example: '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the put succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
RevokeAuthSubToken(self)
Revokes an existing AuthSub token.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
RevokeOAuthToken(self)
Revokes an existing OAuth token.

+Raises:
+  NonOAuthToken if the user's auth token is not an OAuth token.
+  RevokingOAuthTokenFailed if request for revoking an OAuth token failed.
+ +
SetAuthSubToken(self, token, scopes=None, rsa_key=None)
Sets the token sent in requests to an AuthSub token.

+Sets the current_token and attempts to add the token to the token_store.

+Only use this method if you have received a token from the AuthSub
+service. The auth token is set automatically when UpgradeToSessionToken()
+is used. See documentation for Google AuthSub here:
+http://code.google.com/apis/accounts/AuthForWebApps.html 

+Args:
+ token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string
+        The token returned by the AuthSub service. If the token is an
+        AuthSubToken or SecureAuthSubToken, the scope information stored in
+        the token is used. If the token is a string, the scopes parameter is
+        used to determine the valid scopes.
+ scopes: list of URLs for which the token is valid. This is only used
+         if the token parameter is a string.
+ rsa_key: string (optional) Private key required for RSA_SHA1 signature
+          method.  This parameter is necessary if the token is a string
+          representing a secure token.
+ +
SetClientLoginToken(self, token, scopes=None)
Sets the token sent in requests to a ClientLogin token.

+This method sets the current_token to a new ClientLoginToken and it 
+also attempts to add the ClientLoginToken to the token_store.

+Only use this method if you have received a token from the ClientLogin
+service. The auth_token is set automatically when ProgrammaticLogin()
+is used. See documentation for Google ClientLogin here:
+http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html

+Args:
+  token: string or instance of a ClientLoginToken.
+ +
SetOAuthInputParameters(self, signature_method, consumer_key, consumer_secret=None, rsa_key=None, two_legged_oauth=False)
Sets parameters required for using OAuth authentication mechanism.

+NOTE: Though consumer_secret and rsa_key are optional, either of the two
+is required depending on the value of the signature_method.

+Args:
+  signature_method: class which provides implementation for strategy class
+      oauth.oauth.OAuthSignatureMethod. Signature method to be used for
+      signing each request. Valid implementations are provided as the
+      constants defined by gdata.auth.OAuthSignatureMethod. Currently
+      they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
+      gdata.auth.OAuthSignatureMethod.HMAC_SHA1
+  consumer_key: string Domain identifying third_party web application.
+  consumer_secret: string (optional) Secret generated during registration.
+      Required only for HMAC_SHA1 signature method.
+  rsa_key: string (optional) Private key required for RSA_SHA1 signature
+      method.
+  two_legged_oauth: string (default=False) Enables two-legged OAuth process.
+ +
SetOAuthToken(self, oauth_token)
Attempts to set the current token and add it to the token store.

+The oauth_token can be any OAuth token i.e. unauthorized request token,
+authorized request token or access token.
+This method also attempts to add the token to the token store.
+Use this method any time you want the current token to point to the
+oauth_token passed. For e.g. call this method with the request token
+you receive from FetchOAuthRequestToken.

+Args:
+  request_token: gdata.auth.OAuthToken OAuth request token.
+ +
UpgradeToOAuthAccessToken(self, authorized_request_token=None, oauth_version='1.0')
Upgrades the authorized request token to an access token.

+Args:
+  authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request
+      token. If not specified, then the current token will be used if it is
+      of type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.
+  oauth_version: str (default='1.0') oauth_version parameter. All other
+      'oauth_' parameters are added by default. This parameter too, is
+      added by default but here you can override it's value.
+      
+Raises:
+  NonOAuthToken if the user's authorized request token is not an OAuth
+  token or if an authorized request token was not available.
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
UpgradeToSessionToken(self, token=None)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         (optional) which is good for a single use but can be upgraded
+         to a session token. If no token is passed in, the token
+         is found by looking in the token_store by looking for a token
+         for the current scope.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
__init__(self, email=None, password=None, account_type='HOSTED_OR_GOOGLE', service=None, auth_service_url=None, source=None, server=None, additional_headers=None, handler=None, tokens=None, http_client=None, token_store=None)
Creates an object of type GDataService.

+Args:
+  email: string (optional) The user's email address, used for
+      authentication.
+  password: string (optional) The user's password.
+  account_type: string (optional) The type of account to use. Use
+      'GOOGLE' for regular Google accounts or 'HOSTED' for Google
+      Apps accounts, or 'HOSTED_OR_GOOGLE' to try finding a HOSTED
+      account first and, if it doesn't exist, try finding a regular
+      GOOGLE account. Default value: 'HOSTED_OR_GOOGLE'.
+  service: string (optional) The desired service for which credentials
+      will be obtained.
+  auth_service_url: string (optional) User-defined auth token request URL
+      allows users to explicitly specify where to send auth token requests.
+  source: string (optional) The name of the user's application.
+  server: string (optional) The name of the server to which a connection
+      will be opened. Default value: 'base.google.com'.
+  additional_headers: dictionary (optional) Any additional headers which 
+      should be included with CRUD operations.
+  handler: module (optional) This parameter is deprecated and has been
+      replaced by http_client.
+  tokens: This parameter is deprecated, calls should be made to 
+      token_store instead.
+  http_client: An object responsible for making HTTP requests using a
+      request method. If none is provided, a new instance of
+      atom.http.ProxiedHttpClient will be used.
+  token_store: Keeps a collection of authorization tokens which can be
+      applied to requests for a specific URLs. Critical methods are
+      find_token based on a URL (atom.url.Url or a string), add_token,
+      and remove_token.
+ +
upgrade_to_session_token(self, token)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         which is good for a single use but can be upgraded to a
+         session token.

+Returns:
+  The upgraded token as a gdata.auth.AuthSubToken object.

+Raises:
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
+Data descriptors defined here:
+
captcha_token
+
Get the captcha token for a login request.
+
+
captcha_url
+
Get the captcha URL for a login request.
+
+
source
+
The source is the name of the application making the request. 
+It should be in the form company_id-app_name-app_version
+
+
+Data and other attributes defined here:
+
auth_token = None
+ +
handler = None
+ +
tokens = None
+ +
+Methods inherited from atom.service.AtomService:
+
UseBasicAuth(self, username, password, for_proxy=False)
Sets an Authenticaiton: Basic HTTP header containing plaintext.

+Deprecated, use use_basic_auth instead.

+The username and password are base64 encoded and added to an HTTP header
+which will be included in each request. Note that your username and 
+password are sent in plaintext.

+Args:
+  username: str
+  password: str
+ +
request(self, operation, url, data=None, headers=None, url_params=None)
+ +
use_basic_auth(self, username, password, scopes=None)
+ +
+Data descriptors inherited from atom.service.AtomService:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
debug
+
If True, HTTP debug information is printed.
+
+
override_token
+
+
+Data and other attributes inherited from atom.service.AtomService:
+
auto_set_current_token = True
+ +
auto_store_tokens = True
+ +
current_token = None
+ +
port = 80
+ +
ssl = False
+ +

+ + + + + +
 
+class NonAuthSubToken(Error)
    
Method resolution order:
+
NonAuthSubToken
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+class NonOAuthToken(Error)
    
Method resolution order:
+
NonOAuthToken
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+class NotAuthenticated(Error)
    
Method resolution order:
+
NotAuthenticated
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + + + +
 
+class Query(__builtin__.dict)
   Constructs a query URL to be used in GET requests

+Url parameters are created by adding key-value pairs to this object as a 
+dict. For example, to add &max-results=25 to the URL do
+my_query['max-results'] = 25

+Category queries are created by adding category strings to the categories
+member. All items in the categories list will be concatenated with the /
+symbol (symbolizing a category x AND y restriction). If you would like to OR
+2 categories, append them as one string with a | between the categories. 
+For example, do query.categories.append('Fritz|Laurie') to create a query
+like this feed/-/Fritz%7CLaurie . This query will look for results in both
+categories.
 
 
Method resolution order:
+
Query
+
__builtin__.dict
+
__builtin__.object
+
+
+Methods defined here:
+
ToUri(self)
+ +
__init__(self, feed=None, text_query=None, params=None, categories=None)
Constructor for Query

+Args:
+  feed: str (optional) The path for the feed (Examples: 
+      '/base/feeds/snippets' or 'calendar/feeds/jo@gmail.com/private/full'
+  text_query: str (optional) The contents of the q query parameter. The
+      contents of the text_query are URL escaped upon conversion to a URI.
+  params: dict (optional) Parameter value string pairs which become URL
+      params when translated to a URI. These parameters are added to the
+      query's items (key-value pairs).
+  categories: list (optional) List of category strings which should be
+      included as query categories. See 
+      http://code.google.com/apis/gdata/reference.html#Queries for 
+      details. If you want to get results from category A or B (both 
+      categories), specify a single list item 'A|B'.
+ +
__str__(self)
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
alt
+
The feed query's alt parameter
+
+
author
+
The feed query's author parameter
+
+
max_results
+
The feed query's max-results parameter
+
+
orderby
+
The feed query's orderby parameter
+
+
published_max
+
The feed query's published-max parameter
+
+
published_min
+
The feed query's published-min parameter
+
+
start_index
+
The feed query's start-index parameter
+
+
text_query
+
The feed query's q parameter
+
+
updated_max
+
The feed query's updated-max parameter
+
+
updated_min
+
The feed query's updated-min parameter
+
+
+Methods inherited from __builtin__.dict:
+
__cmp__(...)
x.__cmp__(y) <==> cmp(x,y)
+ +
__contains__(...)
D.__contains__(k) -> True if D has a key k, else False
+ +
__delitem__(...)
x.__delitem__(y) <==> del x[y]
+ +
__eq__(...)
x.__eq__(y) <==> x==y
+ +
__ge__(...)
x.__ge__(y) <==> x>=y
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__gt__(...)
x.__gt__(y) <==> x>y
+ +
__hash__(...)
x.__hash__() <==> hash(x)
+ +
__iter__(...)
x.__iter__() <==> iter(x)
+ +
__le__(...)
x.__le__(y) <==> x<=y
+ +
__len__(...)
x.__len__() <==> len(x)
+ +
__lt__(...)
x.__lt__(y) <==> x<y
+ +
__ne__(...)
x.__ne__(y) <==> x!=y
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setitem__(...)
x.__setitem__(i, y) <==> x[i]=y
+ +
clear(...)
D.clear() -> None.  Remove all items from D.
+ +
copy(...)
D.copy() -> a shallow copy of D
+ +
get(...)
D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.
+ +
has_key(...)
D.has_key(k) -> True if D has a key k, else False
+ +
items(...)
D.items() -> list of D's (key, value) pairs, as 2-tuples
+ +
iteritems(...)
D.iteritems() -> an iterator over the (key, value) items of D
+ +
iterkeys(...)
D.iterkeys() -> an iterator over the keys of D
+ +
itervalues(...)
D.itervalues() -> an iterator over the values of D
+ +
keys(...)
D.keys() -> list of D's keys
+ +
pop(...)
D.pop(k[,d]) -> v, remove specified key and return the corresponding value
+If key is not found, d is returned if given, otherwise KeyError is raised
+ +
popitem(...)
D.popitem() -> (k, v), remove and return some (key, value) pair as a
+2-tuple; but raise KeyError if D is empty
+ +
setdefault(...)
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+ +
update(...)
D.update(E, **F) -> None.  Update D from E and F: for k in E: D[k] = E[k]
+(if E has keys else: for (k, v) in E: D[k] = v) then: for k in F: D[k] = F[k]
+ +
values(...)
D.values() -> list of D's values
+ +
+Data and other attributes inherited from __builtin__.dict:
+
__new__ = <built-in method __new__ of type object at 0x72b260>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
fromkeys = <built-in method fromkeys of type object at 0xbc8090>
dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
+v defaults to None.
+ +

+ + + + + +
 
+class RequestError(Error)
    
Method resolution order:
+
RequestError
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+class RevokingOAuthTokenFailed(RequestError)
    
Method resolution order:
+
RevokingOAuthTokenFailed
+
RequestError
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+class TokenHadNoScope(Error)
    
Method resolution order:
+
TokenHadNoScope
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+class TokenUpgradeFailed(RequestError)
    
Method resolution order:
+
TokenUpgradeFailed
+
RequestError
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+class UnexpectedReturnType(Error)
    
Method resolution order:
+
UnexpectedReturnType
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+Functions
       
ExtractToken(url, scopes_included_in_next=True)
Gets the AuthSub token from the current page's URL.

+Designed to be used on the URL that the browser is sent to after the user
+authorizes this application at the page given by GenerateAuthSubRequestUrl.

+Args:
+  url: The current page's URL. It should contain the token as a URL
+      parameter. Example: 'http://example.com/?...&token=abcd435'
+  scopes_included_in_next: If True, this function looks for a scope value
+      associated with the token. The scope is a URL parameter with the
+      key set to SCOPE_URL_PARAM_NAME. This parameter should be present
+      if the AuthSub request URL was generated using
+      GenerateAuthSubRequestUrl with include_scope_in_next set to True.

+Returns:
+  A tuple containing the token string and a list of scope strings for which
+  this token should be valid. If the scope was not included in the URL, the
+  tuple will contain (token, None).
+
GenerateAuthSubRequestUrl(next, scopes, hd='default', secure=False, session=True, request_url='http://www.google.com/accounts/AuthSubRequest', include_scopes_in_next=True)
Creates a URL to request an AuthSub token to access Google services.

+For more details on AuthSub, see the documentation here:
+http://code.google.com/apis/accounts/docs/AuthSub.html

+Args:
+  next: The URL where the browser should be sent after the user authorizes
+      the application. This page is responsible for receiving the token
+      which is embeded in the URL as a parameter.
+  scopes: The base URL to which access will be granted. Example:
+      'http://www.google.com/calendar/feeds' will grant access to all
+      URLs in the Google Calendar data API. If you would like a token for
+      multiple scopes, pass in a list of URL strings.
+  hd: The domain to which the user's account belongs. This is set to the
+      domain name if you are using Google Apps. Example: 'example.org'
+      Defaults to 'default'
+  secure: If set to True, all requests should be signed. The default is
+      False.
+  session: If set to True, the token received by the 'next' URL can be
+      upgraded to a multiuse session token. If session is set to False, the
+      token may only be used once and cannot be upgraded. Default is True.
+  request_url: The base of the URL to which the user will be sent to
+      authorize this application to access their data. The default is
+      'http://www.google.com/accounts/AuthSubRequest'.
+  include_scopes_in_next: Boolean if set to true, the 'next' parameter will
+      be modified to include the requested scope as a URL parameter. The
+      key for the next's scope parameter will be SCOPE_URL_PARAM_NAME. The
+      benefit of including the scope URL as a parameter to the next URL, is
+      that the page which receives the AuthSub token will be able to tell
+      which URLs the token grants access to.

+Returns:
+  A URL string to which the browser should be sent.
+
lookup_scopes(service_name)
Finds the scope URLs for the desired service.

+In some cases, an unknown service may be used, and in those cases this
+function will return None.
+

+ + + + + +
 
+Data
       AUTH_SERVER_HOST = 'https://www.google.com'
+CLIENT_LOGIN_SCOPES = {'apps': ['http://www.google.com/a/feeds/', 'https://www.google.com/a/feeds/', 'http://apps-apis.google.com/a/feeds/', 'https://apps-apis.google.com/a/feeds/'], 'blogger': ['http://www.blogger.com/feeds/'], 'cl': ['https://www.google.com/calendar/feeds/', 'http://www.google.com/calendar/feeds/'], 'codesearch': ['http://www.google.com/codesearch/feeds/'], 'cp': ['https://www.google.com/m8/feeds/', 'http://www.google.com/m8/feeds/'], 'finance': ['http://finance.google.com/finance/feeds/'], 'gbase': ['http://base.google.com/base/feeds/', 'http://www.google.com/base/feeds/'], 'health': ['https://www.google.com/health/feeds/'], 'lh2': ['http://picasaweb.google.com/data/'], 'sitemaps': ['https://www.google.com/webmasters/tools/feeds/'], ...}
+OAUTH_SCOPE_URL_PARAM_NAME = 'oauth_token_scope'
+SCOPE_URL_PARAM_NAME = 'authsub_token_scope'
+__author__ = 'api.jscudder (Jeffrey Scudder)'

+ + + + + +
 
+Author
       api.jscudder (Jeffrey Scudder)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.spreadsheet.html b/gdata.py-1.2.3/pydocs/gdata.spreadsheet.html new file mode 100644 index 0000000..6a94576 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.spreadsheet.html @@ -0,0 +1,1314 @@ + + +Python: package gdata.spreadsheet + + + + +
 
+ 
gdata.spreadsheet
index
/usr/local/svn/gdata-python-client/src/gdata/spreadsheet/__init__.py
+

Contains extensions to Atom objects used with Google Spreadsheets.

+

+ + + + + +
 
+Package Contents
       
service
+
text_db
+

+ + + + + +
 
+Classes
       
+
atom.AtomBase(atom.ExtensionContainer) +
+
+
Cell +
ColCount +
Custom +
RowCount +
+
+
gdata.BatchEntry(gdata.GDataEntry) +
+
+
SpreadsheetsCell +
+
+
gdata.BatchFeed(gdata.GDataFeed) +
+
+
SpreadsheetsCellsFeed +
+
+
gdata.GDataEntry(atom.Entry, gdata.LinkFinder) +
+
+
SpreadsheetsList +
SpreadsheetsSpreadsheet +
SpreadsheetsWorksheet +
+
+
gdata.GDataFeed(atom.Feed, gdata.LinkFinder) +
+
+
SpreadsheetsListFeed +
SpreadsheetsSpreadsheetsFeed +
SpreadsheetsWorksheetsFeed +
+
+
+

+ + + + + + + +
 
+class Cell(atom.AtomBase)
   The Google Spreadsheets cell element
 
 
Method resolution order:
+
Cell
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text=None, row=None, col=None, inputValue=None, numericValue=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class ColCount(atom.AtomBase)
   The Google Spreadsheets colCount element
 
 
Method resolution order:
+
ColCount
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Custom(atom.AtomBase)
   The Google Spreadsheets custom element
 
 
Method resolution order:
+
Custom
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, column=None, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class RowCount(atom.AtomBase)
   The Google Spreadsheets rowCount element
 
 
Method resolution order:
+
RowCount
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class SpreadsheetsCell(gdata.BatchEntry)
   A Google Spreadsheets flavor of a Cell Atom Entry
 
 
Method resolution order:
+
SpreadsheetsCell
+
gdata.BatchEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, contributor=None, atom_id=None, link=None, published=None, rights=None, source=None, summary=None, title=None, control=None, updated=None, cell=None, batch_operation=None, batch_id=None, batch_status=None, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class SpreadsheetsCellsFeed(gdata.BatchFeed)
   A feed containing Google Spreadsheets Cells
 
 
Method resolution order:
+
SpreadsheetsCellsFeed
+
gdata.BatchFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
GetBatchLink(self)
+ +
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None, row_count=None, col_count=None, interrupted=None)
+ +
+Methods inherited from gdata.BatchFeed:
+
AddBatchEntry(self, entry=None, id_url_string=None, batch_id_string=None, operation_string=None)
Logic for populating members of a BatchEntry and adding to the feed.


+If the entry is not a BatchEntry, it is converted to a BatchEntry so
+that the batch specific members will be present. 

+The id_url_string can be used in place of an entry if the batch operation
+applies to a URL. For example query and delete operations require just
+the URL of an entry, no body is sent in the HTTP request. If an
+id_url_string is sent instead of an entry, a BatchEntry is created and
+added to the feed.

+This method also assigns the desired batch id to the entry so that it 
+can be referenced in the server's response. If the batch_id_string is
+None, this method will assign a batch_id to be the index at which this
+entry will be in the feed's entry list.

+Args:
+  entry: BatchEntry, atom.Entry, or another Entry flavor (optional) The
+      entry which will be sent to the server as part of the batch request.
+      The item must have a valid atom id so that the server knows which 
+      entry this request references.
+  id_url_string: str (optional) The URL of the entry to be acted on. You
+      can find this URL in the text member of the atom id for an entry.
+      If an entry is not sent, this id will be used to construct a new
+      BatchEntry which will be added to the request feed.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. Note that batch_ids should either always be specified or
+      never, mixing could potentially result in duplicate batch ids.
+  operation_string: str (optional) The desired batch operation which will
+      set the batch_operation.type member of the entry. Options are
+      'insert', 'update', 'delete', and 'query'

+Raises:
+  MissingRequiredParameters: Raised if neither an id_ url_string nor an
+      entry are provided in the request.

+Returns:
+  The added entry.
+ +
AddDelete(self, url_string=None, entry=None, batch_id_string=None)
Adds a delete request to the batch request feed.

+This method takes either the url_string which is the atom id of the item
+to be deleted, or the entry itself. The atom id of the entry must be 
+present so that the server knows which entry should be deleted.

+Args:
+  url_string: str (optional) The URL of the entry to be deleted. You can
+     find this URL in the text member of the atom id for an entry. 
+  entry: BatchEntry (optional) The entry to be deleted.
+  batch_id_string: str (optional)

+Raises:
+  MissingRequiredParameters: Raised if neither a url_string nor an entry 
+      are provided in the request.
+ +
AddInsert(self, entry, batch_id_string=None)
Add an insert request to the operations in this batch request feed.

+If the entry doesn't yet have an operation or a batch id, these will
+be set to the insert operation and a batch_id specified as a parameter.

+Args:
+  entry: BatchEntry The entry which will be sent in the batch feed as an
+      insert request.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. Note that batch_ids should either always be specified or
+      never, mixing could potentially result in duplicate batch ids.
+ +
AddQuery(self, url_string=None, entry=None, batch_id_string=None)
Adds a query request to the batch request feed.

+This method takes either the url_string which is the query URL 
+whose results will be added to the result feed. The query URL will
+be encapsulated in a BatchEntry, and you may pass in the BatchEntry
+with a query URL instead of sending a url_string.

+Args:
+  url_string: str (optional)
+  entry: BatchEntry (optional)
+  batch_id_string: str (optional)

+Raises:
+  MissingRequiredParameters
+ +
AddUpdate(self, entry, batch_id_string=None)
Add an update request to the list of batch operations in this feed.

+Sets the operation type of the entry to insert if it is not already set
+and assigns the desired batch id to the entry so that it can be 
+referenced in the server's response.

+Args:
+  entry: BatchEntry The entry which will be sent to the server as an
+      update (HTTP PUT) request. The item must have a valid atom id
+      so that the server knows which entry to replace.
+  batch_id_string: str (optional) The batch ID to be used to reference
+      this batch operation in the results feed. If this parameter is None,
+      the current length of the feed's entry array will be used as a
+      count. See also comments for AddInsert.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class SpreadsheetsList(gdata.GDataEntry)
   A Google Spreadsheets flavor of a List Atom Entry
 
 
Method resolution order:
+
SpreadsheetsList
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, contributor=None, atom_id=None, link=None, published=None, rights=None, source=None, summary=None, title=None, control=None, updated=None, custom=None, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class SpreadsheetsListFeed(gdata.GDataFeed)
   A feed containing Google Spreadsheets Spreadsheets
 
 
Method resolution order:
+
SpreadsheetsListFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class SpreadsheetsSpreadsheet(gdata.GDataEntry)
   A Google Spreadsheets flavor of a Spreadsheet Atom Entry
 
 
Method resolution order:
+
SpreadsheetsSpreadsheet
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, contributor=None, atom_id=None, link=None, published=None, rights=None, source=None, summary=None, title=None, control=None, updated=None, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class SpreadsheetsSpreadsheetsFeed(gdata.GDataFeed)
   A feed containing Google Spreadsheets Spreadsheets
 
 
Method resolution order:
+
SpreadsheetsSpreadsheetsFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class SpreadsheetsWorksheet(gdata.GDataEntry)
   A Google Spreadsheets flavor of a Worksheet Atom Entry
 
 
Method resolution order:
+
SpreadsheetsWorksheet
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, contributor=None, atom_id=None, link=None, published=None, rights=None, source=None, summary=None, title=None, control=None, updated=None, row_count=None, col_count=None, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class SpreadsheetsWorksheetsFeed(gdata.GDataFeed)
   A feed containing Google Spreadsheets Spreadsheets
 
 
Method resolution order:
+
SpreadsheetsWorksheetsFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + +
 
+Functions
       
CellFromString(xml_string)
+
ColCountFromString(xml_string)
+
CustomFromString(xml_string)
+
RowCountFromString(xml_string)
+
SpreadsheetsCellFromString(xml_string)
+
SpreadsheetsCellsFeedFromString(xml_string)
+
SpreadsheetsListFeedFromString(xml_string)
+
SpreadsheetsListFromString(xml_string)
+
SpreadsheetsSpreadsheetFromString(xml_string)
+
SpreadsheetsSpreadsheetsFeedFromString(xml_string)
+
SpreadsheetsWorksheetFromString(xml_string)
+
SpreadsheetsWorksheetsFeedFromString(xml_string)
+

+ + + + + +
 
+Data
       GSPREADSHEETS_EXTENDED_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006/extended'
+GSPREADSHEETS_EXTENDED_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006/extended}%s'
+GSPREADSHEETS_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006'
+GSPREADSHEETS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s'
+__author__ = 'api.laurabeth@gmail.com (Laura Beth Lincoln)'

+ + + + + +
 
+Author
       api.laurabeth@gmail.com (Laura Beth Lincoln)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.spreadsheet.service.html b/gdata.py-1.2.3/pydocs/gdata.spreadsheet.service.html new file mode 100644 index 0000000..37ac0ce --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.spreadsheet.service.html @@ -0,0 +1,1351 @@ + + +Python: module gdata.spreadsheet.service + + + + +
 
+ 
gdata.spreadsheet.service
index
/usr/local/svn/gdata-python-client/src/gdata/spreadsheet/service.py
+

SpreadsheetsService extends the GDataService to streamline Google
+Spreadsheets operations.

+  GBaseService: Provides methods to query feeds and manipulate items. Extends
+                GDataService.

+  DictionaryToParamList: Function which converts a dictionary into a list of
+                         URL arguments (represented as strings). This is a
+                         utility function used in CRUD operations.

+

+ + + + + +
 
+Modules
       
atom
+
gdata
+

+ + + + + +
 
+Classes
       
+
exceptions.Exception(exceptions.BaseException) +
+
+
Error +
+
+
RequestError +
+
+
+
+
gdata.service.GDataService(atom.service.AtomService) +
+
+
SpreadsheetsService +
+
+
gdata.service.Query(__builtin__.dict) +
+
+
CellQuery +
DocumentQuery +
ListQuery +
+
+
+

+ + + + + +
 
+class CellQuery(gdata.service.Query)
    
Method resolution order:
+
CellQuery
+
gdata.service.Query
+
__builtin__.dict
+
__builtin__.object
+
+
+Data descriptors defined here:
+
max_col
+
The max-col query parameter
+
+
max_row
+
The max-row query parameter
+
+
min_col
+
The min-col query parameter
+
+
min_row
+
The min-row query parameter
+
+
range
+
The range query parameter
+
+
return_empty
+
The return-empty query parameter
+
+
+Methods inherited from gdata.service.Query:
+
ToUri(self)
+ +
__init__(self, feed=None, text_query=None, params=None, categories=None)
Constructor for Query

+Args:
+  feed: str (optional) The path for the feed (Examples: 
+      '/base/feeds/snippets' or 'calendar/feeds/jo@gmail.com/private/full'
+  text_query: str (optional) The contents of the q query parameter. The
+      contents of the text_query are URL escaped upon conversion to a URI.
+  params: dict (optional) Parameter value string pairs which become URL
+      params when translated to a URI. These parameters are added to the
+      query's items (key-value pairs).
+  categories: list (optional) List of category strings which should be
+      included as query categories. See 
+      http://code.google.com/apis/gdata/reference.html#Queries for 
+      details. If you want to get results from category A or B (both 
+      categories), specify a single list item 'A|B'.
+ +
__str__(self)
+ +
+Data descriptors inherited from gdata.service.Query:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
alt
+
The feed query's alt parameter
+
+
author
+
The feed query's author parameter
+
+
max_results
+
The feed query's max-results parameter
+
+
orderby
+
The feed query's orderby parameter
+
+
published_max
+
The feed query's published-max parameter
+
+
published_min
+
The feed query's published-min parameter
+
+
start_index
+
The feed query's start-index parameter
+
+
text_query
+
The feed query's q parameter
+
+
updated_max
+
The feed query's updated-max parameter
+
+
updated_min
+
The feed query's updated-min parameter
+
+
+Methods inherited from __builtin__.dict:
+
__cmp__(...)
x.__cmp__(y) <==> cmp(x,y)
+ +
__contains__(...)
D.__contains__(k) -> True if D has a key k, else False
+ +
__delitem__(...)
x.__delitem__(y) <==> del x[y]
+ +
__eq__(...)
x.__eq__(y) <==> x==y
+ +
__ge__(...)
x.__ge__(y) <==> x>=y
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__gt__(...)
x.__gt__(y) <==> x>y
+ +
__hash__(...)
x.__hash__() <==> hash(x)
+ +
__iter__(...)
x.__iter__() <==> iter(x)
+ +
__le__(...)
x.__le__(y) <==> x<=y
+ +
__len__(...)
x.__len__() <==> len(x)
+ +
__lt__(...)
x.__lt__(y) <==> x<y
+ +
__ne__(...)
x.__ne__(y) <==> x!=y
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setitem__(...)
x.__setitem__(i, y) <==> x[i]=y
+ +
clear(...)
D.clear() -> None.  Remove all items from D.
+ +
copy(...)
D.copy() -> a shallow copy of D
+ +
get(...)
D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.
+ +
has_key(...)
D.has_key(k) -> True if D has a key k, else False
+ +
items(...)
D.items() -> list of D's (key, value) pairs, as 2-tuples
+ +
iteritems(...)
D.iteritems() -> an iterator over the (key, value) items of D
+ +
iterkeys(...)
D.iterkeys() -> an iterator over the keys of D
+ +
itervalues(...)
D.itervalues() -> an iterator over the values of D
+ +
keys(...)
D.keys() -> list of D's keys
+ +
pop(...)
D.pop(k[,d]) -> v, remove specified key and return the corresponding value
+If key is not found, d is returned if given, otherwise KeyError is raised
+ +
popitem(...)
D.popitem() -> (k, v), remove and return some (key, value) pair as a
+2-tuple; but raise KeyError if D is empty
+ +
setdefault(...)
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+ +
update(...)
D.update(E, **F) -> None.  Update D from E and F: for k in E: D[k] = E[k]
+(if E has keys else: for (k, v) in E: D[k] = v) then: for k in F: D[k] = F[k]
+ +
values(...)
D.values() -> list of D's values
+ +
+Data and other attributes inherited from __builtin__.dict:
+
__new__ = <built-in method __new__ of type object at 0x72b260>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
fromkeys = <built-in method fromkeys of type object at 0xc52d10>
dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
+v defaults to None.
+ +

+ + + + + +
 
+class DocumentQuery(gdata.service.Query)
    
Method resolution order:
+
DocumentQuery
+
gdata.service.Query
+
__builtin__.dict
+
__builtin__.object
+
+
+Data descriptors defined here:
+
title
+
The title query parameter
+
+
title_exact
+
The title-exact query parameter
+
+
+Methods inherited from gdata.service.Query:
+
ToUri(self)
+ +
__init__(self, feed=None, text_query=None, params=None, categories=None)
Constructor for Query

+Args:
+  feed: str (optional) The path for the feed (Examples: 
+      '/base/feeds/snippets' or 'calendar/feeds/jo@gmail.com/private/full'
+  text_query: str (optional) The contents of the q query parameter. The
+      contents of the text_query are URL escaped upon conversion to a URI.
+  params: dict (optional) Parameter value string pairs which become URL
+      params when translated to a URI. These parameters are added to the
+      query's items (key-value pairs).
+  categories: list (optional) List of category strings which should be
+      included as query categories. See 
+      http://code.google.com/apis/gdata/reference.html#Queries for 
+      details. If you want to get results from category A or B (both 
+      categories), specify a single list item 'A|B'.
+ +
__str__(self)
+ +
+Data descriptors inherited from gdata.service.Query:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
alt
+
The feed query's alt parameter
+
+
author
+
The feed query's author parameter
+
+
max_results
+
The feed query's max-results parameter
+
+
orderby
+
The feed query's orderby parameter
+
+
published_max
+
The feed query's published-max parameter
+
+
published_min
+
The feed query's published-min parameter
+
+
start_index
+
The feed query's start-index parameter
+
+
text_query
+
The feed query's q parameter
+
+
updated_max
+
The feed query's updated-max parameter
+
+
updated_min
+
The feed query's updated-min parameter
+
+
+Methods inherited from __builtin__.dict:
+
__cmp__(...)
x.__cmp__(y) <==> cmp(x,y)
+ +
__contains__(...)
D.__contains__(k) -> True if D has a key k, else False
+ +
__delitem__(...)
x.__delitem__(y) <==> del x[y]
+ +
__eq__(...)
x.__eq__(y) <==> x==y
+ +
__ge__(...)
x.__ge__(y) <==> x>=y
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__gt__(...)
x.__gt__(y) <==> x>y
+ +
__hash__(...)
x.__hash__() <==> hash(x)
+ +
__iter__(...)
x.__iter__() <==> iter(x)
+ +
__le__(...)
x.__le__(y) <==> x<=y
+ +
__len__(...)
x.__len__() <==> len(x)
+ +
__lt__(...)
x.__lt__(y) <==> x<y
+ +
__ne__(...)
x.__ne__(y) <==> x!=y
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setitem__(...)
x.__setitem__(i, y) <==> x[i]=y
+ +
clear(...)
D.clear() -> None.  Remove all items from D.
+ +
copy(...)
D.copy() -> a shallow copy of D
+ +
get(...)
D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.
+ +
has_key(...)
D.has_key(k) -> True if D has a key k, else False
+ +
items(...)
D.items() -> list of D's (key, value) pairs, as 2-tuples
+ +
iteritems(...)
D.iteritems() -> an iterator over the (key, value) items of D
+ +
iterkeys(...)
D.iterkeys() -> an iterator over the keys of D
+ +
itervalues(...)
D.itervalues() -> an iterator over the values of D
+ +
keys(...)
D.keys() -> list of D's keys
+ +
pop(...)
D.pop(k[,d]) -> v, remove specified key and return the corresponding value
+If key is not found, d is returned if given, otherwise KeyError is raised
+ +
popitem(...)
D.popitem() -> (k, v), remove and return some (key, value) pair as a
+2-tuple; but raise KeyError if D is empty
+ +
setdefault(...)
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+ +
update(...)
D.update(E, **F) -> None.  Update D from E and F: for k in E: D[k] = E[k]
+(if E has keys else: for (k, v) in E: D[k] = v) then: for k in F: D[k] = F[k]
+ +
values(...)
D.values() -> list of D's values
+ +
+Data and other attributes inherited from __builtin__.dict:
+
__new__ = <built-in method __new__ of type object at 0x72b260>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
fromkeys = <built-in method fromkeys of type object at 0xc51b00>
dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
+v defaults to None.
+ +

+ + + + + + + +
 
+class Error(exceptions.Exception)
   Base class for exceptions in this module.
 
 
Method resolution order:
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors defined here:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+class ListQuery(gdata.service.Query)
    
Method resolution order:
+
ListQuery
+
gdata.service.Query
+
__builtin__.dict
+
__builtin__.object
+
+
+Data descriptors defined here:
+
orderby
+
The orderby query parameter
+
+
reverse
+
The reverse query parameter
+
+
sq
+
The sq query parameter
+
+
+Methods inherited from gdata.service.Query:
+
ToUri(self)
+ +
__init__(self, feed=None, text_query=None, params=None, categories=None)
Constructor for Query

+Args:
+  feed: str (optional) The path for the feed (Examples: 
+      '/base/feeds/snippets' or 'calendar/feeds/jo@gmail.com/private/full'
+  text_query: str (optional) The contents of the q query parameter. The
+      contents of the text_query are URL escaped upon conversion to a URI.
+  params: dict (optional) Parameter value string pairs which become URL
+      params when translated to a URI. These parameters are added to the
+      query's items (key-value pairs).
+  categories: list (optional) List of category strings which should be
+      included as query categories. See 
+      http://code.google.com/apis/gdata/reference.html#Queries for 
+      details. If you want to get results from category A or B (both 
+      categories), specify a single list item 'A|B'.
+ +
__str__(self)
+ +
+Data descriptors inherited from gdata.service.Query:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
alt
+
The feed query's alt parameter
+
+
author
+
The feed query's author parameter
+
+
max_results
+
The feed query's max-results parameter
+
+
published_max
+
The feed query's published-max parameter
+
+
published_min
+
The feed query's published-min parameter
+
+
start_index
+
The feed query's start-index parameter
+
+
text_query
+
The feed query's q parameter
+
+
updated_max
+
The feed query's updated-max parameter
+
+
updated_min
+
The feed query's updated-min parameter
+
+
+Methods inherited from __builtin__.dict:
+
__cmp__(...)
x.__cmp__(y) <==> cmp(x,y)
+ +
__contains__(...)
D.__contains__(k) -> True if D has a key k, else False
+ +
__delitem__(...)
x.__delitem__(y) <==> del x[y]
+ +
__eq__(...)
x.__eq__(y) <==> x==y
+ +
__ge__(...)
x.__ge__(y) <==> x>=y
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__gt__(...)
x.__gt__(y) <==> x>y
+ +
__hash__(...)
x.__hash__() <==> hash(x)
+ +
__iter__(...)
x.__iter__() <==> iter(x)
+ +
__le__(...)
x.__le__(y) <==> x<=y
+ +
__len__(...)
x.__len__() <==> len(x)
+ +
__lt__(...)
x.__lt__(y) <==> x<y
+ +
__ne__(...)
x.__ne__(y) <==> x!=y
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setitem__(...)
x.__setitem__(i, y) <==> x[i]=y
+ +
clear(...)
D.clear() -> None.  Remove all items from D.
+ +
copy(...)
D.copy() -> a shallow copy of D
+ +
get(...)
D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.
+ +
has_key(...)
D.has_key(k) -> True if D has a key k, else False
+ +
items(...)
D.items() -> list of D's (key, value) pairs, as 2-tuples
+ +
iteritems(...)
D.iteritems() -> an iterator over the (key, value) items of D
+ +
iterkeys(...)
D.iterkeys() -> an iterator over the keys of D
+ +
itervalues(...)
D.itervalues() -> an iterator over the values of D
+ +
keys(...)
D.keys() -> list of D's keys
+ +
pop(...)
D.pop(k[,d]) -> v, remove specified key and return the corresponding value
+If key is not found, d is returned if given, otherwise KeyError is raised
+ +
popitem(...)
D.popitem() -> (k, v), remove and return some (key, value) pair as a
+2-tuple; but raise KeyError if D is empty
+ +
setdefault(...)
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+ +
update(...)
D.update(E, **F) -> None.  Update D from E and F: for k in E: D[k] = E[k]
+(if E has keys else: for (k, v) in E: D[k] = v) then: for k in F: D[k] = F[k]
+ +
values(...)
D.values() -> list of D's values
+ +
+Data and other attributes inherited from __builtin__.dict:
+
__new__ = <built-in method __new__ of type object at 0x72b260>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
fromkeys = <built-in method fromkeys of type object at 0xc536c0>
dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
+v defaults to None.
+ +

+ + + + + +
 
+class RequestError(Error)
    
Method resolution order:
+
RequestError
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + + + +
 
+class SpreadsheetsService(gdata.service.GDataService)
   Client for the Google Spreadsheets service.
 
 
Method resolution order:
+
SpreadsheetsService
+
gdata.service.GDataService
+
atom.service.AtomService
+
__builtin__.object
+
+
+Methods defined here:
+
AddWorksheet(self, title, row_count, col_count, key)
Creates a new worksheet in the desired spreadsheet.

+The new worksheet is appended to the end of the list of worksheets. The
+new worksheet will only have the available number of columns and cells 
+specified.

+Args:
+  title: str The title which will be displayed in the list of worksheets.
+  row_count: int or str The number of rows in the new worksheet.
+  col_count: int or str The number of columns in the new worksheet.
+  key: str The spreadsheet key to the spreadsheet to which the new 
+      worksheet should be added. 

+Returns:
+  A SpreadsheetsWorksheet if the new worksheet was created succesfully.
+ +
DeleteRow(self, entry)
Deletes a row, the provided entry

+Args:
+  entry: gdata.spreadsheet.SpreadsheetsList The row to be deleted

+Returns:
+  The delete response
+ +
DeleteWorksheet(self, worksheet_entry=None, url=None)
Removes the desired worksheet from the spreadsheet

+Args:
+  worksheet_entry: SpreadsheetWorksheet (optional) The worksheet to
+      be deleted. If this is none, then the DELETE reqest is sent to 
+      the url specified in the url parameter.
+  url: str (optaional) The URL to which the DELETE request should be
+      sent. If left as None, the worksheet's edit URL is used.

+Returns:
+  True if the worksheet was deleted successfully.
+ +
ExecuteBatch(self, batch_feed, url=None, spreadsheet_key=None, worksheet_id=None, converter=<function SpreadsheetsCellsFeedFromString at 0x9dc9b0>)
Sends a batch request feed to the server.

+The batch request needs to be sent to the batch URL for a particular 
+worksheet. You can specify the worksheet by providing the spreadsheet_key
+and worksheet_id, or by sending the URL from the cells feed's batch link.

+Args:
+  batch_feed: gdata.spreadsheet.SpreadsheetsCellFeed A feed containing 
+      BatchEntry elements which contain the desired CRUD operation and 
+      any necessary data to modify a cell.
+  url: str (optional) The batch URL for the cells feed to which these 
+      changes should be applied. This can be found by calling 
+      cells_feed.GetBatchLink().href.
+  spreadsheet_key: str (optional) Used to generate the batch request URL
+      if the url argument is None. If using the spreadsheet key to 
+      generate the URL, the worksheet id is also required.
+  worksheet_id: str (optional) Used if the url is not provided, it is 
+      oart of the batch feed target URL. This is used with the spreadsheet
+      key.
+  converter: Function (optional) Function to be executed on the server's
+      response. This function should take one string as a parameter. The
+      default value is SpreadsheetsCellsFeedFromString which will turn the result
+      into a gdata.base.GBaseItem object.

+Returns:
+  A gdata.BatchFeed containing the results.
+ +
GetCellsFeed(self, key, wksht_id='default', cell=None, query=None, visibility='private', projection='full')
Gets a cells feed or a specific entry if a cell is defined
+Args:
+  key: string The spreadsheet key defined in /ccc?key=
+  wksht_id: string The id for a specific worksheet entry
+  cell: string (optional) The R1C1 address of the cell
+  query: DocumentQuery (optional) Query parameters
+  
+Returns:
+  If there is no cell, then a SpreadsheetsCellsFeed.
+  If there is a cell, then a SpreadsheetsCell.
+ +
GetListFeed(self, key, wksht_id='default', row_id=None, query=None, visibility='private', projection='full')
Gets a list feed or a specific entry if a row_id is defined
+Args:
+  key: string The spreadsheet key defined in /ccc?key=
+  wksht_id: string The id for a specific worksheet entry
+  row_id: string (optional) The row_id of a row in the list
+  query: DocumentQuery (optional) Query parameters
+  
+Returns:
+  If there is no row_id, then a SpreadsheetsListFeed.
+  If there is a row_id, then a SpreadsheetsList.
+ +
GetSpreadsheetsFeed(self, key=None, query=None, visibility='private', projection='full')
Gets a spreadsheets feed or a specific entry if a key is defined
+Args:
+  key: string (optional) The spreadsheet key defined in /ccc?key=
+  query: DocumentQuery (optional) Query parameters
+  
+Returns:
+  If there is no key, then a SpreadsheetsSpreadsheetsFeed.
+  If there is a key, then a SpreadsheetsSpreadsheet.
+ +
GetWorksheetsFeed(self, key, wksht_id=None, query=None, visibility='private', projection='full')
Gets a worksheets feed or a specific entry if a wksht is defined
+Args:
+  key: string The spreadsheet key defined in /ccc?key=
+  wksht_id: string (optional) The id for a specific worksheet entry
+  query: DocumentQuery (optional) Query parameters
+  
+Returns:
+  If there is no wksht_id, then a SpreadsheetsWorksheetsFeed.
+  If there is a wksht_id, then a SpreadsheetsWorksheet.
+ +
InsertRow(self, row_data, key, wksht_id='default')
Inserts a new row with the provided data

+Args:
+  uri: string The post uri of the list feed
+  row_data: dict A dictionary of column header to row data

+Returns:
+  The inserted row
+ +
UpdateCell(self, row, col, inputValue, key, wksht_id='default')
Updates an existing cell.

+Args:
+  row: int The row the cell to be editted is in
+  col: int The column the cell to be editted is in
+  inputValue: str the new value of the cell
+  key: str The key of the spreadsheet in which this cell resides.
+  wksht_id: str The ID of the worksheet which holds this cell.
+  
+Returns:
+  The updated cell entry
+ +
UpdateRow(self, entry, new_row_data)
Updates a row with the provided data

+Args:
+  entry: gdata.spreadsheet.SpreadsheetsList The entry to be updated
+  new_row_data: dict A dictionary of column header to row data
+  
+Returns:
+  The updated row
+ +
UpdateWorksheet(self, worksheet_entry, url=None)
Changes the size and/or title of the desired worksheet.

+Args:
+  worksheet_entry: SpreadsheetWorksheet The new contents of the 
+      worksheet. 
+  url: str (optional) The URL to which the edited worksheet entry should
+      be sent. If the url is None, the edit URL from the worksheet will
+      be used.

+Returns: 
+  A SpreadsheetsWorksheet with the new information about the worksheet.
+ +
__init__(self, email=None, password=None, source=None, server='spreadsheets.google.com', additional_headers=None)
+ +
+Methods inherited from gdata.service.GDataService:
+
AuthSubTokenInfo(self)
Fetches the AuthSub token's metadata from the server.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
ClientLogin(self, username, password, account_type=None, service=None, auth_service_url=None, source=None, captcha_token=None, captcha_response=None)
Convenience method for authenticating using ProgrammaticLogin. 

+Sets values for email, password, and other optional members.

+Args:
+  username:
+  password:
+  account_type: string (optional)
+  service: string (optional)
+  auth_service_url: string (optional)
+  captcha_token: string (optional)
+  captcha_response: string (optional)
+ +
Delete(self, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4)
Deletes the entry at the given URI.

+Args:
+  uri: string The URI of the entry to be deleted. Example: 
+       '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.

+Returns:
+  True if the entry was deleted.
+ +
FetchOAuthRequestToken(self, scopes=None, extra_parameters=None)
Fetches OAuth request token and returns it.

+Args:
+  scopes: string or list of string base URL(s) of the service(s) to be
+      accessed. If None, then this method tries to determine the
+      scope(s) from the current service.
+  extra_parameters: dict (optional) key-value pairs as any additional
+      parameters to be included in the URL and signature while making a
+      request for fetching an OAuth request token. All the OAuth parameters
+      are added by default. But if provided through this argument, any
+      default parameters will be overwritten. For e.g. a default parameter
+      oauth_version 1.0 can be overwritten if
+      extra_parameters = {'oauth_version': '2.0'}
+  
+Returns:
+  The fetched request token as a gdata.auth.OAuthToken object.
+  
+Raises:
+  FetchingOAuthRequestTokenFailed if the server responded to the request
+  with an error.
+ +
GenerateAuthSubURL(self, next, scope, secure=False, session=True, domain='default')
Generate a URL at which the user will login and be redirected back.

+Users enter their credentials on a Google login page and a token is sent
+to the URL specified in next. See documentation for AuthSub login at:
+http://code.google.com/apis/accounts/docs/AuthSub.html

+Args:
+  next: string The URL user will be sent to after logging in.
+  scope: string or list of strings. The URLs of the services to be 
+         accessed.
+  secure: boolean (optional) Determines whether or not the issued token
+          is a secure token.
+  session: boolean (optional) Determines whether or not the issued token
+           can be upgraded to a session token.
+ +
GenerateOAuthAuthorizationURL(self, request_token=None, callback_url=None, extra_params=None, include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope')
Generates URL at which user will login to authorize the request token.

+Args:
+  request_token: gdata.auth.OAuthToken (optional) OAuth request token.
+      If not specified, then the current token will be used if it is of
+      type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.    
+  callback_url: string (optional) The URL user will be sent to after
+      logging in and granting access.
+  extra_params: dict (optional) Additional parameters to be sent.
+  include_scopes_in_callback: Boolean (default=False) if set to True, and
+      if 'callback_url' is present, the 'callback_url' will be modified to
+      include the scope(s) from the request token as a URL parameter. The
+      key for the 'callback' URL's scope parameter will be
+      OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as
+      a parameter to the 'callback' URL, is that the page which receives
+      the OAuth token will be able to tell which URLs the token grants
+      access to.
+  scopes_param_prefix: string (default='oauth_token_scope') The URL
+      parameter key which maps to the list of valid scopes for the token.
+      This URL parameter will be included in the callback URL along with
+      the scopes of the token as value if include_scopes_in_callback=True.
+      
+Returns:
+  A string URL at which the user is required to login.

+Raises:
+  NonOAuthToken if the user's request token is not an OAuth token or if a
+  request token was not available.
+ +
Get(self, uri, extra_headers=None, redirects_remaining=4, encoding='UTF-8', converter=None)
Query the GData API with the given URI

+The uri is the portion of the URI after the server value 
+(ex: www.google.com).

+To perform a query against Google Base, set the server to 
+'base.google.com' and set the uri to '/base/feeds/...', where ... is 
+your query. For example, to find snippets for all digital cameras uri 
+should be set to: '/base/feeds/snippets?bq=digital+camera'

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to 
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and 
+                 Authorization headers.
+  redirects_remaining: int (optional) Tracks the number of additional
+      redirects this method will allow. If the service object receives
+      a redirect and remaining is 0, it will not follow the redirect. 
+      This was added to avoid infinite redirect loops.
+  encoding: string (optional) The character encoding for the server's
+      response. Default is UTF-8
+  converter: func (optional) A function which will transform
+      the server's results before it is returned. Example: use 
+      GDataFeedFromString to parse the server response as if it
+      were a GDataFeed.

+Returns:
+  If there is no ResultsTransformer specified in the call, a GDataFeed 
+  or GDataEntry depending on which is sent from the server. If the 
+  response is niether a feed or entry and there is no ResultsTransformer,
+  return a string. If there is a ResultsTransformer, the returned value 
+  will be that of the ResultsTransformer function.
+ +
GetAuthSubToken(self)
Returns the AuthSub token as a string.

+If the token is an gdta.auth.AuthSubToken, the Authorization Label
+("AuthSub token") is removed.

+This method examines the current_token to see if it is an AuthSubToken
+or SecureAuthSubToken. If not, it searches the token_store for a token
+which matches the current scope.

+The current scope is determined by the service name string member.

+Returns:
+  If the current_token is set to an AuthSubToken/SecureAuthSubToken,
+  return the token string. If there is no current_token, a token string
+  for a token which matches the service object's default scope is returned.
+  If there are no tokens valid for the scope, returns None.
+ +
GetClientLoginToken(self)
Returns the token string for the current token or a token matching the 
+service scope.

+If the current_token is a ClientLoginToken, the token string for 
+the current token is returned. If the current_token is not set, this method
+searches for a token in the token_store which is valid for the service 
+object's current scope.

+The current scope is determined by the service name string member.
+The token string is the end of the Authorization header, it doesn not
+include the ClientLogin label.
+ +
GetEntry(self, uri, extra_headers=None)
Query the GData API with the given URI and receive an Entry.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataEntry built from the XML in the server's response.
+ +
GetFeed(self, uri, extra_headers=None, converter=<function GDataFeedFromString at 0x7fe705240b18>)
Query the GData API with the given URI and receive a Feed.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataFeed built from the XML in the server's response.
+ +
GetMedia(self, uri, extra_headers=None)
Returns a MediaSource containing media and its metadata from the given
+URI string.
+ +
GetNext(self, feed)
Requests the next 'page' of results in the feed.

+This method uses the feed's next link to request an additional feed
+and uses the class of the feed to convert the results of the GET request.

+Args:
+  feed: atom.Feed or a subclass. The feed should contain a next link and
+      the type of the feed will be applied to the results from the 
+      server. The new feed which is returned will be of the same class
+      as this feed which was passed in.

+Returns:
+  A new feed representing the next set of results in the server's feed.
+  The type of this feed will match that of the feed argument.
+ +
Post(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert or update  data into a GData service at the given URI.

+Args:
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri.
+  uri: string The location (feed) to which the data should be inserted.
+       Example: '/base/feeds/items'.
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the
+      server's response. Often this is a function like
+      GDataEntryFromString which will parse the body of the server's
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert data into a GData service at the given URI.

+Args:
+  verb: string, either 'POST' or 'PUT'
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri. 
+  uri: string The location (feed) to which the data should be inserted. 
+       Example: '/base/feeds/items'. 
+  extra_headers: dict (optional) HTTP headers which are to be included. 
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
ProgrammaticLogin(self, captcha_token=None, captcha_response=None)
Authenticates the user and sets the GData Auth token.

+Login retreives a temporary auth token which must be used with all
+requests to GData services. The auth token is stored in the GData client
+object.

+Login is also used to respond to a captcha challenge. If the user's login
+attempt failed with a CaptchaRequired error, the user can respond by
+calling Login with the captcha token and the answer to the challenge.

+Args:
+  captcha_token: string (optional) The identifier for the captcha challenge
+                 which was presented to the user.
+  captcha_response: string (optional) The user's answer to the captch 
+                    challenge.

+Raises:
+  CaptchaRequired if the login service will require a captcha response
+  BadAuthentication if the login service rejected the username or password
+  Error if the login service responded with a 403 different from the above
+ +
Put(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=3, media_source=None, converter=None)
Updates an entry at the given URI.

+Args:
+  data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The 
+        XML containing the updated data.
+  uri: string A URI indicating entry to which the update will be applied.
+       Example: '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the put succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
RevokeAuthSubToken(self)
Revokes an existing AuthSub token.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
RevokeOAuthToken(self)
Revokes an existing OAuth token.

+Raises:
+  NonOAuthToken if the user's auth token is not an OAuth token.
+  RevokingOAuthTokenFailed if request for revoking an OAuth token failed.
+ +
SetAuthSubToken(self, token, scopes=None, rsa_key=None)
Sets the token sent in requests to an AuthSub token.

+Sets the current_token and attempts to add the token to the token_store.

+Only use this method if you have received a token from the AuthSub
+service. The auth token is set automatically when UpgradeToSessionToken()
+is used. See documentation for Google AuthSub here:
+http://code.google.com/apis/accounts/AuthForWebApps.html 

+Args:
+ token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string
+        The token returned by the AuthSub service. If the token is an
+        AuthSubToken or SecureAuthSubToken, the scope information stored in
+        the token is used. If the token is a string, the scopes parameter is
+        used to determine the valid scopes.
+ scopes: list of URLs for which the token is valid. This is only used
+         if the token parameter is a string.
+ rsa_key: string (optional) Private key required for RSA_SHA1 signature
+          method.  This parameter is necessary if the token is a string
+          representing a secure token.
+ +
SetClientLoginToken(self, token, scopes=None)
Sets the token sent in requests to a ClientLogin token.

+This method sets the current_token to a new ClientLoginToken and it 
+also attempts to add the ClientLoginToken to the token_store.

+Only use this method if you have received a token from the ClientLogin
+service. The auth_token is set automatically when ProgrammaticLogin()
+is used. See documentation for Google ClientLogin here:
+http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html

+Args:
+  token: string or instance of a ClientLoginToken.
+ +
SetOAuthInputParameters(self, signature_method, consumer_key, consumer_secret=None, rsa_key=None, two_legged_oauth=False)
Sets parameters required for using OAuth authentication mechanism.

+NOTE: Though consumer_secret and rsa_key are optional, either of the two
+is required depending on the value of the signature_method.

+Args:
+  signature_method: class which provides implementation for strategy class
+      oauth.oauth.OAuthSignatureMethod. Signature method to be used for
+      signing each request. Valid implementations are provided as the
+      constants defined by gdata.auth.OAuthSignatureMethod. Currently
+      they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
+      gdata.auth.OAuthSignatureMethod.HMAC_SHA1
+  consumer_key: string Domain identifying third_party web application.
+  consumer_secret: string (optional) Secret generated during registration.
+      Required only for HMAC_SHA1 signature method.
+  rsa_key: string (optional) Private key required for RSA_SHA1 signature
+      method.
+  two_legged_oauth: string (default=False) Enables two-legged OAuth process.
+ +
SetOAuthToken(self, oauth_token)
Attempts to set the current token and add it to the token store.

+The oauth_token can be any OAuth token i.e. unauthorized request token,
+authorized request token or access token.
+This method also attempts to add the token to the token store.
+Use this method any time you want the current token to point to the
+oauth_token passed. For e.g. call this method with the request token
+you receive from FetchOAuthRequestToken.

+Args:
+  request_token: gdata.auth.OAuthToken OAuth request token.
+ +
UpgradeToOAuthAccessToken(self, authorized_request_token=None, oauth_version='1.0')
Upgrades the authorized request token to an access token.

+Args:
+  authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request
+      token. If not specified, then the current token will be used if it is
+      of type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.
+  oauth_version: str (default='1.0') oauth_version parameter. All other
+      'oauth_' parameters are added by default. This parameter too, is
+      added by default but here you can override it's value.
+      
+Raises:
+  NonOAuthToken if the user's authorized request token is not an OAuth
+  token or if an authorized request token was not available.
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
UpgradeToSessionToken(self, token=None)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         (optional) which is good for a single use but can be upgraded
+         to a session token. If no token is passed in, the token
+         is found by looking in the token_store by looking for a token
+         for the current scope.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
upgrade_to_session_token(self, token)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         which is good for a single use but can be upgraded to a
+         session token.

+Returns:
+  The upgraded token as a gdata.auth.AuthSubToken object.

+Raises:
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
+Data descriptors inherited from gdata.service.GDataService:
+
captcha_token
+
Get the captcha token for a login request.
+
+
captcha_url
+
Get the captcha URL for a login request.
+
+
source
+
The source is the name of the application making the request. 
+It should be in the form company_id-app_name-app_version
+
+
+Data and other attributes inherited from gdata.service.GDataService:
+
auth_token = None
+ +
handler = None
+ +
tokens = None
+ +
+Methods inherited from atom.service.AtomService:
+
UseBasicAuth(self, username, password, for_proxy=False)
Sets an Authentication: Basic HTTP header containing plaintext.

+Deprecated, use use_basic_auth instead.

+The username and password are base64 encoded and added to an HTTP header
+which will be included in each request. Note that your username and 
+password are sent in plaintext.

+Args:
+  username: str
+  password: str
+ +
request(self, operation, url, data=None, headers=None, url_params=None)
+ +
use_basic_auth(self, username, password, scopes=None)
+ +
+Data descriptors inherited from atom.service.AtomService:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
debug
+
If True, HTTP debug information is printed.
+
+
override_token
+
+
+Data and other attributes inherited from atom.service.AtomService:
+
auto_set_current_token = True
+ +
auto_store_tokens = True
+ +
current_token = None
+ +
port = 80
+ +
ssl = False
+ +

+ + + + + +
 
+Data
       __author__ = 'api.laurabeth@gmail.com (Laura Beth Lincoln)'

+ + + + + +
 
+Author
       api.laurabeth@gmail.com (Laura Beth Lincoln)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.spreadsheet.text_db.html b/gdata.py-1.2.3/pydocs/gdata.spreadsheet.text_db.html new file mode 100644 index 0000000..1c18075 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.spreadsheet.text_db.html @@ -0,0 +1,641 @@ + + +Python: module gdata.spreadsheet.text_db + + + + +
 
+ 
gdata.spreadsheet.text_db
index
/usr/local/svn/gdata-python-client/src/gdata/spreadsheet/text_db.py
+

# Copyright Google 2007-2008, all rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+

+ + + + + +
 
+Modules
       
StringIO
+
gdata
+

+ + + + + +
 
+Classes
       
+
__builtin__.list(__builtin__.object) +
+
+
RecordResultSet +
+
+
__builtin__.object +
+
+
Database +
DatabaseClient +
Record +
Table +
+
+
exceptions.Exception(exceptions.BaseException) +
+
+
Error +
+
+
BadCredentials +
CaptchaRequired +
+
+
+
+
+

+ + + + + +
 
+class BadCredentials(Error)
    
Method resolution order:
+
BadCredentials
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+class CaptchaRequired(Error)
    
Method resolution order:
+
CaptchaRequired
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + + + +
 
+class Database(__builtin__.object)
   Provides interface to find and create tables.

+The database represents a Google Spreadsheet.
 
 Methods defined here:
+
CreateTable(self, name, fields=None)
Add a new worksheet to this spreadsheet and fill in column names.

+Args:
+  name: str The title of the new worksheet.
+  fields: list of strings The column names which are placed in the
+      first row of this worksheet. These names are converted into XML
+      tags by the server. To avoid changes during the translation
+      process I recommend using all lowercase alphabetic names. For
+      example ['somelongname', 'theothername']

+Returns:
+  Table representing the newly created worksheet.
+ +
Delete(self)
Deletes the entire database spreadsheet from Google Spreadsheets.
+ +
GetTables(self, worksheet_id=None, name=None)
Searches for a worksheet with the specified ID or name.

+The list of results should have one table at most, or no results
+if the id or name were not found.

+Args:
+  worksheet_id: str The ID of the worksheet, example: 'od6'
+  name: str The title of the worksheet.

+Returns:
+  A list of length 0 or 1 containing the desired Table. A list is returned
+  to make this method feel like GetDatabases and GetRecords.
+ +
__init__(self, spreadsheet_entry=None, database_client=None)
Constructor for a database object.

+Args:
+  spreadsheet_entry: gdata.docs.DocumentListEntry The 
+      Atom entry which represents the Google Spreadsheet. The
+      spreadsheet's key is extracted from the entry and stored as a 
+      member.
+  database_client: DatabaseClient A client which can talk to the
+      Google Spreadsheets servers to perform operations on worksheets
+      within this spreadsheet.
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class DatabaseClient(__builtin__.object)
   Allows creation and finding of Google Spreadsheets databases.

+The DatabaseClient simplifies the process of creating and finding Google 
+Spreadsheets and will talk to both the Google Spreadsheets API and the 
+Google Documents List API.
 
 Methods defined here:
+
CreateDatabase(self, name)
Creates a new Google Spreadsheet with the desired name.

+Args:
+  name: str The title for the spreadsheet.

+Returns:
+  A Database instance representing the new spreadsheet.
+ +
GetDatabases(self, spreadsheet_key=None, name=None)
Finds spreadsheets which have the unique key or title.

+If querying on the spreadsheet_key there will be at most one result, but
+searching by name could yield multiple results.

+Args:
+  spreadsheet_key: str The unique key for the spreadsheet, this 
+      usually in the form 'pk23...We' or 'o23...423.12,,,3'.
+  name: str The title of the spreadsheets.

+Returns:
+  A list of Database objects representing the desired spreadsheets.
+ +
SetCredentials(self, username, password)
Attempts to log in to Google APIs using the provided credentials.

+If the username or password are None, the client will not request auth 
+tokens.

+Args:
+  username: str (optional) Example: jo@example.com
+  password: str (optional)
+ +
__init__(self, username=None, password=None)
Constructor for a Database Client. 

+If the username and password are present, the constructor  will contact
+the Google servers to authenticate.

+Args:
+  username: str (optional) Example: jo@example.com
+  password: str (optional)
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class Error(exceptions.Exception)
    
Method resolution order:
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors defined here:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + + + +
 
+class Record(__builtin__.object)
   Represents one row in a worksheet and provides a dictionary of values.

+Attributes:
+  custom: dict Represents the contents of the row with cell values mapped
+      to column headers.
 
 Methods defined here:
+
Delete(self)
+ +
ExtractContentFromEntry(self, entry)
Populates the content and row_id based on content of the entry.

+This method is used in the Record's constructor.

+Args:
+  entry: gdata.spreadsheet.SpreadsheetsList The Atom entry 
+      representing this row in the worksheet.
+ +
Pull(self)
Query Google Spreadsheets to get the latest data from the server.

+Fetches the entry for this row and repopulates the content dictionary 
+with the data found in the row.
+ +
Push(self)
Send the content of the record to spreadsheets to edit the row.

+All items in the content dictionary will be sent. Items which have been
+removed from the content may remain in the row. The content member
+of the record will not be modified so additional fields in the row
+might be absent from this local copy.
+ +
__init__(self, content=None, row_entry=None, spreadsheet_key=None, worksheet_id=None, database_client=None)
Constructor for a record.

+Args:
+  content: dict of strings Mapping of string values to column names.
+  row_entry: gdata.spreadsheet.SpreadsheetsList The Atom entry 
+      representing this row in the worksheet.
+  spreadsheet_key: str The ID of the spreadsheet in which this row 
+      belongs.
+  worksheet_id: str The ID of the worksheet in which this row belongs.
+  database_client: DatabaseClient The client which can be used to talk
+      the Google Spreadsheets server to edit this row.
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class RecordResultSet(__builtin__.list)
   A collection of rows which allows fetching of the next set of results.

+The server may not send all rows in the requested range because there are
+too many. Using this result set you can access the first set of results
+as if it is a list, then get the next batch (if there are more results) by
+calling GetNext().
 
 
Method resolution order:
+
RecordResultSet
+
__builtin__.list
+
__builtin__.object
+
+
+Methods defined here:
+
GetNext(self)
Fetches the next batch of rows in the result set.

+Returns:
+  A new RecordResultSet.
+ +
__init__(self, feed, client, spreadsheet_key, worksheet_id)
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from __builtin__.list:
+
__add__(...)
x.__add__(y) <==> x+y
+ +
__contains__(...)
x.__contains__(y) <==> y in x
+ +
__delitem__(...)
x.__delitem__(y) <==> del x[y]
+ +
__delslice__(...)
x.__delslice__(i, j) <==> del x[i:j]

+Use of negative indices is not supported.
+ +
__eq__(...)
x.__eq__(y) <==> x==y
+ +
__ge__(...)
x.__ge__(y) <==> x>=y
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__gt__(...)
x.__gt__(y) <==> x>y
+ +
__hash__(...)
x.__hash__() <==> hash(x)
+ +
__iadd__(...)
x.__iadd__(y) <==> x+=y
+ +
__imul__(...)
x.__imul__(y) <==> x*=y
+ +
__iter__(...)
x.__iter__() <==> iter(x)
+ +
__le__(...)
x.__le__(y) <==> x<=y
+ +
__len__(...)
x.__len__() <==> len(x)
+ +
__lt__(...)
x.__lt__(y) <==> x<y
+ +
__mul__(...)
x.__mul__(n) <==> x*n
+ +
__ne__(...)
x.__ne__(y) <==> x!=y
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__reversed__(...)
L.__reversed__() -- return a reverse iterator over the list
+ +
__rmul__(...)
x.__rmul__(n) <==> n*x
+ +
__setitem__(...)
x.__setitem__(i, y) <==> x[i]=y
+ +
__setslice__(...)
x.__setslice__(i, j, y) <==> x[i:j]=y

+Use  of negative indices is not supported.
+ +
append(...)
L.append(object) -- append object to end
+ +
count(...)
L.count(value) -> integer -- return number of occurrences of value
+ +
extend(...)
L.extend(iterable) -- extend list by appending elements from the iterable
+ +
index(...)
L.index(value, [start, [stop]]) -> integer -- return first index of value
+ +
insert(...)
L.insert(index, object) -- insert object before index
+ +
pop(...)
L.pop([index]) -> item -- remove and return item at index (default last)
+ +
remove(...)
L.remove(value) -- remove first occurrence of value
+ +
reverse(...)
L.reverse() -- reverse *IN PLACE*
+ +
sort(...)
L.sort(cmp=None, key=None, reverse=False) -- stable sort *IN PLACE*;
+cmp(x, y) -> -1, 0, 1
+ +
+Data and other attributes inherited from __builtin__.list:
+
__new__ = <built-in method __new__ of type object at 0x729b20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +

+ + + + + +
 
+class Table(__builtin__.object)
    Methods defined here:
+
AddRecord(self, data)
Adds a new row to this worksheet.

+Args:
+  data: dict of strings Mapping of string values to column names. 

+Returns:
+  Record which represents this row of the spreadsheet.
+ +
Delete(self)
Deletes this worksheet from the spreadsheet.
+ +
FindRecords(self, query_string)
Performs a query against the worksheet to find rows which match.

+For details on query string syntax see the section on sq under
+http://code.google.com/apis/spreadsheets/reference.html#list_Parameters

+Args:
+  query_string: str Examples: 'name == john' to find all rows with john
+      in the name column, '(cost < 19.50 and name != toy) or cost > 500'

+Returns:
+  RecordResultSet with the first group of matches.
+ +
GetRecord(self, row_id=None, row_number=None)
Gets a single record from the worksheet based on row ID or number.

+Args:
+  row_id: The ID for the individual row.
+  row_number: str or int The position of the desired row. Numbering 
+      begins at 1, which refers to the second row in the worksheet since
+      the first row is used for column names.

+Returns:
+  Record for the desired row.
+ +
GetRecords(self, start_row, end_row)
Gets all rows between the start and end row numbers inclusive.

+Args:
+  start_row: str or int
+  end_row: str or int

+Returns:
+  RecordResultSet for the desired rows.
+ +
LookupFields(self)
Queries to find the column names in the first row of the worksheet.

+Useful when you have retrieved the table from the server and you don't 
+know the column names.
+ +
SetFields(self, fields)
Changes the contents of the cells in the first row of this worksheet.

+Args:
+  fields: list of strings The names in the list comprise the
+      first row of the worksheet. These names are converted into XML
+      tags by the server. To avoid changes during the translation
+      process I recommend using all lowercase alphabetic names. For
+      example ['somelongname', 'theothername']
+ +
__init__(self, name=None, worksheet_entry=None, database_client=None, spreadsheet_key=None, fields=None)
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Functions
       
ConvertStringsToColumnHeaders(proposed_headers)
Converts a list of strings to column names which spreadsheets accepts.

+When setting values in a record, the keys which represent column names must
+fit certain rules. They are all lower case, contain no spaces or special
+characters. If two columns have the same name after being sanitized, the 
+columns further to the right have _2, _3 _4, etc. appended to them.

+If there are column names which consist of all special characters, or if
+the column header is blank, an obfuscated value will be used for a column
+name. This method does not handle blank column names or column names with
+only special characters.
+

+ + + + + +
 
+Data
       __author__ = 'api.jscudder (Jeffrey Scudder)'

+ + + + + +
 
+Author
       api.jscudder (Jeffrey Scudder)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.urlfetch.html b/gdata.py-1.2.3/pydocs/gdata.urlfetch.html new file mode 100644 index 0000000..511191b --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.urlfetch.html @@ -0,0 +1,212 @@ + + +Python: module gdata.urlfetch + + + + +
 
+ 
gdata.urlfetch
index
/usr/local/svn/gdata-python-client/src/gdata/urlfetch.py
+

Provides HTTP functions for gdata.service to use on Google App Engine

+AppEngineHttpClient: Provides an HTTP request method which uses App Engine's
+   urlfetch API. Set the http_client member of a GDataService object to an
+   instance of an AppEngineHttpClient to allow the gdata library to run on
+   Google App Engine.

+run_on_appengine: Function which will modify an existing GDataService object
+   to allow it to run on App Engine. It works by creating a new instance of
+   the AppEngineHttpClient and replacing the GDataService object's 
+   http_client.

+HttpRequest: Function that wraps google.appengine.api.urlfetch.Fetch in a 
+    common interface which is used by gdata.service.GDataService. In other 
+    words, this module can be used as the gdata service request handler so 
+    that all HTTP requests will be performed by the hosting Google App Engine
+    server.

+

+ + + + + +
 
+Modules
       
StringIO
+
atom
+
google.appengine.api.urlfetch
+

+ + + + + +
 
+Classes
       
+
__builtin__.object +
+
+
HttpResponse +
+
+
atom.http_interface.GenericHttpClient(__builtin__.object) +
+
+
AppEngineHttpClient +
+
+
+

+ + + + + +
 
+class AppEngineHttpClient(atom.http_interface.GenericHttpClient)
    
Method resolution order:
+
AppEngineHttpClient
+
atom.http_interface.GenericHttpClient
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, headers=None)
+ +
request(self, operation, url, data=None, headers=None)
Performs an HTTP call to the server, supports GET, POST, PUT, and
+DELETE.

+Usage example, perform an HTTP GET on http://www.google.com/:
+  import atom.http
+  client = atom.http.HttpClient()
+  http_response = client.request('GET', 'http://www.google.com/')

+Args:
+  operation: str The HTTP operation to be performed. This is usually one
+      of 'GET', 'POST', 'PUT', or 'DELETE'
+  data: filestream, list of parts, or other object which can be converted
+      to a string. Should be set to None when performing a GET or DELETE.
+      If data is a file-like object which can be read, this method will
+      read a chunk of 100K bytes at a time and send them.
+      If the data is a list of parts to be sent, each part will be
+      evaluated and sent.
+  url: The full URL to which the request should be sent. Can be a string
+      or atom.url.Url.
+  headers: dict of strings. HTTP headers which should be sent
+      in the request.
+ +
+Methods inherited from atom.http_interface.GenericHttpClient:
+
delete(self, url, headers=None)
+ +
get(self, url, headers=None)
+ +
post(self, url, data, headers=None)
+ +
put(self, url, data, headers=None)
+ +
+Data descriptors inherited from atom.http_interface.GenericHttpClient:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Data and other attributes inherited from atom.http_interface.GenericHttpClient:
+
debug = False
+ +

+ + + + + + + +
 
+class HttpResponse(__builtin__.object)
Translates a urlfetch response to look like an httplib response.

+Used to allow the response from HttpRequest to be usable by gdata.service
+methods.
 
 Methods defined here:
+
__init__(self, urlfetch_response)
+ +
getheader(self, name)
+ +
read(self, length=None)
+ +
+Data descriptors defined here:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Functions
       
HttpRequest(service, operation, data, uri, extra_headers=None, url_params=None, escape_params=True, content_type='application/atom+xml')
Performs an HTTP call to the server, supports GET, POST, PUT, and DELETE.

+This function is deprecated, use AppEngineHttpClient.request instead.

+To use this module with gdata.service, you can set this module to be the
+http_request_handler so that HTTP requests use Google App Engine's urlfetch.
+import gdata.service
+import gdata.urlfetch
+gdata.service.http_request_handler = gdata.urlfetch

+Args:
+  service: atom.AtomService object which contains some of the parameters
+      needed to make the request. The following members are used to
+      construct the HTTP call: server (str), additional_headers (dict),
+      port (int), and ssl (bool).
+  operation: str The HTTP operation to be performed. This is usually one of
+      'GET', 'POST', 'PUT', or 'DELETE'
+  data: filestream, list of parts, or other object which can be
+      converted to a string.
+      Should be set to None when performing a GET or PUT.
+      If data is a file-like object which can be read, this method will read
+      a chunk of 100K bytes at a time and send them.
+      If the data is a list of parts to be sent, each part will be evaluated
+      and sent.
+  uri: The beginning of the URL to which the request should be sent.
+      Examples: '/', '/base/feeds/snippets',
+      '/m8/feeds/contacts/default/base'
+  extra_headers: dict of strings. HTTP headers which should be sent
+      in the request. These headers are in addition to those stored in
+      service.additional_headers.
+  url_params: dict of strings. Key value pairs to be added to the URL as
+      URL parameters. For example {'foo':'bar', 'test':'param'} will
+      become ?foo=bar&test=param.
+  escape_params: bool default True. If true, the keys and values in
+      url_params will be URL escaped when the form is constructed
+      (Special characters converted to %XX form.)
+  content_type: str The MIME type for the data being sent. Defaults to
+      'application/atom+xml', this is only used if data is set.
+
run_on_appengine(gdata_service)
Modifies a GDataService object to allow it to run on App Engine.

+Args:
+  gdata_service: An instance of AtomService, GDataService, or any
+      of their subclasses which has an http_client member.
+

+ + + + + +
 
+Data
       __author__ = 'api.jscudder (Jeff Scudder)'

+ + + + + +
 
+Author
       api.jscudder (Jeff Scudder)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.webmastertools.html b/gdata.py-1.2.3/pydocs/gdata.webmastertools.html new file mode 100644 index 0000000..fc36b65 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.webmastertools.html @@ -0,0 +1,1453 @@ + + +Python: package gdata.webmastertools + + + + +
 
+ 
gdata.webmastertools
index
/usr/local/svn/gdata-python-client/src/gdata/webmastertools/__init__.py
+

Contains extensions to Atom objects used with Google Webmaster Tools.

+

+ + + + + +
 
+Package Contents
       
service
+

+ + + + + +
 
+Classes
       
+
atom.AtomBase(atom.ExtensionContainer) +
+
+
CrawlRate +
EnhancedImageSearch +
GeoLocation +
Indexed +
MarkupLanguage +
PreferredDomain +
PublicationLabel +
SitemapMobile +
SitemapMobileMarkupLanguage +
SitemapNews +
SitemapNewsPublicationLabel +
SitemapStatus +
SitemapType +
SitemapUrlCount +
VerificationMethod +
VerificationMethodMeta +
Verified +
+
+
atom.Date(atom.AtomBase) +
+
+
Crawled +
SitemapLastDownloaded +
+
+
atom.Entry(atom.FeedEntryParent) +
+
+
SitemapsEntry(atom.Entry, LinkFinder) +
SitesEntry(atom.Entry, LinkFinder) +
+
+
atom.Feed(atom.Source) +
+
+
SitemapsFeed(atom.Feed, LinkFinder) +
SitesFeed(atom.Feed, LinkFinder) +
+
+
atom.LinkFinder(__builtin__.object) +
+
+
LinkFinder +
+
+
SitemapsEntry(atom.Entry, LinkFinder) +
SitemapsFeed(atom.Feed, LinkFinder) +
SitesEntry(atom.Entry, LinkFinder) +
SitesFeed(atom.Feed, LinkFinder) +
+
+
+
+
+

+ + + + + +
 
+class CrawlRate(atom.AtomBase)
    
Method resolution order:
+
CrawlRate
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class Crawled(atom.Date)
    
Method resolution order:
+
Crawled
+
atom.Date
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.Date:
+
__init__(self, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class EnhancedImageSearch(atom.AtomBase)
    
Method resolution order:
+
EnhancedImageSearch
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class GeoLocation(atom.AtomBase)
    
Method resolution order:
+
GeoLocation
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class Indexed(atom.AtomBase)
    
Method resolution order:
+
Indexed
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class LinkFinder(atom.LinkFinder)
   An "interface" providing methods to find link elements

+SitesEntry elements often contain multiple links which differ in the rel 
+attribute or content type. Often, developers are interested in a specific
+type of link so this class provides methods to find specific classes of links.

+This class is used as a mixin in SitesEntry.
 
 
Method resolution order:
+
LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
GetEditLink(self)
+ +
GetFeedLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +
GetNextLink(self)
+ +
+Data descriptors inherited from atom.LinkFinder:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class MarkupLanguage(atom.AtomBase)
    
Method resolution order:
+
MarkupLanguage
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class PreferredDomain(atom.AtomBase)
    
Method resolution order:
+
PreferredDomain
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class PublicationLabel(atom.AtomBase)
    
Method resolution order:
+
PublicationLabel
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class SitemapLastDownloaded(atom.Date)
    
Method resolution order:
+
SitemapLastDownloaded
+
atom.Date
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.Date:
+
__init__(self, text=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class SitemapMobile(atom.AtomBase)
    
Method resolution order:
+
SitemapMobile
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, markup_language=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class SitemapMobileMarkupLanguage(atom.AtomBase)
    
Method resolution order:
+
SitemapMobileMarkupLanguage
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class SitemapNews(atom.AtomBase)
    
Method resolution order:
+
SitemapNews
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, publication_label=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class SitemapNewsPublicationLabel(atom.AtomBase)
    
Method resolution order:
+
SitemapNewsPublicationLabel
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class SitemapStatus(atom.AtomBase)
    
Method resolution order:
+
SitemapStatus
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class SitemapType(atom.AtomBase)
    
Method resolution order:
+
SitemapType
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class SitemapUrlCount(atom.AtomBase)
    
Method resolution order:
+
SitemapUrlCount
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class SitemapsEntry(atom.Entry, LinkFinder)
   A Google Webmaster Tools meta Sitemaps Entry flavor of an Atom Entry
 
 
Method resolution order:
+
SitemapsEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, category=None, content=None, atom_id=None, link=None, title=None, updated=None, sitemap_type=None, sitemap_status=None, sitemap_last_downloaded=None, sitemap_url_count=None, sitemap_mobile_markup_language=None, sitemap_news_publication_label=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Data descriptors defined here:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from LinkFinder:
+
GetEditLink(self)
+ +
GetFeedLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +
GetNextLink(self)
+ +

+ + + + + + + +
 
+class SitemapsFeed(atom.Feed, LinkFinder)
   A Google Webmaster Tools meta Sitemaps feed flavor of an Atom Feed
 
 
Method resolution order:
+
SitemapsFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, category=None, content=None, atom_id=None, link=None, title=None, updated=None, entry=None, sitemap_mobile=None, sitemap_news=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Data descriptors defined here:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from LinkFinder:
+
GetEditLink(self)
+ +
GetFeedLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +
GetNextLink(self)
+ +

+ + + + + + + +
 
+class SitesEntry(atom.Entry, LinkFinder)
   A Google Webmaster Tools meta Entry flavor of an Atom Entry
 
 
Method resolution order:
+
SitesEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, category=None, content=None, atom_id=None, link=None, title=None, updated=None, entry_link=None, indexed=None, crawled=None, geolocation=None, preferred_domain=None, crawl_rate=None, enhanced_image_search=None, verified=None, verification_method=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Data descriptors defined here:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from LinkFinder:
+
GetEditLink(self)
+ +
GetFeedLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +
GetNextLink(self)
+ +

+ + + + + + + +
 
+class SitesFeed(atom.Feed, LinkFinder)
   A Google Webmaster Tools meta Sites feed flavor of an Atom Feed
 
 
Method resolution order:
+
SitesFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, start_index=None, atom_id=None, title=None, entry=None, category=None, link=None, updated=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  category: list (optional) A list of Category instances
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors defined here:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from LinkFinder:
+
GetEditLink(self)
+ +
GetFeedLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +
GetNextLink(self)
+ +

+ + + + + +
 
+class VerificationMethod(atom.AtomBase)
    
Method resolution order:
+
VerificationMethod
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text=None, in_use=None, meta=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class VerificationMethodMeta(atom.AtomBase)
    
Method resolution order:
+
VerificationMethodMeta
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, text=None, name=None, content=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+class Verified(atom.AtomBase)
    
Method resolution order:
+
Verified
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + +
 
+Functions
       
CrawlRateFromString(xml_string)
+
CrawledFromString(xml_string)
+
EnhancedImageSearchFromString(xml_string)
+
GeoLocationFromString(xml_string)
+
IndexedFromString(xml_string)
+
MarkupLanguageFromString(xml_string)
+
PreferredDomainFromString(xml_string)
+
PublicationLabelFromString(xml_string)
+
SitemapLastDownloadedFromString(xml_string)
+
SitemapMobileFromString(xml_string)
+
SitemapMobileMarkupLanguageFromString(xml_string)
+
SitemapNewsFromString(xml_string)
+
SitemapNewsPublicationLabelFromString(xml_string)
+
SitemapStatusFromString(xml_string)
+
SitemapTypeFromString(xml_string)
+
SitemapUrlCountFromString(xml_string)
+
SitemapsEntryFromString(xml_string)
+
SitemapsFeedFromString(xml_string)
+
SitesEntryFromString(xml_string)
+
SitesFeedFromString(xml_string)
+
VerificationMethodFromString(xml_string)
+
VerificationMethodMetaFromString(xml_string)
+
VerifiedFromString(xml_string)
+

+ + + + + +
 
+Data
       GWEBMASTERTOOLS_NAMESPACE = 'http://schemas.google.com/webmasters/tools/2007'
+GWEBMASTERTOOLS_TEMPLATE = '{http://schemas.google.com/webmasters/tools/2007}%s'
+__author__ = 'livibetter (Yu-Jie Lin)'

+ + + + + +
 
+Author
       livibetter (Yu-Jie Lin)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.webmastertools.service.html b/gdata.py-1.2.3/pydocs/gdata.webmastertools.service.html new file mode 100644 index 0000000..fcae498 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.webmastertools.service.html @@ -0,0 +1,974 @@ + + +Python: module gdata.webmastertools.service + + + + +
 
+ 
gdata.webmastertools.service
index
/usr/local/svn/gdata-python-client/src/gdata/webmastertools/service.py
+

GWebmasterToolsService extends the GDataService to streamline
+Google Webmaster Tools operations.

+  GWebmasterToolsService: Provides methods to query feeds and manipulate items.
+                          Extends GDataService.

+

+ + + + + +
 
+Modules
       
atom
+
gdata
+
urllib
+
gdata.webmastertools
+

+ + + + + +
 
+Classes
       
+
exceptions.Exception(exceptions.BaseException) +
+
+
Error +
+
+
RequestError +
+
+
+
+
gdata.service.GDataService(atom.service.AtomService) +
+
+
GWebmasterToolsService +
+
+
+

+ + + + + +
 
+class Error(exceptions.Exception)
    
Method resolution order:
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors defined here:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + + + +
 
+class GWebmasterToolsService(gdata.service.GDataService)
   Client for the Google Webmaster Tools service.
 
 
Method resolution order:
+
GWebmasterToolsService
+
gdata.service.GDataService
+
atom.service.AtomService
+
__builtin__.object
+
+
+Methods defined here:
+
AddMobileSitemap(self, site_uri, sitemap_uri, sitemap_mobile_markup_language='XHTML', uri='https://www.google.com/webmasters/tools/feeds/%(site_id)s/sitemaps/', url_params=None, escape_params=True, converter=None)
Adds a mobile sitemap to a site.

+Args: 
+  site_uri: str URI of which site to add sitemap for.
+  sitemap_uri: str URI of sitemap to add to a site.
+  sitemap_mobile_markup_language: str Format of added sitemap. Valid types:
+                                  XHTML, WML, or cHTML.
+  uri: str (optional) URI template to add a sitemap.
+       Default SITEMAP_FEED_TEMPLATE.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the insertion request. 
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.
+  converter: func (optional) Function which is executed on the server's
+      response before it is returned. Usually this is a function like
+      SitemapsEntryFromString which will parse the response and turn it into
+      an object.

+Returns:
+  If converter is defined, the results of running converter on the server's
+  response. Otherwise, it will be a SitemapsEntry object.
+ +
AddNewsSitemap(self, site_uri, sitemap_uri, sitemap_news_publication_label, uri='https://www.google.com/webmasters/tools/feeds/%(site_id)s/sitemaps/', url_params=None, escape_params=True, converter=None)
Adds a news sitemap to a site.

+Args: 
+  site_uri: str URI of which site to add sitemap for.
+  sitemap_uri: str URI of sitemap to add to a site.
+  sitemap_news_publication_label: str, list of str Publication Labels for
+                                  sitemap.
+  uri: str (optional) URI template to add a sitemap.
+       Default SITEMAP_FEED_TEMPLATE.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the insertion request. 
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.
+  converter: func (optional) Function which is executed on the server's
+      response before it is returned. Usually this is a function like
+      SitemapsEntryFromString which will parse the response and turn it into
+      an object.

+Returns:
+  If converter is defined, the results of running converter on the server's
+  response. Otherwise, it will be a SitemapsEntry object.
+ +
AddSite(self, site_uri, uri='https://www.google.com/webmasters/tools/feeds/sites/', url_params=None, escape_params=True, converter=None)
Adds a site to Google Webmaster Tools.

+Args: 
+  site_uri: str URI of which site to add.
+  uri: str (optional) URI to add a site.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the insertion request. 
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.
+  converter: func (optional) Function which is executed on the server's
+      response before it is returned. Usually this is a function like
+      SitesEntryFromString which will parse the response and turn it into
+      an object.

+Returns:
+  If converter is defined, the results of running converter on the server's
+  response. Otherwise, it will be a SitesEntry object.
+ +
AddSitemap(self, site_uri, sitemap_uri, sitemap_type='WEB', uri='https://www.google.com/webmasters/tools/feeds/%(site_id)s/sitemaps/', url_params=None, escape_params=True, converter=None)
Adds a regular sitemap to a site.

+Args: 
+  site_uri: str URI of which site to add sitemap for.
+  sitemap_uri: str URI of sitemap to add to a site.
+  sitemap_type: str Type of added sitemap. Valid types: WEB, VIDEO, or CODE.
+  uri: str (optional) URI template to add a sitemap.
+       Default SITEMAP_FEED_TEMPLATE.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the insertion request. 
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.
+  converter: func (optional) Function which is executed on the server's
+      response before it is returned. Usually this is a function like
+      SitemapsEntryFromString which will parse the response and turn it into
+      an object.

+Returns:
+  If converter is defined, the results of running converter on the server's
+  response. Otherwise, it will be a SitemapsEntry object.
+ +
DeleteSite(self, site_uri, uri='https://www.google.com/webmasters/tools/feeds/sites/%s', url_params=None, escape_params=True)
Removes a site from Google Webmaster Tools.

+Args: 
+  site_uri: str URI of which site to remove.
+  uri: str (optional) A URI template to send DELETE request.
+       Default SITE_TEMPLATE.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the insertion request. 
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.

+Returns:
+  True if the delete succeeded.
+ +
DeleteSitemap(self, site_uri, sitemap_uri, uri='https://www.google.com/webmasters/tools/feeds/%(site_id)s/sitemaps/%(sitemap_id)s', url_params=None, escape_params=True)
Removes a sitemap from a site.

+Args: 
+  site_uri: str URI of which site to remove a sitemap from.
+  sitemap_uri: str URI of sitemap to remove from a site.
+  uri: str (optional) A URI template to send DELETE request.
+       Default SITEMAP_TEMPLATE.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the insertion request. 
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.

+Returns:
+  True if the delete succeeded.
+ +
GetSitemapsFeed(self, site_uri, uri='https://www.google.com/webmasters/tools/feeds/%(site_id)s/sitemaps/', converter=<function SitemapsFeedFromString at 0x9e2cf8>)
Gets sitemaps feed of a site.

+Args:
+  site_uri: str (optional) URI of which site to retrieve its sitemaps feed.
+  uri: str (optional) URI to retrieve sites feed.
+  converter: func (optional) Function which is executed on the server's
+      response before it is returned. Usually this is a function like
+      SitemapsFeedFromString which will parse the response and turn it into
+      an object.

+Returns:
+  If converter is defined, the results of running converter on the server's
+  response. Otherwise, it will be a SitemapsFeed object.
+ +
GetSitesFeed(self, uri='https://www.google.com/webmasters/tools/feeds/sites/', converter=<function SitesFeedFromString at 0x9e27d0>)
Gets sites feed.

+Args:
+  uri: str (optional) URI to retrieve sites feed.
+  converter: func (optional) Function which is executed on the server's
+      response before it is returned. Usually this is a function like
+      SitesFeedFromString which will parse the response and turn it into
+      an object.

+Returns:
+  If converter is defined, the results of running converter on the server's
+  response. Otherwise, it will be a SitesFeed object.
+ +
UpdateCrawlRate(self, site_uri, crawl_rate, uri='https://www.google.com/webmasters/tools/feeds/sites/%s', url_params=None, escape_params=True, converter=None)
Updates crawl rate setting of a site.

+Args: 
+  site_uri: str URI of which site to add sitemap for.
+  crawl_rate: str The crawl rate for a site. Valid values are 'slower',
+              'normal', and 'faster'.
+  uri: str (optional) URI template to update a site.
+       Default SITE_TEMPLATE.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the insertion request. 
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.
+  converter: func (optional) Function which is executed on the server's
+      response before it is returned. Usually this is a function like
+      SitemapsEntryFromString which will parse the response and turn it into
+      an object.

+Returns:
+  If converter is defined, the results of running converter on the server's
+  response. Otherwise, it will be a SitesEntry object.
+ +
UpdateEnhancedImageSearch(self, site_uri, enhanced_image_search, uri='https://www.google.com/webmasters/tools/feeds/sites/%s', url_params=None, escape_params=True, converter=None)
Updates enhanced image search setting of a site.

+Args: 
+  site_uri: str URI of which site to add sitemap for.
+  enhanced_image_search: str The enhanced image search setting for a site.
+                         Valid values are 'true', and 'false'.
+  uri: str (optional) URI template to update a site.
+       Default SITE_TEMPLATE.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the insertion request. 
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.
+  converter: func (optional) Function which is executed on the server's
+      response before it is returned. Usually this is a function like
+      SitemapsEntryFromString which will parse the response and turn it into
+      an object.

+Returns:
+  If converter is defined, the results of running converter on the server's
+  response. Otherwise, it will be a SitesEntry object.
+ +
UpdateGeoLocation(self, site_uri, geolocation, uri='https://www.google.com/webmasters/tools/feeds/sites/%s', url_params=None, escape_params=True, converter=None)
Updates geolocation setting of a site.

+Args: 
+  site_uri: str URI of which site to add sitemap for.
+  geolocation: str The geographic location. Valid values are listed in
+               http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
+  uri: str (optional) URI template to update a site.
+       Default SITE_TEMPLATE.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the insertion request. 
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.
+  converter: func (optional) Function which is executed on the server's
+      response before it is returned. Usually this is a function like
+      SitemapsEntryFromString which will parse the response and turn it into
+      an object.

+Returns:
+  If converter is defined, the results of running converter on the server's
+  response. Otherwise, it will be a SitesEntry object.
+ +
UpdatePreferredDomain(self, site_uri, preferred_domain, uri='https://www.google.com/webmasters/tools/feeds/sites/%s', url_params=None, escape_params=True, converter=None)
Updates preferred domain setting of a site.

+Note that if using 'preferwww', will also need www.example.com in account to
+take effect.

+Args: 
+  site_uri: str URI of which site to add sitemap for.
+  preferred_domain: str The preferred domain for a site. Valid values are 'none',
+                    'preferwww', and 'prefernowww'.
+  uri: str (optional) URI template to update a site.
+       Default SITE_TEMPLATE.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the insertion request. 
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.
+  converter: func (optional) Function which is executed on the server's
+      response before it is returned. Usually this is a function like
+      SitemapsEntryFromString which will parse the response and turn it into
+      an object.

+Returns:
+  If converter is defined, the results of running converter on the server's
+  response. Otherwise, it will be a SitesEntry object.
+ +
VerifySite(self, site_uri, verification_method, uri='https://www.google.com/webmasters/tools/feeds/sites/%s', url_params=None, escape_params=True, converter=None)
Requests a verification of a site.

+Args: 
+  site_uri: str URI of which site to add sitemap for.
+  verification_method: str The method to verify a site. Valid values are
+                       'htmlpage', and 'metatag'.
+  uri: str (optional) URI template to update a site.
+       Default SITE_TEMPLATE.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the insertion request. 
+  escape_params: boolean (optional) If true, the url_parameters will be
+                 escaped before they are included in the request.
+  converter: func (optional) Function which is executed on the server's
+      response before it is returned. Usually this is a function like
+      SitemapsEntryFromString which will parse the response and turn it into
+      an object.

+Returns:
+  If converter is defined, the results of running converter on the server's
+  response. Otherwise, it will be a SitesEntry object.
+ +
__init__(self, email=None, password=None, source=None, server='www.google.com', api_key=None, additional_headers=None, handler=None)
+ +
+Methods inherited from gdata.service.GDataService:
+
AuthSubTokenInfo(self)
Fetches the AuthSub token's metadata from the server.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
ClientLogin(self, username, password, account_type=None, service=None, auth_service_url=None, source=None, captcha_token=None, captcha_response=None)
Convenience method for authenticating using ProgrammaticLogin. 

+Sets values for email, password, and other optional members.

+Args:
+  username:
+  password:
+  account_type: string (optional)
+  service: string (optional)
+  auth_service_url: string (optional)
+  captcha_token: string (optional)
+  captcha_response: string (optional)
+ +
Delete(self, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4)
Deletes the entry at the given URI.

+Args:
+  uri: string The URI of the entry to be deleted. Example: 
+       '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.

+Returns:
+  True if the entry was deleted.
+ +
FetchOAuthRequestToken(self, scopes=None, extra_parameters=None)
Fetches OAuth request token and returns it.

+Args:
+  scopes: string or list of string base URL(s) of the service(s) to be
+      accessed. If None, then this method tries to determine the
+      scope(s) from the current service.
+  extra_parameters: dict (optional) key-value pairs as any additional
+      parameters to be included in the URL and signature while making a
+      request for fetching an OAuth request token. All the OAuth parameters
+      are added by default. But if provided through this argument, any
+      default parameters will be overwritten. For e.g. a default parameter
+      oauth_version 1.0 can be overwritten if
+      extra_parameters = {'oauth_version': '2.0'}
+  
+Returns:
+  The fetched request token as a gdata.auth.OAuthToken object.
+  
+Raises:
+  FetchingOAuthRequestTokenFailed if the server responded to the request
+  with an error.
+ +
GenerateAuthSubURL(self, next, scope, secure=False, session=True, domain='default')
Generate a URL at which the user will login and be redirected back.

+Users enter their credentials on a Google login page and a token is sent
+to the URL specified in next. See documentation for AuthSub login at:
+http://code.google.com/apis/accounts/docs/AuthSub.html

+Args:
+  next: string The URL user will be sent to after logging in.
+  scope: string or list of strings. The URLs of the services to be 
+         accessed.
+  secure: boolean (optional) Determines whether or not the issued token
+          is a secure token.
+  session: boolean (optional) Determines whether or not the issued token
+           can be upgraded to a session token.
+ +
GenerateOAuthAuthorizationURL(self, request_token=None, callback_url=None, extra_params=None, include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope')
Generates URL at which user will login to authorize the request token.

+Args:
+  request_token: gdata.auth.OAuthToken (optional) OAuth request token.
+      If not specified, then the current token will be used if it is of
+      type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.    
+  callback_url: string (optional) The URL user will be sent to after
+      logging in and granting access.
+  extra_params: dict (optional) Additional parameters to be sent.
+  include_scopes_in_callback: Boolean (default=False) if set to True, and
+      if 'callback_url' is present, the 'callback_url' will be modified to
+      include the scope(s) from the request token as a URL parameter. The
+      key for the 'callback' URL's scope parameter will be
+      OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as
+      a parameter to the 'callback' URL, is that the page which receives
+      the OAuth token will be able to tell which URLs the token grants
+      access to.
+  scopes_param_prefix: string (default='oauth_token_scope') The URL
+      parameter key which maps to the list of valid scopes for the token.
+      This URL parameter will be included in the callback URL along with
+      the scopes of the token as value if include_scopes_in_callback=True.
+      
+Returns:
+  A string URL at which the user is required to login.

+Raises:
+  NonOAuthToken if the user's request token is not an OAuth token or if a
+  request token was not available.
+ +
Get(self, uri, extra_headers=None, redirects_remaining=4, encoding='UTF-8', converter=None)
Query the GData API with the given URI

+The uri is the portion of the URI after the server value 
+(ex: www.google.com).

+To perform a query against Google Base, set the server to 
+'base.google.com' and set the uri to '/base/feeds/...', where ... is 
+your query. For example, to find snippets for all digital cameras uri 
+should be set to: '/base/feeds/snippets?bq=digital+camera'

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to 
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and 
+                 Authorization headers.
+  redirects_remaining: int (optional) Tracks the number of additional
+      redirects this method will allow. If the service object receives
+      a redirect and remaining is 0, it will not follow the redirect. 
+      This was added to avoid infinite redirect loops.
+  encoding: string (optional) The character encoding for the server's
+      response. Default is UTF-8
+  converter: func (optional) A function which will transform
+      the server's results before it is returned. Example: use 
+      GDataFeedFromString to parse the server response as if it
+      were a GDataFeed.

+Returns:
+  If there is no ResultsTransformer specified in the call, a GDataFeed 
+  or GDataEntry depending on which is sent from the server. If the 
+  response is niether a feed or entry and there is no ResultsTransformer,
+  return a string. If there is a ResultsTransformer, the returned value 
+  will be that of the ResultsTransformer function.
+ +
GetAuthSubToken(self)
Returns the AuthSub token as a string.

+If the token is an gdta.auth.AuthSubToken, the Authorization Label
+("AuthSub token") is removed.

+This method examines the current_token to see if it is an AuthSubToken
+or SecureAuthSubToken. If not, it searches the token_store for a token
+which matches the current scope.

+The current scope is determined by the service name string member.

+Returns:
+  If the current_token is set to an AuthSubToken/SecureAuthSubToken,
+  return the token string. If there is no current_token, a token string
+  for a token which matches the service object's default scope is returned.
+  If there are no tokens valid for the scope, returns None.
+ +
GetClientLoginToken(self)
Returns the token string for the current token or a token matching the 
+service scope.

+If the current_token is a ClientLoginToken, the token string for 
+the current token is returned. If the current_token is not set, this method
+searches for a token in the token_store which is valid for the service 
+object's current scope.

+The current scope is determined by the service name string member.
+The token string is the end of the Authorization header, it doesn not
+include the ClientLogin label.
+ +
GetEntry(self, uri, extra_headers=None)
Query the GData API with the given URI and receive an Entry.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataEntry built from the XML in the server's response.
+ +
GetFeed(self, uri, extra_headers=None, converter=<function GDataFeedFromString at 0x7f1118f21b18>)
Query the GData API with the given URI and receive a Feed.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataFeed built from the XML in the server's response.
+ +
GetMedia(self, uri, extra_headers=None)
Returns a MediaSource containing media and its metadata from the given
+URI string.
+ +
GetNext(self, feed)
Requests the next 'page' of results in the feed.

+This method uses the feed's next link to request an additional feed
+and uses the class of the feed to convert the results of the GET request.

+Args:
+  feed: atom.Feed or a subclass. The feed should contain a next link and
+      the type of the feed will be applied to the results from the 
+      server. The new feed which is returned will be of the same class
+      as this feed which was passed in.

+Returns:
+  A new feed representing the next set of results in the server's feed.
+  The type of this feed will match that of the feed argument.
+ +
Post(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert or update  data into a GData service at the given URI.

+Args:
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri.
+  uri: string The location (feed) to which the data should be inserted.
+       Example: '/base/feeds/items'.
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the
+      server's response. Often this is a function like
+      GDataEntryFromString which will parse the body of the server's
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert data into a GData service at the given URI.

+Args:
+  verb: string, either 'POST' or 'PUT'
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri. 
+  uri: string The location (feed) to which the data should be inserted. 
+       Example: '/base/feeds/items'. 
+  extra_headers: dict (optional) HTTP headers which are to be included. 
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
ProgrammaticLogin(self, captcha_token=None, captcha_response=None)
Authenticates the user and sets the GData Auth token.

+Login retreives a temporary auth token which must be used with all
+requests to GData services. The auth token is stored in the GData client
+object.

+Login is also used to respond to a captcha challenge. If the user's login
+attempt failed with a CaptchaRequired error, the user can respond by
+calling Login with the captcha token and the answer to the challenge.

+Args:
+  captcha_token: string (optional) The identifier for the captcha challenge
+                 which was presented to the user.
+  captcha_response: string (optional) The user's answer to the captch 
+                    challenge.

+Raises:
+  CaptchaRequired if the login service will require a captcha response
+  BadAuthentication if the login service rejected the username or password
+  Error if the login service responded with a 403 different from the above
+ +
Put(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=3, media_source=None, converter=None)
Updates an entry at the given URI.

+Args:
+  data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The 
+        XML containing the updated data.
+  uri: string A URI indicating entry to which the update will be applied.
+       Example: '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the put succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
RevokeAuthSubToken(self)
Revokes an existing AuthSub token.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
RevokeOAuthToken(self)
Revokes an existing OAuth token.

+Raises:
+  NonOAuthToken if the user's auth token is not an OAuth token.
+  RevokingOAuthTokenFailed if request for revoking an OAuth token failed.
+ +
SetAuthSubToken(self, token, scopes=None, rsa_key=None)
Sets the token sent in requests to an AuthSub token.

+Sets the current_token and attempts to add the token to the token_store.

+Only use this method if you have received a token from the AuthSub
+service. The auth token is set automatically when UpgradeToSessionToken()
+is used. See documentation for Google AuthSub here:
+http://code.google.com/apis/accounts/AuthForWebApps.html 

+Args:
+ token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string
+        The token returned by the AuthSub service. If the token is an
+        AuthSubToken or SecureAuthSubToken, the scope information stored in
+        the token is used. If the token is a string, the scopes parameter is
+        used to determine the valid scopes.
+ scopes: list of URLs for which the token is valid. This is only used
+         if the token parameter is a string.
+ rsa_key: string (optional) Private key required for RSA_SHA1 signature
+          method.  This parameter is necessary if the token is a string
+          representing a secure token.
+ +
SetClientLoginToken(self, token, scopes=None)
Sets the token sent in requests to a ClientLogin token.

+This method sets the current_token to a new ClientLoginToken and it 
+also attempts to add the ClientLoginToken to the token_store.

+Only use this method if you have received a token from the ClientLogin
+service. The auth_token is set automatically when ProgrammaticLogin()
+is used. See documentation for Google ClientLogin here:
+http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html

+Args:
+  token: string or instance of a ClientLoginToken.
+ +
SetOAuthInputParameters(self, signature_method, consumer_key, consumer_secret=None, rsa_key=None, two_legged_oauth=False)
Sets parameters required for using OAuth authentication mechanism.

+NOTE: Though consumer_secret and rsa_key are optional, either of the two
+is required depending on the value of the signature_method.

+Args:
+  signature_method: class which provides implementation for strategy class
+      oauth.oauth.OAuthSignatureMethod. Signature method to be used for
+      signing each request. Valid implementations are provided as the
+      constants defined by gdata.auth.OAuthSignatureMethod. Currently
+      they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
+      gdata.auth.OAuthSignatureMethod.HMAC_SHA1
+  consumer_key: string Domain identifying third_party web application.
+  consumer_secret: string (optional) Secret generated during registration.
+      Required only for HMAC_SHA1 signature method.
+  rsa_key: string (optional) Private key required for RSA_SHA1 signature
+      method.
+  two_legged_oauth: string (default=False) Enables two-legged OAuth process.
+ +
SetOAuthToken(self, oauth_token)
Attempts to set the current token and add it to the token store.

+The oauth_token can be any OAuth token i.e. unauthorized request token,
+authorized request token or access token.
+This method also attempts to add the token to the token store.
+Use this method any time you want the current token to point to the
+oauth_token passed. For e.g. call this method with the request token
+you receive from FetchOAuthRequestToken.

+Args:
+  request_token: gdata.auth.OAuthToken OAuth request token.
+ +
UpgradeToOAuthAccessToken(self, authorized_request_token=None, oauth_version='1.0')
Upgrades the authorized request token to an access token.

+Args:
+  authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request
+      token. If not specified, then the current token will be used if it is
+      of type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.
+  oauth_version: str (default='1.0') oauth_version parameter. All other
+      'oauth_' parameters are added by default. This parameter too, is
+      added by default but here you can override it's value.
+      
+Raises:
+  NonOAuthToken if the user's authorized request token is not an OAuth
+  token or if an authorized request token was not available.
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
UpgradeToSessionToken(self, token=None)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         (optional) which is good for a single use but can be upgraded
+         to a session token. If no token is passed in, the token
+         is found by looking in the token_store by looking for a token
+         for the current scope.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
upgrade_to_session_token(self, token)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         which is good for a single use but can be upgraded to a
+         session token.

+Returns:
+  The upgraded token as a gdata.auth.AuthSubToken object.

+Raises:
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
+Data descriptors inherited from gdata.service.GDataService:
+
captcha_token
+
Get the captcha token for a login request.
+
+
captcha_url
+
Get the captcha URL for a login request.
+
+
source
+
The source is the name of the application making the request. 
+It should be in the form company_id-app_name-app_version
+
+
+Data and other attributes inherited from gdata.service.GDataService:
+
auth_token = None
+ +
handler = None
+ +
tokens = None
+ +
+Methods inherited from atom.service.AtomService:
+
UseBasicAuth(self, username, password, for_proxy=False)
Sets an Authenticaiton: Basic HTTP header containing plaintext.

+Deprecated, use use_basic_auth instead.

+The username and password are base64 encoded and added to an HTTP header
+which will be included in each request. Note that your username and 
+password are sent in plaintext.

+Args:
+  username: str
+  password: str
+ +
request(self, operation, url, data=None, headers=None, url_params=None)
+ +
use_basic_auth(self, username, password, scopes=None)
+ +
+Data descriptors inherited from atom.service.AtomService:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
debug
+
If True, HTTP debug information is printed.
+
+
override_token
+
+
+Data and other attributes inherited from atom.service.AtomService:
+
auto_set_current_token = True
+ +
auto_store_tokens = True
+ +
current_token = None
+ +
port = 80
+ +
ssl = False
+ +

+ + + + + +
 
+class RequestError(Error)
    
Method resolution order:
+
RequestError
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + +
 
+Data
       FEED_BASE = 'https://www.google.com/webmasters/tools/feeds/'
+SITEMAPS_FEED_TEMPLATE = 'https://www.google.com/webmasters/tools/feeds/%(site_id)s/sitemaps/'
+SITEMAP_TEMPLATE = 'https://www.google.com/webmasters/tools/feeds/%(site_id)s/sitemaps/%(sitemap_id)s'
+SITES_FEED = 'https://www.google.com/webmasters/tools/feeds/sites/'
+SITE_TEMPLATE = 'https://www.google.com/webmasters/tools/feeds/sites/%s'
+__author__ = 'livibetter (Yu-Jie Lin)'

+ + + + + +
 
+Author
       livibetter (Yu-Jie Lin)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.youtube.html b/gdata.py-1.2.3/pydocs/gdata.youtube.html new file mode 100644 index 0000000..0242c59 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.youtube.html @@ -0,0 +1,3197 @@ + + +Python: package gdata.youtube + + + + +
 
+ 
gdata.youtube
index
/usr/local/svn/gdata-python-client/src/gdata/youtube/__init__.py
+

# Copyright (C) 2008 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

+

+ + + + + +
 
+Package Contents
       
service
+

+ + + + + +
 
+Classes
       
+
atom.AtomBase(atom.ExtensionContainer) +
+
+
Age +
Books +
Comments +
Company +
Description +
FirstName +
Gender +
Hobbies +
Hometown +
LastName +
Location +
Movies +
Music +
NoEmbed +
Occupation +
Position +
Private +
QueryString +
Racy +
Rating +
Recorded +
Relationship +
School +
Statistics +
Status +
Username +
+
+
gdata.GDataEntry(atom.Entry, gdata.LinkFinder) +
+
+
YouTubeContactEntry +
YouTubePlaylistEntry +
YouTubePlaylistVideoEntry +
YouTubeSubscriptionEntry +
YouTubeUserEntry +
YouTubeVideoCommentEntry +
YouTubeVideoEntry +
YouTubeVideoResponseEntry +
+
+
gdata.GDataFeed(atom.Feed, gdata.LinkFinder) +
+
+
YouTubeContactFeed(gdata.GDataFeed, gdata.LinkFinder) +
YouTubePlaylistFeed(gdata.GDataFeed, gdata.LinkFinder) +
YouTubePlaylistVideoFeed(gdata.GDataFeed, gdata.LinkFinder) +
YouTubeSubscriptionFeed(gdata.GDataFeed, gdata.LinkFinder) +
YouTubeVideoCommentFeed(gdata.GDataFeed, gdata.LinkFinder) +
YouTubeVideoFeed(gdata.GDataFeed, gdata.LinkFinder) +
YouTubeVideoResponseFeed(gdata.GDataFeed, gdata.LinkFinder) +
+
+
gdata.LinkFinder(atom.LinkFinder) +
+
+
YouTubeContactFeed(gdata.GDataFeed, gdata.LinkFinder) +
YouTubePlaylistFeed(gdata.GDataFeed, gdata.LinkFinder) +
YouTubePlaylistVideoFeed(gdata.GDataFeed, gdata.LinkFinder) +
YouTubeSubscriptionFeed(gdata.GDataFeed, gdata.LinkFinder) +
YouTubeVideoCommentFeed(gdata.GDataFeed, gdata.LinkFinder) +
YouTubeVideoFeed(gdata.GDataFeed, gdata.LinkFinder) +
YouTubeVideoResponseFeed(gdata.GDataFeed, gdata.LinkFinder) +
+
+
+

+ + + + + + + +
 
+class Age(atom.AtomBase)
   The YouTube Age element
 
 
Method resolution order:
+
Age
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Books(atom.AtomBase)
   The YouTube Books element
 
 
Method resolution order:
+
Books
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Comments(atom.AtomBase)
   The GData Comments element
 
 
Method resolution order:
+
Comments
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, feed_link=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Company(atom.AtomBase)
   The YouTube Company element
 
 
Method resolution order:
+
Company
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Description(atom.AtomBase)
   The YouTube Description element.
 
 
Method resolution order:
+
Description
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class FirstName(atom.AtomBase)
   The YouTube FirstName element
 
 
Method resolution order:
+
FirstName
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Gender(atom.AtomBase)
   The YouTube Gender element
 
 
Method resolution order:
+
Gender
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Hobbies(atom.AtomBase)
   The YouTube Hobbies element
 
 
Method resolution order:
+
Hobbies
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Hometown(atom.AtomBase)
   The YouTube Hometown element
 
 
Method resolution order:
+
Hometown
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class LastName(atom.AtomBase)
   The YouTube LastName element
 
 
Method resolution order:
+
LastName
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Location(atom.AtomBase)
   The YouTube Location element
 
 
Method resolution order:
+
Location
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Movies(atom.AtomBase)
   The YouTube Movies element
 
 
Method resolution order:
+
Movies
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Music(atom.AtomBase)
   The YouTube Music element
 
 
Method resolution order:
+
Music
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class NoEmbed(atom.AtomBase)
   The YouTube VideoShare element. Whether a video can be embedded or not.
 
 
Method resolution order:
+
NoEmbed
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Occupation(atom.AtomBase)
   The YouTube Occupation element
 
 
Method resolution order:
+
Occupation
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Position(atom.AtomBase)
   The YouTube Position element. The position in a playlist feed.
 
 
Method resolution order:
+
Position
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Private(atom.AtomBase)
   The YouTube Private element.
 
 
Method resolution order:
+
Private
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class QueryString(atom.AtomBase)
   The YouTube QueryString element
 
 
Method resolution order:
+
QueryString
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Racy(atom.AtomBase)
   The YouTube Racy element.
 
 
Method resolution order:
+
Racy
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Rating(atom.AtomBase)
   The GData Rating element
 
 
Method resolution order:
+
Rating
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, min=None, max=None, num_raters=None, average=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Recorded(atom.AtomBase)
   The YouTube Recorded element
 
 
Method resolution order:
+
Recorded
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Relationship(atom.AtomBase)
   The YouTube Relationship element
 
 
Method resolution order:
+
Relationship
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class School(atom.AtomBase)
   The YouTube School element
 
 
Method resolution order:
+
School
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Statistics(atom.AtomBase)
   The YouTube Statistics element.
 
 
Method resolution order:
+
Statistics
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, view_count=None, video_watch_count=None, favorite_count=None, subscriber_count=None, last_web_access=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Status(atom.AtomBase)
   The YouTube Status element
 
 
Method resolution order:
+
Status
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class Username(atom.AtomBase)
   The YouTube Username element
 
 
Method resolution order:
+
Username
+
atom.AtomBase
+
atom.ExtensionContainer
+
__builtin__.object
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__init__(self, extension_elements=None, extension_attributes=None, text=None)
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+

+ + + + + + + +
 
+class YouTubeContactEntry(gdata.GDataEntry)
   Represents a contact entry.
 
 
Method resolution order:
+
YouTubeContactEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, username=None, status=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class YouTubeContactFeed(gdata.GDataFeed, gdata.LinkFinder)
   Represents a feed of a users contacts.
 
 
Method resolution order:
+
YouTubeContactFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class YouTubePlaylistEntry(gdata.GDataEntry)
   Represents a playlist in YouTube.
 
 
Method resolution order:
+
YouTubePlaylistEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, private=None, feed_link=None, description=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class YouTubePlaylistFeed(gdata.GDataFeed, gdata.LinkFinder)
   Represents a feed of a user's playlists
 
 
Method resolution order:
+
YouTubePlaylistFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class YouTubePlaylistVideoEntry(gdata.GDataEntry)
   Represents a YouTubeVideoEntry on a YouTubePlaylist.
 
 
Method resolution order:
+
YouTubePlaylistVideoEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, feed_link=None, description=None, rating=None, comments=None, statistics=None, location=None, position=None, media=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class YouTubePlaylistVideoFeed(gdata.GDataFeed, gdata.LinkFinder)
   Represents a feed of video entry on a playlist.
 
 
Method resolution order:
+
YouTubePlaylistVideoFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class YouTubeSubscriptionEntry(gdata.GDataEntry)
   Represents a subscription entry on YouTube.
 
 
Method resolution order:
+
YouTubeSubscriptionEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
GetSubscriptionType(self)
Retrieve the type of this subscription.

+Returns:
+  A string that is either 'channel, 'query' or 'favorites'
+ +
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, username=None, query_string=None, feed_link=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class YouTubeSubscriptionFeed(gdata.GDataFeed, gdata.LinkFinder)
   Represents a feed of a users subscriptions.
 
 
Method resolution order:
+
YouTubeSubscriptionFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class YouTubeUserEntry(gdata.GDataEntry)
   Represents a user on YouTube.
 
 
Method resolution order:
+
YouTubeUserEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, username=None, first_name=None, last_name=None, age=None, books=None, gender=None, company=None, description=None, hobbies=None, hometown=None, location=None, movies=None, music=None, occupation=None, school=None, relationship=None, statistics=None, feed_link=None, extension_elements=None, extension_attributes=None, text=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class YouTubeVideoCommentEntry(gdata.GDataEntry)
   Represents a comment on YouTube.
 
 
Method resolution order:
+
YouTubeVideoCommentEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.Entry:
+
__init__(self, author=None, category=None, content=None, contributor=None, atom_id=None, link=None, published=None, rights=None, source=None, summary=None, control=None, title=None, updated=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for atom:entry

+Args:
+  author: list A list of Author instances which belong to this class.
+  category: list A list of Category instances
+  content: Content The entry's Content
+  contributor: list A list on Contributor instances
+  id: Id The entry's Id element
+  link: list A list of Link instances
+  published: Published The entry's Published element
+  rights: Rights The entry's Rights element
+  source: Source the entry's source element
+  summary: Summary the entry's summary element
+  title: Title the entry's title element
+  updated: Updated the entry's updated element
+  control: The entry's app:control element which can be used to mark an 
+      entry as a draft which should not be publicly viewable.
+  text: String The text contents of the element. This is the contents
+      of the Entry's XML text node. (Example: <foo>This is the text</foo>)
+  extension_elements: list A list of ExtensionElement instances which are
+      children of this element.
+  extension_attributes: dict A dictionary of strings which are the values
+      for additional XML attributes of this element.
+ +
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class YouTubeVideoCommentFeed(gdata.GDataFeed, gdata.LinkFinder)
   Represents a feed of comments for a video.
 
 
Method resolution order:
+
YouTubeVideoCommentFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class YouTubeVideoEntry(gdata.GDataEntry)
   Represents a video on YouTube.
 
 
Method resolution order:
+
YouTubeVideoEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
AddDeveloperTags(self, developer_tags)
Add a developer tag for this entry.

+Developer tags can only be set during the initial upload.

+Arguments:
+  developer_tags: A list of developer tags as strings.

+Returns:
+  A list of all developer tags for this video entry.
+ +
GetDeveloperTags(self)
Retrieve developer tags for this video entry.
+ +
GetSwfUrl(self)
Return the URL for the embeddable Video

+Returns:
+    URL of the embeddable video
+ +
GetYouTubeCategoryAsString(self)
Convenience method to return the YouTube category as string.

+YouTubeVideoEntries can contain multiple Category objects with differing 
+    schemes. This method returns only the category with the correct
+    scheme, ignoring developer tags.
+ +
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, rating=None, noembed=None, statistics=None, racy=None, media=None, geo=None, recorded=None, comments=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class YouTubeVideoFeed(gdata.GDataFeed, gdata.LinkFinder)
   Represents a video feed on YouTube.
 
 
Method resolution order:
+
YouTubeVideoFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class YouTubeVideoResponseEntry(gdata.GDataEntry)
   Represents a video response.
 
 
Method resolution order:
+
YouTubeVideoResponseEntry
+
gdata.GDataEntry
+
atom.Entry
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, author=None, category=None, content=None, atom_id=None, link=None, published=None, title=None, updated=None, rating=None, noembed=None, statistics=None, racy=None, media=None, extension_elements=None, extension_attributes=None)
+ +
+Methods inherited from gdata.GDataEntry:
+
GetMediaURL(self)
Returns the URL to the media content, if the entry is a media entry.
+Otherwise returns None.
+ +
IsMedia(self)
Determines whether or not an entry is a GData Media entry.
+ +
+Data descriptors inherited from gdata.GDataEntry:
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + + + +
 
+class YouTubeVideoResponseFeed(gdata.GDataFeed, gdata.LinkFinder)
   Represents a feed of video responses.
 
 
Method resolution order:
+
YouTubeVideoResponseFeed
+
gdata.GDataFeed
+
atom.Feed
+
atom.Source
+
atom.FeedEntryParent
+
atom.AtomBase
+
atom.ExtensionContainer
+
gdata.LinkFinder
+
atom.LinkFinder
+
__builtin__.object
+
+
+Methods inherited from gdata.GDataFeed:
+
__init__(self, author=None, category=None, contributor=None, generator=None, icon=None, atom_id=None, link=None, logo=None, rights=None, subtitle=None, title=None, updated=None, entry=None, total_results=None, start_index=None, items_per_page=None, extension_elements=None, extension_attributes=None, text=None)
Constructor for Source

+Args:
+  author: list (optional) A list of Author instances which belong to this
+      class.
+  category: list (optional) A list of Category instances
+  contributor: list (optional) A list on Contributor instances
+  generator: Generator (optional) 
+  icon: Icon (optional) 
+  id: Id (optional) The entry's Id element
+  link: list (optional) A list of Link instances
+  logo: Logo (optional) 
+  rights: Rights (optional) The entry's Rights element
+  subtitle: Subtitle (optional) The entry's subtitle element
+  title: Title (optional) the entry's title element
+  updated: Updated (optional) the entry's updated element
+  entry: list (optional) A list of the Entry instances contained in the 
+      feed.
+  text: String (optional) The text contents of the element. This is the 
+      contents of the Entry's XML text node. 
+      (Example: <foo>This is the text</foo>)
+  extension_elements: list (optional) A list of ExtensionElement instances
+      which are children of this element.
+  extension_attributes: dict (optional) A dictionary of strings which are 
+      the values for additional XML attributes of this element.
+ +
+Data descriptors inherited from gdata.GDataFeed:
+
generator
+
+
id
+
+
+Methods inherited from atom.AtomBase:
+
ToString(self, string_encoding='UTF-8')
Converts the Atom object to a string containing XML.
+ +
__str__(self)
+ +
+Methods inherited from atom.ExtensionContainer:
+
FindExtensions(self, tag=None, namespace=None)
Searches extension elements for child nodes with the desired name.

+Returns a list of extension elements within this object whose tag
+and/or namespace match those passed in. To find all extensions in
+a particular namespace, specify the namespace but not the tag name.
+If you specify only the tag, the result list may contain extension
+elements in multiple namespaces.

+Args:
+  tag: str (optional) The desired tag
+  namespace: str (optional) The desired namespace

+Returns:
+  A list of elements whose tag and/or namespace match the parameters
+  values
+ +
+Data descriptors inherited from atom.ExtensionContainer:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from gdata.LinkFinder:
+
GetAclLink(self)
+ +
GetEditLink(self)
+ +
GetEditMediaLink(self)
The Picasa API mistakenly returns media-edit rather than edit-media, but
+this may change soon.
+ +
GetFeedLink(self)
+ +
GetHtmlLink(self)
Find the first link with rel of alternate and type of text/html

+Returns:
+  An atom.Link or None if no links matched
+ +
GetNextLink(self)
+ +
GetPostLink(self)
Get a link containing the POST target URL.

+The POST target URL is used to insert new entries.

+Returns:
+  A link object with a rel matching the POST type.
+ +
GetPrevLink(self)
+ +
GetSelfLink(self)
Find the first link with rel set to 'self'

+Returns:
+  An atom.Link or none if none of the links had rel equal to 'self'
+ +
+Methods inherited from atom.LinkFinder:
+
GetAlternateLink(self)
+ +
GetLicenseLink(self)
+ +

+ + + + + +
 
+Functions
       
YouTubeContactEntryFromString(xml_string)
+
YouTubeContactFeedFromString(xml_string)
+
YouTubePlaylistEntryFromString(xml_string)
+
YouTubePlaylistFeedFromString(xml_string)
+
YouTubePlaylistVideoEntryFromString(xml_string)
+
YouTubePlaylistVideoFeedFromString(xml_string)
+
YouTubeSubscriptionEntryFromString(xml_string)
+
YouTubeSubscriptionFeedFromString(xml_string)
+
YouTubeUserEntryFromString(xml_string)
+
YouTubeUserFeedFromString(xml_string)
+
YouTubeVideoCommentEntryFromString(xml_string)
+
YouTubeVideoCommentFeedFromString(xml_string)
+
YouTubeVideoEntryFromString(xml_string)
+
YouTubeVideoFeedFromString(xml_string)
+
YouTubeVideoResponseEntryFromString(xml_string)
+
YouTubeVideoResponseFeedFromString(xml_string)
+

+ + + + + +
 
+Data
       YOUTUBE_DEVELOPER_TAG_SCHEME = 'http://gdata.youtube.com/schemas/2007/developertags.cat'
+YOUTUBE_FORMAT = '{http://gdata.youtube.com/schemas/2007}format'
+YOUTUBE_NAMESPACE = 'http://gdata.youtube.com/schemas/2007'
+YOUTUBE_SUBSCRIPTION_TYPE_SCHEME = 'http://gdata.youtube.com/schemas/2007/subscriptiontypes.cat'
+__author__ = 'api.stephaniel@gmail.com (Stephanie Liu), api.jhartmann@gmail.com (Jochen Hartmann)'

+ + + + + +
 
+Author
       api.stephaniel@gmail.com (Stephanie Liu), api.jhartmann@gmail.com (Jochen Hartmann)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/gdata.youtube.service.html b/gdata.py-1.2.3/pydocs/gdata.youtube.service.html new file mode 100644 index 0000000..9839117 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/gdata.youtube.service.html @@ -0,0 +1,2042 @@ + + +Python: module gdata.youtube.service + + + + +
 
+ 
gdata.youtube.service
index
/usr/local/svn/gdata-python-client/src/gdata/youtube/service.py
+

YouTubeService extends GDataService to streamline YouTube operations.

+YouTubeService: Provides methods to perform CRUD operations on YouTube feeds.
+Extends GDataService.

+

+ + + + + +
 
+Modules
       
xml.etree.cElementTree
+
atom
+
gdata
+
os
+

+ + + + + +
 
+Classes
       
+
exceptions.Exception(exceptions.BaseException) +
+
+
Error +
+
+
RequestError +
YouTubeError +
+
+
+
+
gdata.service.GDataService(atom.service.AtomService) +
+
+
YouTubeService +
+
+
gdata.service.Query(__builtin__.dict) +
+
+
YouTubeVideoQuery +
+
+
YouTubePlaylistQuery +
YouTubeUserQuery +
+
+
+
+
+

+ + + + + + + +
 
+class Error(exceptions.Exception)
   Base class for errors within the YouTube service.
 
 
Method resolution order:
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors defined here:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + + + +
 
+class RequestError(Error)
   Error class that is thrown in response to an invalid HTTP Request.
 
 
Method resolution order:
+
RequestError
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + + + +
 
+class YouTubeError(Error)
   YouTube service specific error class.
 
 
Method resolution order:
+
YouTubeError
+
Error
+
exceptions.Exception
+
exceptions.BaseException
+
__builtin__.object
+
+
+Data descriptors inherited from Error:
+
__weakref__
+
list of weak references to the object (if defined)
+
+
+Methods inherited from exceptions.Exception:
+
__init__(...)
x.__init__(...) initializes x; see x.__class__.__doc__ for signature
+ +
+Data and other attributes inherited from exceptions.Exception:
+
__new__ = <built-in method __new__ of type object at 0x722d20>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
+Methods inherited from exceptions.BaseException:
+
__delattr__(...)
x.__delattr__('name') <==> del x.name
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__getslice__(...)
x.__getslice__(i, j) <==> x[i:j]

+Use of negative indices is not supported.
+ +
__reduce__(...)
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setattr__(...)
x.__setattr__('name', value) <==> x.name = value
+ +
__setstate__(...)
+ +
__str__(...)
x.__str__() <==> str(x)
+ +
+Data descriptors inherited from exceptions.BaseException:
+
__dict__
+
+
args
+
+
message
+
exception message
+
+

+ + + + + + + +
 
+class YouTubePlaylistQuery(YouTubeVideoQuery)
   Subclasses YouTubeVideoQuery to perform playlist-specific queries.

+Attributes are set dynamically via properties. Properties correspond to
+the standard Google Data API query parameters with YouTube Data API
+extensions.
 
 
Method resolution order:
+
YouTubePlaylistQuery
+
YouTubeVideoQuery
+
gdata.service.Query
+
__builtin__.dict
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, playlist_id, text_query=None, params=None, categories=None)
+ +
+Data descriptors inherited from YouTubeVideoQuery:
+
format
+
The format query parameter
+
+
location
+
The location query parameter
+
+
lr
+
The lr (language restriction) query parameter
+
+
orderby
+
The orderby query parameter
+
+
racy
+
The racy query parameter
+
+
restriction
+
The restriction query parameter
+
+
time
+
The time query parameter
+
+
vq
+
The video query (vq) query parameter
+
+
+Methods inherited from gdata.service.Query:
+
ToUri(self)
+ +
__str__(self)
+ +
+Data descriptors inherited from gdata.service.Query:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
alt
+
The feed query's alt parameter
+
+
author
+
The feed query's author parameter
+
+
max_results
+
The feed query's max-results parameter
+
+
published_max
+
The feed query's published-max parameter
+
+
published_min
+
The feed query's published-min parameter
+
+
start_index
+
The feed query's start-index parameter
+
+
text_query
+
The feed query's q parameter
+
+
updated_max
+
The feed query's updated-max parameter
+
+
updated_min
+
The feed query's updated-min parameter
+
+
+Methods inherited from __builtin__.dict:
+
__cmp__(...)
x.__cmp__(y) <==> cmp(x,y)
+ +
__contains__(...)
D.__contains__(k) -> True if D has a key k, else False
+ +
__delitem__(...)
x.__delitem__(y) <==> del x[y]
+ +
__eq__(...)
x.__eq__(y) <==> x==y
+ +
__ge__(...)
x.__ge__(y) <==> x>=y
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__gt__(...)
x.__gt__(y) <==> x>y
+ +
__hash__(...)
x.__hash__() <==> hash(x)
+ +
__iter__(...)
x.__iter__() <==> iter(x)
+ +
__le__(...)
x.__le__(y) <==> x<=y
+ +
__len__(...)
x.__len__() <==> len(x)
+ +
__lt__(...)
x.__lt__(y) <==> x<y
+ +
__ne__(...)
x.__ne__(y) <==> x!=y
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setitem__(...)
x.__setitem__(i, y) <==> x[i]=y
+ +
clear(...)
D.clear() -> None.  Remove all items from D.
+ +
copy(...)
D.copy() -> a shallow copy of D
+ +
get(...)
D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.
+ +
has_key(...)
D.has_key(k) -> True if D has a key k, else False
+ +
items(...)
D.items() -> list of D's (key, value) pairs, as 2-tuples
+ +
iteritems(...)
D.iteritems() -> an iterator over the (key, value) items of D
+ +
iterkeys(...)
D.iterkeys() -> an iterator over the keys of D
+ +
itervalues(...)
D.itervalues() -> an iterator over the values of D
+ +
keys(...)
D.keys() -> list of D's keys
+ +
pop(...)
D.pop(k[,d]) -> v, remove specified key and return the corresponding value
+If key is not found, d is returned if given, otherwise KeyError is raised
+ +
popitem(...)
D.popitem() -> (k, v), remove and return some (key, value) pair as a
+2-tuple; but raise KeyError if D is empty
+ +
setdefault(...)
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+ +
update(...)
D.update(E, **F) -> None.  Update D from E and F: for k in E: D[k] = E[k]
+(if E has keys else: for (k, v) in E: D[k] = v) then: for k in F: D[k] = F[k]
+ +
values(...)
D.values() -> list of D's values
+ +
+Data and other attributes inherited from __builtin__.dict:
+
__new__ = <built-in method __new__ of type object at 0x72b260>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
fromkeys = <built-in method fromkeys of type object at 0xc6f9c0>
dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
+v defaults to None.
+ +

+ + + + + + + +
 
+class YouTubeService(gdata.service.GDataService)
   Client for the YouTube service.

+Performs all documented Google Data YouTube API functions, such as inserting,
+updating and deleting videos, comments, playlist, subscriptions etc.
+YouTube Service requires authentication for any write, update or delete
+actions.

+Attributes:
+  email: An optional string identifying the user. Required only for
+      authenticated actions.
+  password: An optional string identifying the user's password.
+  source: An optional string identifying the name of your application.
+  server: An optional address of the YouTube API server. gdata.youtube.com 
+      is provided as the default value.
+  additional_headers: An optional dictionary containing additional headers
+      to be passed along with each request. Use to store developer key.
+  client_id: An optional string identifying your application, required for   
+      authenticated requests, along with a developer key.
+  developer_key: An optional string value. Register your application at
+      http://code.google.com/apis/youtube/dashboard to obtain a (free) key.
 
 
Method resolution order:
+
YouTubeService
+
gdata.service.GDataService
+
atom.service.AtomService
+
__builtin__.object
+
+
+Methods defined here:
+
AddComment(self, comment_text, video_entry)
Add a comment to a video entry.

+Needs authentication. Note that each comment that is posted must contain
+    the video entry that it is to be posted to.

+Args:
+  comment_text: A string representing the text of the comment.
+  video_entry: The YouTubeVideoEntry to be commented on.

+Returns:
+  True if the comment was added successfully.
+ +
AddComplaint(self, complaint_text, complaint_term, video_id)
Add a complaint for a particular video entry.

+Needs authentication.

+Args:
+  complaint_text: A string representing the complaint text.
+  complaint_term: A string representing the complaint category term.
+  video_id: A string representing the ID of YouTubeVideoEntry to
+      complain about.

+Returns:
+  True if posted successfully.

+Raises:
+  YouTubeError: Your complaint_term is not valid.
+ +
AddContact(self, contact_username, my_username='default')
Add a new contact to the currently authenticated user's contact feed.

+Needs authentication.

+Args:
+  contact_username: A string representing the username of the contact
+      that you wish to add.
+  my_username: An optional string representing the username to whose
+      contact the new contact is to be added.

+Returns:
+    A YouTubeContactEntry if added successfully.
+ +
AddPlaylist(self, playlist_title, playlist_description, playlist_private=None)
Add a new playlist to the currently authenticated users account.

+Needs authentication.

+Args:
+  playlist_title: A string representing the title for the new playlist.
+  playlist_description: A string representing the description of the
+      playlist.
+  playlist_private: An optional boolean, set to True if the playlist is
+      to be private.

+Returns:
+  The YouTubePlaylistEntry if successfully posted.
+ +
AddPlaylistVideoEntryToPlaylist(self, playlist_uri, video_id, custom_video_title=None, custom_video_description=None)
Add a video entry to a playlist, optionally providing a custom title
+and description.

+Needs authentication.

+Args:
+  playlist_uri: A string representing the URI of the playlist to which this
+      video entry is to be added.
+  video_id: A string representing the ID of the video entry to add.
+  custom_video_title: An optional string representing a custom title for
+      the video (only shown on the playlist).
+  custom_video_description: An optional string representing a custom
+      description for the video (only shown on the playlist).

+Returns:
+  A YouTubePlaylistVideoEntry if successfully posted.
+ +
AddRating(self, rating_value, video_entry)
Add a rating to a video entry.

+Needs authentication.

+Args:
+  rating_value: The integer value for the rating (between 1 and 5).
+  video_entry: The YouTubeVideoEntry to be rated.

+Returns:
+  True if the rating was added successfully.

+Raises:
+  YouTubeError: rating_value must be between 1 and 5 in AddRating().
+ +
AddSubscriptionToChannel(self, username_to_subscribe_to, my_username='default')
Add a new channel subscription to the currently authenticated users
+account.

+Needs authentication.

+Args:
+  username_to_subscribe_to: A string representing the username of the 
+      channel to which we want to subscribe.
+  my_username: An optional string representing the name of the user which
+      we want to subscribe. Defaults to currently authenticated user.

+Returns:
+  A new YouTubeSubscriptionEntry if successfully posted.
+ +
AddSubscriptionToFavorites(self, username, my_username='default')
Add a new subscription to a users favorites to the currently
+authenticated user's account.

+Needs authentication

+Args:
+  username: A string representing the username of the user's favorite feed
+      to subscribe to.
+  my_username: An optional string representing the username of the user
+      that is to be subscribed. Defaults to currently authenticated user.

+Returns:
+    A new YouTubeSubscriptionEntry if successful.
+ +
AddSubscriptionToQuery(self, query, my_username='default')
Add a new subscription to a specific keyword query to the currently
+authenticated user's account.

+Needs authentication

+Args:
+  query: A string representing the keyword query to subscribe to.
+  my_username: An optional string representing the username of the user
+      that is to be subscribed. Defaults to currently authenticated user.

+Returns:
+    A new YouTubeSubscriptionEntry if successful.
+ +
AddVideoEntryToFavorites(self, video_entry, username='default')
Add a video entry to a users favorite feed.

+Needs authentication.

+Args:
+  video_entry: The YouTubeVideoEntry to add.
+  username: An optional string representing the username to whose favorite
+      feed you wish to add the entry. Defaults to the currently
+      authenticated user.
+Returns:
+    The posted YouTubeVideoEntry if successfully posted.
+ +
AddVideoResponse(self, video_id_to_respond_to, video_response)
Add a video response.

+Needs authentication.

+Args:
+  video_id_to_respond_to: A string representing the ID of the video to be
+      responded to.
+  video_response: YouTubeVideoEntry to be posted as a response.

+Returns:
+  True if video response was posted successfully.
+ +
CheckUploadStatus(self, video_entry=None, video_id=None)
Check upload status on a recently uploaded video entry.

+Needs authentication. Either video_entry or video_id must be provided.

+Args:
+  video_entry: An optional YouTubeVideoEntry whose upload status to check
+  video_id: An optional string representing the ID of the uploaded video
+      whose status is to be checked.

+Returns:
+  A tuple containing (video_upload_state, detailed_message) or None if
+      no status information is found.

+Raises:
+  YouTubeError: You must provide at least a video_entry or a video_id to the
+      CheckUploadStatus() method.
+ +
DeleteContact(self, contact_username, my_username='default')
Delete a contact from a users contact feed.

+Needs authentication.

+Args:
+  contact_username: A string representing the username of the contact
+      that is to be deleted.
+  my_username: An optional string representing the username of the user's
+      contact feed from which to delete the contact. Defaults to the
+      currently authenticated user.

+Returns:
+  True if the contact was deleted successfully
+ +
DeletePlaylist(self, playlist_uri)
Delete a playlist from the currently authenticated users playlists.

+Needs authentication.

+Args:
+  playlist_uri: A string representing the URI of the playlist that is
+      to be deleted.

+Returns:
+  True if successfully deleted.
+ +
DeletePlaylistVideoEntry(self, playlist_uri, playlist_video_entry_id)
Delete a playlist video entry from a playlist.

+Needs authentication.

+Args:
+  playlist_uri: A URI representing the playlist from which the playlist
+      video entry is to be removed from.
+  playlist_video_entry_id: A string representing id of the playlist video
+      entry that is to be removed.

+Returns:
+    True if entry was successfully deleted.
+ +
DeleteSubscription(self, subscription_uri)
Delete a subscription from the currently authenticated user's account.

+Needs authentication.

+Args:
+  subscription_uri: A string representing the URI of the subscription that
+      is to be deleted.

+Returns:
+  True if deleted successfully.
+ +
DeleteVideoEntry(self, video_entry)
Deletes a video entry.

+Needs authentication.

+Args:
+  video_entry: The YouTubeVideoEntry to be deleted.

+Returns:
+  True if entry was deleted successfully.
+ +
DeleteVideoEntryFromFavorites(self, video_id, username='default')
Delete a video entry from the users favorite feed.

+Needs authentication.

+Args:
+  video_id: A string representing the ID of the video that is to be removed
+  username: An optional string representing the username of the user's
+      favorite feed. Defaults to the currently authenticated user.

+Returns:
+    True if entry was successfully deleted.
+ +
DeleteVideoResponse(self, video_id, response_video_id)
Delete a video response.

+Needs authentication.

+Args:
+  video_id: A string representing the ID of video that contains the
+      response.
+  response_video_id: A string representing the ID of the video that was
+      posted as a response.

+Returns:
+  True if video response was deleted successfully.
+ +
GetFormUploadToken(self, video_entry, uri='http://gdata.youtube.com/action/GetUploadToken')
Receives a YouTube Token and a YouTube PostUrl from a YouTubeVideoEntry.

+Needs authentication.

+Args:
+  video_entry: The YouTubeVideoEntry to upload (meta-data only).
+  uri: An optional string representing the URI from where to fetch the
+      token information. Defaults to the YOUTUBE_UPLOADTOKEN_URI.

+Returns:
+  A tuple containing the URL to which to post your video file, along
+      with the youtube token that must be included with your upload in the
+      form of: (post_url, youtube_token).
+ +
GetMostDiscussedVideoFeed(self)
Retrieve the 'most_discussed' standard video feed.

+Returns:
+  A YouTubeVideoFeed if successfully retrieved.
+ +
GetMostLinkedVideoFeed(self)
Retrieve the 'most_linked' standard video feed.

+Returns:
+  A YouTubeVideoFeed if successfully retrieved.
+ +
GetMostRecentVideoFeed(self)
Retrieve the 'most_recent' standard video feed.

+Returns:
+  A YouTubeVideoFeed if successfully retrieved.
+ +
GetMostRespondedVideoFeed(self)
Retrieve the 'most_responded' standard video feed.

+Returns:
+  A YouTubeVideoFeed if successfully retrieved.
+ +
GetMostViewedVideoFeed(self)
Retrieve the 'most_viewed' standard video feed.

+Returns:
+  A YouTubeVideoFeed if successfully retrieved.
+ +
GetRecentlyFeaturedVideoFeed(self)
Retrieve the 'recently_featured' standard video feed.

+Returns:
+  A YouTubeVideoFeed if successfully retrieved.
+ +
GetTopFavoritesVideoFeed(self)
Retrieve the 'top_favorites' standard video feed.

+Returns:
+  A YouTubeVideoFeed if successfully retrieved.
+ +
GetTopRatedVideoFeed(self)
Retrieve the 'top_rated' standard video feed.

+Returns:
+  A YouTubeVideoFeed if successfully retrieved.
+ +
GetUserFavoritesFeed(self, username='default')
Retrieve the favorites feed for a given user.

+Args:
+  username: An optional string representing the username whose favorites
+      feed is to be retrieved. Defaults to the currently authenticated user.

+Returns:
+  A YouTubeVideoFeed if successfully retrieved.
+ +
GetWatchOnMobileVideoFeed(self)
Retrieve the 'watch_on_mobile' standard video feed.

+Returns:
+  A YouTubeVideoFeed if successfully retrieved.
+ +
GetYouTubeContactEntry(self, uri)
Retrieve a YouTubeContactEntry.

+Args:
+  uri: A string representing the URI of the contact entry that is to
+      be retrieved.

+Returns:
+  A YouTubeContactEntry if successfully retrieved.
+ +
GetYouTubeContactFeed(self, uri=None, username='default')
Retrieve a YouTubeContactFeed.

+Either a uri or a username must be provided.

+Args:
+  uri: An optional string representing the URI of the contact feed that
+      is to be retrieved.
+  username: An optional string representing the username. Defaults to the
+      currently authenticated user.

+Returns:
+  A YouTubeContactFeed if successfully retrieved.

+Raises:
+  YouTubeError: You must provide at least a uri or a username to the
+      GetYouTubeContactFeed() method.
+ +
GetYouTubePlaylistEntry(self, uri)
Retrieve a YouTubePlaylistEntry.

+Args:
+  uri: A string representing the URI of the playlist feed that is to
+      be retrieved.

+Returns:
+  A YouTubePlaylistEntry if successfully retrieved.
+ +
GetYouTubePlaylistFeed(self, uri=None, username='default')
Retrieve a YouTubePlaylistFeed (a feed of playlists for a user).

+Either a uri or a username must be provided.

+Args:
+  uri: An optional string representing the URI of the playlist feed that
+      is to be retrieved.
+  username: An optional string representing the username. Defaults to the
+      currently authenticated user.

+Returns:
+  A YouTubePlaylistFeed if successfully retrieved.

+Raises:
+  YouTubeError: You must provide at least a uri or a username to the
+      GetYouTubePlaylistFeed() method.
+ +
GetYouTubePlaylistVideoFeed(self, uri=None, playlist_id=None)
Retrieve a YouTubePlaylistVideoFeed (a feed of videos on a playlist).

+Either a uri or a playlist_id must be provided.

+Args:
+  uri: An optional string representing the URI of the playlist video feed
+      that is to be retrieved.
+  playlist_id: An optional string representing the Id of the playlist whose
+      playlist video feed is to be retrieved.

+Returns:
+  A YouTubePlaylistVideoFeed if successfully retrieved.

+Raises:
+  YouTubeError: You must provide at least a uri or a playlist_id to the
+      GetYouTubePlaylistVideoFeed() method.
+ +
GetYouTubeRelatedVideoFeed(self, uri=None, video_id=None)
Retrieve a YouTubeRelatedVideoFeed.

+Either a uri for the feed or a video_id is required.

+Args:
+  uri: An optional string representing the URI of the feed that is to
+      be retrieved.
+  video_id: An optional string representing the ID of the video for which
+      to retrieve the related video feed.

+Returns:
+  A YouTubeRelatedVideoFeed if successfully retrieved.

+Raises:
+  YouTubeError: You must provide at least a uri or a video_id to the
+      GetYouTubeRelatedVideoFeed() method.
+ +
GetYouTubeSubscriptionEntry(self, uri)
Retrieve a YouTubeSubscriptionEntry.

+Args:
+  uri: A string representing the URI of the entry that is to be retrieved.

+Returns:
+  A YouTubeVideoSubscriptionEntry if successfully retrieved.
+ +
GetYouTubeSubscriptionFeed(self, uri=None, username='default')
Retrieve a YouTubeSubscriptionFeed.

+Either the uri of the feed or a username must be provided.

+Args:
+  uri: An optional string representing the URI of the feed that is to
+      be retrieved.
+  username: An optional string representing the username whose subscription
+      feed is to be retrieved. Defaults to the currently authenticated user.

+Returns:
+  A YouTubeVideoSubscriptionFeed if successfully retrieved.
+ +
GetYouTubeUserEntry(self, uri=None, username=None)
Retrieve a YouTubeUserEntry.

+Either a uri or a username must be provided.

+Args:
+  uri: An optional string representing the URI of the user entry that is
+      to be retrieved.
+  username: An optional string representing the username.

+Returns:
+  A YouTubeUserEntry if successfully retrieved.

+Raises:
+  YouTubeError: You must provide at least a uri or a username to the
+      GetYouTubeUserEntry() method.
+ +
GetYouTubeUserFeed(self, uri=None, username=None)
Retrieve a YouTubeUserFeed.

+Either a uri or a username must be provided.

+Args:
+  uri: An optional string representing the URI of the user feed that is
+      to be retrieved.
+  username: An optional string representing the username.

+Returns:
+  A YouTubeUserFeed if successfully retrieved.

+Raises:
+  YouTubeError: You must provide at least a uri or a username to the
+      GetYouTubeUserFeed() method.
+ +
GetYouTubeVideoCommentEntry(self, uri)
Retrieve a YouTubeVideoCommentEntry.

+Args:
+  uri: A string representing the URI of the comment entry that is to
+      be retrieved.

+Returns:
+  A YouTubeCommentEntry if successfully retrieved.
+ +
GetYouTubeVideoCommentFeed(self, uri=None, video_id=None)
Retrieve a YouTubeVideoCommentFeed.

+Either a uri or a video_id must be provided.

+Args:
+  uri: An optional string representing the URI of the comment feed that
+      is to be retrieved.
+  video_id: An optional string representing the ID of the video for which
+      to retrieve the comment feed.

+Returns:
+  A YouTubeVideoCommentFeed if successfully retrieved.

+Raises:
+  YouTubeError: You must provide at least a uri or a video_id to the
+      GetYouTubeVideoCommentFeed() method.
+ +
GetYouTubeVideoEntry(self, uri=None, video_id=None)
Retrieve a YouTubeVideoEntry.

+Either a uri or a video_id must be provided.

+Args:
+  uri: An optional string representing the URI of the entry that is to 
+      be retrieved.
+  video_id: An optional string representing the ID of the video.

+Returns:
+  A YouTubeVideoFeed if successfully retrieved.

+Raises:
+  YouTubeError: You must provide at least a uri or a video_id to the
+      GetYouTubeVideoEntry() method.
+ +
GetYouTubeVideoFeed(self, uri)
Retrieve a YouTubeVideoFeed.

+Args:
+  uri: A string representing the URI of the feed that is to be retrieved.

+Returns:
+  A YouTubeVideoFeed if successfully retrieved.
+ +
GetYouTubeVideoResponseEntry(self, uri)
Retrieve a YouTubeVideoResponseEntry.

+Args:
+  uri: A string representing the URI of the video response entry that
+      is to be retrieved.

+Returns:
+  A YouTubeVideoResponseEntry if successfully retrieved.
+ +
GetYouTubeVideoResponseFeed(self, uri=None, video_id=None)
Retrieve a YouTubeVideoResponseFeed.

+Either a uri or a playlist_id must be provided.

+Args:
+  uri: An optional string representing the URI of the video response feed
+      that is to be retrieved.
+  video_id: An optional string representing the ID of the video whose
+      response feed is to be retrieved.

+Returns:
+  A YouTubeVideoResponseFeed if successfully retrieved.

+Raises:
+  YouTubeError: You must provide at least a uri or a video_id to the
+      GetYouTubeVideoResponseFeed() method.
+ +
InsertVideoEntry(self, video_entry, filename_or_handle, youtube_username='default', content_type='video/quicktime')
Upload a new video to YouTube using the direct upload mechanism.

+Needs authentication.

+Args:
+  video_entry: The YouTubeVideoEntry to upload.
+  filename_or_handle: A file-like object or file name where the video
+      will be read from.
+  youtube_username: An optional string representing the username into whose
+      account this video is to be uploaded to. Defaults to the currently
+      authenticated user.
+  content_type: An optional string representing internet media type
+      (a.k.a. mime type) of the media object. Currently the YouTube API
+      supports these types:
+        o video/mpeg
+        o video/quicktime
+        o video/x-msvideo
+        o video/mp4
+        o video/x-flv

+Returns:
+  The newly created YouTubeVideoEntry if successful.

+Raises:
+  AssertionError: video_entry must be a gdata.youtube.VideoEntry instance.
+  YouTubeError: An error occurred trying to read the video file provided.
+  gdata.service.RequestError: An error occurred trying to upload the video
+      to the API server.
+ +
Query(self, uri)
Performs a query and returns a resulting feed or entry.

+Args:
+  uri: A string representing the URI of the feed that is to be queried.

+Returns:
+  On success, a tuple in the form:
+  (boolean succeeded=True, ElementTree._Element result)
+  On failure, a tuple in the form:
+  (boolean succeeded=False, {'status': HTTP status code from server,
+                             'reason': HTTP reason from the server,
+                             'body': HTTP body of the server's response})
+ +
UpdateContact(self, contact_username, new_contact_status, new_contact_category, my_username='default')
Update a contact, providing a new status and a new category.

+Needs authentication.

+Args:
+  contact_username: A string representing the username of the contact
+      that is to be updated.
+  new_contact_status: A string representing the new status of the contact.
+      This can either be set to 'accepted' or 'rejected'.
+  new_contact_category: A string representing the new category for the
+      contact, either 'Friends' or 'Family'.
+  my_username: An optional string representing the username of the user
+      whose contact feed we are modifying. Defaults to the currently
+      authenticated user.

+Returns:
+  A YouTubeContactEntry if updated successfully.

+Raises:
+  YouTubeError: New contact status must be within the accepted values. Or
+      new contact category must be within the accepted categories.
+ +
UpdatePlaylist(self, playlist_id, new_playlist_title, new_playlist_description, playlist_private=None, username='default')
Update a playlist with new meta-data.

+ Needs authentication.

+ Args:
+   playlist_id: A string representing the ID of the playlist to be updated.
+   new_playlist_title: A string representing a new title for the playlist.
+   new_playlist_description: A string representing a new description for the
+       playlist.
+   playlist_private: An optional boolean, set to True if the playlist is
+       to be private.
+   username: An optional string representing the username whose playlist is
+       to be updated. Defaults to the currently authenticated user.

+Returns:
+   A YouTubePlaylistEntry if the update was successful.
+ +
UpdatePlaylistVideoEntryMetaData(self, playlist_uri, playlist_entry_id, new_video_title, new_video_description, new_video_position)
Update the meta data for a YouTubePlaylistVideoEntry.

+Needs authentication.

+Args:
+  playlist_uri: A string representing the URI of the playlist that contains
+      the entry to be updated.
+  playlist_entry_id: A string representing the ID of the entry to be
+      updated.
+  new_video_title: A string representing the new title for the video entry.
+  new_video_description: A string representing the new description for
+      the video entry.
+  new_video_position: An integer representing the new position on the
+      playlist for the video.

+Returns:
+  A YouTubePlaylistVideoEntry if the update was successful.
+ +
UpdateVideoEntry(self, video_entry)
Updates a video entry's meta-data.

+Needs authentication.

+Args:
+  video_entry: The YouTubeVideoEntry to update, containing updated
+      meta-data.

+Returns:
+  An updated YouTubeVideoEntry on success or None.
+ +
YouTubeQuery(self, query)
Performs a YouTube specific query and returns a resulting feed or entry.

+Args:
+  query: A Query object or one if its sub-classes (YouTubeVideoQuery,
+      YouTubeUserQuery or YouTubePlaylistQuery).

+Returns:
+  Depending on the type of Query object submitted returns either a
+      YouTubeVideoFeed, a YouTubeUserFeed, a YouTubePlaylistFeed. If the
+      Query object provided was not YouTube-related, a tuple is returned.
+      On success the tuple will be in this form:
+      (boolean succeeded=True, ElementTree._Element result)
+      On failure, the tuple will be in this form:
+      (boolean succeeded=False, {'status': HTTP status code from server,
+                                 'reason': HTTP reason from the server,
+                                 'body': HTTP body of the server response})
+ +
__init__(self, email=None, password=None, source=None, server='gdata.youtube.com', additional_headers=None, client_id=None, developer_key=None)
+ +
+Data descriptors defined here:
+
client_id
+
The ClientId property
+
+
developer_key
+
The Developer Key property
+
+
+Methods inherited from gdata.service.GDataService:
+
AuthSubTokenInfo(self)
Fetches the AuthSub token's metadata from the server.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
ClientLogin(self, username, password, account_type=None, service=None, auth_service_url=None, source=None, captcha_token=None, captcha_response=None)
Convenience method for authenticating using ProgrammaticLogin. 

+Sets values for email, password, and other optional members.

+Args:
+  username:
+  password:
+  account_type: string (optional)
+  service: string (optional)
+  auth_service_url: string (optional)
+  captcha_token: string (optional)
+  captcha_response: string (optional)
+ +
Delete(self, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4)
Deletes the entry at the given URI.

+Args:
+  uri: string The URI of the entry to be deleted. Example: 
+       '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.

+Returns:
+  True if the entry was deleted.
+ +
FetchOAuthRequestToken(self, scopes=None, extra_parameters=None)
Fetches OAuth request token and returns it.

+Args:
+  scopes: string or list of string base URL(s) of the service(s) to be
+      accessed. If None, then this method tries to determine the
+      scope(s) from the current service.
+  extra_parameters: dict (optional) key-value pairs as any additional
+      parameters to be included in the URL and signature while making a
+      request for fetching an OAuth request token. All the OAuth parameters
+      are added by default. But if provided through this argument, any
+      default parameters will be overwritten. For e.g. a default parameter
+      oauth_version 1.0 can be overwritten if
+      extra_parameters = {'oauth_version': '2.0'}
+  
+Returns:
+  The fetched request token as a gdata.auth.OAuthToken object.
+  
+Raises:
+  FetchingOAuthRequestTokenFailed if the server responded to the request
+  with an error.
+ +
GenerateAuthSubURL(self, next, scope, secure=False, session=True, domain='default')
Generate a URL at which the user will login and be redirected back.

+Users enter their credentials on a Google login page and a token is sent
+to the URL specified in next. See documentation for AuthSub login at:
+http://code.google.com/apis/accounts/docs/AuthSub.html

+Args:
+  next: string The URL user will be sent to after logging in.
+  scope: string or list of strings. The URLs of the services to be 
+         accessed.
+  secure: boolean (optional) Determines whether or not the issued token
+          is a secure token.
+  session: boolean (optional) Determines whether or not the issued token
+           can be upgraded to a session token.
+ +
GenerateOAuthAuthorizationURL(self, request_token=None, callback_url=None, extra_params=None, include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope')
Generates URL at which user will login to authorize the request token.

+Args:
+  request_token: gdata.auth.OAuthToken (optional) OAuth request token.
+      If not specified, then the current token will be used if it is of
+      type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.    
+  callback_url: string (optional) The URL user will be sent to after
+      logging in and granting access.
+  extra_params: dict (optional) Additional parameters to be sent.
+  include_scopes_in_callback: Boolean (default=False) if set to True, and
+      if 'callback_url' is present, the 'callback_url' will be modified to
+      include the scope(s) from the request token as a URL parameter. The
+      key for the 'callback' URL's scope parameter will be
+      OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as
+      a parameter to the 'callback' URL, is that the page which receives
+      the OAuth token will be able to tell which URLs the token grants
+      access to.
+  scopes_param_prefix: string (default='oauth_token_scope') The URL
+      parameter key which maps to the list of valid scopes for the token.
+      This URL parameter will be included in the callback URL along with
+      the scopes of the token as value if include_scopes_in_callback=True.
+      
+Returns:
+  A string URL at which the user is required to login.

+Raises:
+  NonOAuthToken if the user's request token is not an OAuth token or if a
+  request token was not available.
+ +
Get(self, uri, extra_headers=None, redirects_remaining=4, encoding='UTF-8', converter=None)
Query the GData API with the given URI

+The uri is the portion of the URI after the server value 
+(ex: www.google.com).

+To perform a query against Google Base, set the server to 
+'base.google.com' and set the uri to '/base/feeds/...', where ... is 
+your query. For example, to find snippets for all digital cameras uri 
+should be set to: '/base/feeds/snippets?bq=digital+camera'

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to 
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and 
+                 Authorization headers.
+  redirects_remaining: int (optional) Tracks the number of additional
+      redirects this method will allow. If the service object receives
+      a redirect and remaining is 0, it will not follow the redirect. 
+      This was added to avoid infinite redirect loops.
+  encoding: string (optional) The character encoding for the server's
+      response. Default is UTF-8
+  converter: func (optional) A function which will transform
+      the server's results before it is returned. Example: use 
+      GDataFeedFromString to parse the server response as if it
+      were a GDataFeed.

+Returns:
+  If there is no ResultsTransformer specified in the call, a GDataFeed 
+  or GDataEntry depending on which is sent from the server. If the 
+  response is neither a feed nor an entry and there is no ResultsTransformer,
+  return a string. If there is a ResultsTransformer, the returned value 
+  will be that of the ResultsTransformer function.
+ +
GetAuthSubToken(self)
Returns the AuthSub token as a string.

+If the token is a gdata.auth.AuthSubToken, the Authorization Label
+("AuthSub token") is removed.

+This method examines the current_token to see if it is an AuthSubToken
+or SecureAuthSubToken. If not, it searches the token_store for a token
+which matches the current scope.

+The current scope is determined by the service name string member.

+Returns:
+  If the current_token is set to an AuthSubToken/SecureAuthSubToken,
+  return the token string. If there is no current_token, a token string
+  for a token which matches the service object's default scope is returned.
+  If there are no tokens valid for the scope, returns None.
+ +
GetClientLoginToken(self)
Returns the token string for the current token or a token matching the 
+service scope.

+If the current_token is a ClientLoginToken, the token string for 
+the current token is returned. If the current_token is not set, this method
+searches for a token in the token_store which is valid for the service 
+object's current scope.

+The current scope is determined by the service name string member.
+The token string is the end of the Authorization header, it does not
+include the ClientLogin label.
+ +
GetEntry(self, uri, extra_headers=None)
Query the GData API with the given URI and receive an Entry.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataEntry built from the XML in the server's response.
+ +
GetFeed(self, uri, extra_headers=None, converter=<function GDataFeedFromString at 0x7f2876953b18>)
Query the GData API with the given URI and receive a Feed.

+See also documentation for gdata.service.Get

+Args:
+  uri: string The query in the form of a URI. Example:
+       '/base/feeds/snippets?bq=digital+camera'.
+  extra_headers: dictionary (optional) Extra HTTP headers to be included
+                 in the GET request. These headers are in addition to
+                 those stored in the client's additional_headers property.
+                 The client automatically sets the Content-Type and
+                 Authorization headers.

+Returns:
+  A GDataFeed built from the XML in the server's response.
+ +
GetMedia(self, uri, extra_headers=None)
Returns a MediaSource containing media and its metadata from the given
+URI string.
+ +
GetNext(self, feed)
Requests the next 'page' of results in the feed.

+This method uses the feed's next link to request an additional feed
+and uses the class of the feed to convert the results of the GET request.

+Args:
+  feed: atom.Feed or a subclass. The feed should contain a next link and
+      the type of the feed will be applied to the results from the 
+      server. The new feed which is returned will be of the same class
+      as this feed which was passed in.

+Returns:
+  A new feed representing the next set of results in the server's feed.
+  The type of this feed will match that of the feed argument.
+ +
Post(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert or update data into a GData service at the given URI.

+Args:
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri.
+  uri: string The location (feed) to which the data should be inserted.
+       Example: '/base/feeds/items'.
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the
+      server's response. Often this is a function like
+      GDataEntryFromString which will parse the body of the server's
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4, media_source=None, converter=None)
Insert data into a GData service at the given URI.

+Args:
+  verb: string, either 'POST' or 'PUT'
+  data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The
+        XML to be sent to the uri. 
+  uri: string The location (feed) to which the data should be inserted. 
+       Example: '/base/feeds/items'. 
+  extra_headers: dict (optional) HTTP headers which are to be included. 
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  media_source: MediaSource (optional) Container for the media to be sent
+      along with the entry, if provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the post succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
ProgrammaticLogin(self, captcha_token=None, captcha_response=None)
Authenticates the user and sets the GData Auth token.

+Login retrieves a temporary auth token which must be used with all
+requests to GData services. The auth token is stored in the GData client
+object.

+Login is also used to respond to a captcha challenge. If the user's login
+attempt failed with a CaptchaRequired error, the user can respond by
+calling Login with the captcha token and the answer to the challenge.

+Args:
+  captcha_token: string (optional) The identifier for the captcha challenge
+                 which was presented to the user.
+  captcha_response: string (optional) The user's answer to the captcha
+                    challenge.

+Raises:
+  CaptchaRequired if the login service will require a captcha response
+  BadAuthentication if the login service rejected the username or password
+  Error if the login service responded with a 403 different from the above
+ +
Put(self, data, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=3, media_source=None, converter=None)
Updates an entry at the given URI.

+Args:
+  data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The 
+        XML containing the updated data.
+  uri: string A URI indicating entry to which the update will be applied.
+       Example: '/base/feeds/items/ITEM-ID'
+  extra_headers: dict (optional) HTTP headers which are to be included.
+                 The client automatically sets the Content-Type,
+                 Authorization, and Content-Length headers.
+  url_params: dict (optional) Additional URL parameters to be included
+              in the URI. These are translated into query arguments
+              in the form '&dict_key=value&...'.
+              Example: {'max-results': '250'} becomes &max-results=250
+  escape_params: boolean (optional) If false, the calling code has already
+                 ensured that the query will form a valid URL (all
+                 reserved characters have been escaped). If true, this
+                 method will escape the query and any URL parameters
+                 provided.
+  converter: func (optional) A function which will be executed on the 
+      server's response. Often this is a function like 
+      GDataEntryFromString which will parse the body of the server's 
+      response and return a GDataEntry.

+Returns:
+  If the put succeeded, this method will return a GDataFeed, GDataEntry,
+  or the results of running converter on the server's result body (if
+  converter was specified).
+ +
RevokeAuthSubToken(self)
Revokes an existing AuthSub token.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+ +
RevokeOAuthToken(self)
Revokes an existing OAuth token.

+Raises:
+  NonOAuthToken if the user's auth token is not an OAuth token.
+  RevokingOAuthTokenFailed if request for revoking an OAuth token failed.
+ +
SetAuthSubToken(self, token, scopes=None, rsa_key=None)
Sets the token sent in requests to an AuthSub token.

+Sets the current_token and attempts to add the token to the token_store.

+Only use this method if you have received a token from the AuthSub
+service. The auth token is set automatically when UpgradeToSessionToken()
+is used. See documentation for Google AuthSub here:
+http://code.google.com/apis/accounts/AuthForWebApps.html 

+Args:
+ token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string
+        The token returned by the AuthSub service. If the token is an
+        AuthSubToken or SecureAuthSubToken, the scope information stored in
+        the token is used. If the token is a string, the scopes parameter is
+        used to determine the valid scopes.
+ scopes: list of URLs for which the token is valid. This is only used
+         if the token parameter is a string.
+ rsa_key: string (optional) Private key required for RSA_SHA1 signature
+          method.  This parameter is necessary if the token is a string
+          representing a secure token.
+ +
SetClientLoginToken(self, token, scopes=None)
Sets the token sent in requests to a ClientLogin token.

+This method sets the current_token to a new ClientLoginToken and it 
+also attempts to add the ClientLoginToken to the token_store.

+Only use this method if you have received a token from the ClientLogin
+service. The auth_token is set automatically when ProgrammaticLogin()
+is used. See documentation for Google ClientLogin here:
+http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html

+Args:
+  token: string or instance of a ClientLoginToken.
+ +
SetOAuthInputParameters(self, signature_method, consumer_key, consumer_secret=None, rsa_key=None, two_legged_oauth=False)
Sets parameters required for using OAuth authentication mechanism.

+NOTE: Though consumer_secret and rsa_key are optional, either of the two
+is required depending on the value of the signature_method.

+Args:
+  signature_method: class which provides implementation for strategy class
+      oauth.oauth.OAuthSignatureMethod. Signature method to be used for
+      signing each request. Valid implementations are provided as the
+      constants defined by gdata.auth.OAuthSignatureMethod. Currently
+      they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
+      gdata.auth.OAuthSignatureMethod.HMAC_SHA1
+  consumer_key: string Domain identifying third_party web application.
+  consumer_secret: string (optional) Secret generated during registration.
+      Required only for HMAC_SHA1 signature method.
+  rsa_key: string (optional) Private key required for RSA_SHA1 signature
+      method.
+  two_legged_oauth: string (default=False) Enables two-legged OAuth process.
+ +
SetOAuthToken(self, oauth_token)
Attempts to set the current token and add it to the token store.

+The oauth_token can be any OAuth token i.e. unauthorized request token,
+authorized request token or access token.
+This method also attempts to add the token to the token store.
+Use this method any time you want the current token to point to the
+oauth_token passed. For e.g. call this method with the request token
+you receive from FetchOAuthRequestToken.

+Args:
+  request_token: gdata.auth.OAuthToken OAuth request token.
+ +
UpgradeToOAuthAccessToken(self, authorized_request_token=None, oauth_version='1.0')
Upgrades the authorized request token to an access token.

+Args:
+  authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request
+      token. If not specified, then the current token will be used if it is
+      of type <gdata.auth.OAuthToken>, else it is found by looking in the
+      token_store by looking for a token for the current scope.
+  oauth_version: str (default='1.0') oauth_version parameter. All other
+      'oauth_' parameters are added by default. This parameter too, is
+      added by default but here you can override its value.
+      
+Raises:
+  NonOAuthToken if the user's authorized request token is not an OAuth
+  token or if an authorized request token was not available.
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
UpgradeToSessionToken(self, token=None)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         (optional) which is good for a single use but can be upgraded
+         to a session token. If no token is passed in, the token
+         is found by looking in the token_store by looking for a token
+         for the current scope.

+Raises:
+  NonAuthSubToken if the user's auth token is not an AuthSub token
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
upgrade_to_session_token(self, token)
Upgrades a single use AuthSub token to a session token.

+Args:
+  token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
+         which is good for a single use but can be upgraded to a
+         session token.

+Returns:
+  The upgraded token as a gdata.auth.AuthSubToken object.

+Raises:
+  TokenUpgradeFailed if the server responded to the request with an 
+  error.
+ +
+Data descriptors inherited from gdata.service.GDataService:
+
captcha_token
+
Get the captcha token for a login request.
+
+
captcha_url
+
Get the captcha URL for a login request.
+
+
source
+
The source is the name of the application making the request. 
+It should be in the form company_id-app_name-app_version
+
+
+Data and other attributes inherited from gdata.service.GDataService:
+
auth_token = None
+ +
handler = None
+ +
tokens = None
+ +
+Methods inherited from atom.service.AtomService:
+
UseBasicAuth(self, username, password, for_proxy=False)
Sets an Authentication: Basic HTTP header containing plaintext.

+Deprecated, use use_basic_auth instead.

+The username and password are base64 encoded and added to an HTTP header
+which will be included in each request. Note that your username and 
+password are sent in plaintext.

+Args:
+  username: str
+  password: str
+ +
request(self, operation, url, data=None, headers=None, url_params=None)
+ +
use_basic_auth(self, username, password, scopes=None)
+ +
+Data descriptors inherited from atom.service.AtomService:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
debug
+
If True, HTTP debug information is printed.
+
+
override_token
+
+
+Data and other attributes inherited from atom.service.AtomService:
+
auto_set_current_token = True
+ +
auto_store_tokens = True
+ +
current_token = None
+ +
port = 80
+ +
ssl = False
+ +

+ + + + + + + +
 
+class YouTubeUserQuery(YouTubeVideoQuery)
   Subclasses YouTubeVideoQuery to perform user-specific queries.

+Attributes are set dynamically via properties. Properties correspond to
+the standard Google Data API query parameters with YouTube Data API
+extensions.
 
 
Method resolution order:
+
YouTubeUserQuery
+
YouTubeVideoQuery
+
gdata.service.Query
+
__builtin__.dict
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, username=None, feed_type=None, subscription_id=None, text_query=None, params=None, categories=None)
+ +
+Data descriptors inherited from YouTubeVideoQuery:
+
format
+
The format query parameter
+
+
location
+
The location query parameter
+
+
lr
+
The lr (language restriction) query parameter
+
+
orderby
+
The orderby query parameter
+
+
racy
+
The racy query parameter
+
+
restriction
+
The restriction query parameter
+
+
time
+
The time query parameter
+
+
vq
+
The video query (vq) query parameter
+
+
+Methods inherited from gdata.service.Query:
+
ToUri(self)
+ +
__str__(self)
+ +
+Data descriptors inherited from gdata.service.Query:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
alt
+
The feed query's alt parameter
+
+
author
+
The feed query's author parameter
+
+
max_results
+
The feed query's max-results parameter
+
+
published_max
+
The feed query's published-max parameter
+
+
published_min
+
The feed query's published-min parameter
+
+
start_index
+
The feed query's start-index parameter
+
+
text_query
+
The feed query's q parameter
+
+
updated_max
+
The feed query's updated-max parameter
+
+
updated_min
+
The feed query's updated-min parameter
+
+
+Methods inherited from __builtin__.dict:
+
__cmp__(...)
x.__cmp__(y) <==> cmp(x,y)
+ +
__contains__(...)
D.__contains__(k) -> True if D has a key k, else False
+ +
__delitem__(...)
x.__delitem__(y) <==> del x[y]
+ +
__eq__(...)
x.__eq__(y) <==> x==y
+ +
__ge__(...)
x.__ge__(y) <==> x>=y
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__gt__(...)
x.__gt__(y) <==> x>y
+ +
__hash__(...)
x.__hash__() <==> hash(x)
+ +
__iter__(...)
x.__iter__() <==> iter(x)
+ +
__le__(...)
x.__le__(y) <==> x<=y
+ +
__len__(...)
x.__len__() <==> len(x)
+ +
__lt__(...)
x.__lt__(y) <==> x<y
+ +
__ne__(...)
x.__ne__(y) <==> x!=y
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setitem__(...)
x.__setitem__(i, y) <==> x[i]=y
+ +
clear(...)
D.clear() -> None.  Remove all items from D.
+ +
copy(...)
D.copy() -> a shallow copy of D
+ +
get(...)
D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.
+ +
has_key(...)
D.has_key(k) -> True if D has a key k, else False
+ +
items(...)
D.items() -> list of D's (key, value) pairs, as 2-tuples
+ +
iteritems(...)
D.iteritems() -> an iterator over the (key, value) items of D
+ +
iterkeys(...)
D.iterkeys() -> an iterator over the keys of D
+ +
itervalues(...)
D.itervalues() -> an iterator over the values of D
+ +
keys(...)
D.keys() -> list of D's keys
+ +
pop(...)
D.pop(k[,d]) -> v, remove specified key and return the corresponding value
+If key is not found, d is returned if given, otherwise KeyError is raised
+ +
popitem(...)
D.popitem() -> (k, v), remove and return some (key, value) pair as a
+2-tuple; but raise KeyError if D is empty
+ +
setdefault(...)
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+ +
update(...)
D.update(E, **F) -> None.  Update D from E and F: for k in E: D[k] = E[k]
+(if E has keys else: for (k, v) in E: D[k] = v) then: for k in F: D[k] = F[k]
+ +
values(...)
D.values() -> list of D's values
+ +
+Data and other attributes inherited from __builtin__.dict:
+
__new__ = <built-in method __new__ of type object at 0x72b260>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
fromkeys = <built-in method fromkeys of type object at 0xc6f300>
dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
+v defaults to None.
+ +

+ + + + + + + +
 
+class YouTubeVideoQuery(gdata.service.Query)
   Subclasses gdata.service.Query to represent a YouTube Data API query.

+Attributes are set dynamically via properties. Properties correspond to
+the standard Google Data API query parameters with YouTube Data API
+extensions. Please refer to the API documentation for details.

+Attributes:
+  vq: The vq parameter, which is only supported for video feeds, specifies a
+      search query term. Refer to API documentation for further details.
+  orderby: The orderby parameter, which is only supported for video feeds,
+      specifies the value that will be used to sort videos in the search
+      result set. Valid values for this parameter are relevance, published,
+      viewCount and rating.
+  time: The time parameter, which is only available for the top_rated,
+      top_favorites, most_viewed, most_discussed, most_linked and
+      most_responded standard feeds, restricts the search to videos uploaded
+      within the specified time. Valid values for this parameter are today
+      (1 day), this_week (7 days), this_month (1 month) and all_time.
+      The default value for this parameter is all_time.
+  format: The format parameter specifies that videos must be available in a
+      particular video format. Refer to the API documentation for details.
+  racy: The racy parameter allows a search result set to include restricted
+      content as well as standard content. Valid values for this parameter
+      are include and exclude. By default, restricted content is excluded.
+  lr: The lr parameter restricts the search to videos that have a title,
+      description or keywords in a specific language. Valid values for the lr
+      parameter are ISO 639-1 two-letter language codes.
+  restriction: The restriction parameter identifies the IP address that
+      should be used to filter videos that can only be played in specific
+      countries.
+  location: A string of geo coordinates. Note that this is not used when the
+      search is performed but rather to filter the returned videos for ones
+      that match to the location entered.
 
 
Method resolution order:
+
YouTubeVideoQuery
+
gdata.service.Query
+
__builtin__.dict
+
__builtin__.object
+
+
+Methods defined here:
+
__init__(self, video_id=None, feed_type=None, text_query=None, params=None, categories=None)
+ +
+Data descriptors defined here:
+
format
+
The format query parameter
+
+
location
+
The location query parameter
+
+
lr
+
The lr (language restriction) query parameter
+
+
orderby
+
The orderby query parameter
+
+
racy
+
The racy query parameter
+
+
restriction
+
The restriction query parameter
+
+
time
+
The time query parameter
+
+
vq
+
The video query (vq) query parameter
+
+
+Methods inherited from gdata.service.Query:
+
ToUri(self)
+ +
__str__(self)
+ +
+Data descriptors inherited from gdata.service.Query:
+
__dict__
+
dictionary for instance variables (if defined)
+
+
__weakref__
+
list of weak references to the object (if defined)
+
+
alt
+
The feed query's alt parameter
+
+
author
+
The feed query's author parameter
+
+
max_results
+
The feed query's max-results parameter
+
+
published_max
+
The feed query's published-max parameter
+
+
published_min
+
The feed query's published-min parameter
+
+
start_index
+
The feed query's start-index parameter
+
+
text_query
+
The feed query's q parameter
+
+
updated_max
+
The feed query's updated-max parameter
+
+
updated_min
+
The feed query's updated-min parameter
+
+
+Methods inherited from __builtin__.dict:
+
__cmp__(...)
x.__cmp__(y) <==> cmp(x,y)
+ +
__contains__(...)
D.__contains__(k) -> True if D has a key k, else False
+ +
__delitem__(...)
x.__delitem__(y) <==> del x[y]
+ +
__eq__(...)
x.__eq__(y) <==> x==y
+ +
__ge__(...)
x.__ge__(y) <==> x>=y
+ +
__getattribute__(...)
x.__getattribute__('name') <==> x.name
+ +
__getitem__(...)
x.__getitem__(y) <==> x[y]
+ +
__gt__(...)
x.__gt__(y) <==> x>y
+ +
__hash__(...)
x.__hash__() <==> hash(x)
+ +
__iter__(...)
x.__iter__() <==> iter(x)
+ +
__le__(...)
x.__le__(y) <==> x<=y
+ +
__len__(...)
x.__len__() <==> len(x)
+ +
__lt__(...)
x.__lt__(y) <==> x<y
+ +
__ne__(...)
x.__ne__(y) <==> x!=y
+ +
__repr__(...)
x.__repr__() <==> repr(x)
+ +
__setitem__(...)
x.__setitem__(i, y) <==> x[i]=y
+ +
clear(...)
D.clear() -> None.  Remove all items from D.
+ +
copy(...)
D.copy() -> a shallow copy of D
+ +
get(...)
D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None.
+ +
has_key(...)
D.has_key(k) -> True if D has a key k, else False
+ +
items(...)
D.items() -> list of D's (key, value) pairs, as 2-tuples
+ +
iteritems(...)
D.iteritems() -> an iterator over the (key, value) items of D
+ +
iterkeys(...)
D.iterkeys() -> an iterator over the keys of D
+ +
itervalues(...)
D.itervalues() -> an iterator over the values of D
+ +
keys(...)
D.keys() -> list of D's keys
+ +
pop(...)
D.pop(k[,d]) -> v, remove specified key and return the corresponding value
+If key is not found, d is returned if given, otherwise KeyError is raised
+ +
popitem(...)
D.popitem() -> (k, v), remove and return some (key, value) pair as a
+2-tuple; but raise KeyError if D is empty
+ +
setdefault(...)
D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D
+ +
update(...)
D.update(E, **F) -> None.  Update D from E and F: for k in E: D[k] = E[k]
+(if E has keys else: for (k, v) in E: D[k] = v) then: for k in F: D[k] = F[k]
+ +
values(...)
D.values() -> list of D's values
+ +
+Data and other attributes inherited from __builtin__.dict:
+
__new__ = <built-in method __new__ of type object at 0x72b260>
T.__new__(S, ...) -> a new object with type S, a subtype of T
+ +
fromkeys = <built-in method fromkeys of type object at 0xc71310>
dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
+v defaults to None.
+ +

+ + + + + +
 
+Data
       UNKOWN_ERROR = 1000
+YOUTUBE_BAD_REQUEST = 400
+YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL = 'https://www.google.com/youtube/accounts/ClientLogin'
+YOUTUBE_COMPLAINT_CATEGORY_SCHEME = 'http://gdata.youtube.com/schemas/complaint-reasons.cat'
+YOUTUBE_COMPLAINT_CATEGORY_TERMS = ('PORN', 'VIOLENCE', 'HATE', 'DANGEROUS', 'RIGHTS', 'SPAM')
+YOUTUBE_CONFLICT = 409
+YOUTUBE_CONTACT_CATEGORY = ('Friends', 'Family')
+YOUTUBE_CONTACT_STATUS = ('accepted', 'rejected')
+YOUTUBE_INTERNAL_SERVER_ERROR = 500
+YOUTUBE_INVALID_ARGUMENT = 601
+YOUTUBE_INVALID_CONTENT_TYPE = 602
+YOUTUBE_INVALID_KIND = 604
+YOUTUBE_NOT_A_VIDEO = 603
+YOUTUBE_PLAYLIST_FEED_URI = 'http://gdata.youtube.com/feeds/api/playlists'
+YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS = ('1', '5', '6')
+YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS = ('published', 'viewCount', 'rating', 'relevance')
+YOUTUBE_QUERY_VALID_RACY_PARAMETERS = ('include', 'exclude')
+YOUTUBE_QUERY_VALID_TIME_PARAMETERS = ('today', 'this_week', 'this_month', 'all_time')
+YOUTUBE_RATING_LINK_REL = 'http://gdata.youtube.com/schemas#video.ratings'
+YOUTUBE_SCHEMA = 'http://gdata.youtube.com/schemas'
+YOUTUBE_SERVER = 'gdata.youtube.com'
+YOUTUBE_SERVICE = 'youtube'
+YOUTUBE_STANDARDFEEDS = ('most_recent', 'recently_featured', 'top_rated', 'most_viewed', 'watch_on_mobile')
+YOUTUBE_STANDARD_FEEDS = 'http://gdata.youtube.com/feeds/api/standardfeeds'
+YOUTUBE_STANDARD_MOST_DISCUSSED_URI = 'http://gdata.youtube.com/feeds/api/standardfeeds/most_discussed'
+YOUTUBE_STANDARD_MOST_LINKED_URI = 'http://gdata.youtube.com/feeds/api/standardfeeds/most_linked'
+YOUTUBE_STANDARD_MOST_RECENT_URI = 'http://gdata.youtube.com/feeds/api/standardfeeds/most_recent'
+YOUTUBE_STANDARD_MOST_RESPONDED_URI = 'http://gdata.youtube.com/feeds/api/standardfeeds/most_responded'
+YOUTUBE_STANDARD_MOST_VIEWED_URI = 'http://gdata.youtube.com/feeds/api/standardfeeds/most_viewed'
+YOUTUBE_STANDARD_RECENTLY_FEATURED_URI = 'http://gdata.youtube.com/feeds/api/standardfeeds/recently_featured'
+YOUTUBE_STANDARD_TOP_FAVORITES_URI = 'http://gdata.youtube.com/feeds/api/standardfeeds/top_favorites'
+YOUTUBE_STANDARD_TOP_RATED_URI = 'http://gdata.youtube.com/feeds/api/standardfeeds/top_rated'
+YOUTUBE_STANDARD_WATCH_ON_MOBILE_URI = 'http://gdata.youtube.com/feeds/api/standardfeeds/watch_on_mobile'
+YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME = 'http://gdata.youtube.com/schemas/subscriptiontypes.cat'
+YOUTUBE_SUPPORTED_UPLOAD_TYPES = ('mov', 'avi', 'wmv', 'mpg', 'quicktime', 'flv')
+YOUTUBE_UPLOAD_TOKEN_URI = 'http://gdata.youtube.com/action/GetUploadToken'
+YOUTUBE_UPLOAD_URI = 'http://uploads.gdata.youtube.com/feeds/api/users'
+YOUTUBE_USER_FEED_URI = 'http://gdata.youtube.com/feeds/api/users'
+YOUTUBE_VIDEO_URI = 'http://gdata.youtube.com/feeds/api/videos'
+__author__ = 'api.stephaniel@gmail.com (Stephanie Liu), api.jhartmann@gmail.com (Jochen Hartmann)'

+ + + + + +
 
+Author
       api.stephaniel@gmail.com (Stephanie Liu), api.jhartmann@gmail.com (Jochen Hartmann)
+ \ No newline at end of file diff --git a/gdata.py-1.2.3/pydocs/generate_docs b/gdata.py-1.2.3/pydocs/generate_docs new file mode 100755 index 0000000..9928c28 --- /dev/null +++ b/gdata.py-1.2.3/pydocs/generate_docs @@ -0,0 +1,44 @@ +export PYTHONPATH=../src:../../googleappengine-read-only:$PYTHONPATH +pydoc -w atom +pydoc -w atom.http_interface +pydoc -w atom.mock_http +pydoc -w atom.mock_service +pydoc -w atom.service +pydoc -w atom.token_store +pydoc -w atom.url +pydoc -w gdata +pydoc -w gdata.auth +pydoc -w gdata.service +pydoc -w gdata.client +pydoc -w gdata.urlfetch +pydoc -w gdata.alt.appengine +pydoc -w gdata.apps +pydoc -w gdata.apps.service +pydoc -w gdata.apps.emailsettings +pydoc -w gdata.apps.emailsettings.service +pydoc -w gdata.apps.migration +pydoc -w gdata.apps.migration.service +pydoc -w gdata.base +pydoc -w gdata.base.service +pydoc -w gdata.blogger +pydoc -w gdata.blogger.service +pydoc -w gdata.calendar +pydoc -w gdata.calendar.service +pydoc -w gdata.codesearch +pydoc -w gdata.codesearch.service +pydoc -w gdata.contacts +pydoc -w gdata.contacts.service +pydoc -w gdata.docs +pydoc -w gdata.docs.service +pydoc -w gdata.exif +pydoc -w gdata.geo +pydoc -w gdata.media +pydoc -w gdata.photos +pydoc -w gdata.photos.service +pydoc -w gdata.spreadsheet +pydoc -w gdata.spreadsheet.service +pydoc -w gdata.spreadsheet.text_db +pydoc -w gdata.webmastertools +pydoc -w gdata.webmastertools.service +pydoc -w gdata.youtube +pydoc -w gdata.youtube.service diff --git a/gdata.py-1.2.3/samples/authsub/secure_authsub.py b/gdata.py-1.2.3/samples/authsub/secure_authsub.py new file mode 100755 index 0000000..7efb196 --- /dev/null +++ b/gdata.py-1.2.3/samples/authsub/secure_authsub.py @@ -0,0 +1,187 @@ +#!/usr/bin/python +# +# Copyright 2008 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Sample to demonstrate using secure AuthSub in the Google Data Python client. + +This sample focuses on the Google Health Data API because it requires the use +of secure tokens. This samples makes queries against the H9 Developer's +Sandbox (https://www.google.com/h9). To run this sample: + 1.) Use Apache's mod_python + 2.) Run from your local webserver (e.g. http://localhost/...) + 3.) You need to have entered medication data into H9 + + HealthAubSubHelper: Class to handle secure AuthSub tokens. + GetMedicationHTML: Returns the user's medication formatted in HTML. + index: Main entry point for the web app. +""" + +__author__ = 'e.bidelman@google.com (Eric Bidelman)' + + +import os +import sys +import urllib +import gdata.auth +import gdata.service + +H9_PROFILE_FEED_URL = 'https://www.google.com/h9/feeds/profile/default' + +class HealthAuthSubHelper(object): + """A secure AuthSub helper to interact with the Google Health Data API""" + + H9_AUTHSUB_HANDLER = 'https://www.google.com/h9/authsub' + H9_SCOPE = 'https://www.google.com/h9/feeds/' + + def GetNextUrl(self, req): + """Computes the current URL the web app is running from. + + Args: + req: mod_python mp_request instance to build the URL from. + + Returns: + A string representing the web app's URL. 
+ """ + if req.is_https(): + next_url = 'https://' + else: + next_url = 'http://' + next_url += req.hostname + req.unparsed_uri + return next_url + + def GenerateAuthSubRequestUrl(self, next, scopes=[H9_SCOPE], + secure=True, session=True, extra_params=None, + include_scopes_in_next=True): + """Constructs the URL to the AuthSub token handler. + + Args: + next: string The URL AuthSub will redirect back to. + Use self.GetNextUrl() to return that URL. + scopes: (optional) string or list of scopes the token will be valid for. + secure: (optional) boolean True if the token should be a secure one + session: (optional) boolean True if the token will be exchanged for a + session token. + extra_params: (optional) dict of additional parameters to pass to AuthSub. + include_scopes_in_next: (optional) boolean True if the scopes in the + scopes should be passed to AuthSub. + + Returns: + A string (as a URL) to use for the AuthSubRequest endpoint. + """ + auth_sub_url = gdata.service.GenerateAuthSubRequestUrl( + next, scopes, hd='default', secure=secure, session=session, + request_url=self.H9_AUTHSUB_HANDLER, + include_scopes_in_next=include_scopes_in_next) + if extra_params: + auth_sub_url = '%s&%s' % (auth_sub_url, urllib.urlencode(extra_params)) + return auth_sub_url + + def SetPrivateKey(self, filename): + """Reads the private key from the specified file. + + See http://code.google.com/apis/gdata/authsub.html#Registered for\ + information on how to create a RSA private key/public cert pair. + + Args: + filename: string .pem file the key is stored in. + + Returns: + The private key as a string. + + Raises: + IOError: The file could not be read or does not exist. + """ + try: + f = open(filename) + rsa_private_key = f.read() + f.close() + except IOError, (errno, strerror): + raise 'I/O error(%s): %s' % (errno, strerror) + self.rsa_key = rsa_private_key + return rsa_private_key + + + +def GetMedicationHTML(feed): + """Prints out the user's medication to the console. 
+ + Args: + feed: A gdata.GDataFeed instance. + + Returns: + An HTML formatted string containing the user's medication data. + """ + if not feed.entry: + return 'No entries in feed
' + + html = [] + for entry in feed.entry: + try: + ccr = entry.FindExtensions('ContinuityOfCareRecord')[0] + body = ccr.FindChildren('Body')[0] + meds = body.FindChildren('Medications')[0].FindChildren('Medication') + for med in meds: + name = med.FindChildren('Product')[0].FindChildren('ProductName')[0] + html.append('

  • %s
  • ' % name.FindChildren('Text')[0].text) + except: + html.append('No medication data in this profile
    ') + return '
      %s
    ' % ''.join(html) + +def index(req): + req.content_type = 'text/html' + + authsub = HealthAuthSubHelper() + client = gdata.service.GDataService(service='weaver') + + current_url = authsub.GetNextUrl(req) + rsa_key = authsub.SetPrivateKey('/path/to/yourRSAPrivateKey.pem') + + # Strip token query parameter's value from URL if it exists + token = gdata.auth.extract_auth_sub_token_from_url(current_url, + rsa_key=rsa_key) + + if not token: + """STEP 1: No single use token in the URL or a saved session token. + Generate the AuthSub URL to fetch a single use token.""" + + params = {'permission': 1} + authsub_url = authsub.GenerateAuthSubRequestUrl(current_url, + extra_params=params) + req.write('Link your Google Health Profile' % authsub_url) + else: + """STEP 2: A single use token was extracted from the URL. + Upgrade the one time token to a session token.""" + + req.write('Single use token: %s
    ' % str(token)) + + client.UpgradeToSessionToken(token) # calls gdata.service.SetAuthSubToken() + + """STEP 3: Done with AuthSub :) Save the token for subsequent requests. + Query the Health Data API""" + req.write('Token info: %s
    ' % client.AuthSubTokenInfo()) + + req.write('Session token: %s
    ' % client.GetAuthSubToken()) + + # Query the Health Data API + params = {'digest': 'true', 'strict': 'true'} + uri = '%s?%s' % (H9_PROFILE_FEED_URL, urllib.urlencode(params)) + feed = client.GetFeed(uri) + + req.write('

    Listing medications

    ') + req.write(GetMedicationHTML(feed)) + + """STEP 4: Revoke the session token.""" + req.write('Revoked session token') + client.RevokeAuthSubToken() diff --git a/gdata.py-1.2.3/samples/base/baseQueryExample.py b/gdata.py-1.2.3/samples/base/baseQueryExample.py new file mode 100755 index 0000000..e9b18d5 --- /dev/null +++ b/gdata.py-1.2.3/samples/base/baseQueryExample.py @@ -0,0 +1,51 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import gdata.base.service +import gdata.service +try: + from xml.etree import ElementTree +except ImportError: + from elementtree import ElementTree +import atom +import gdata.base + +# Demonstrates queries to the snippets feed and stepping through the results. + +gb_client = gdata.base.service.GBaseService() +q = gdata.base.service.BaseQuery() +q.feed = '/base/feeds/snippets' +q['start-index'] = '1' +q['max-results'] = '10' +q.bq = raw_input('Please enter your Google Base query: ') + +feed = gb_client.QuerySnippetsFeed(q.ToUri()) + +while(int(q['start-index']) < 989): + # Display the titles of the snippets. + print 'Snippet query results items %s to %s' % (q['start-index'], + int(q['start-index'])+10) + for entry in feed.entry: + print ' ', entry.title.text + + # Show the next 10 results from the snippets feed when the user presses + # enter. 
+ nothing = raw_input('Press enter to see the next 10 results') + q['start-index'] = str(int(q['start-index']) + 10) + feed = gb_client.QuerySnippetsFeed(q.ToUri()) + +print 'You\'ve reached the upper limit of 1000 items. Goodbye :)' diff --git a/gdata.py-1.2.3/samples/base/dryRunInsert.py b/gdata.py-1.2.3/samples/base/dryRunInsert.py new file mode 100755 index 0000000..d1a42d0 --- /dev/null +++ b/gdata.py-1.2.3/samples/base/dryRunInsert.py @@ -0,0 +1,60 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import gdata.base.service +import gdata.service +try: + from xml.etree import ElementTree +except ImportError: + from elementtree import ElementTree +import atom +import gdata.base +import getpass + +# Demonstrates item insertion with a dry run insert operation. The item will +# NOT be added to Google Base. + +gb_client = gdata.base.service.GBaseService() +gb_client.email = raw_input('Please enter your username: ') +gb_client.password = getpass.getpass() + +print 'Logging in' +gb_client.ProgrammaticLogin() + +# Create a test item which will be used in a dry run insert +item = gdata.base.GBaseItem() +item.author.append(atom.Author(name=atom.Name(text='Mr. 
Smith'))) +item.title = atom.Title(text='He Jingxian\'s chicken') +item.link.append(atom.Link(rel='alternate', link_type='text/html', + href='http://www.host.com/123456jsh9')) +item.label.append(gdata.base.Label(text='kung pao chicken')) +item.label.append(gdata.base.Label(text='chinese cuisine')) +item.label.append(gdata.base.Label(text='testrecipes')) +item.item_type = gdata.base.ItemType(text='recipes') +item.AddItemAttribute(name='cooking_time', value_type='intUnit', value='30 minutes') +item.AddItemAttribute(name='main_ingredient', value='chicken') +item.AddItemAttribute(name='main_ingredient', value='chili') + +# Make an insert request with the dry run flag set so that the item will not +# actually be created. +result = gb_client.InsertItem(item, url_params={'dry-run': 'true'}) + +# Send the XML from the server to standard out. +print 'Here\'s the XML from the server\'s simulated insert' +print str(result) + +print 'Done' diff --git a/gdata.py-1.2.3/samples/blogger/BloggerExample.py b/gdata.py-1.2.3/samples/blogger/BloggerExample.py new file mode 100755 index 0000000..1f0361d --- /dev/null +++ b/gdata.py-1.2.3/samples/blogger/BloggerExample.py @@ -0,0 +1,317 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file demonstrates how to use the Google Data API's Python client library +# to interface with the Blogger service. 
There are examples for the following +# operations: +# +# * Retrieving the list of all the user's blogs +# * Retrieving all posts on a single blog +# * Performing a date-range query for posts on a blog +# * Creating draft posts and publishing posts +# * Updating posts +# * Retrieving comments +# * Creating comments +# * Deleting comments +# * Deleting posts + + +__author__ = 'lkeppler@google.com (Luke Keppler)' + + +from gdata import service +import gdata +import atom +import getopt +import sys + + +class BloggerExample: + + def __init__(self, email, password): + """Creates a GDataService and provides ClientLogin auth details to it. + The email and password are required arguments for ClientLogin. The + 'source' defined below is an arbitrary string, but should be used to + reference your name or the name of your organization, the app name and + version, with '-' between each of the three values.""" + + # Authenticate using ClientLogin. + self.service = service.GDataService(email, password) + self.service.source = 'Blogger_Python_Sample-1.0' + self.service.service = 'blogger' + self.service.server = 'www.blogger.com' + self.service.ProgrammaticLogin() + + # Get the blog ID for the first blog. + feed = self.service.Get('/feeds/default/blogs') + self_link = feed.entry[0].GetSelfLink() + if self_link: + self.blog_id = self_link.href.split('/')[-1] + + def PrintUserBlogTitles(self): + """Prints a list of all the user's blogs.""" + + # Request the feed. + query = service.Query() + query.feed = '/feeds/default/blogs' + feed = self.service.Get(query.ToUri()) + + # Print the results. + print feed.title.text + for entry in feed.entry: + print "\t" + entry.title.text + print + + def CreatePost(self, title, content, author_name, is_draft): + """This method creates a new post on a blog. The new post can be stored as + a draft or published based on the value of the is_draft parameter. 
The + method creates an GDataEntry for the new post using the title, content, + author_name and is_draft parameters. With is_draft, True saves the post as + a draft, while False publishes the post. Then it uses the given + GDataService to insert the new post. If the insertion is successful, the + added post (GDataEntry) will be returned. + """ + + # Create the entry to insert. + entry = gdata.GDataEntry() + entry.author.append(atom.Author(atom.Name(text=author_name))) + entry.title = atom.Title(title_type='xhtml', text=title) + entry.content = atom.Content(content_type='html', text=content) + if is_draft: + control = atom.Control() + control.draft = atom.Draft(text='yes') + entry.control = control + + # Ask the service to insert the new entry. + return self.service.Post(entry, + '/feeds/' + self.blog_id + '/posts/default') + + def PrintAllPosts(self): + """This method displays the titles of all the posts in a blog. First it + requests the posts feed for the blogs and then it prints the results. + """ + + # Request the feed. + feed = self.service.GetFeed('/feeds/' + self.blog_id + '/posts/default') + + # Print the results. + print feed.title.text + for entry in feed.entry: + if not entry.title.text: + print "\tNo Title" + else: + print "\t" + entry.title.text + print + + def PrintPostsInDateRange(self, start_time, end_time): + """This method displays the title and modification time for any posts that + have been created or updated in the period between the start_time and + end_time parameters. The method creates the query, submits it to the + GDataService, and then displays the results. + + Note that while the start_time is inclusive, the end_time is exclusive, so + specifying an end_time of '2007-07-01' will include those posts up until + 2007-6-30 11:59:59PM. + + The start_time specifies the beginning of the search period (inclusive), + while end_time specifies the end of the search period (exclusive). + """ + + # Create query and submit a request. 
+ query = service.Query() + query.feed = '/feeds/' + self.blog_id + '/posts/default' + query.updated_min = start_time + query.updated_max = end_time + query.orderby = 'updated' + feed = self.service.Get(query.ToUri()) + + # Print the results. + print feed.title.text + " posts between " + start_time + " and " + end_time + print feed.title.text + for entry in feed.entry: + if not entry.title.text: + print "\tNo Title" + else: + print "\t" + entry.title.text + print + + def UpdatePostTitle(self, entry_to_update, new_title): + """This method updates the title of the given post. The GDataEntry object + is updated with the new title, then a request is sent to the GDataService. + If the insertion is successful, the updated post will be returned. + + Note that other characteristics of the post can also be modified by + updating the values of the entry object before submitting the request. + + The entry_to_update is a GDatEntry containing the post to update. + The new_title is the text to use for the post's new title. Returns: a + GDataEntry containing the newly-updated post. + """ + + # Set the new title in the Entry object + entry_to_update.title = atom.Title('xhtml', new_title) + + # Grab the edit URI + edit_uri = entry_to_update.GetEditLink().href + + return self.service.Put(entry_to_update, edit_uri) + + def CreateComment(self, post_id, comment_text): + """This method adds a comment to the specified post. First the comment + feed's URI is built using the given post ID. Then a GDataEntry is created + for the comment and submitted to the GDataService. The post_id is the ID + of the post on which to post comments. The comment_text is the text of the + comment to store. Returns: an entry containing the newly-created comment + + NOTE: This functionality is not officially supported yet. 
+ """ + + # Build the comment feed URI + feed_uri = '/feeds/' + self.blog_id + '/' + post_id + '/comments/default' + + # Create a new entry for the comment and submit it to the GDataService + entry = gdata.GDataEntry() + entry.content = atom.Content(content_type='xhtml', text=comment_text) + return self.service.Post(entry, feed_uri) + + def PrintAllComments(self, post_id): + """This method displays all the comments for the given post. First the + comment feed's URI is built using the given post ID. Then the method + requests the comments feed and displays the results. Takes the post_id + of the post on which to view comments. + """ + + # Build comment feed URI and request comments on the specified post + feed_url = '/feeds/' + self.blog_id + '/comments/default' + feed = self.service.Get(feed_url) + + # Display the results + print feed.title.text + for entry in feed.entry: + print "\t" + entry.title.text + print "\t" + entry.updated.text + print + + def DeleteComment(self, post_id, comment_id): + """This method removes the comment specified by the given edit_link_href, the + URI for editing the comment. + """ + + feed_uri = '/feeds/' + self.blog_id + '/' + post_id + '/comments/default/' + comment_id + self.service.Delete(feed_uri) + + def DeletePost(self, edit_link_href): + """This method removes the post specified by the given edit_link_href, the + URI for editing the post. + """ + + self.service.Delete(edit_link_href) + + def run(self): + """Runs each of the example methods defined above, demonstrating how to + interface with the Blogger service. + """ + + # Demonstrate retrieving a list of the user's blogs. + self.PrintUserBlogTitles() + + # Demonstrate how to create a draft post. + draft_post = self.CreatePost("Snorkling in Aruba", + "

    We had so much fun snorkling in Aruba

    ", + "Post author", True) + print "Successfully created draft post: \"" + draft_post.title.text + "\".\n" + + # Demonstrate how to publish a public post. + public_post = self.CreatePost("Back from vacation", + "

    I didn't want to leave Aruba, but I ran out of money :(

    ", + "Post author", False) + print "Successfully created public post: \"" + public_post.title.text + "\".\n" + + # Demonstrate various feed queries. + print "Now listing all posts." + self.PrintAllPosts() + print "Now listing all posts between 2007-04-04 and 2007-04-23." + self.PrintPostsInDateRange("2007-04-04", "2007-04-23") + + # Demonstrate updating a post's title. + print "Now updating the title of the post we just created:" + public_post = self.UpdatePostTitle(public_post, "The party's over") + print "Successfully changed the post's title to \"" + public_post.title.text + "\".\n" + + # Demonstrate how to retrieve the comments for a post. + + # Get the post ID and build the comments feed URI for the specified post + self_id = public_post.id.text + tokens = self_id.split("-") + post_id = tokens[-1] + + print "Now posting a comment on the post titled: \"" + public_post.title.text + "\"." + comment = self.CreateComment(post_id, "Did you see any sharks?") + print "Successfully posted \"" + comment.content.text + "\" on the post titled: \"" + public_post.title.text + "\".\n" + + comment_id = comment.GetEditLink().href.split("/")[-1] + + print "Now printing all comments" + self.PrintAllComments(post_id) + + # Delete the comment we just posted + print "Now deleting the comment we just posted" + self.DeleteComment(post_id, comment_id) + print "Successfully deleted comment." + self.PrintAllComments(post_id) + + # Get the post's edit URI + edit_uri = public_post.GetEditLink().href + + # Demonstrate deleting posts. + print "Now deleting the post titled: \"" + public_post.title.text + "\"." + self.DeletePost(edit_uri) + print "Successfully deleted post." + self.PrintAllPosts() + + +def main(): + """The main function runs the BloggerExample application with the provided + username and password values. Authentication credentials are required. 
+ NOTE: It is recommended that you run this sample using a test account.""" + + # parse command line options + try: + opts, args = getopt.getopt(sys.argv[1:], "", ["email=", "password="]) + except getopt.error, msg: + print ('python BloggerExample.py --email [email] --password [password] ') + sys.exit(2) + + email = '' + password = '' + + # Process options + for o, a in opts: + if o == "--email": + email = a + elif o == "--password": + password = a + + if email == '' or password == '': + print ('python BloggerExample.py --email [email] --password [password]') + sys.exit(2) + + sample = BloggerExample(email, password) + sample.run() + + +if __name__ == '__main__': + main() diff --git a/gdata.py-1.2.3/samples/calendar/calendarExample.py b/gdata.py-1.2.3/samples/calendar/calendarExample.py new file mode 100755 index 0000000..fe05216 --- /dev/null +++ b/gdata.py-1.2.3/samples/calendar/calendarExample.py @@ -0,0 +1,577 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +__author__ = 'api.rboyd@gmail.com (Ryan Boyd)' + + +try: + from xml.etree import ElementTree +except ImportError: + from elementtree import ElementTree +import gdata.calendar.service +import gdata.service +import atom.service +import gdata.calendar +import atom +import getopt +import sys +import string +import time + + +class CalendarExample: + + def __init__(self, email, password): + """Creates a CalendarService and provides ClientLogin auth details to it. + The email and password are required arguments for ClientLogin. The + CalendarService automatically sets the service to be 'cl', as is + appropriate for calendar. The 'source' defined below is an arbitrary + string, but should be used to reference your name or the name of your + organization, the app name and version, with '-' between each of the three + values. The account_type is specified to authenticate either + Google Accounts or Google Apps accounts. See gdata.service or + http://code.google.com/apis/accounts/AuthForInstalledApps.html for more + info on ClientLogin. NOTE: ClientLogin should only be used for installed + applications and not for multi-user web applications.""" + + self.cal_client = gdata.calendar.service.CalendarService() + self.cal_client.email = email + self.cal_client.password = password + self.cal_client.source = 'Google-Calendar_Python_Sample-1.0' + self.cal_client.ProgrammaticLogin() + + def _PrintUserCalendars(self): + """Retrieves the list of calendars to which the authenticated user either + owns or subscribes to. This is the same list as is represented in the + Google Calendar GUI. Although we are only printing the title of the + calendar in this case, other information, including the color of the + calendar, the timezone, and more. 
See CalendarListEntry for more details + on available attributes.""" + + feed = self.cal_client.GetAllCalendarsFeed() + print 'Printing allcalendars: %s' % feed.title.text + for i, a_calendar in zip(xrange(len(feed.entry)), feed.entry): + print '\t%s. %s' % (i, a_calendar.title.text,) + + def _PrintOwnCalendars(self): + """Retrieves the list of calendars to which the authenticated user + owns -- + Although we are only printing the title of the + calendar in this case, other information, including the color of the + calendar, the timezone, and more. See CalendarListEntry for more details + on available attributes.""" + + feed = self.cal_client.GetOwnCalendarsFeed() + print 'Printing owncalendars: %s' % feed.title.text + for i, a_calendar in zip(xrange(len(feed.entry)), feed.entry): + print '\t%s. %s' % (i, a_calendar.title.text,) + + def _PrintAllEventsOnDefaultCalendar(self): + """Retrieves all events on the primary calendar for the authenticated user. + In reality, the server limits the result set intially returned. You can + use the max_results query parameter to allow the server to send additional + results back (see query parameter use in DateRangeQuery for more info). + Additionally, you can page through the results returned by using the + feed.GetNextLink().href value to get the location of the next set of + results.""" + + feed = self.cal_client.GetCalendarEventFeed() + print 'Events on Primary Calendar: %s' % (feed.title.text,) + for i, an_event in zip(xrange(len(feed.entry)), feed.entry): + print '\t%s. %s' % (i, an_event.title.text,) + for p, a_participant in zip(xrange(len(an_event.who)), an_event.who): + print '\t\t%s. %s' % (p, a_participant.email,) + print '\t\t\t%s' % (a_participant.name,) + print '\t\t\t%s' % (a_participant.attendee_status.value,) + + def _FullTextQuery(self, text_query='Tennis'): + """Retrieves events from the calendar which match the specified full-text + query. 
The full-text query searches the title and content of an event, + but it does not search the value of extended properties at the time of + this writing. It uses the default (primary) calendar of the authenticated + user and uses the private visibility/full projection feed. Please see: + http://code.google.com/apis/calendar/reference.html#Feeds + for more information on the feed types. Note: as we're not specifying + any query parameters other than the full-text query, recurring events + returned will not have gd:when elements in the response. Please see + the Google Calendar API query paramters reference for more info: + http://code.google.com/apis/calendar/reference.html#Parameters""" + + print 'Full text query for events on Primary Calendar: \'%s\'' % ( + text_query,) + query = gdata.calendar.service.CalendarEventQuery('default', 'private', + 'full', text_query) + feed = self.cal_client.CalendarQuery(query) + for i, an_event in zip(xrange(len(feed.entry)), feed.entry): + print '\t%s. %s' % (i, an_event.title.text,) + print '\t\t%s. %s' % (i, an_event.content.text,) + for a_when in an_event.when: + print '\t\tStart time: %s' % (a_when.start_time,) + print '\t\tEnd time: %s' % (a_when.end_time,) + + def _DateRangeQuery(self, start_date='2007-01-01', end_date='2007-07-01'): + """Retrieves events from the server which occur during the specified date + range. This uses the CalendarEventQuery class to generate the URL which is + used to retrieve the feed. For more information on valid query parameters, + see: http://code.google.com/apis/calendar/reference.html#Parameters""" + + print 'Date range query for events on Primary Calendar: %s to %s' % ( + start_date, end_date,) + query = gdata.calendar.service.CalendarEventQuery('default', 'private', + 'full') + query.start_min = start_date + query.start_max = end_date + feed = self.cal_client.CalendarQuery(query) + for i, an_event in zip(xrange(len(feed.entry)), feed.entry): + print '\t%s. 
%s' % (i, an_event.title.text,) + for a_when in an_event.when: + print '\t\tStart time: %s' % (a_when.start_time,) + print '\t\tEnd time: %s' % (a_when.end_time,) + + def _InsertCalendar(self, title='Little League Schedule', + description='This calendar contains practice and game times', + time_zone='America/Los_Angeles', hidden=False, location='Oakland', + color='#2952A3'): + """Creates a new calendar using the specified data.""" + print 'Creating new calendar with title "%s"' % title + calendar = gdata.calendar.CalendarListEntry() + calendar.title = atom.Title(text=title) + calendar.summary = atom.Summary(text=description) + calendar.where = gdata.calendar.Where(value_string=location) + calendar.color = gdata.calendar.Color(value=color) + calendar.timezone = gdata.calendar.Timezone(value=time_zone) + + if hidden: + calendar.hidden = gdata.calendar.Hidden(value='true') + else: + calendar.hidden = gdata.calendar.Hidden(value='false') + + new_calendar = self.cal_client.InsertCalendar(new_calendar=calendar) + return new_calendar + + def _UpdateCalendar(self, calendar, title='New Title', color=None): + """Updates the title and, optionally, the color of the supplied calendar""" + print 'Updating the calendar titled "%s" with the title "%s"' % ( + calendar.title.text, title) + calendar.title = atom.Title(text=title) + if color is not None: + calendar.color = gdata.calendar.Color(value=color) + + updated_calendar = self.cal_client.UpdateCalendar(calendar=calendar) + return updated_calendar + + def _DeleteAllCalendars(self): + """Deletes all calendars. 
Note: the primary calendar cannot be deleted""" + feed = self.cal_client.GetOwnCalendarsFeed() + for entry in feed.entry: + print 'Deleting calendar: %s' % entry.title.text + try: + self.cal_client.Delete(entry.GetEditLink().href) + except gdata.service.RequestError, msg: + if msg[0]['body'].startswith('Cannot remove primary calendar'): + print '\t%s' % msg[0]['body'] + else: + print '\tUnexpected Error: %s' % msg[0]['body'] + + def _InsertSubscription(self, + id='c4o4i7m2lbamc4k26sc2vokh5g%40group.calendar.google.com'): + """Subscribes to the calendar with the specified ID.""" + print 'Subscribing to the calendar with ID: %s' % id + calendar = gdata.calendar.CalendarListEntry() + calendar.id = atom.Id(text=id) + returned_calendar = self.cal_client.InsertCalendarSubscription(calendar) + return returned_calendar + + def _UpdateCalendarSubscription(self, + id='c4o4i7m2lbamc4k26sc2vokh5g%40group.calendar.google.com', + color=None, hidden=None, selected=None): + """Updates the subscription to the calendar with the specified ID.""" + print 'Updating the calendar subscription with ID: %s' % id + calendar_url = ( + 'http://www.google.com/calendar/feeds/default/allcalendars/full/%s' % id) + calendar_entry = self.cal_client.GetCalendarListEntry(calendar_url) + + if color is not None: + calendar_entry.color = gdata.calendar.Color(value=color) + if hidden is not None: + if hidden: + calendar_entry.hidden = gdata.calendar.Hidden(value='true') + else: + calendar_entry.hidden = gdata.calendar.Hidden(value='false') + if selected is not None: + if selected: + calendar_entry.selected = gdata.calendar.Selected(value='true') + else: + calendar_entry.selected = gdata.calendar.Selected(value='false') + + updated_calendar = self.cal_client.UpdateCalendar( + calendar_entry) + return updated_calendar + + def _DeleteCalendarSubscription(self, + id='c4o4i7m2lbamc4k26sc2vokh5g%40group.calendar.google.com'): + """Deletes the subscription to the calendar with the specified ID.""" + print 
'Deleting the calendar subscription with ID: %s' % id + calendar_url = ( + 'http://www.google.com/calendar/feeds/default/allcalendars/full/%s' % id) + calendar_entry = self.cal_client.GetCalendarListEntry(calendar_url) + self.cal_client.DeleteCalendarEntry(calendar_entry.GetEditLink().href) + + def _InsertEvent(self, title='Tennis with Beth', + content='Meet for a quick lesson', where='On the courts', + start_time=None, end_time=None, recurrence_data=None): + """Inserts a basic event using either start_time/end_time definitions + or gd:recurrence RFC2445 icalendar syntax. Specifying both types of + dates is not valid. Note how some members of the CalendarEventEntry + class use arrays and others do not. Members which are allowed to occur + more than once in the calendar or GData "kinds" specifications are stored + as arrays. Even for these elements, Google Calendar may limit the number + stored to 1. The general motto to use when working with the Calendar data + API is that functionality not available through the GUI will not be + available through the API. 
Please see the GData Event "kind" document: + http://code.google.com/apis/gdata/elements.html#gdEventKind + for more information""" + + event = gdata.calendar.CalendarEventEntry() + event.title = atom.Title(text=title) + event.content = atom.Content(text=content) + event.where.append(gdata.calendar.Where(value_string=where)) + + if recurrence_data is not None: + # Set a recurring event + event.recurrence = gdata.calendar.Recurrence(text=recurrence_data) + else: + if start_time is None: + # Use current time for the start_time and have the event last 1 hour + start_time = time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime()) + end_time = time.strftime('%Y-%m-%dT%H:%M:%S.000Z', + time.gmtime(time.time() + 3600)) + event.when.append(gdata.calendar.When(start_time=start_time, + end_time=end_time)) + + new_event = self.cal_client.InsertEvent(event, + '/calendar/feeds/default/private/full') + + return new_event + + def _InsertSingleEvent(self, title='One-time Tennis with Beth', + content='Meet for a quick lesson', where='On the courts', + start_time=None, end_time=None): + """Uses the _InsertEvent helper method to insert a single event which + does not have any recurrence syntax specified.""" + + new_event = self._InsertEvent(title, content, where, start_time, end_time, + recurrence_data=None) + + print 'New single event inserted: %s' % (new_event.id.text,) + print '\tEvent edit URL: %s' % (new_event.GetEditLink().href,) + print '\tEvent HTML URL: %s' % (new_event.GetHtmlLink().href,) + + return new_event + + def _InsertRecurringEvent(self, title='Weekly Tennis with Beth', + content='Meet for a quick lesson', where='On the courts', + recurrence_data=None): + """Uses the _InsertEvent helper method to insert a recurring event which + has only RFC2445 icalendar recurrence syntax specified. Note the use of + carriage return/newline pairs at the end of each line in the syntax. 
Even + when specifying times (as opposed to only dates), VTIMEZONE syntax is not + required if you use a standard Java timezone ID. Please see the docs for + more information on gd:recurrence syntax: + http://code.google.com/apis/gdata/elements.html#gdRecurrence + """ + + if recurrence_data is None: + recurrence_data = ('DTSTART;VALUE=DATE:20070501\r\n' + + 'DTEND;VALUE=DATE:20070502\r\n' + + 'RRULE:FREQ=WEEKLY;BYDAY=Tu;UNTIL=20070904\r\n') + + new_event = self._InsertEvent(title, content, where, + recurrence_data=recurrence_data, start_time=None, end_time=None) + + print 'New recurring event inserted: %s' % (new_event.id.text,) + print '\tEvent edit URL: %s' % (new_event.GetEditLink().href,) + print '\tEvent HTML URL: %s' % (new_event.GetHtmlLink().href,) + + return new_event + + def _InsertQuickAddEvent(self, + content="Tennis with John today 3pm-3:30pm"): + """Creates an event with the quick_add property set to true so the content + is processed as quick add content instead of as an event description.""" + event = gdata.calendar.CalendarEventEntry() + event.content = atom.Content(text=content) + event.quick_add = gdata.calendar.QuickAdd(value='true'); + + new_event = self.cal_client.InsertEvent(event, + '/calendar/feeds/default/private/full') + return new_event + + def _InsertSimpleWebContentEvent(self): + """Creates a WebContent object and embeds it in a WebContentLink. + The WebContentLink is appended to the existing list of links in the event + entry. 
Finally, the calendar client inserts the event.""" + + # Create a WebContent object + url = 'http://www.google.com/logos/worldcup06.gif' + web_content = gdata.calendar.WebContent(url=url, width='276', height='120') + + # Create a WebContentLink object that contains the WebContent object + title = 'World Cup' + href = 'http://www.google.com/calendar/images/google-holiday.gif' + type = 'image/gif' + web_content_link = gdata.calendar.WebContentLink(title=title, href=href, + link_type=type, web_content=web_content) + + # Create an event that contains this web content + event = gdata.calendar.CalendarEventEntry() + event.link.append(web_content_link) + + print 'Inserting Simple Web Content Event' + new_event = self.cal_client.InsertEvent(event, + '/calendar/feeds/default/private/full') + return new_event + + def _InsertWebContentGadgetEvent(self): + """Creates a WebContent object and embeds it in a WebContentLink. + The WebContentLink is appended to the existing list of links in the event + entry. Finally, the calendar client inserts the event. 
Web content + gadget events display Calendar Gadgets inside Google Calendar.""" + + # Create a WebContent object + url = 'http://google.com/ig/modules/datetime.xml' + web_content = gdata.calendar.WebContent(url=url, width='300', height='136') + web_content.gadget_pref.append( + gdata.calendar.WebContentGadgetPref(name='color', value='green')) + + # Create a WebContentLink object that contains the WebContent object + title = 'Date and Time Gadget' + href = 'http://gdata.ops.demo.googlepages.com/birthdayicon.gif' + type = 'application/x-google-gadgets+xml' + web_content_link = gdata.calendar.WebContentLink(title=title, href=href, + link_type=type, web_content=web_content) + + # Create an event that contains this web content + event = gdata.calendar.CalendarEventEntry() + event.link.append(web_content_link) + + print 'Inserting Web Content Gadget Event' + new_event = self.cal_client.InsertEvent(event, + '/calendar/feeds/default/private/full') + return new_event + + def _UpdateTitle(self, event, new_title='Updated event title'): + """Updates the title of the specified event with the specified new_title. + Note that the UpdateEvent method (like InsertEvent) returns the + CalendarEventEntry object based upon the data returned from the server + after the event is inserted. This represents the 'official' state of + the event on the server. The 'edit' link returned in this event can + be used for future updates. Due to the use of the 'optimistic concurrency' + method of version control, most GData services do not allow you to send + multiple update requests using the same edit URL. 
Please see the docs: + http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency + """ + + previous_title = event.title.text + event.title.text = new_title + print 'Updating title of event from:\'%s\' to:\'%s\'' % ( + previous_title, event.title.text,) + return self.cal_client.UpdateEvent(event.GetEditLink().href, event) + + def _AddReminder(self, event, minutes=10): + """Adds a reminder to the event. This uses the default reminder settings + for the user to determine what type of notifications are sent (email, sms, + popup, etc.) and sets the reminder for 'minutes' number of minutes before + the event. Note: you can only use values for minutes as specified in the + Calendar GUI.""" + + for a_when in event.when: + if len(a_when.reminder) > 0: + a_when.reminder[0].minutes = minutes + else: + a_when.reminder.append(gdata.calendar.Reminder(minutes=minutes)) + + print 'Adding %d minute reminder to event' % (minutes,) + return self.cal_client.UpdateEvent(event.GetEditLink().href, event) + + def _AddExtendedProperty(self, event, + name='http://www.example.com/schemas/2005#mycal.id', value='1234'): + """Adds an arbitrary name/value pair to the event. This value is only + exposed through the API. Extended properties can be used to store extra + information needed by your application. The recommended format is used as + the default arguments above. The use of the URL format is to specify a + namespace prefix to avoid collisions between different applications.""" + + event.extended_property.append( + gdata.calendar.ExtendedProperty(name=name, value=value)) + print 'Adding extended property to event: \'%s\'=\'%s\'' % (name, value,) + return self.cal_client.UpdateEvent(event.GetEditLink().href, event) + + def _DeleteEvent(self, event): + """Given an event object returned for the calendar server, this method + deletes the event. 
The edit link present in the event is the URL used + in the HTTP DELETE request.""" + + self.cal_client.DeleteEvent(event.GetEditLink().href) + + def _PrintAclFeed(self): + """Sends a HTTP GET to the default ACL URL + (http://www.google.com/calendar/feeds/default/acl/full) and displays the + feed returned in the response.""" + + feed = self.cal_client.GetCalendarAclFeed() + print feed.title.text + for i, a_rule in zip(xrange(len(feed.entry)), feed.entry): + print '\t%s. %s' % (i, a_rule.title.text,) + print '\t\t Role: %s' % (a_rule.role.value,) + print '\t\t Scope %s - %s' % (a_rule.scope.type, a_rule.scope.value) + + def _CreateAclRule(self, username): + """Creates a ACL rule that grants the given user permission to view + free/busy information on the default calendar. Note: It is not necessary + to specify a title for the ACL entry. The server will set this to be the + value of the role specified (in this case "freebusy").""" + + rule = gdata.calendar.CalendarAclEntry() + rule.scope = gdata.calendar.Scope(value=username, scope_type="user") + roleValue = "http://schemas.google.com/gCal/2005#%s" % ("freebusy") + rule.role = gdata.calendar.Role(value=roleValue) + aclUrl = "/calendar/feeds/default/acl/full" + returned_rule = self.cal_client.InsertAclEntry(rule, aclUrl) + + def _RetrieveAclRule(self, username): + """Builds the aclEntryUri or the entry created in the previous example. + The sends a HTTP GET message and displays the entry returned in the + response.""" + + aclEntryUri = "http://www.google.com/calendar/feeds/" + aclEntryUri += "default/acl/full/user:%s" % (username) + entry = self.cal_client.GetCalendarAclEntry(aclEntryUri) + print '\t%s' % (entry.title.text,) + print '\t\t Role: %s' % (entry.role.value,) + print '\t\t Scope %s - %s' % (entry.scope.type, entry.scope.value) + return entry + + def _UpdateAclRule(self, entry): + """Modifies the value of the role in the given entry and POSTs the updated + entry. 
Note that while the role of an ACL entry can be updated, the + scope can not be modified.""" + + roleValue = "http://schemas.google.com/gCal/2005#%s" % ("read") + entry.role = gdata.calendar.Role(value=roleValue) + returned_rule = self.cal_client.UpdateAclEntry(entry.GetEditLink().href, + entry) + + def _DeleteAclRule(self, entry): + """Given an ACL entry returned for the calendar server, this method + deletes the entry. The edit link present in the entry is the URL used + in the HTTP DELETE request.""" + + self.cal_client.DeleteAclEntry(entry.GetEditLink().href) + + def Run(self, delete='false'): + """Runs each of the example methods defined above. Note how the result + of the _InsertSingleEvent call is used for updating the title and the + result of updating the title is used for inserting the reminder and + again with the insertion of the extended property. This is due to the + Calendar's use of GData's optimistic concurrency versioning control system: + http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency + """ + + # Getting feeds and query results + self._PrintUserCalendars() + self._PrintOwnCalendars() + self._PrintAllEventsOnDefaultCalendar() + self._FullTextQuery() + self._DateRangeQuery() + + # Inserting and updating events + see = self._InsertSingleEvent() + see_u_title = self._UpdateTitle(see, 'New title for single event') + see_u_reminder = self._AddReminder(see_u_title, minutes=30) + see_u_ext_prop = self._AddExtendedProperty(see_u_reminder, + name='propname', value='propvalue') + ree = self._InsertRecurringEvent() + simple_web_content_event = self._InsertSimpleWebContentEvent() + web_content_gadget_event = self._InsertWebContentGadgetEvent() + quick_add_event = self._InsertQuickAddEvent() + + # Access Control List examples + self._PrintAclFeed() + self._CreateAclRule("user@gmail.com") + entry = self._RetrieveAclRule("user@gmail.com") + self._UpdateAclRule(entry) + self._DeleteAclRule(entry) + + # Creating, updating and deleting 
calendars + inserted_calendar = self._InsertCalendar() + updated_calendar = self._UpdateCalendar(calendar=inserted_calendar) + + # Insert Subscription + inserted_subscription = self._InsertSubscription() + updated_subscription = self._UpdateCalendarSubscription(selected=False) + + # Delete entries if delete argument='true' + if delete == 'true': + print 'Deleting created events' + self.cal_client.DeleteEvent(see_u_ext_prop.GetEditLink().href) + self.cal_client.DeleteEvent(ree.GetEditLink().href) + self.cal_client.DeleteEvent(simple_web_content_event.GetEditLink().href) + self.cal_client.DeleteEvent(web_content_gadget_event.GetEditLink().href) + self.cal_client.DeleteEvent(quick_add_event.GetEditLink().href) + print 'Deleting subscriptions' + self._DeleteCalendarSubscription() + print 'Deleting all calendars' + self._DeleteAllCalendars() + + +def main(): + """Runs the CalendarExample application with the provided username and + and password values. Authentication credentials are required. 
+ NOTE: It is recommended that you run this sample using a test account.""" + + # parse command line options + try: + opts, args = getopt.getopt(sys.argv[1:], "", ["user=", "pw=", "delete="]) + except getopt.error, msg: + print ('python calendarExample.py --user [username] --pw [password] ' + + '--delete [true|false] ') + sys.exit(2) + + user = '' + pw = '' + delete = 'false' + + # Process options + for o, a in opts: + if o == "--user": + user = a + elif o == "--pw": + pw = a + elif o == "--delete": + delete = a + + if user == '' or pw == '': + print ('python calendarExample.py --user [username] --pw [password] ' + + '--delete [true|false] ') + sys.exit(2) + + sample = CalendarExample(user, pw) + sample.Run(delete) + +if __name__ == '__main__': + main() diff --git a/gdata.py-1.2.3/samples/contacts/contacts_example.py b/gdata.py-1.2.3/samples/contacts/contacts_example.py new file mode 100755 index 0000000..530bc99 --- /dev/null +++ b/gdata.py-1.2.3/samples/contacts/contacts_example.py @@ -0,0 +1,326 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + + +import sys +import getopt +import getpass +import atom +import gdata.contacts +import gdata.contacts.service + + +class ContactsSample(object): + """ContactsSample object demonstrates operations with the Contacts feed.""" + + def __init__(self, email, password): + """Constructor for the ContactsSample object. 
+ + Takes an email and password corresponding to a gmail account to + demonstrate the functionality of the Contacts feed. + + Args: + email: [string] The e-mail address of the account to use for the sample. + password: [string] The password corresponding to the account specified by + the email parameter. + + Yields: + A ContactsSample object used to run the sample demonstrating the + functionality of the Contacts feed. + """ + self.gd_client = gdata.contacts.service.ContactsService() + self.gd_client.email = email + self.gd_client.password = password + self.gd_client.source = 'GoogleInc-ContactsPythonSample-1' + self.gd_client.ProgrammaticLogin() + + def PrintFeed(self, feed, ctr=0): + """Prints out the contents of a feed to the console. + + Args: + feed: A gdata.contacts.ContactsFeed instance. + ctr: [int] The number of entries in this feed previously printed. This + allows continuous entry numbers when paging through a feed. + + Returns: + The number of entries printed, including those previously printed as + specified in ctr. This is for passing as an argument to ctr on + successive calls to this method. + + """ + if not feed.entry: + print '\nNo entries in feed.\n' + return 0 + for i, entry in enumerate(feed.entry): + print '\n%s %s' % (ctr+i+1, entry.title.text) + if entry.content: + print ' %s' % (entry.content.text) + for email in entry.email: + if email.primary and email.primary == 'true': + print ' %s' % (email.address) + # Show the contact groups that this contact is a member of. + for group in entry.group_membership_info: + print ' Member of group: %s' % (group.href) + # Display extended properties. 
+ for extended_property in entry.extended_property: + if extended_property.value: + value = extended_property.value + else: + value = extended_property.GetXmlBlobString() + print ' Extended Property %s: %s' % (extended_property.name, value) + return len(feed.entry) + ctr + + def PrintPaginatedFeed(self, feed, print_method): + """ Print all pages of a paginated feed. + + This will iterate through a paginated feed, requesting each page and + printing the entries contained therein. + + Args: + feed: A gdata.contacts.ContactsFeed instance. + print_method: The method which will be used to print each page of the + feed. Must accept these two named arguments: + feed: A gdata.contacts.ContactsFeed instance. + ctr: [int] The number of entries in this feed previously + printed. This allows continuous entry numbers when paging + through a feed. + """ + ctr = 0 + while feed: + # Print contents of current feed + ctr = print_method(feed=feed, ctr=ctr) + # Prepare for next feed iteration + next = feed.GetNextLink() + feed = None + if next: + if self.PromptOperationShouldContinue(): + # Another feed is available, and the user has given us permission + # to fetch it + feed = self.gd_client.GetContactsFeed(next.href) + else: + # User has asked us to terminate + feed = None + + def PromptOperationShouldContinue(self): + """ Display a "Continue" prompt. + + This give is used to give users a chance to break out of a loop, just in + case they have too many contacts/groups. + + Returns: + A boolean value, True if the current operation should continue, False if + the current operation should terminate. + """ + while True: + input = raw_input("Continue [Y/n]? 
") + if input is 'N' or input is 'n': + return False + elif input is 'Y' or input is 'y' or input is '': + return True + + def ListAllContacts(self): + """Retrieves a list of contacts and displays name and primary email.""" + feed = self.gd_client.GetContactsFeed() + self.PrintPaginatedFeed(feed, self.PrintGroupsFeed) + + def PrintGroupsFeed(self, feed, ctr): + if not feed.entry: + print '\nNo groups in feed.\n' + return 0 + for i, entry in enumerate(feed.entry): + print '\n%s %s' % (ctr+i+1, entry.title.text) + if entry.content: + print ' %s' % (entry.content.text) + # Display the group id which can be used to query the contacts feed. + print ' Group ID: %s' % entry.id.text + # Display extended properties. + for extended_property in entry.extended_property: + if extended_property.value: + value = extended_property.value + else: + value = extended_property.GetXmlBlobString() + print ' Extended Property %s: %s' % (extended_property.name, value) + return len(feed.entry) + ctr + + def ListAllGroups(self): + feed = self.gd_client.GetGroupsFeed() + self.PrintPaginatedFeed(feed, self.PrintGroupsFeed) + + def CreateMenu(self): + """Prompts that enable a user to create a contact.""" + name = raw_input('Enter contact\'s name: ') + notes = raw_input('Enter notes for contact: ') + primary_email = raw_input('Enter primary email address: ') + + new_contact = gdata.contacts.ContactEntry(title=atom.Title(text=name)) + new_contact.content = atom.Content(text=notes) + # Create a work email address for the contact and use as primary. + new_contact.email.append(gdata.contacts.Email(address=primary_email, + primary='true', rel=gdata.contacts.REL_WORK)) + entry = self.gd_client.CreateContact(new_contact) + + if entry: + print 'Creation successful!' + print 'ID for the new contact:', entry.id.text + else: + print 'Upload error.' 
+ + def QueryMenu(self): + """Prompts for updated-min query parameters and displays results.""" + updated_min = raw_input( + 'Enter updated min (example: 2007-03-16T00:00:00): ') + query = gdata.contacts.service.ContactsQuery() + query.updated_min = updated_min + feed = self.gd_client.GetContactsFeed(query.ToUri()) + self.PrintFeed(feed) + + def QueryGroupsMenu(self): + """Prompts for updated-min query parameters and displays results.""" + updated_min = raw_input( + 'Enter updated min (example: 2007-03-16T00:00:00): ') + query = gdata.service.Query(feed='/m8/feeds/groups/default/full') + query.updated_min = updated_min + feed = self.gd_client.GetGroupsFeed(query.ToUri()) + self.PrintGroupsFeed(feed) + + def _SelectContact(self): + feed = self.gd_client.GetContactsFeed() + self.PrintFeed(feed) + selection = 5000 + while selection > len(feed.entry)+1 or selection < 1: + selection = int(raw_input( + 'Enter the number for the contact you would like to modify: ')) + return feed.entry[selection-1] + + def UpdateContactMenu(self): + selected_entry = self._SelectContact() + new_name = raw_input('Enter a new name for the contact: ') + if not selected_entry.title: + selected_entry.title = atom.Title() + selected_entry.title.text = new_name + self.gd_client.UpdateContact(selected_entry.GetEditLink().href, selected_entry) + + def DeleteContactMenu(self): + selected_entry = self._SelectContact() + self.gd_client.DeleteContact(selected_entry.GetEditLink().href) + + def PrintMenu(self): + """Displays a menu of options for the user to choose from.""" + print ('\nContacts Sample\n' + '1) List all of your contacts.\n' + '2) Create a contact.\n' + '3) Query contacts on updated time.\n' + '4) Modify a contact.\n' + '5) Delete a contact.\n' + '6) List all of your contact groups.\n' + '7) Query your groups on updated time.\n' + '8) Exit.\n') + + def GetMenuChoice(self, max): + """Retrieves the menu selection from the user. 
+ + Args: + max: [int] The maximum number of allowed choices (inclusive) + + Returns: + The integer of the menu item chosen by the user. + """ + while True: + input = raw_input('> ') + + try: + num = int(input) + except ValueError: + print 'Invalid choice. Please choose a value between 1 and', max + continue + + if num > max or num < 1: + print 'Invalid choice. Please choose a value between 1 and', max + else: + return num + + def Run(self): + """Prompts the user to choose funtionality to be demonstrated.""" + try: + while True: + + self.PrintMenu() + + choice = self.GetMenuChoice(8) + + if choice == 1: + self.ListAllContacts() + elif choice == 2: + self.CreateMenu() + elif choice == 3: + self.QueryMenu() + elif choice == 4: + self.UpdateContactMenu() + elif choice == 5: + self.DeleteContactMenu() + elif choice == 6: + self.ListAllGroups() + elif choice == 7: + self.QueryGroupsMenu() + elif choice == 8: + return + + except KeyboardInterrupt: + print '\nGoodbye.' + return + + +def main(): + """Demonstrates use of the Contacts extension using the ContactsSample object.""" + # Parse command line options + try: + opts, args = getopt.getopt(sys.argv[1:], '', ['user=', 'pw=']) + except getopt.error, msg: + print 'python contacts_example.py --user [username] --pw [password]' + sys.exit(2) + + user = '' + pw = '' + # Process options + for option, arg in opts: + if option == '--user': + user = arg + elif option == '--pw': + pw = arg + + while not user: + print 'NOTE: Please run these tests only with a test account.' + user = raw_input('Please enter your username: ') + while not pw: + pw = getpass.getpass() + if not pw: + print 'Password cannot be blank.' + + + try: + sample = ContactsSample(user, pw) + except gdata.service.BadAuthentication: + print 'Invalid user credentials given.' 
+ return + + sample.Run() + + +if __name__ == '__main__': + main() diff --git a/gdata.py-1.2.3/samples/docs/docs_example.py b/gdata.py-1.2.3/samples/docs/docs_example.py new file mode 100755 index 0000000..1263a25 --- /dev/null +++ b/gdata.py-1.2.3/samples/docs/docs_example.py @@ -0,0 +1,261 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'api.jfisher (Jeff Fisher)' + + +import sys +import re +import os.path +import getopt +import getpass +import gdata.docs.service + + +class DocsSample(object): + """A DocsSample object demonstrates the Document List feed.""" + + def __init__(self, email, password): + """Constructor for the DocsSample object. + + Takes an email and password corresponding to a gmail account to + demonstrate the functionality of the Document List feed. + + Args: + email: [string] The e-mail address of the account to use for the sample. + password: [string] The password corresponding to the account specified by + the email parameter. + + Returns: + A DocsSample object used to run the sample demonstrating the + functionality of the Document List feed. + """ + self.gd_client = gdata.docs.service.DocsService() + self.gd_client.email = email + self.gd_client.password = password + self.gd_client.source = 'Document List Python Sample' + self.gd_client.ProgrammaticLogin() + + def _PrintFeed(self, feed): + """Prints out the contents of a feed to the console. 
+ + Args: + feed: A gdata.docs.DocumentListFeed instance. + """ + print '\n' + if not feed.entry: + print 'No entries in feed.\n' + for i, entry in enumerate(feed.entry): + print '%s %s\n' % (i+1, entry.title.text.encode('UTF-8')) + + def _GetFileExtension(self, file_name): + """Returns the uppercase file extension for a file. + + Args: + file_name: [string] The basename of a filename. + + Returns: + A string containing the file extension of the file. + """ + match = re.search('.*\.([a-zA-Z]{3,}$)', file_name) + if match: + return match.group(1).upper() + return False + + def _UploadMenu(self): + """Prompts that enable a user to upload a file to the Document List feed.""" + file_path = '' + file_path = raw_input('Enter path to file: ') + + if not file_path: + return + elif not os.path.isfile(file_path): + print 'Not a valid file.' + return + + file_name = os.path.basename(file_path) + ext = self._GetFileExtension(file_name) + + if not ext or ext not in gdata.docs.service.SUPPORTED_FILETYPES: + print 'File type not supported. Check the file extension.' + return + else: + content_type = gdata.docs.service.SUPPORTED_FILETYPES[ext] + + title = '' + while not title: + title = raw_input('Enter name for document: ') + + try: + ms = gdata.MediaSource(file_path=file_path, content_type=content_type) + except IOError: + print 'Problems reading file. Check permissions.' + return + + if ext in ['CSV', 'ODS', 'XLS']: + print 'Uploading spreadsheet...' + entry = self.gd_client.UploadSpreadsheet(ms, title) + elif ext in ['PPT', 'PPS']: + print 'Uploading presentation...' + entry = self.gd_client.UploadPresentation(ms, title) + else: + print 'Uploading word processor document...' + entry = self.gd_client.UploadDocument(ms, title) + + if entry: + print 'Upload successful!' + print 'Document now accessible at:', entry.GetAlternateLink().href + else: + print 'Upload error.' 
+ + def _ListAllDocuments(self): + """Retrieves a list of all of a user's documents and displays them.""" + feed = self.gd_client.GetDocumentListFeed() + self._PrintFeed(feed) + + def _ListAllSpreadsheets(self): + """Retrieves a list of a user's spreadsheets and displays them.""" + query = gdata.docs.service.DocumentQuery(categories=['spreadsheet']) + feed = self.gd_client.Query(query.ToUri()) + self._PrintFeed(feed) + + def _ListAllWPDocuments(self): + """Retrieves a list of a user's WP documents and displays them.""" + query = gdata.docs.service.DocumentQuery(categories=['document']) + feed = self.gd_client.Query(query.ToUri()) + self._PrintFeed(feed) + + def _ListAllPresentations(self): + """Retrieves a list of a user's presentations and displays them.""" + query = gdata.docs.service.DocumentQuery(categories=['presentation']) + feed = self.gd_client.Query(query.ToUri()) + self._PrintFeed(feed) + + def _FullTextSearch(self): + """Searches a user's documents for a text string. + + Provides prompts to search a user's documents and displays the results + of such a search. The text_query parameter of the DocumentListQuery object + corresponds to the contents of the q parameter in the feed. Note that this + parameter searches the content of documents, not just their titles. + """ + input = raw_input('Enter search term: ') + query = gdata.docs.service.DocumentQuery(text_query=input) + feed = self.gd_client.Query(query.ToUri()) + self._PrintFeed(feed) + + def _PrintMenu(self): + """Displays a menu of options for the user to choose from.""" + print ('\nDocument List Sample\n' + '1) List all of your documents.\n' + '2) List all of your spreadsheets.\n' + '3) List all of your word processor documents.\n' + '4) List all of your presentations.\n' + '5) Search your documents.\n' + '6) Upload a document.\n' + '7) Exit.\n') + + def _GetMenuChoice(self, max): + """Retrieves the menu selection from the user. 
+ + Args: + max: [int] The maximum number of allowed choices (inclusive) + + Returns: + The integer of the menu item chosen by the user. + """ + while True: + input = raw_input('> ') + + try: + num = int(input) + except ValueError: + print 'Invalid choice. Please choose a value between 1 and', max + continue + + if num > max or num < 1: + print 'Invalid choice. Please choose a value between 1 and', max + else: + return num + + def Run(self): + """Prompts the user to choose funtionality to be demonstrated.""" + try: + while True: + + self._PrintMenu() + + choice = self._GetMenuChoice(7) + + if choice == 1: + self._ListAllDocuments() + elif choice == 2: + self._ListAllSpreadsheets() + elif choice == 3: + self._ListAllWPDocuments() + elif choice == 4: + self._ListAllPresentations() + elif choice == 5: + self._FullTextSearch() + elif choice == 6: + self._UploadMenu() + elif choice == 7: + return + + except KeyboardInterrupt: + print '\nGoodbye.' + return + + +def main(): + """Demonstrates use of the Docs extension using the DocsSample object.""" + # Parse command line options + try: + opts, args = getopt.getopt(sys.argv[1:], '', ['user=', 'pw=']) + except getopt.error, msg: + print 'python docsExample.py --user [username] --pw [password] ' + sys.exit(2) + + user = '' + pw = '' + key = '' + # Process options + for option, arg in opts: + if option == '--user': + user = arg + elif option == '--pw': + pw = arg + + while not user: + print 'NOTE: Please run these tests only with a test account.' + user = raw_input('Please enter your username: ') + while not pw: + pw = getpass.getpass() + if not pw: + print 'Password cannot be blank.' + + try: + sample = DocsSample(user, pw) + except gdata.service.BadAuthentication: + print 'Invalid user credentials given.' 
+ return + + sample.Run() + + +if __name__ == '__main__': + main() diff --git a/gdata.py-1.2.3/samples/mashups/birthdaySample.py b/gdata.py-1.2.3/samples/mashups/birthdaySample.py new file mode 100755 index 0000000..79f40df --- /dev/null +++ b/gdata.py-1.2.3/samples/mashups/birthdaySample.py @@ -0,0 +1,343 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +# This sample uses the Google Spreadsheets data API and the Google +# Calendar data API. The script pulls a list of birthdays from a +# Google Spreadsheet and inserts them as webContent events in the +# user's Google Calendar. +# +# The script expects a certain format in the spreadsheet: Name, +# Birthday, Photo URL, and Edit URL as headers. Expected format +# of the birthday is: MM/DD. Edit URL is to be left blank by the +# user - the script uses this column to determine whether to insert +# a new event or to update an event at the URL. 
+# +# See the spreadsheet below for an example: +# http://spreadsheets.google.com/pub?key=pfMX-JDVnx47J0DxqssIQHg +# + + +__author__ = 'api.stephaniel@google.com (Stephanie Liu)' + +try: + from xml.etree import ElementTree # for Python 2.5 users +except: + from elementtree import ElementTree + +import gdata.spreadsheet.service +import gdata.calendar.service +import gdata.calendar +import gdata.service +import atom.service +import gdata.spreadsheet +import atom +import string +import time +import datetime +import getopt +import getpass +import sys + + +class BirthdaySample: + # CONSTANTS: Expected column headers: name, birthday, photourl, editurl & + # default calendar reminder set to 2 days + NAME = "name" + BIRTHDAY = "birthday" + PHOTO_URL = "photourl" + EDIT_URL = "editurl" + REMINDER = 60 * 24 * 2 # minutes + + def __init__(self, email, password): + """ Initializes spreadsheet and calendar clients. + + Creates SpreadsheetsService and CalendarService objects and + authenticates to each with ClientLogin. For more information + about ClientLogin authentication: + http://code.google.com/apis/accounts/AuthForInstalledApps.html + + Args: + email: string + password: string + """ + + self.s_client = gdata.spreadsheet.service.SpreadsheetsService() + self.s_client.email = email + self.s_client.password = password + self.s_client.source = 'exampleCo-birthdaySample-1' + self.s_client.ProgrammaticLogin() + + self.c_client = gdata.calendar.service.CalendarService() + self.c_client.email = email + self.c_client.password = password + self.c_client.source = 'exampleCo-birthdaySample-1' + self.c_client.ProgrammaticLogin() + + def _PrintFeed(self, feed): + """ Prints out Spreadsheet feeds in human readable format. + + Generic function taken from spreadsheetsExample.py. 
+ + Args: + feed: SpreadsheetsCellsFeed, SpreadsheetsListFeed, + SpreadsheetsWorksheetsFeed, or SpreadsheetsSpreadsheetsFeed + """ + for i, entry in enumerate(feed.entry): + if isinstance(feed, gdata.spreadsheet.SpreadsheetsCellsFeed): + print '%s %s\n' % (entry.title.text, entry.content.text) + elif isinstance(feed, gdata.spreadsheet.SpreadsheetsListFeed): + print '%s %s %s\n' % (i, entry.title.text, entry.content.text) + else: + print '%s %s\n' % (i, entry.title.text) + + def _PromptForSpreadsheet(self): + """ Prompts user to select spreadsheet. + + Gets and displays titles of all spreadsheets for user to + select. Generic function taken from spreadsheetsExample.py. + + Args: + none + + Returns: + spreadsheet ID that the user selected: string + """ + + feed = self.s_client.GetSpreadsheetsFeed() + self._PrintFeed(feed) + input = raw_input('\nSelection: ') + + # extract and return the spreadsheet ID + return feed.entry[string.atoi(input)].id.text.rsplit('/', 1)[1] + + def _PromptForWorksheet(self, key): + """ Prompts user to select desired worksheet. + + Gets and displays titles of all worksheets for user to + select. Generic function taken from spreadsheetsExample.py. + + Args: + key: string + + Returns: + the worksheet ID that the user selected: string + """ + + feed = self.s_client.GetWorksheetsFeed(key) + self._PrintFeed(feed) + input = raw_input('\nSelection: ') + + # extract and return the worksheet ID + return feed.entry[string.atoi(input)].id.text.rsplit('/', 1)[1] + + def _AddReminder(self, event, minutes): + """ Adds a reminder to a calendar event. + + This function sets the reminder attribute of the CalendarEventEntry. + The script sets it to 2 days by default, and this value is not + settable by the user. However, it can easily be changed to take this + option. 
+ + Args: + event: CalendarEventEntry + minutes: int + + Returns: + the updated event: CalendarEventEntry + """ + + for a_when in event.when: + if len(a_when.reminder) > 0: + a_when.reminder[0].minutes = minutes + else: + a_when.reminder.append(gdata.calendar.Reminder(minutes=minutes)) + + return self.c_client.UpdateEvent(event.GetEditLink().href, event) + + def _CreateBirthdayWebContentEvent(self, name, birthday, photo_url): + """ Create the birthday web content event. + + This function creates and populates a CalendarEventEntry. webContent + specific attributes are set. To learn more about the webContent + format: + + http://www.google.com/support/calendar/bin/answer.py?answer=48528 + + Args: + name: string + birthday: string - expected format (MM/DD) + photo_url: string + + Returns: + the webContent CalendarEventEntry + """ + + title = "%s's Birthday!" % name + content = "It's %s's Birthday!" % name + month = string.atoi(birthday.split("/")[0]) + day = string.atoi(birthday.split("/")[1]) + + # Get current year + year = time.ctime()[-4:] + year = string.atoi(year) + + # Calculate the "end date" for the all day event + start_time = datetime.date(year, month, day) + one_day = datetime.timedelta(days=1) + end_time = start_time + one_day + + start_time_str = start_time.strftime("%Y-%m-%d") + end_time_str = end_time.strftime("%Y-%m-%d") + + # Create yearly recurrence rule + recurrence_data = ("DTSTART;VALUE=DATE:%s\r\n" + "DTEND;VALUE=DATE:%s\r\n" + "RRULE:FREQ=YEARLY;WKST=SU\r\n" % + (start_time.strftime("%Y%m%d"), end_time.strftime("%Y%m%d"))) + + web_rel = "http://schemas.google.com/gCal/2005/webContent" + icon_href = "http://www.perstephanie.com/images/birthdayicon.gif" + icon_type = "image/gif" + extension_text = ( + 'gCal:webContent xmlns:gCal="http://schemas.google.com/gCal/2005"' + ' url="%s" width="300" height="225"' % (photo_url)) + + event = gdata.calendar.CalendarEventEntry() + event.title = atom.Title(text=title) + event.content = 
atom.Content(text=content) + event.recurrence = gdata.calendar.Recurrence(text=recurrence_data) + event.when.append(gdata.calendar.When(start_time=start_time_str, + end_time=end_time_str)) + + # Adding the webContent specific XML + event.link.append(atom.Link(rel=web_rel, title=title, href=icon_href, + link_type=icon_type)) + event.link[0].extension_elements.append( + atom.ExtensionElement(extension_text)) + + return event + + def _InsertBirthdayWebContentEvent(self, event): + """ Insert event into the authenticated user's calendar. + + Args: + event: CalendarEventEntry + + Returns: + the newly created CalendarEventEntry + """ + + edit_uri = '/calendar/feeds/default/private/full' + return self.c_client.InsertEvent(event, edit_uri) + + def Run(self): + """ Run sample. + + TODO: add exception handling + + Args: + none + """ + + key_id = self._PromptForSpreadsheet() + wksht_id = self._PromptForWorksheet(key_id) + + feed = self.s_client.GetListFeed(key_id, wksht_id) + + found_name = False + found_birthday = False + found_photourl = False + found_editurl = False + + # Check to make sure all headers are present + # Need to find at least one instance of name, birthday, photourl + # editurl + if len(feed.entry) > 0: + for name, custom in feed.entry[0].custom.iteritems(): + if custom.column == self.NAME: + found_name = True + elif custom.column == self.BIRTHDAY: + found_birthday = True + elif custom.column == self.PHOTO_URL: + found_photourl = True + elif custom.column == self.EDIT_URL: + found_editurl = True + + if not found_name and found_birthday and found_photourl and found_editurl: + print ("ERROR - Unexpected number of column headers. Should have: %s," + " %s, %s, and %s." 
% (self.NAME, self.BIRTHDAY, self.PHOTO_URL, + self.EDIT_URL)) + sys.exit(1) + + # For every row in the spreadsheet, grab all the data and either insert + # a new event into the calendar, or update the existing event + + # Create dict to represent the row data to update edit link back to + # Spreadsheet + + for entry in feed.entry: + d = {} + input_valid = True + + for name, custom in entry.custom.iteritems(): + d[custom.column] = custom.text + + month = int(d[self.BIRTHDAY].split("/")[0]) + day = int(d[self.BIRTHDAY].split("/")[1]) + + # Some input checking. Script will allow the insert to continue with + # a missing name value. + if d[self.NAME] is None: + d[self.NAME] = " " + if d[self.PHOTO_URL] is None: + input_valid = False + if d[self.BIRTHDAY] is None: + input_valid = False + elif not 1 <= month <= 12 or not 1 <= day <= 31: + input_valid = False + + if d[self.EDIT_URL] is None and input_valid: + event = self._CreateBirthdayWebContentEvent(d[self.NAME], + d[self.BIRTHDAY], d[self.PHOTO_URL]) + event = self._InsertBirthdayWebContentEvent(event) + event = self._AddReminder(event, self.REMINDER) + print "Added %s's birthday!" % d[self.NAME] + elif input_valid: # Event already exists + edit_link = d[self.EDIT_URL] + event = self._CreateBirthdayWebContentEvent(d[self.NAME], + d[self.BIRTHDAY], d[self.PHOTO_URL]) + event = self.c_client.UpdateEvent(edit_link, event) + event = self._AddReminder(event, self.REMINDER) + print "Updated %s's birthday!" % d[self.NAME] + + if input_valid: + d[self.EDIT_URL] = event.GetEditLink().href + self.s_client.UpdateRow(entry, d) + else: + print "Warning - Skipping row, missing valid input." 
+ +def main(): + email = raw_input("Please enter your email: ") + password = getpass.getpass("Please enter your password: ") + + sample = BirthdaySample(email, password) + sample.Run() + + +if __name__ == '__main__': + main() + diff --git a/gdata.py-1.2.3/samples/oauth/oauth_example.py b/gdata.py-1.2.3/samples/oauth/oauth_example.py new file mode 100755 index 0000000..c17f4a7 --- /dev/null +++ b/gdata.py-1.2.3/samples/oauth/oauth_example.py @@ -0,0 +1,129 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'kunalmshah.userid (Kunal Shah)' + +import sys +import os.path +import getopt +import gdata.auth +import gdata.docs.service + + +class OAuthSample(object): + """An OAuthSample object demonstrates the three-legged OAuth process.""" + + def __init__(self, consumer_key, consuer_secret): + """Constructor for the OAuthSample object. + + Takes a consumer key and consumer secret, authenticates using OAuth + mechanism and lists the document titles using Document List Data API. + Uses HMAC-SHA1 signature method. + + Args: + consumer_key: string Domain identifying third_party web application. + consumer_secret: string Secret generated during registration. + + Returns: + An OAuthSample object used to run the sample demonstrating the + way to use OAuth authentication mode. 
+ """ + self.consumer_key = consumer_key + self.consumer_secret = consuer_secret + self.gd_client = gdata.docs.service.DocsService() + + def _PrintFeed(self, feed): + """Prints out the contents of a feed to the console. + + Args: + feed: A gdata.docs.DocumentListFeed instance. + """ + if not feed.entry: + print 'No entries in feed.\n' + i = 1 + for entry in feed.entry: + print '%d. %s\n' % (i, entry.title.text.encode('UTF-8')) + i += 1 + + def _ListAllDocuments(self): + """Retrieves a list of all of a user's documents and displays them.""" + feed = self.gd_client.GetDocumentListFeed() + self._PrintFeed(feed) + + def Run(self): + """Demonstrates usage of OAuth authentication mode and retrieves a list of + documents using Document List Data API.""" + print '\nSTEP 1: Set OAuth input parameters.' + self.gd_client.SetOAuthInputParameters( + gdata.auth.OAuthSignatureMethod.HMAC_SHA1, + self.consumer_key, consumer_secret=self.consumer_secret) + print '\nSTEP 2: Fetch OAuth Request token.' + request_token = self.gd_client.FetchOAuthRequestToken() + print 'Request Token fetched: %s' % request_token + print '\nSTEP 3: Set the fetched OAuth token.' + self.gd_client.SetOAuthToken(request_token) + print 'OAuth request token set.' + print '\nSTEP 4: Generate OAuth authorization URL.' + auth_url = self.gd_client.GenerateOAuthAuthorizationURL() + print 'Authorization URL: %s' % auth_url + raw_input('Manually go to the above URL and authenticate.' + 'Press a key after authorization.') + print '\nSTEP 5: Upgrade to an OAuth access token.' + self.gd_client.UpgradeToOAuthAccessToken() + print 'Access Token: %s' % ( + self.gd_client.token_store.find_token(request_token.scopes[0])) + print '\nYour Documents:\n' + self._ListAllDocuments() + print 'STEP 6: Revoke the OAuth access token after use.' + self.gd_client.RevokeOAuthToken() + print 'OAuth access token revoked.' + + +def main(): + """Demonstrates usage of OAuth authentication mode. + + Prints a list of documents. 
This demo uses HMAC-SHA1 signature method. + """ + # Parse command line options + try: + opts, args = getopt.getopt(sys.argv[1:], '', ['consumer_key=', + 'consumer_secret=']) + except getopt.error, msg: + print ('python oauth_example.py --consumer_key [oauth_consumer_key] ' + '--consumer_secret [consumer_secret] ') + sys.exit(2) + + consumer_key = '' + consumer_secret = '' + # Process options + for option, arg in opts: + if option == '--consumer_key': + consumer_key = arg + elif option == '--consumer_secret': + consumer_secret = arg + + while not consumer_key: + consumer_key = raw_input('Please enter consumer key: ') + while not consumer_secret: + consumer_secret = raw_input('Please enter consumer secret: ') + + sample = OAuthSample(consumer_key, consumer_secret) + sample.Run() + + +if __name__ == '__main__': + main() diff --git a/gdata.py-1.2.3/samples/spreadsheets/spreadsheetExample.py b/gdata.py-1.2.3/samples/spreadsheets/spreadsheetExample.py new file mode 100755 index 0000000..fa98d87 --- /dev/null +++ b/gdata.py-1.2.3/samples/spreadsheets/spreadsheetExample.py @@ -0,0 +1,199 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +__author__ = 'api.laurabeth@gmail.com (Laura Beth Lincoln)' + + +try: + from xml.etree import ElementTree +except ImportError: + from elementtree import ElementTree +import gdata.spreadsheet.service +import gdata.service +import atom.service +import gdata.spreadsheet +import atom +import getopt +import sys +import string + + +class SimpleCRUD: + + def __init__(self, email, password): + self.gd_client = gdata.spreadsheet.service.SpreadsheetsService() + self.gd_client.email = email + self.gd_client.password = password + self.gd_client.source = 'Spreadsheets GData Sample' + self.gd_client.ProgrammaticLogin() + self.curr_key = '' + self.curr_wksht_id = '' + self.list_feed = None + + def _PromptForSpreadsheet(self): + # Get the list of spreadsheets + feed = self.gd_client.GetSpreadsheetsFeed() + self._PrintFeed(feed) + input = raw_input('\nSelection: ') + id_parts = feed.entry[string.atoi(input)].id.text.split('/') + self.curr_key = id_parts[len(id_parts) - 1] + + def _PromptForWorksheet(self): + # Get the list of worksheets + feed = self.gd_client.GetWorksheetsFeed(self.curr_key) + self._PrintFeed(feed) + input = raw_input('\nSelection: ') + id_parts = feed.entry[string.atoi(input)].id.text.split('/') + self.curr_wksht_id = id_parts[len(id_parts) - 1] + + def _PromptForCellsAction(self): + print ('dump\n' + 'update {row} {col} {input_value}\n' + '\n') + input = raw_input('Command: ') + command = input.split(' ', 1) + if command[0] == 'dump': + self._CellsGetAction() + elif command[0] == 'update': + parsed = command[1].split(' ', 2) + if len(parsed) == 3: + self._CellsUpdateAction(parsed[0], parsed[1], parsed[2]) + else: + self._CellsUpdateAction(parsed[0], parsed[1], '') + else: + self._InvalidCommandError(input) + + def _PromptForListAction(self): + print ('dump\n' + 'insert {row_data} (example: insert label=content)\n' + 'update {row_index} {row_data}\n' + 'delete {row_index}\n' + '\n') + input = raw_input('Command: ') + command = input.split(' ' , 1) + if 
command[0] == 'dump': + self._ListGetAction() + elif command[0] == 'insert': + self._ListInsertAction(command[1]) + elif command[0] == 'update': + parsed = command[1].split(' ', 1) + self._ListUpdateAction(parsed[0], parsed[1]) + elif command[0] == 'delete': + self._ListDeleteAction(command[1]) + else: + self._InvalidCommandError(input) + + def _CellsGetAction(self): + # Get the feed of cells + feed = self.gd_client.GetCellsFeed(self.curr_key, self.curr_wksht_id) + self._PrintFeed(feed) + + def _CellsUpdateAction(self, row, col, inputValue): + entry = self.gd_client.UpdateCell(row=row, col=col, inputValue=inputValue, + key=self.curr_key, wksht_id=self.curr_wksht_id) + if isinstance(entry, gdata.spreadsheet.SpreadsheetsCell): + print 'Updated!' + + def _ListGetAction(self): + # Get the list feed + self.list_feed = self.gd_client.GetListFeed(self.curr_key, self.curr_wksht_id) + self._PrintFeed(self.list_feed) + + def _ListInsertAction(self, row_data): + entry = self.gd_client.InsertRow(self._StringToDictionary(row_data), + self.curr_key, self.curr_wksht_id) + if isinstance(entry, gdata.spreadsheet.SpreadsheetsList): + print 'Inserted!' + + def _ListUpdateAction(self, index, row_data): + self.list_feed = self.gd_client.GetListFeed(self.curr_key, self.curr_wksht_id) + entry = self.gd_client.UpdateRow( + self.list_feed.entry[string.atoi(index)], + self._StringToDictionary(row_data)) + if isinstance(entry, gdata.spreadsheet.SpreadsheetsList): + print 'Updated!' + + def _ListDeleteAction(self, index): + self.list_feed = self.gd_client.GetListFeed(self.curr_key, self.curr_wksht_id) + self.gd_client.DeleteRow(self.list_feed.entry[string.atoi(index)]) + print 'Deleted!' 
+ + def _StringToDictionary(self, row_data): + dict = {} + for param in row_data.split(): + temp = param.split('=') + dict[temp[0]] = temp[1] + return dict + + def _PrintFeed(self, feed): + for i, entry in enumerate(feed.entry): + if isinstance(feed, gdata.spreadsheet.SpreadsheetsCellsFeed): + print '%s %s\n' % (entry.title.text, entry.content.text) + elif isinstance(feed, gdata.spreadsheet.SpreadsheetsListFeed): + print '%s %s %s' % (i, entry.title.text, entry.content.text) + # Print this row's value for each column (the custom dictionary is + # built using the gsx: elements in the entry.) + print 'Contents:' + for key in entry.custom: + print ' %s: %s' % (key, entry.custom[key].text) + print '\n', + else: + print '%s %s\n' % (i, entry.title.text) + + def _InvalidCommandError(self, input): + print 'Invalid input: %s\n' % (input) + + def Run(self): + self._PromptForSpreadsheet() + self._PromptForWorksheet() + input = raw_input('cells or list? ') + if input == 'cells': + while True: + self._PromptForCellsAction() + elif input == 'list': + while True: + self._PromptForListAction() + + +def main(): + # parse command line options + try: + opts, args = getopt.getopt(sys.argv[1:], "", ["user=", "pw="]) + except getopt.error, msg: + print 'python spreadsheetExample.py --user [username] --pw [password] ' + sys.exit(2) + + user = '' + pw = '' + key = '' + # Process options + for o, a in opts: + if o == "--user": + user = a + elif o == "--pw": + pw = a + + if user == '' or pw == '': + print 'python spreadsheetExample.py --user [username] --pw [password] ' + sys.exit(2) + + sample = SimpleCRUD(user, pw) + sample.Run() + + +if __name__ == '__main__': + main() diff --git a/gdata.py-1.2.3/samples/webmastertools/AddDeleteExampleDotCom.py b/gdata.py-1.2.3/samples/webmastertools/AddDeleteExampleDotCom.py new file mode 100644 index 0000000..c56413d --- /dev/null +++ b/gdata.py-1.2.3/samples/webmastertools/AddDeleteExampleDotCom.py @@ -0,0 +1,103 @@ +#!/usr/bin/python +# +# Copyright 
(C) 2008 Yu-Jie Lin +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import urllib + +import gdata.webmastertools.service +import gdata.service +try: + from xml.etree import ElementTree +except ImportError: + from elementtree import ElementTree +import atom +import getpass + + +username = '' +password = '' + +username = raw_input('Please enter your username: ') +password = getpass.getpass() + +client = gdata.webmastertools.service.GWebmasterToolsService( + email=username, + password=password, source='PythonWebmasterToolsSample-1') + +EXAMPLE_SITE = 'http://www.example.com/' +EXAMPLE_SITEMAP = 'http://www.example.com/sitemap-index.xml' + + +def safeElementText(element): + if hasattr(element, 'text'): + return element.text + return '' + + +print 'Logging in' +client.ProgrammaticLogin() + +print +print 'Adding site: %s' % EXAMPLE_SITE +entry = client.AddSite(EXAMPLE_SITE) + +print +print "%-25s %25s %25s" % ('Site', 'Last Updated', 'Last Crawled') +print '='*80 +print "%-25s %25s %25s" % ( + entry.title.text.replace('http://', '')[:25], entry.updated.text[:25], + safeElementText(entry.crawled)[:25]) +print " Preferred: %-23s Indexed: %5s GeoLoc: %10s" % ( + safeElementText(entry.preferred_domain)[:30], entry.indexed.text[:5], + safeElementText(entry.geolocation)[:10]) +print " Crawl rate: %-10s Verified: %5s" % ( + safeElementText(entry.crawl_rate)[:10], entry.verified.text[:5]) + +# Verifying a site. 
This sample won't do this since we don't own example.com +#client.VerifySite(EXAMPLE_SITE, 'htmlpage') + +# The following needs the ownership of the site +#client.UpdateGeoLocation(EXAMPLE_SITE, 'US') +#client.UpdateCrawlRate(EXAMPLE_SITE, 'normal') +#client.UpdatePreferredDomain(EXAMPLE_SITE, 'preferwww') +#client.UpdateEnhancedImageSearch(EXAMPLE_SITE, 'true') + +print +print 'Adding sitemap: %s' % EXAMPLE_SITEMAP +entry = client.AddSitemap(EXAMPLE_SITE, EXAMPLE_SITEMAP) + +print entry.title.text.replace('http://', '')[:80] +print " Last Updated : %29s Status: %10s" % ( + entry.updated.text[:29], entry.sitemap_status.text[:10]) +print " Last Downloaded: %29s URL Count: %10s" % ( + safeElementText(entry.sitemap_last_downloaded)[:29], + safeElementText(entry.sitemap_url_count)[:10]) + +# Add a mobile sitemap +#entry = client.AddMobileSitemap(EXAMPLE_SITE, 'http://.../sitemap-mobile-example.xml', 'XHTML') + +# Add a news sitemap, your site must be included in Google News. +# See also http://google.com/support/webmasters/bin/answer.py?answer=42738 +#entry = client.AddNewsSitemap(EXAMPLE_SITE, 'http://.../sitemap-news-example.xml', 'Label') + +print +print 'Deleting sitemap: %s' % EXAMPLE_SITEMAP +client.DeleteSitemap(EXAMPLE_SITE, EXAMPLE_SITEMAP) + +print +print 'Deleting site: %s' % EXAMPLE_SITE +client.DeleteSite(EXAMPLE_SITE) +print diff --git a/gdata.py-1.2.3/samples/webmastertools/SitemapsFeedSummary.py b/gdata.py-1.2.3/samples/webmastertools/SitemapsFeedSummary.py new file mode 100644 index 0000000..7c4d246 --- /dev/null +++ b/gdata.py-1.2.3/samples/webmastertools/SitemapsFeedSummary.py @@ -0,0 +1,68 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Yu-Jie Lin +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import gdata.webmastertools.service +import gdata.service +try: + from xml.etree import ElementTree +except ImportError: + from elementtree import ElementTree +import atom +import getpass + + +username = '' +password = '' +site_uri = '' + +username = raw_input('Please enter your username: ') +password = getpass.getpass() +site_uri = raw_input('Please enter your site url: ') + +client = gdata.webmastertools.service.GWebmasterToolsService( + email=username, + password=password, source='PythonWebmasterToolsSample-1') + +print 'Logging in' +client.ProgrammaticLogin() + +print 'Retrieving Sitemaps feed' +feed = client.GetSitemapsFeed(site_uri) + +# Format the feed +print +print 'You have %d sitemap(s), last updated at %s' % ( + len(feed.entry), feed.updated.text) +print +print '='*80 + + +def safeElementText(element): + if hasattr(element, 'text'): + return element.text + return '' + + +# Format each site +for entry in feed.entry: + print entry.title.text.replace('http://', '')[:80] + print " Last Updated : %29s Status: %10s" % ( + entry.updated.text[:29], entry.sitemap_status.text[:10]) + print " Last Downloaded: %29s URL Count: %10s" % ( + safeElementText(entry.sitemap_last_downloaded)[:29], + safeElementText(entry.sitemap_url_count)[:10]) +print diff --git a/gdata.py-1.2.3/samples/webmastertools/SitesFeedSummary.py b/gdata.py-1.2.3/samples/webmastertools/SitesFeedSummary.py new file mode 100644 index 0000000..62827ad --- /dev/null +++ b/gdata.py-1.2.3/samples/webmastertools/SitesFeedSummary.py @@ -0,0 +1,70 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 
Yu-Jie Lin +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import gdata.webmastertools.service +import gdata.service +try: + from xml.etree import ElementTree +except ImportError: + from elementtree import ElementTree +import atom +import getpass + + +username = '' +password = '' + +username = raw_input('Please enter your username: ') +password = getpass.getpass() + +client = gdata.webmastertools.service.GWebmasterToolsService( + email=username, + password=password, source='PythonWebmasterToolsSample-1') + +print 'Logging in' +client.ProgrammaticLogin() + +print 'Retrieving Sites feed' +feed = client.GetSitesFeed() + +# Format the feed +print +print 'You have %d site(s), last updated at %s' % ( + len(feed.entry), feed.updated.text) +print +print "%-25s %25s %25s" % ('Site', 'Last Updated', 'Last Crawled') +print '='*80 + + +def safeElementText(element): + if hasattr(element, 'text'): + return element.text + return '' + + +# Format each site +for entry in feed.entry: + print "%-25s %25s %25s" % ( + entry.title.text.replace('http://', '')[:25], entry.updated.text[:25], + safeElementText(entry.crawled)[:25]) + print " Preferred: %-23s Indexed: %5s GeoLoc: %10s" % ( + safeElementText(entry.preferred_domain)[:30], entry.indexed.text[:5], + safeElementText(entry.geolocation)[:10]) + print " Crawl rate: %-10s Verified: %5s" % ( + safeElementText(entry.crawl_rate)[:10], entry.verified.text[:5]) + +print diff --git a/gdata.py-1.2.3/setup.py b/gdata.py-1.2.3/setup.py new 
file mode 100755 index 0000000..b7d565a --- /dev/null +++ b/gdata.py-1.2.3/setup.py @@ -0,0 +1,63 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from distutils.core import setup + + +setup( + name='gdata.py', + version='1.2.3', + description='Python client library for Google data APIs', + long_description = """\ +The Google data Python client library makes it easy to access data +through the Google data APIs. This library provides data model and +service modules for the the following Google data services: +- Google Calendar data API +- Google Contacts data API +- Google Spreadsheets data API +- Google Document List data APIs +- Google Base data API +- Google Apps Provisioning API +- Google Apps Email Migration API +- Google Apps Email Settings API +- Picasa Web Albums Data API +- Google Code Search Data API +- YouTube Data API +- Google Webmaster Tools Data API +- Blogger Data API +- core Google data API functionality +The core Google data code provides sufficient functionality to use this +library with any Google data API (even if a module hasn't been written for +it yet). For example, this client can be used with the Notebook API. +This library may also be used with any Atom Publishing Protocol +service. 
+""", + author='Jeffrey Scudder', + author_email='api.jscudder@gmail.com', + license='Apache 2.0', + url='http://code.google.com/p/gdata-python-client/', + packages=['atom', 'gdata', 'gdata.calendar', 'gdata.base', + 'gdata.spreadsheet', 'gdata.apps', 'gdata.apps.emailsettings', + 'gdata.apps.migration', 'gdata.docs', 'gdata.codesearch', + 'gdata.photos', 'gdata.exif', 'gdata.geo', 'gdata.media', + 'gdata.contacts', 'gdata.youtube', 'gdata.webmastertools', + 'gdata.blogger', 'gdata.alt', 'gdata.oauth', 'gdata.tlslite', + 'gdata.Crypto', 'gdata.Crypto.Cipher', 'gdata.Crypto.Hash', + 'gdata.Crypto.Protocol', 'gdata.Crypto.PublicKey', 'gdata.Crypto.Util', + 'gdata.tlslite.utils', 'gdata.tlslite.integration'], + package_dir = {'gdata':'src/gdata', 'atom':'src/atom'} +) diff --git a/gdata.py-1.2.3/src/atom/__init__.py b/gdata.py-1.2.3/src/atom/__init__.py new file mode 100755 index 0000000..247b271 --- /dev/null +++ b/gdata.py-1.2.3/src/atom/__init__.py @@ -0,0 +1,1395 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Contains classes representing Atom elements. + + Module objective: provide data classes for Atom constructs. These classes hide + the XML-ness of Atom and provide a set of native Python classes to interact + with. + + Conversions to and from XML should only be necessary when the Atom classes + "touch the wire" and are sent over HTTP. 
For this reason this module + provides methods and functions to convert Atom classes to and from strings. + + For more information on the Atom data model, see RFC 4287 + (http://www.ietf.org/rfc/rfc4287.txt) + + AtomBase: A foundation class on which Atom classes are built. It + handles the parsing of attributes and children which are common to all + Atom classes. By default, the AtomBase class translates all XML child + nodes into ExtensionElements. + + ExtensionElement: Atom allows Atom objects to contain XML which is not part + of the Atom specification, these are called extension elements. If a + classes parser encounters an unexpected XML construct, it is translated + into an ExtensionElement instance. ExtensionElement is designed to fully + capture the information in the XML. Child nodes in an XML extension are + turned into ExtensionElements as well. +""" + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree + + +# XML namespaces which are often used in Atom entities. +ATOM_NAMESPACE = 'http://www.w3.org/2005/Atom' +ELEMENT_TEMPLATE = '{http://www.w3.org/2005/Atom}%s' +APP_NAMESPACE = 'http://purl.org/atom/app#' +APP_TEMPLATE = '{http://purl.org/atom/app#}%s' + +# This encoding is used for converting strings before translating the XML +# into an object. +XML_STRING_ENCODING = 'utf-8' +# The desired string encoding for object members. +MEMBER_STRING_ENCODING = 'utf-8' + + +def CreateClassFromXMLString(target_class, xml_string, string_encoding=None): + """Creates an instance of the target class from the string contents. + + Args: + target_class: class The class which will be instantiated and populated + with the contents of the XML. This class must have a _tag and a + _namespace class variable. 
+ xml_string: str A string which contains valid XML. The root element + of the XML string should match the tag and namespace of the desired + class. + string_encoding: str The character encoding which the xml_string should + be converted to before it is interpreted and translated into + objects. The default is None in which case the string encoding + is not changed. + + Returns: + An instance of the target class with members assigned according to the + contents of the XML - or None if the root XML tag and namespace did not + match those of the target class. + """ + encoding = string_encoding or XML_STRING_ENCODING + if encoding and isinstance(xml_string, unicode): + xml_string = xml_string.encode(encoding) + tree = ElementTree.fromstring(xml_string) + return _CreateClassFromElementTree(target_class, tree) + + +def _CreateClassFromElementTree(target_class, tree, namespace=None, tag=None): + """Instantiates the class and populates members according to the tree. + + Note: Only use this function with classes that have _namespace and _tag + class members. + + Args: + target_class: class The class which will be instantiated and populated + with the contents of the XML. + tree: ElementTree An element tree whose contents will be converted into + members of the new target_class instance. + namespace: str (optional) The namespace which the XML tree's root node must + match. If omitted, the namespace defaults to the _namespace of the + target class. + tag: str (optional) The tag which the XML tree's root node must match. If + omitted, the tag defaults to the _tag class member of the target + class. + + Returns: + An instance of the target class - or None if the tag and namespace of + the XML tree's root node did not match the desired namespace and tag. 
+ """ + if namespace is None: + namespace = target_class._namespace + if tag is None: + tag = target_class._tag + if tree.tag == '{%s}%s' % (namespace, tag): + target = target_class() + target._HarvestElementTree(tree) + return target + else: + return None + + +class ExtensionContainer(object): + + def __init__(self, extension_elements=None, extension_attributes=None, + text=None): + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.text = text + + # Three methods to create an object from an ElementTree + def _HarvestElementTree(self, tree): + # Fill in the instance members from the contents of the XML tree. + for child in tree: + self._ConvertElementTreeToMember(child) + for attribute, value in tree.attrib.iteritems(): + self._ConvertElementAttributeToMember(attribute, value) + # Encode the text string according to the desired encoding type. (UTF-8) + if tree.text: + self.text = tree.text.encode(MEMBER_STRING_ENCODING) + + def _ConvertElementTreeToMember(self, child_tree, current_class=None): + self.extension_elements.append(_ExtensionElementFromElementTree( + child_tree)) + + def _ConvertElementAttributeToMember(self, attribute, value): + # Encode the attribute value's string with the desired type Default UTF-8 + if value: + self.extension_attributes[attribute] = value.encode( + MEMBER_STRING_ENCODING) + + # One method to create an ElementTree from an object + def _AddMembersToElementTree(self, tree): + for child in self.extension_elements: + child._BecomeChildElement(tree) + for attribute, value in self.extension_attributes.iteritems(): + if value: + # Decode the value from the desired encoding (default UTF-8). 
+ if not isinstance(value, unicode): + tree.attrib[attribute] = value.decode(MEMBER_STRING_ENCODING) + else: + tree.attrib[attribute] = value + if self.text and not isinstance(self.text, unicode): + tree.text = self.text.decode(MEMBER_STRING_ENCODING) + else: + tree.text = self.text + + def FindExtensions(self, tag=None, namespace=None): + """Searches extension elements for child nodes with the desired name. + + Returns a list of extension elements within this object whose tag + and/or namespace match those passed in. To find all extensions in + a particular namespace, specify the namespace but not the tag name. + If you specify only the tag, the result list may contain extension + elements in multiple namespaces. + + Args: + tag: str (optional) The desired tag + namespace: str (optional) The desired namespace + + Returns: + A list of elements whose tag and/or namespace match the parameters + values + """ + + results = [] + + if tag and namespace: + for element in self.extension_elements: + if element.tag == tag and element.namespace == namespace: + results.append(element) + elif tag and not namespace: + for element in self.extension_elements: + if element.tag == tag: + results.append(element) + elif namespace and not tag: + for element in self.extension_elements: + if element.namespace == namespace: + results.append(element) + else: + for element in self.extension_elements: + results.append(element) + + return results + + +class AtomBase(ExtensionContainer): + + _children = {} + _attributes = {} + + def __init__(self, extension_elements=None, extension_attributes=None, + text=None): + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.text = text + + def _ConvertElementTreeToMember(self, child_tree): + # Find the element's tag in this class's list of child members + if self.__class__._children.has_key(child_tree.tag): + member_name = self.__class__._children[child_tree.tag][0] + member_class = 
self.__class__._children[child_tree.tag][1] + # If the class member is supposed to contain a list, make sure the + # matching member is set to a list, then append the new member + # instance to the list. + if isinstance(member_class, list): + if getattr(self, member_name) is None: + setattr(self, member_name, []) + getattr(self, member_name).append(_CreateClassFromElementTree( + member_class[0], child_tree)) + else: + setattr(self, member_name, + _CreateClassFromElementTree(member_class, child_tree)) + else: + ExtensionContainer._ConvertElementTreeToMember(self, child_tree) + + def _ConvertElementAttributeToMember(self, attribute, value): + # Find the attribute in this class's list of attributes. + if self.__class__._attributes.has_key(attribute): + # Find the member of this class which corresponds to the XML attribute + # (lookup in current_class._attributes) and set this member to the + # desired value (using self.__dict__). + if value: + # Encode the string to capture non-ascii characters (default UTF-8) + setattr(self, self.__class__._attributes[attribute], + value.encode(MEMBER_STRING_ENCODING)) + else: + ExtensionContainer._ConvertElementAttributeToMember(self, attribute, + value) + + # Three methods to create an ElementTree from an object + def _AddMembersToElementTree(self, tree): + # Convert the members of this class which are XML child nodes. + # This uses the class's _children dictionary to find the members which + # should become XML child nodes. + member_node_names = [values[0] for tag, values in + self.__class__._children.iteritems()] + for member_name in member_node_names: + member = getattr(self, member_name) + if member is None: + pass + elif isinstance(member, list): + for instance in member: + instance._BecomeChildElement(tree) + else: + member._BecomeChildElement(tree) + # Convert the members of this class which are XML attributes. 
+ for xml_attribute, member_name in self.__class__._attributes.iteritems(): + member = getattr(self, member_name) + if member is not None: + if not isinstance(member, unicode): + tree.attrib[xml_attribute] = member.decode(MEMBER_STRING_ENCODING) + else: + tree.attrib[xml_attribute] = member + # Lastly, call the ExtensionContainers's _AddMembersToElementTree to + # convert any extension attributes. + ExtensionContainer._AddMembersToElementTree(self, tree) + + + def _BecomeChildElement(self, tree): + """ + + Note: Only for use with classes that have a _tag and _namespace class + member. It is in AtomBase so that it can be inherited but it should + not be called on instances of AtomBase. + + """ + new_child = ElementTree.Element('') + tree.append(new_child) + new_child.tag = '{%s}%s' % (self.__class__._namespace, + self.__class__._tag) + self._AddMembersToElementTree(new_child) + + def _ToElementTree(self): + """ + + Note, this method is designed to be used only with classes that have a + _tag and _namespace. It is placed in AtomBase for inheritance but should + not be called on this class. 
+ + """ + new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace, + self.__class__._tag)) + self._AddMembersToElementTree(new_tree) + return new_tree + + def ToString(self, string_encoding='UTF-8'): + """Converts the Atom object to a string containing XML.""" + return ElementTree.tostring(self._ToElementTree(), encoding=string_encoding) + + def __str__(self): + return self.ToString() + + +class Name(AtomBase): + """The atom:name element""" + + _tag = 'name' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Name + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def NameFromString(xml_string): + return CreateClassFromXMLString(Name, xml_string) + + +class Email(AtomBase): + """The atom:email element""" + + _tag = 'email' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Email + + Args: + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + text: str The text data in the this element + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def EmailFromString(xml_string): + return CreateClassFromXMLString(Email, xml_string) + + +class Uri(AtomBase): + """The atom:uri element""" + + _tag = 'uri' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + 
_attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Uri + + Args: + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + text: str The text data in the this element + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def UriFromString(xml_string): + return CreateClassFromXMLString(Uri, xml_string) + + +class Person(AtomBase): + """A foundation class from which atom:author and atom:contributor extend. + + A person contains information like name, email address, and web page URI for + an author or contributor to an Atom feed. + """ + + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _children['{%s}name' % (ATOM_NAMESPACE)] = ('name', Name) + _children['{%s}email' % (ATOM_NAMESPACE)] = ('email', Email) + _children['{%s}uri' % (ATOM_NAMESPACE)] = ('uri', Uri) + + def __init__(self, name=None, email=None, uri=None, + extension_elements=None, extension_attributes=None, text=None): + """Foundation from which author and contributor are derived. + + The constructor is provided for illustrative purposes, you should not + need to instantiate a Person. + + Args: + name: Name The person's name + email: Email The person's email address + uri: Uri The URI of the person's webpage + extension_elements: list A list of ExtensionElement instances which are + children of this element. + extension_attributes: dict A dictionary of strings which are the values + for additional XML attributes of this element. + text: String The text contents of the element. This is the contents + of the Entry's XML text node. 
(Example: This is the text) + """ + + self.name = name + self.email = email + self.uri = uri + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.text = text + + +class Author(Person): + """The atom:author element + + An author is a required element in Feed. + """ + + _tag = 'author' + _namespace = ATOM_NAMESPACE + _children = Person._children.copy() + _attributes = Person._attributes.copy() + #_children = {} + #_attributes = {} + + def __init__(self, name=None, email=None, uri=None, + extension_elements=None, extension_attributes=None, text=None): + """Constructor for Author + + Args: + name: Name + email: Email + uri: Uri + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + text: str The text data in the this element + """ + + self.name = name + self.email = email + self.uri = uri + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.text = text + + +def AuthorFromString(xml_string): + return CreateClassFromXMLString(Author, xml_string) + + +class Contributor(Person): + """The atom:contributor element""" + + _tag = 'contributor' + _namespace = ATOM_NAMESPACE + _children = Person._children.copy() + _attributes = Person._attributes.copy() + + def __init__(self, name=None, email=None, uri=None, + extension_elements=None, extension_attributes=None, text=None): + """Constructor for Contributor + + Args: + name: Name + email: Email + uri: Uri + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + text: str The text data in the this element + """ + + self.name = name + self.email = email + self.uri = uri + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.text = text + + +def ContributorFromString(xml_string): + 
return CreateClassFromXMLString(Contributor, xml_string) + + +class Link(AtomBase): + """The atom:link element""" + + _tag = 'link' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _attributes['rel'] = 'rel' + _attributes['href'] = 'href' + _attributes['type'] = 'type' + _attributes['title'] = 'title' + _attributes['length'] = 'length' + _attributes['hreflang'] = 'hreflang' + + def __init__(self, href=None, rel=None, link_type=None, hreflang=None, + title=None, length=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Link + + Args: + href: string The href attribute of the link + rel: string + type: string + hreflang: string The language for the href + title: string + length: string The length of the href's destination + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + text: str The text data in the this element + """ + + self.href = href + self.rel = rel + self.type = link_type + self.hreflang = hreflang + self.title = title + self.length = length + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def LinkFromString(xml_string): + return CreateClassFromXMLString(Link, xml_string) + + +class Generator(AtomBase): + """The atom:generator element""" + + _tag = 'generator' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _attributes['uri'] = 'uri' + _attributes['version'] = 'version' + + def __init__(self, uri=None, version=None, text=None, + extension_elements=None, extension_attributes=None): + """Constructor for Generator + + Args: + uri: string + version: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute 
value string pairs + """ + + self.uri = uri + self.version = version + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +def GeneratorFromString(xml_string): + return CreateClassFromXMLString(Generator, xml_string) + + +class Text(AtomBase): + """A foundation class from which atom:title, summary, etc. extend. + + This class should never be instantiated. + """ + + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _attributes['type'] = 'type' + + def __init__(self, text_type=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Text + + Args: + text_type: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.type = text_type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Title(Text): + """The atom:title element""" + + _tag = 'title' + _namespace = ATOM_NAMESPACE + _children = Text._children.copy() + _attributes = Text._attributes.copy() + + def __init__(self, title_type=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Title + + Args: + title_type: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.type = title_type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def TitleFromString(xml_string): + return CreateClassFromXMLString(Title, xml_string) + + +class Subtitle(Text): + """The atom:subtitle element""" + + _tag = 'subtitle' + _namespace = ATOM_NAMESPACE + _children = Text._children.copy() + 
_attributes = Text._attributes.copy() + + def __init__(self, subtitle_type=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Subtitle + + Args: + subtitle_type: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.type = subtitle_type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SubtitleFromString(xml_string): + return CreateClassFromXMLString(Subtitle, xml_string) + + +class Rights(Text): + """The atom:rights element""" + + _tag = 'rights' + _namespace = ATOM_NAMESPACE + _children = Text._children.copy() + _attributes = Text._attributes.copy() + + def __init__(self, rights_type=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Rights + + Args: + rights_type: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.type = rights_type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def RightsFromString(xml_string): + return CreateClassFromXMLString(Rights, xml_string) + + +class Summary(Text): + """The atom:summary element""" + + _tag = 'summary' + _namespace = ATOM_NAMESPACE + _children = Text._children.copy() + _attributes = Text._attributes.copy() + + def __init__(self, summary_type=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Summary + + Args: + summary_type: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + 
self.type = summary_type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SummaryFromString(xml_string): + return CreateClassFromXMLString(Summary, xml_string) + + +class Content(Text): + """The atom:content element""" + + _tag = 'content' + _namespace = ATOM_NAMESPACE + _children = Text._children.copy() + _attributes = Text._attributes.copy() + _attributes['src'] = 'src' + + def __init__(self, content_type=None, src=None, text=None, + extension_elements=None, extension_attributes=None): + """Constructor for Content + + Args: + content_type: string + src: string + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.type = content_type + self.src = src + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +def ContentFromString(xml_string): + return CreateClassFromXMLString(Content, xml_string) + + +class Category(AtomBase): + """The atom:category element""" + + _tag = 'category' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _attributes['term'] = 'term' + _attributes['scheme'] = 'scheme' + _attributes['label'] = 'label' + + def __init__(self, term=None, scheme=None, label=None, text=None, + extension_elements=None, extension_attributes=None): + """Constructor for Category + + Args: + term: str + scheme: str + label: str + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.term = term + self.scheme = scheme + self.label = label + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes 
or {} + + +def CategoryFromString(xml_string): + return CreateClassFromXMLString(Category, xml_string) + + +class Id(AtomBase): + """The atom:id element.""" + + _tag = 'id' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Id + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def IdFromString(xml_string): + return CreateClassFromXMLString(Id, xml_string) + + +class Icon(AtomBase): + """The atom:icon element.""" + + _tag = 'icon' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Icon + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def IconFromString(xml_string): + return CreateClassFromXMLString(Icon, xml_string) + + +class Logo(AtomBase): + """The atom:logo element.""" + + _tag = 'logo' + _namespace = ATOM_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Logo + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A 
dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def LogoFromString(xml_string): + return CreateClassFromXMLString(Logo, xml_string) + + +class Draft(AtomBase): + """The app:draft element which indicates if this entry should be public.""" + + _tag = 'draft' + _namespace = APP_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for app:draft + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def DraftFromString(xml_string): + return CreateClassFromXMLString(Draft, xml_string) + + +class Control(AtomBase): + """The app:control element indicating restrictions on publication. + + The APP control element may contain a draft element indicating whether or + not this entry should be publicly available. 
+ """ + + _tag = 'control' + _namespace = APP_NAMESPACE + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _children['{%s}draft' % APP_NAMESPACE] = ('draft', Draft) + + def __init__(self, draft=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for app:control""" + + self.draft = draft + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def ControlFromString(xml_string): + return CreateClassFromXMLString(Control, xml_string) + + +class Date(AtomBase): + """A parent class for atom:updated, published, etc.""" + + #TODO Add text to and from time conversion methods to allow users to set + # the contents of a Date to a python DateTime object. + + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Updated(Date): + """The atom:updated element.""" + + _tag = 'updated' + _namespace = ATOM_NAMESPACE + _children = Date._children.copy() + _attributes = Date._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Updated + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def UpdatedFromString(xml_string): + return CreateClassFromXMLString(Updated, xml_string) + + +class Published(Date): + """The atom:published element.""" + + _tag = 'published' + _namespace = ATOM_NAMESPACE + _children = Date._children.copy() + 
_attributes = Date._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Published + + Args: + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def PublishedFromString(xml_string): + return CreateClassFromXMLString(Published, xml_string) + + +class LinkFinder(object): + """An "interface" providing methods to find link elements + + Entry elements often contain multiple links which differ in the rel + attribute or content type. Often, developers are interested in a specific + type of link so this class provides methods to find specific classes of + links. + + This class is used as a mixin in Atom entries and feeds. + """ + + def GetSelfLink(self): + """Find the first link with rel set to 'self' + + Returns: + An atom.Link or none if none of the links had rel equal to 'self' + """ + + for a_link in self.link: + if a_link.rel == 'self': + return a_link + return None + + def GetEditLink(self): + for a_link in self.link: + if a_link.rel == 'edit': + return a_link + return None + + def GetNextLink(self): + for a_link in self.link: + if a_link.rel == 'next': + return a_link + return None + + def GetLicenseLink(self): + for a_link in self.link: + if a_link.rel == 'license': + return a_link + return None + + def GetAlternateLink(self): + for a_link in self.link: + if a_link.rel == 'alternate': + return a_link + return None + + +class FeedEntryParent(AtomBase, LinkFinder): + """A super class for atom:feed and entry, contains shared attributes""" + + _children = AtomBase._children.copy() + _attributes = AtomBase._attributes.copy() + _children['{%s}author' % ATOM_NAMESPACE] = ('author', [Author]) + _children['{%s}category' % 
ATOM_NAMESPACE] = ('category', [Category]) + _children['{%s}contributor' % ATOM_NAMESPACE] = ('contributor', [Contributor]) + _children['{%s}id' % ATOM_NAMESPACE] = ('id', Id) + _children['{%s}link' % ATOM_NAMESPACE] = ('link', [Link]) + _children['{%s}rights' % ATOM_NAMESPACE] = ('rights', Rights) + _children['{%s}title' % ATOM_NAMESPACE] = ('title', Title) + _children['{%s}updated' % ATOM_NAMESPACE] = ('updated', Updated) + + def __init__(self, author=None, category=None, contributor=None, + atom_id=None, link=None, rights=None, title=None, updated=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.rights = rights + self.title = title + self.updated = updated + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Source(FeedEntryParent): + """The atom:source element""" + + _tag = 'source' + _namespace = ATOM_NAMESPACE + _children = FeedEntryParent._children.copy() + _attributes = FeedEntryParent._attributes.copy() + _children['{%s}generator' % ATOM_NAMESPACE] = ('generator', Generator) + _children['{%s}icon' % ATOM_NAMESPACE] = ('icon', Icon) + _children['{%s}logo' % ATOM_NAMESPACE] = ('logo', Logo) + _children['{%s}subtitle' % ATOM_NAMESPACE] = ('subtitle', Subtitle) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, text=None, + extension_elements=None, extension_attributes=None): + """Constructor for Source + + Args: + author: list (optional) A list of Author instances which belong to this + class. 
+ category: list (optional) A list of Category instances + contributor: list (optional) A list on Contributor instances + generator: Generator (optional) + icon: Icon (optional) + id: Id (optional) The entry's Id element + link: list (optional) A list of Link instances + logo: Logo (optional) + rights: Rights (optional) The entry's Rights element + subtitle: Subtitle (optional) The entry's subtitle element + title: Title (optional) the entry's title element + updated: Updated (optional) the entry's updated element + text: String (optional) The text contents of the element. This is the + contents of the Entry's XML text node. + (Example: This is the text) + extension_elements: list (optional) A list of ExtensionElement instances + which are children of this element. + extension_attributes: dict (optional) A dictionary of strings which are + the values for additional XML attributes of this element. + """ + + self.author = author or [] + self.category = category or [] + self.contributor = contributor or [] + self.generator = generator + self.icon = icon + self.id = atom_id + self.link = link or [] + self.logo = logo + self.rights = rights + self.subtitle = subtitle + self.title = title + self.updated = updated + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SourceFromString(xml_string): + return CreateClassFromXMLString(Source, xml_string) + + +class Entry(FeedEntryParent): + """The atom:entry element""" + + _tag = 'entry' + _namespace = ATOM_NAMESPACE + _children = FeedEntryParent._children.copy() + _attributes = FeedEntryParent._attributes.copy() + _children['{%s}content' % ATOM_NAMESPACE] = ('content', Content) + _children['{%s}published' % ATOM_NAMESPACE] = ('published', Published) + _children['{%s}source' % ATOM_NAMESPACE] = ('source', Source) + _children['{%s}summary' % ATOM_NAMESPACE] = ('summary', Summary) + _children['{%s}control' % APP_NAMESPACE] = ('control', Control) 
+ + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, control=None, title=None, updated=None, + extension_elements=None, extension_attributes=None, text=None): + """Constructor for atom:entry + + Args: + author: list A list of Author instances which belong to this class. + category: list A list of Category instances + content: Content The entry's Content + contributor: list A list on Contributor instances + id: Id The entry's Id element + link: list A list of Link instances + published: Published The entry's Published element + rights: Rights The entry's Rights element + source: Source the entry's source element + summary: Summary the entry's summary element + title: Title the entry's title element + updated: Updated the entry's updated element + control: The entry's app:control element which can be used to mark an + entry as a draft which should not be publicly viewable. + text: String The text contents of the element. This is the contents + of the Entry's XML text node. (Example: This is the text) + extension_elements: list A list of ExtensionElement instances which are + children of this element. + extension_attributes: dict A dictionary of strings which are the values + for additional XML attributes of this element. 
+ """ + + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.title = title + self.updated = updated + self.control = control + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def EntryFromString(xml_string): + return CreateClassFromXMLString(Entry, xml_string) + + +class Feed(Source): + """The atom:feed element""" + + _tag = 'feed' + _namespace = ATOM_NAMESPACE + _children = Source._children.copy() + _attributes = Source._attributes.copy() + _children['{%s}entry' % ATOM_NAMESPACE] = ('entry', [Entry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, entry=None, + text=None, extension_elements=None, extension_attributes=None): + """Constructor for Source + + Args: + author: list (optional) A list of Author instances which belong to this + class. + category: list (optional) A list of Category instances + contributor: list (optional) A list on Contributor instances + generator: Generator (optional) + icon: Icon (optional) + id: Id (optional) The entry's Id element + link: list (optional) A list of Link instances + logo: Logo (optional) + rights: Rights (optional) The entry's Rights element + subtitle: Subtitle (optional) The entry's subtitle element + title: Title (optional) the entry's title element + updated: Updated (optional) the entry's updated element + entry: list (optional) A list of the Entry instances contained in the + feed. + text: String (optional) The text contents of the element. This is the + contents of the Entry's XML text node. 
+ (Example: This is the text) + extension_elements: list (optional) A list of ExtensionElement instances + which are children of this element. + extension_attributes: dict (optional) A dictionary of strings which are + the values for additional XML attributes of this element. + """ + + self.author = author or [] + self.category = category or [] + self.contributor = contributor or [] + self.generator = generator + self.icon = icon + self.id = atom_id + self.link = link or [] + self.logo = logo + self.rights = rights + self.subtitle = subtitle + self.title = title + self.updated = updated + self.entry = entry or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def FeedFromString(xml_string): + return CreateClassFromXMLString(Feed, xml_string) + + +class ExtensionElement(object): + """Represents extra XML elements contained in Atom classes.""" + + def __init__(self, tag, namespace=None, attributes=None, + children=None, text=None): + """Constructor for EtensionElement + + Args: + namespace: string (optional) The XML namespace for this element. + tag: string (optional) The tag (without the namespace qualifier) for + this element. To reconstruct the full qualified name of the element, + combine this tag with the namespace. + attributes: dict (optinal) The attribute value string pairs for the XML + attributes of this element. + children: list (optional) A list of ExtensionElements which represent + the XML child nodes of this element. 
+ """ + + self.namespace = namespace + self.tag = tag + self.attributes = attributes or {} + self.children = children or [] + self.text = text + + def ToString(self): + element_tree = self._TransferToElementTree(ElementTree.Element('')) + return ElementTree.tostring(element_tree, encoding="UTF-8") + + def _TransferToElementTree(self, element_tree): + if self.tag is None: + return None + + if self.namespace is not None: + element_tree.tag = '{%s}%s' % (self.namespace, self.tag) + else: + element_tree.tag = self.tag + + for key, value in self.attributes.iteritems(): + element_tree.attrib[key] = value + + for child in self.children: + child._BecomeChildElement(element_tree) + + element_tree.text = self.text + + return element_tree + + def _BecomeChildElement(self, element_tree): + """Converts this object into an etree element and adds it as a child node. + + Adds self to the ElementTree. This method is required to avoid verbose XML + which constantly redefines the namespace. + + Args: + element_tree: ElementTree._Element The element to which this object's XML + will be added. + """ + new_element = ElementTree.Element('') + element_tree.append(new_element) + self._TransferToElementTree(new_element) + + def FindChildren(self, tag=None, namespace=None): + """Searches child nodes for objects with the desired tag/namespace. + + Returns a list of extension elements within this object whose tag + and/or namespace match those passed in. To find all children in + a particular namespace, specify the namespace but not the tag name. + If you specify only the tag, the result list may contain extension + elements in multiple namespaces. 
+ + Args: + tag: str (optional) The desired tag + namespace: str (optional) The desired namespace + + Returns: + A list of elements whose tag and/or namespace match the parameters + values + """ + + results = [] + + if tag and namespace: + for element in self.children: + if element.tag == tag and element.namespace == namespace: + results.append(element) + elif tag and not namespace: + for element in self.children: + if element.tag == tag: + results.append(element) + elif namespace and not tag: + for element in self.children: + if element.namespace == namespace: + results.append(element) + else: + for element in self.children: + results.append(element) + + return results + + +def ExtensionElementFromString(xml_string): + element_tree = ElementTree.fromstring(xml_string) + return _ExtensionElementFromElementTree(element_tree) + + +def _ExtensionElementFromElementTree(element_tree): + element_tag = element_tree.tag + if '}' in element_tag: + namespace = element_tag[1:element_tag.index('}')] + tag = element_tag[element_tag.index('}')+1:] + else: + namespace = None + tag = element_tag + extension = ExtensionElement(namespace=namespace, tag=tag) + for key, value in element_tree.attrib.iteritems(): + extension.attributes[key] = value + for child in element_tree: + extension.children.append(_ExtensionElementFromElementTree(child)) + extension.text = element_tree.text + return extension diff --git a/gdata.py-1.2.3/src/atom/core.py b/gdata.py-1.2.3/src/atom/core.py new file mode 100644 index 0000000..b37a54e --- /dev/null +++ b/gdata.py-1.2.3/src/atom/core.py @@ -0,0 +1,382 @@ +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import inspect +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree + + +class XmlElement(object): + _qname = None + _other_elements = None + _other_attributes = None + _rule_set = None + _members = None + text = None + + def __init__(self, text=None, *args, **kwargs): + if ('_members' not in self.__class__.__dict__ + or self.__class__._members is None): + self.__class__._members = tuple(self.__class__._list_xml_members()) + for member_name, member_type in self.__class__._members: + if member_name in kwargs: + setattr(self, member_name, kwargs[member_name]) + else: + if isinstance(member_type, list): + setattr(self, member_name, []) + else: + setattr(self, member_name, None) + self._other_elements = [] + self._other_attributes = {} + if text is not None: + self.text = text + + def _list_xml_members(cls): + """Generator listing all members which are XML elements or attributes. 
+ + The following members would be considered XML members: + foo = 'abc' - indicates an XML attribute with the qname abc + foo = SomeElement - indicates an XML child element + foo = [AnElement] - indicates a repeating XML child element, each instance + will be stored in a list in this member + foo = ('att1', '{http://example.com/namespace}att2' - indicates an XML + attribute which has different parsing rules in different versions of + the protocol. Version 1 of the XML parsing rules will look for an + attribute with the qname 'att1' but verion 2 of the parsing rules will + look for a namespaced attribute with the local name of 'att2' and an + XML namespace of 'http://example.com/namespace'. + """ + members = [] + for pair in inspect.getmembers(cls): + if not pair[0].startswith('_') and pair[0] != 'text': + member_type = pair[1] + if (isinstance(member_type, tuple) or isinstance(member_type, list) + or isinstance(member_type, (str, unicode)) + or (inspect.isclass(member_type) + and issubclass(member_type, XmlElement))): + members.append(pair) + return members + + _list_xml_members = classmethod(_list_xml_members) + + def _get_rules(cls, version): + # Initialize the _rule_set to make sure there is a slot available to store + # the parsing rules for this version of the XML schema. + # Look for rule set in the class __dict__ proxy so that only the + # _rule_set for this class will be found. By using the dict proxy + # we avoid finding rule_sets defined in superclasses. + # The four lines below provide support for any number of versions, but it + # runs a bit slower then hard coding slots for two versions, so I'm using + # the below two lines. + #if '_rule_set' not in cls.__dict__ or cls._rule_set is None: + # cls._rule_set = [] + #while len(cls.__dict__['_rule_set']) < version: + # cls._rule_set.append(None) + # If there is no rule set cache in the class, provide slots for two XML + # versions. If and when there is a version 3, this list will need to be + # expanded. 
+ if '_rule_set' not in cls.__dict__ or cls._rule_set is None: + cls._rule_set = [None, None] + # If a version higher than 2 is requested, fall back to version 2 because + # 2 is currently the highest supported version. + if version > 2: + return cls._get_rules(2) + # Check the dict proxy for the rule set to avoid finding any rule sets + # which belong to the superclass. We only want rule sets for this class. + if cls._rule_set[version-1] is None: + # The rule set for each version consists of the qname for this element + # ('{namespace}tag'), a dictionary (elements) for looking up the + # corresponding class member when given a child element's qname, and a + # dictionary (attributes) for looking up the corresponding class member + # when given an XML attribute's qname. + elements = {} + attributes = {} + if ('_members' not in cls.__dict__ or cls._members is None): + cls._members = tuple(cls._list_xml_members()) + for member_name, target in cls._members: + if isinstance(target, list): + # This member points to a repeating element. + elements[_get_qname(target[0], version)] = (member_name, target[0], + True) + elif isinstance(target, tuple): + # This member points to a versioned XML attribute. + if version <= len(target): + attributes[target[version-1]] = member_name + else: + attributes[target[-1]] = member_name + elif isinstance(target, (str, unicode)): + # This member points to an XML attribute. + attributes[target] = member_name + elif issubclass(target, XmlElement): + # This member points to a single occurance element. + elements[_get_qname(target, version)] = (member_name, target, False) + version_rules = (_get_qname(cls, version), elements, attributes) + cls._rule_set[version-1] = version_rules + return version_rules + else: + return cls._rule_set[version-1] + + _get_rules = classmethod(_get_rules) + + def get_elements(self, tag=None, namespace=None, version=1): + """Find all sub elements which match the tag and namespace. 
+ + To find all elements in this object, call get_elements with the tag and + namespace both set to None (the default). This method searches through + the object's members and the elements stored in _other_elements which + did not match any of the XML parsing rules for this class. + + Args: + tag: str + namespace: str + version: int Specifies the version of the XML rules to be used when + searching for matching elements. + + Returns: + A list of the matching XmlElements. + """ + matches = [] + ignored1, elements, ignored2 = self.__class__._get_rules(version) + if elements: + for qname, element_def in elements.iteritems(): + member = getattr(self, element_def[0]) + if member: + if _qname_matches(tag, namespace, qname): + if element_def[2]: + # If this is a repeating element, copy all instances into the + # result list. + matches.extend(member) + else: + matches.append(member) + for element in self._other_elements: + if _qname_matches(tag, namespace, element._qname): + matches.append(element) + return matches + + def get_attributes(self, tag=None, namespace=None, version=1): + """Find all attributes which match the tag and namespace. + + To find all attributes in this object, call get_attributes with the tag + and namespace both set to None (the default). This method searches + through the object's members and the attributes stored in + _other_attributes which did not fit any of the XML parsing rules for this + class. + + Args: + tag: str + namespace: str + version: int Specifies the version of the XML rules to be used when + searching for matching attributes. + + Returns: + A list of XmlAttribute objects for the matching attributes. 
+ """ + matches = [] + ignored1, ignored2, attributes = self.__class__._get_rules(version) + if attributes: + for qname, attribute_def in attributes.iteritems(): + member = getattr(self, attribute_def[0]) + if member: + if _qname_matches(tag, namespace, qname): + matches.append(XmlAttribute(qname, member)) + for qname, value in self._other_attributes.iteritems(): + if _qname_matches(tag, namespace, qname): + matches.append(XmlAttribute(qname, value)) + return matches + + def _harvest_tree(self, tree, version=1): + """Populates object members from the data in the tree Element.""" + qname, elements, attributes = self.__class__._get_rules(version) + for element in tree: + if elements and element.tag in elements: + definition = elements[element.tag] + # If this is a repeating element, make sure the member is set to a + # list. + if definition[2]: + if getattr(self, definition[0]) is None: + setattr(self, definition[0], []) + getattr(self, definition[0]).append(_xml_element_from_tree(element, + definition[1])) + else: + setattr(self, definition[0], _xml_element_from_tree(element, + definition[1])) + else: + self._other_elements.append(_xml_element_from_tree(element, XmlElement)) + for attrib, value in tree.attrib.iteritems(): + if attributes and attrib in attributes: + setattr(self, attributes[attrib], value) + else: + self._other_attributes[attrib] = value + if tree.text: + self.text = tree.text + + def _to_tree(self, version=1): + new_tree = ElementTree.Element(_get_qname(self, version)) + self._attach_members(new_tree, version) + return new_tree + + def _attach_members(self, tree, version=1): + """Convert members to XML elements/attributes and add them to the tree. + + Args: + tree: An ElementTree.Element which will be modified. The members of + this object will be added as child elements or attributes + according to the rules described in _expected_elements and + _expected_attributes. 
The elements and attributes stored in + other_attributes and other_elements are also added a children + of this tree. + version: int Ingnored in this method but used by VersionedElement. + """ + qname, elements, attributes = self.__class__._get_rules(version) + # Add the expected elements and attributes to the tree. + if elements: + for tag, element_def in elements.iteritems(): + member = getattr(self, element_def[0]) + # If this is a repeating element and there are members in the list. + if member and element_def[2]: + for instance in member: + instance._become_child(tree, version) + elif member: + member._become_child(tree, version) + if attributes: + for attribute_tag, member_name in attributes.iteritems(): + value = getattr(self, member_name) + if value: + tree.attrib[attribute_tag] = value + # Add the unexpected (other) elements and attributes to the tree. + for element in self._other_elements: + element._become_child(tree, version) + for key, value in self._other_attributes.iteritems(): + tree.attrib[key] = value + if self.text: + tree.text = self.text + + def to_string(self, version=1): + """Converts this object to XML.""" + return ElementTree.tostring(self._to_tree(version)) + + def _become_child(self, tree, version=1): + """Adds a child element to tree with the XML data in self.""" + new_child = ElementTree.Element('') + tree.append(new_child) + new_child.tag = _get_qname(self, version) + self._attach_members(new_child, version) + + +def _get_qname(element, version): + if isinstance(element._qname, tuple): + if version <= len(element._qname): + return element._qname[version-1] + else: + return element._qname[-1] + else: + return element._qname + + +def _qname_matches(tag, namespace, qname): + """Logic determines if a QName matches the desired local tag and namespace. + + This is used in XmlElement.get_elements and XmlElement.get_attributes to + find matches in the element's members (among all expected-and-unexpected + elements-and-attributes). 
+ + Args: + expected_tag: string + expected_namespace: string + qname: string in the form '{xml_namespace}localtag' or 'tag' if there is + no namespace. + + Returns: + boolean True if the member's tag and namespace fit the expected tag and + namespace. + """ + # If there is no expected namespace or tag, then everything will match. + if qname is None: + member_tag = None + member_namespace = None + else: + if qname.startswith('{'): + member_namespace = qname[1:qname.index('}')] + member_tag = qname[qname.index('}') + 1:] + else: + member_namespace = None + member_tag = qname + return ((tag is None and namespace is None) + # If there is a tag, but no namespace, see if the local tag matches. + or (namespace is None and member_tag == tag) + # There was no tag, but there was a namespace so see if the namespaces + # match. + or (tag is None and member_namespace == namespace) + # There was no tag, and the desired elements have no namespace, so check + # to see that the member's namespace is None. + or (tag is None and namespace == '' + and member_namespace is None) + # The tag and the namespace both match. + or (tag == member_tag + and namespace == member_namespace) + # The tag matches, and the expected namespace is the empty namespace, + # check to make sure the member's namespace is None. + or (tag == member_tag and namespace == '' + and member_namespace is None)) + + +def xml_element_from_string(xml_string, target_class, + version=1, encoding='UTF-8'): + """Parses the XML string according to the rules for the target_class. + + Args: + xml_string: str or unicode + target_class: XmlElement or a subclass. + version: int (optional) The version of the schema which should be used when + converting the XML into an object. The default is 1. 
+ """ +# xml_string = xml_string.encode('UTF-8') + tree = ElementTree.fromstring(xml_string) + #tree = ElementTree.fromstring(unicode(xml_string, encoding)) + return _xml_element_from_tree(tree, target_class, version) + + +def _xml_element_from_tree(tree, target_class, version=1): + if target_class._qname is None: + instance = target_class() + instance._qname = tree.tag + instance._harvest_tree(tree, version) + return instance + # TODO handle the namespace-only case + # Namespace only will be used with Google Spreadsheets rows and + # Google Base item attributes. + elif tree.tag == target_class._qname: + instance = target_class() + instance._harvest_tree(tree, version) + return instance + return None + + +class XmlAttribute(object): + + def __init__(self, qname, value): + self._qname = qname + self.value = value diff --git a/gdata.py-1.2.3/src/atom/http.py b/gdata.py-1.2.3/src/atom/http.py new file mode 100644 index 0000000..c40b394 --- /dev/null +++ b/gdata.py-1.2.3/src/atom/http.py @@ -0,0 +1,286 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""HttpClients in this module use httplib to make HTTP requests. + +This module make HTTP requests based on httplib, but there are environments +in which an httplib based approach will not work (if running in Google App +Engine for example). 
In those cases, higher level classes (like AtomService +and GDataService) can swap out the HttpClient to transparently use a +different mechanism for making HTTP requests. + + HttpClient: Contains a request method which performs an HTTP call to the + server. + + ProxiedHttpClient: Contains a request method which connects to a proxy using + settings stored in operating system environment variables then + performs an HTTP call to the endpoint server. +""" + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import types +import os +import httplib +import atom.url +import atom.http_interface +import socket +import base64 + + +class ProxyError(atom.http_interface.Error): + pass + + +DEFAULT_CONTENT_TYPE = 'application/atom+xml' + + +class HttpClient(atom.http_interface.GenericHttpClient): + def __init__(self, headers=None): + self.debug = False + self.headers = headers or {} + + def request(self, operation, url, data=None, headers=None): + """Performs an HTTP call to the server, supports GET, POST, PUT, and + DELETE. + + Usage example, perform and HTTP GET on http://www.google.com/: + import atom.http + client = atom.http.HttpClient() + http_response = client.request('GET', 'http://www.google.com/') + + Args: + operation: str The HTTP operation to be performed. This is usually one + of 'GET', 'POST', 'PUT', or 'DELETE' + data: filestream, list of parts, or other object which can be converted + to a string. Should be set to None when performing a GET or DELETE. + If data is a file-like object which can be read, this method will + read a chunk of 100K bytes at a time and send them. + If the data is a list of parts to be sent, each part will be + evaluated and sent. + url: The full URL to which the request should be sent. Can be a string + or atom.url.Url. + headers: dict of strings. HTTP headers which should be sent + in the request. 
+ """ + if not isinstance(url, atom.url.Url): + if isinstance(url, types.StringTypes): + url = atom.url.parse_url(url) + else: + raise atom.http_interface.UnparsableUrlObject('Unable to parse url ' + 'parameter because it was not a string or atom.url.Url') + + all_headers = self.headers.copy() + if headers: + all_headers.update(headers) + + connection = self._prepare_connection(url, all_headers) + + if self.debug: + connection.debuglevel = 1 + + connection.putrequest(operation, self._get_access_url(url), + skip_host=True) + connection.putheader('Host', url.host) + + # Overcome a bug in Python 2.4 and 2.5 + # httplib.HTTPConnection.putrequest adding + # HTTP request header 'Host: www.google.com:443' instead of + # 'Host: www.google.com', and thus resulting the error message + # 'Token invalid - AuthSub token has wrong scope' in the HTTP response. + if (url.protocol == 'https' and int(url.port or 443) == 443 and + hasattr(connection, '_buffer') and + isinstance(connection._buffer, list)): + header_line = 'Host: %s:443' % url.host + replacement_header_line = 'Host: %s' % url.host + try: + connection._buffer[connection._buffer.index(header_line)] = ( + replacement_header_line) + except ValueError: # header_line missing from connection._buffer + pass + + # If the list of headers does not include a Content-Length, attempt to + # calculate it based on the data object. + if data and 'Content-Length' not in all_headers: + if isinstance(data, types.StringTypes): + all_headers['Content-Length'] = len(data) + else: + raise atom.http_interface.ContentLengthRequired('Unable to calculate ' + 'the length of the data parameter. Specify a value for ' + 'Content-Length') + + # Set the content type to the default value if none was set. + if 'Content-Type' not in all_headers: + all_headers['Content-Type'] = DEFAULT_CONTENT_TYPE + + # Send the HTTP headers. 
+ for header_name in all_headers: + connection.putheader(header_name, all_headers[header_name]) + connection.endheaders() + + # If there is data, send it in the request. + if data: + if isinstance(data, list): + for data_part in data: + _send_data_part(data_part, connection) + else: + _send_data_part(data, connection) + + # Return the HTTP Response from the server. + return connection.getresponse() + + def _prepare_connection(self, url, headers): + if not isinstance(url, atom.url.Url): + if isinstance(url, types.StringTypes): + url = atom.url.parse_url(url) + else: + raise atom.http_interface.UnparsableUrlObject('Unable to parse url ' + 'parameter because it was not a string or atom.url.Url') + if url.protocol == 'https': + if not url.port: + return httplib.HTTPSConnection(url.host) + return httplib.HTTPSConnection(url.host, int(url.port)) + else: + if not url.port: + return httplib.HTTPConnection(url.host) + return httplib.HTTPConnection(url.host, int(url.port)) + + def _get_access_url(self, url): + return url.to_string() + + +class ProxiedHttpClient(HttpClient): + """Performs an HTTP request through a proxy. + + The proxy settings are obtained from enviroment variables. The URL of the + proxy server is assumed to be stored in the environment variables + 'https_proxy' and 'http_proxy' respectively. If the proxy server requires + a Basic Auth authorization header, the username and password are expected to + be in the 'proxy-username' or 'proxy_username' variable and the + 'proxy-password' or 'proxy_password' variable. + + After connecting to the proxy server, the request is completed as in + HttpClient.request. + """ + def _prepare_connection(self, url, headers): + proxy_auth = _get_proxy_auth() + if url.protocol == 'https': + # destination is https + proxy = os.environ.get('https_proxy') + if proxy: + # Set any proxy auth headers + if proxy_auth: + proxy_auth = 'Proxy-authorization: %s' % proxy_auth + + # Construct the proxy connect command. 
+ port = url.port + if not port: + port = '443' + proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (url.host, port) + + # Set the user agent to send to the proxy + if headers and 'User-Agent' in headers: + user_agent = 'User-Agent: %s\r\n' % (headers['User-Agent']) + else: + user_agent = '' + + proxy_pieces = '%s%s%s\r\n' % (proxy_connect, proxy_auth, user_agent) + + # Find the proxy host and port. + proxy_url = atom.url.parse_url(proxy) + if not proxy_url.port: + proxy_url.port = '80' + + # Connect to the proxy server, very simple recv and error checking + p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM) + p_sock.connect((proxy_url.host, int(proxy_url.port))) + p_sock.sendall(proxy_pieces) + response = '' + + # Wait for the full response. + while response.find("\r\n\r\n") == -1: + response += p_sock.recv(8192) + + p_status = response.split()[1] + if p_status != str(200): + raise ProxyError('Error status=%s' % str(p_status)) + + # Trivial setup for ssl socket. + ssl = socket.ssl(p_sock, None, None) + fake_sock = httplib.FakeSocket(p_sock, ssl) + + # Initalize httplib and replace with the proxy socket. + connection = httplib.HTTPConnection(proxy_url.host) + connection.sock=fake_sock + return connection + else: + # The request was HTTPS, but there was no https_proxy set. + return HttpClient._prepare_connection(self, url, headers) + else: + proxy = os.environ.get('http_proxy') + if proxy: + # Find the proxy host and port. + proxy_url = atom.url.parse_url(proxy) + if not proxy_url.port: + proxy_url.port = '80' + + if proxy_auth: + headers['Proxy-Authorization'] = proxy_auth.strip() + + return httplib.HTTPConnection(proxy_url.host, int(proxy_url.port)) + else: + # The request was HTTP, but there was no http_proxy set. 
+ return HttpClient._prepare_connection(self, url, headers) + + def _get_access_url(self, url): + return url.to_string() + + +def _get_proxy_auth(): + proxy_username = os.environ.get('proxy-username') + if not proxy_username: + proxy_username = os.environ.get('proxy_username') + proxy_password = os.environ.get('proxy-password') + if not proxy_password: + proxy_password = os.environ.get('proxy_password') + if proxy_username: + user_auth = base64.encodestring('%s:%s' % (proxy_username, + proxy_password)) + return 'Basic %s\r\n' % (user_auth.strip()) + else: + return '' + + +def _send_data_part(data, connection): + if isinstance(data, types.StringTypes): + connection.send(data) + return + # Check to see if data is a file-like object that has a read method. + elif hasattr(data, 'read'): + # Read the file and send it a chunk at a time. + while 1: + binarydata = data.read(100000) + if binarydata == '': break + connection.send(binarydata) + return + else: + # The data object was not a file. + # Try to convert to a string and send the data. + connection.send(str(data)) + return diff --git a/gdata.py-1.2.3/src/atom/http_interface.py b/gdata.py-1.2.3/src/atom/http_interface.py new file mode 100644 index 0000000..36e8d96 --- /dev/null +++ b/gdata.py-1.2.3/src/atom/http_interface.py @@ -0,0 +1,158 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This module provides a common interface for all HTTP requests. 
+ + HttpResponse: Represents the server's response to an HTTP request. Provides + an interface identical to httplib.HTTPResponse which is the response + expected from higher level classes which use HttpClient.request. + + GenericHttpClient: Provides an interface (superclass) for an object + responsible for making HTTP requests. Subclasses of this object are + used in AtomService and GDataService to make requests to the server. By + changing the http_client member object, the AtomService is able to make + HTTP requests using different logic (for example, when running on + Google App Engine, the http_client makes requests using the App Engine + urlfetch API). +""" + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import StringIO + + +USER_AGENT = '%s GData-Python/1.2.2' + + +class Error(Exception): + pass + + +class UnparsableUrlObject(Error): + pass + + +class ContentLengthRequired(Error): + pass + + +class HttpResponse(object): + def __init__(self, body=None, status=None, reason=None, headers=None): + """Constructor for an HttpResponse object. + + HttpResponse represents the server's response to an HTTP request from + the client. The HttpClient.request method returns a httplib.HTTPResponse + object and this HttpResponse class is designed to mirror the interface + exposed by httplib.HTTPResponse. + + Args: + body: A file like object, with a read() method. The body could also + be a string, and the constructor will wrap it so that + HttpResponse.read(self) will return the full string. + status: The HTTP status code as an int. Example: 200, 201, 404. + reason: The HTTP status message which follows the code. Example: + OK, Created, Not Found + headers: A dictionary containing the HTTP headers in the server's + response. A common header in the response is Content-Length. 
+ """ + if body: + if hasattr(body, 'read'): + self._body = body + else: + self._body = StringIO.StringIO(body) + else: + self._body = None + if status is not None: + self.status = int(status) + else: + self.status = None + self.reason = reason + self._headers = headers or {} + + def getheader(self, name, default=None): + if name in self._headers: + return self._headers[name] + else: + return default + + def read(self, amt=None): + if not amt: + return self._body.read() + else: + return self._body.read(amt) + + +class GenericHttpClient(object): + debug = False + + def __init__(self, http_client, headers=None): + """ + + Args: + http_client: An object which provides a request method to make an HTTP + request. The request method in GenericHttpClient performs a + call-through to the contained HTTP client object. + headers: A dictionary containing HTTP headers which should be included + in every HTTP request. Common persistent headers include + 'User-Agent'. + """ + self.http_client = http_client + self.headers = headers or {} + + def request(self, operation, url, data=None, headers=None): + all_headers = self.headers.copy() + if headers: + all_headers.update(headers) + return self.http_client.request(operation, url, data=data, + headers=all_headers) + + def get(self, url, headers=None): + return self.request('GET', url, headers=headers) + + def post(self, url, data, headers=None): + return self.request('POST', url, data=data, headers=headers) + + def put(self, url, data, headers=None): + return self.request('PUT', url, data=data, headers=headers) + + def delete(self, url, headers=None): + return self.request('DELETE', url, headers=headers) + + +class GenericToken(object): + """Represents an Authorization token to be added to HTTP requests. + + Some Authorization headers included calculated fields (digital + signatures for example) which are based on the parameters of the HTTP + request. 
Therefore the token is responsible for signing the request + and adding the Authorization header. + """ + def perform_request(self, http_client, operation, url, data=None, + headers=None): + """For the GenericToken, no Authorization token is set.""" + return http_client.request(operation, url, data=data, headers=headers) + + def valid_for_scope(self, url): + """Tells the caller if the token authorizes access to the desired URL. + + Since the generic token doesn't add an auth header, it is not valid for + any scope. + """ + return False + + diff --git a/gdata.py-1.2.3/src/atom/mock_http.py b/gdata.py-1.2.3/src/atom/mock_http.py new file mode 100644 index 0000000..c420f37 --- /dev/null +++ b/gdata.py-1.2.3/src/atom/mock_http.py @@ -0,0 +1,132 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import atom.http_interface +import atom.url + + +class Error(Exception): + pass + + +class NoRecordingFound(Error): + pass + + +class MockRequest(object): + """Holds parameters of an HTTP request for matching against future requests. 
+ """ + def __init__(self, operation, url, data=None, headers=None): + self.operation = operation + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + self.url = url + self.data = data + self.headers = headers + + +class MockResponse(atom.http_interface.HttpResponse): + """Simulates an httplib.HTTPResponse object.""" + def __init__(self, body=None, status=None, reason=None, headers=None): + if body and hasattr(body, 'read'): + self.body = body.read() + else: + self.body = body + if status is not None: + self.status = int(status) + else: + self.status = None + self.reason = reason + self._headers = headers or {} + + def read(self): + return self.body + + +class MockHttpClient(atom.http_interface.GenericHttpClient): + def __init__(self, headers=None, recordings=None, real_client=None): + """An HttpClient which responds to request with stored data. + + The request-response pairs are stored as tuples in a member list named + recordings. + + The MockHttpClient can be switched from replay mode to record mode by + setting the real_client member to an instance of an HttpClient which will + make real HTTP requests and store the server's response in list of + recordings. + + Args: + headers: dict containing HTTP headers which should be included in all + HTTP requests. + recordings: The initial recordings to be used for responses. This list + contains tuples in the form: (MockRequest, MockResponse) + real_client: An HttpClient which will make a real HTTP request. The + response will be converted into a MockResponse and stored in + recordings. + """ + self.recordings = recordings or [] + self.real_client = real_client + self.headers = headers or {} + + def add_response(self, response, operation, url, data=None, headers=None): + """Adds a request-response pair to the recordings list. + + After the recording is added, future matching requests will receive the + response. 
+ + Args: + response: MockResponse + operation: str + url: str + data: str, Currently the data is ignored when looking for matching + requests. + headers: dict of strings: Currently the headers are ignored when + looking for matching requests. + """ + request = MockRequest(operation, url, data=data, headers=headers) + self.recordings.append((request, response)) + + def request(self, operation, url, data=None, headers=None): + """Returns a matching MockResponse from the recordings. + + If the real_client is set, the request will be passed along and the + server's response will be added to the recordings and also returned. + + If there is no match, a NoRecordingFound error will be raised. + """ + if self.real_client is None: + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + for recording in self.recordings: + if recording[0].operation == operation and recording[0].url == url: + return recording[1] + raise NoRecordingFound('No recodings found for %s %s' % ( + operation, url)) + else: + # There is a real HTTP client, so make the request, and record the + # response. + response = self.real_client.request(operation, url, data=data, + headers=headers) + # TODO: copy the headers + stored_response = MockResponse(body=response, status=response.status, + reason=response.reason) + self.add_response(stored_response, operation, url, data=data, + headers=headers) + return stored_response diff --git a/gdata.py-1.2.3/src/atom/mock_service.py b/gdata.py-1.2.3/src/atom/mock_service.py new file mode 100755 index 0000000..601b68a --- /dev/null +++ b/gdata.py-1.2.3/src/atom/mock_service.py @@ -0,0 +1,243 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""MockService provides CRUD ops. for mocking calls to AtomPub services. + + MockService: Exposes the publicly used methods of AtomService to provide + a mock interface which can be used in unit tests. +""" + +import atom.service +import pickle + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + + +# Recordings contains pairings of HTTP MockRequest objects with MockHttpResponse objects. +recordings = [] +# If set, the mock service HttpRequest are actually made through this object. +real_request_handler = None + +def ConcealValueWithSha(source): + import sha + return sha.new(source[:-5]).hexdigest() + +def DumpRecordings(conceal_func=ConcealValueWithSha): + if conceal_func: + for recording_pair in recordings: + recording_pair[0].ConcealSecrets(conceal_func) + return pickle.dumps(recordings) + +def LoadRecordings(recordings_file_or_string): + if isinstance(recordings_file_or_string, str): + atom.mock_service.recordings = pickle.loads(recordings_file_or_string) + elif hasattr(recordings_file_or_string, 'read'): + atom.mock_service.recordings = pickle.loads( + recordings_file_or_string.read()) + +def HttpRequest(service, operation, data, uri, extra_headers=None, + url_params=None, escape_params=True, content_type='application/atom+xml'): + """Simulates an HTTP call to the server, makes an actual HTTP request if + real_request_handler is set. + + This function operates in two different modes depending on if + real_request_handler is set or not. 
If real_request_handler is not set, + HttpRequest will look in this module's recordings list to find a response + which matches the parameters in the function call. If real_request_handler + is set, this function will call real_request_handler.HttpRequest, add the + response to the recordings list, and respond with the actual response. + + Args: + service: atom.AtomService object which contains some of the parameters + needed to make the request. The following members are used to + construct the HTTP call: server (str), additional_headers (dict), + port (int), and ssl (bool). + operation: str The HTTP operation to be performed. This is usually one of + 'GET', 'POST', 'PUT', or 'DELETE' + data: ElementTree, filestream, list of parts, or other object which can be + converted to a string. + Should be set to None when performing a GET or PUT. + If data is a file-like object which can be read, this method will read + a chunk of 100K bytes at a time and send them. + If the data is a list of parts to be sent, each part will be evaluated + and sent. + uri: The beginning of the URL to which the request should be sent. + Examples: '/', '/base/feeds/snippets', + '/m8/feeds/contacts/default/base' + extra_headers: dict of strings. HTTP headers which should be sent + in the request. These headers are in addition to those stored in + service.additional_headers. + url_params: dict of strings. Key value pairs to be added to the URL as + URL parameters. For example {'foo':'bar', 'test':'param'} will + become ?foo=bar&test=param. + escape_params: bool default True. If true, the keys and values in + url_params will be URL escaped when the form is constructed + (Special characters converted to %XX form.) + content_type: str The MIME type for the data being sent. Defaults to + 'application/atom+xml', this is only used if data is set. 
+ """ + full_uri = atom.service.BuildUri(uri, url_params, escape_params) + (server, port, ssl, uri) = atom.service.ProcessUrl(service, uri) + current_request = MockRequest(operation, full_uri, host=server, ssl=ssl, + data=data, extra_headers=extra_headers, url_params=url_params, + escape_params=escape_params, content_type=content_type) + # If the request handler is set, we should actually make the request using + # the request handler and record the response to replay later. + if real_request_handler: + response = real_request_handler.HttpRequest(service, operation, data, uri, + extra_headers=extra_headers, url_params=url_params, + escape_params=escape_params, content_type=content_type) + # TODO: need to copy the HTTP headers from the real response into the + # recorded_response. + recorded_response = MockHttpResponse(body=response.read(), + status=response.status, reason=response.reason) + # Insert a tuple which maps the request to the response object returned + # when making an HTTP call using the real_request_handler. + recordings.append((current_request, recorded_response)) + return recorded_response + else: + # Look through available recordings to see if one matches the current + # request. + for request_response_pair in recordings: + if request_response_pair[0].IsMatch(current_request): + return request_response_pair[1] + return None + + +class MockRequest(object): + """Represents a request made to an AtomPub server. + + These objects are used to determine if a client request matches a recorded + HTTP request to determine what the mock server's response will be. + """ + + def __init__(self, operation, uri, host=None, ssl=False, port=None, + data=None, extra_headers=None, url_params=None, escape_params=True, + content_type='application/atom+xml'): + """Constructor for a MockRequest + + Args: + operation: str One of 'GET', 'POST', 'PUT', or 'DELETE' this is the + HTTP operation requested on the resource. 
+ uri: str The URL describing the resource to be modified or feed to be + retrieved. This should include the protocol (http/https) and the host + (aka domain). For example, these are some valud full_uris: + 'http://example.com', 'https://www.google.com/accounts/ClientLogin' + host: str (optional) The server name which will be placed at the + beginning of the URL if the uri parameter does not begin with 'http'. + Examples include 'example.com', 'www.google.com', 'www.blogger.com'. + ssl: boolean (optional) If true, the request URL will begin with https + instead of http. + data: ElementTree, filestream, list of parts, or other object which can be + converted to a string. (optional) + Should be set to None when performing a GET or PUT. + If data is a file-like object which can be read, the constructor + will read the entire file into memory. If the data is a list of + parts to be sent, each part will be evaluated and stored. + extra_headers: dict (optional) HTTP headers included in the request. + url_params: dict (optional) Key value pairs which should be added to + the URL as URL parameters in the request. For example uri='/', + url_parameters={'foo':'1','bar':'2'} could become '/?foo=1&bar=2'. + escape_params: boolean (optional) Perform URL escaping on the keys and + values specified in url_params. Defaults to True. + content_type: str (optional) Provides the MIME type of the data being + sent. 
+ """ + self.operation = operation + self.uri = _ConstructFullUrlBase(uri, host=host, ssl=ssl) + self.data = data + self.extra_headers = extra_headers + self.url_params = url_params or {} + self.escape_params = escape_params + self.content_type = content_type + + def ConcealSecrets(self, conceal_func): + """Conceal secret data in this request.""" + if self.extra_headers.has_key('Authorization'): + self.extra_headers['Authorization'] = conceal_func( + self.extra_headers['Authorization']) + + def IsMatch(self, other_request): + """Check to see if the other_request is equivalent to this request. + + Used to determine if a recording matches an incoming request so that a + recorded response should be sent to the client. + + The matching is not exact, only the operation and URL are examined + currently. + + Args: + other_request: MockRequest The request which we want to check this + (self) MockRequest against to see if they are equivalent. + """ + # More accurate matching logic will likely be required. + return (self.operation == other_request.operation and self.uri == + other_request.uri) + + +def _ConstructFullUrlBase(uri, host=None, ssl=False): + """Puts URL components into the form http(s)://full.host.strinf/uri/path + + Used to construct a roughly canonical URL so that URLs which begin with + 'http://example.com/' can be compared to a uri of '/' when the host is + set to 'example.com' + + If the uri contains 'http://host' already, the host and ssl parameters + are ignored. + + Args: + uri: str The path component of the URL, examples include '/' + host: str (optional) The host name which should prepend the URL. Example: + 'example.com' + ssl: boolean (optional) If true, the returned URL will begin with https + instead of http. 
+ + Returns: + String which has the form http(s)://example.com/uri/string/contents + """ + if uri.startswith('http'): + return uri + if ssl: + return 'https://%s%s' % (host, uri) + else: + return 'http://%s%s' % (host, uri) + + +class MockHttpResponse(object): + """Returned from MockService crud methods as the server's response.""" + + def __init__(self, body=None, status=None, reason=None, headers=None): + """Construct a mock HTTPResponse and set members. + + Args: + body: str (optional) The HTTP body of the server's response. + status: int (optional) + reason: str (optional) + headers: dict (optional) + """ + self.body = body + self.status = status + self.reason = reason + self.headers = headers or {} + + def read(self): + return self.body + + def getheader(self, header_name): + return self.headers[header_name] + diff --git a/gdata.py-1.2.3/src/atom/service.py b/gdata.py-1.2.3/src/atom/service.py new file mode 100755 index 0000000..cf322a1 --- /dev/null +++ b/gdata.py-1.2.3/src/atom/service.py @@ -0,0 +1,726 @@ +#!/usr/bin/python +# +# Copyright (C) 2006, 2007, 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""AtomService provides CRUD ops. in line with the Atom Publishing Protocol. + + AtomService: Encapsulates the ability to perform insert, update and delete + operations with the Atom Publishing Protocol on which GData is + based. An instance can perform query, insertion, deletion, and + update. 
+ + HttpRequest: Function that performs a GET, POST, PUT, or DELETE HTTP request + to the specified end point. An AtomService object or a subclass can be + used to specify information about the request. +""" + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import atom.http_interface +import atom.url +import atom.http +import atom.token_store + +import os +import httplib +import urllib +import re +import base64 +import socket +import warnings +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree + + +class AtomService(object): + """Performs Atom Publishing Protocol CRUD operations. + + The AtomService contains methods to perform HTTP CRUD operations. + """ + + # Default values for members + port = 80 + ssl = False + # Set the current_token to force the AtomService to use this token + # instead of searching for an appropriate token in the token_store. + current_token = None + auto_store_tokens = True + auto_set_current_token = True + + def _get_override_token(self): + return self.current_token + + def _set_override_token(self, token): + self.current_token = token + + override_token = property(_get_override_token, _set_override_token) + + def __init__(self, server=None, additional_headers=None, + application_name='', http_client=None, token_store=None): + """Creates a new AtomService client. + + Args: + server: string (optional) The start of a URL for the server + to which all operations should be directed. Example: + 'www.google.com' + additional_headers: dict (optional) Any additional HTTP headers which + should be included with CRUD operations. + http_client: An object responsible for making HTTP requests using a + request method. If none is provided, a new instance of + atom.http.ProxiedHttpClient will be used. 
+ token_store: Keeps a collection of authorization tokens which can be + applied to requests for a specific URLs. Critical methods are + find_token based on a URL (atom.url.Url or a string), add_token, + and remove_token. + """ + self.http_client = http_client or atom.http.ProxiedHttpClient() + self.token_store = token_store or atom.token_store.TokenStore() + self.server = server + self.additional_headers = additional_headers or {} + self.additional_headers['User-Agent'] = atom.http_interface.USER_AGENT % ( + application_name,) + # If debug is True, the HTTPConnection will display debug information + self._set_debug(False) + + def _get_debug(self): + return self.http_client.debug + + def _set_debug(self, value): + self.http_client.debug = value + + debug = property(_get_debug, _set_debug, + doc='If True, HTTP debug information is printed.') + + def use_basic_auth(self, username, password, scopes=None): + if username is not None and password is not None: + if scopes is None: + scopes = [atom.token_store.SCOPE_ALL] + base_64_string = base64.encodestring('%s:%s' % (username, password)) + token = BasicAuthToken('Basic %s' % base_64_string.strip(), + scopes=[atom.token_store.SCOPE_ALL]) + if self.auto_set_current_token: + self.current_token = token + if self.auto_store_tokens: + return self.token_store.add_token(token) + return True + return False + + def UseBasicAuth(self, username, password, for_proxy=False): + """Sets an Authenticaiton: Basic HTTP header containing plaintext. + + Deprecated, use use_basic_auth instead. + + The username and password are base64 encoded and added to an HTTP header + which will be included in each request. Note that your username and + password are sent in plaintext. 
+ + Args: + username: str + password: str + """ + self.use_basic_auth(username, password) + + def request(self, operation, url, data=None, headers=None, + url_params=None): + if isinstance(url, str): + if not url.startswith('http') and self.ssl: + url = atom.url.parse_url('https://%s%s' % (self.server, url)) + elif not url.startswith('http'): + url = atom.url.parse_url('http://%s%s' % (self.server, url)) + else: + url = atom.url.parse_url(url) + + if url_params: + for name, value in url_params.iteritems(): + url.params[name] = value + + all_headers = self.additional_headers.copy() + if headers: + all_headers.update(headers) + + # If the list of headers does not include a Content-Length, attempt to + # calculate it based on the data object. + if data and 'Content-Length' not in all_headers: + content_length = CalculateDataLength(data) + if content_length: + all_headers['Content-Length'] = str(content_length) + + # Find an Authorization token for this URL if one is available. + if self.override_token: + auth_token = self.override_token + else: + auth_token = self.token_store.find_token(url) + return auth_token.perform_request(self.http_client, operation, url, + data=data, headers=all_headers) + + # CRUD operations + def Get(self, uri, extra_headers=None, url_params=None, escape_params=True): + """Query the APP server with the given URI + + The uri is the portion of the URI after the server value + (server example: 'www.google.com'). + + Example use: + To perform a query against Google Base, set the server to + 'base.google.com' and set the uri to '/base/feeds/...', where ... is + your query. For example, to find snippets for all digital cameras uri + should be set to: '/base/feeds/snippets?bq=digital+camera' + + Args: + uri: string The query in the form of a URI. Example: + '/base/feeds/snippets?bq=digital+camera'. + extra_headers: dicty (optional) Extra HTTP headers to be included + in the GET request. 
These headers are in addition to + those stored in the client's additional_headers property. + The client automatically sets the Content-Type and + Authorization headers. + url_params: dict (optional) Additional URL parameters to be included + in the query. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + + Returns: + httplib.HTTPResponse The server's response to the GET request. + """ + return self.request('GET', uri, data=None, headers=extra_headers, + url_params=url_params) + + def Post(self, data, uri, extra_headers=None, url_params=None, + escape_params=True, content_type='application/atom+xml'): + """Insert data into an APP server at the given URI. + + Args: + data: string, ElementTree._Element, or something with a __str__ method + The XML to be sent to the uri. + uri: string The location (feed) to which the data should be inserted. + Example: '/base/feeds/items'. + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type, + Authorization, and Content-Length headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + + Returns: + httplib.HTTPResponse Server's response to the POST request. 
+ """ + if extra_headers is None: + extra_headers = {} + if content_type: + extra_headers['Content-Type'] = content_type + return self.request('POST', uri, data=data, headers=extra_headers, + url_params=url_params) + + def Put(self, data, uri, extra_headers=None, url_params=None, + escape_params=True, content_type='application/atom+xml'): + """Updates an entry at the given URI. + + Args: + data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The + XML containing the updated data. + uri: string A URI indicating entry to which the update will be applied. + Example: '/base/feeds/items/ITEM-ID' + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type, + Authorization, and Content-Length headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + + Returns: + httplib.HTTPResponse Server's response to the PUT request. + """ + if extra_headers is None: + extra_headers = {} + if content_type: + extra_headers['Content-Type'] = content_type + return self.request('PUT', uri, data=data, headers=extra_headers, + url_params=url_params) + + def Delete(self, uri, extra_headers=None, url_params=None, + escape_params=True): + """Deletes the entry at the given URI. + + Args: + uri: string The URI of the entry to be deleted. Example: + '/base/feeds/items/ITEM-ID' + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type and + Authorization headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. 
These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + + Returns: + httplib.HTTPResponse Server's response to the DELETE request. + """ + return self.request('DELETE', uri, data=None, headers=extra_headers, + url_params=url_params) + + +class BasicAuthToken(atom.http_interface.GenericToken): + def __init__(self, auth_header, scopes=None): + """Creates a token used to add Basic Auth headers to HTTP requests. + + Args: + auth_header: str The value for the Authorization header. + scopes: list of str or atom.url.Url specifying the beginnings of URLs + for which this token can be used. For example, if scopes contains + 'http://example.com/foo', then this token can be used for a request to + 'http://example.com/foo/bar' but it cannot be used for a request to + 'http://example.com/baz' + """ + self.auth_header = auth_header + self.scopes = scopes or [] + + def perform_request(self, http_client, operation, url, data=None, + headers=None): + """Sets the Authorization header to the basic auth string.""" + if headers is None: + headers = {'Authorization':self.auth_header} + else: + headers['Authorization'] = self.auth_header + return http_client.request(operation, url, data=data, headers=headers) + + def __str__(self): + return self.auth_header + + def valid_for_scope(self, url): + """Tells the caller if the token authorizes access to the desired URL. 
+ """ + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + for scope in self.scopes: + if scope == atom.token_store.SCOPE_ALL: + return True + if isinstance(scope, (str, unicode)): + scope = atom.url.parse_url(scope) + if scope == url: + return True + # Check the host and the path, but ignore the port and protocol. + elif scope.host == url.host and not scope.path: + return True + elif scope.host == url.host and scope.path and not url.path: + continue + elif scope.host == url.host and url.path.startswith(scope.path): + return True + return False + + +def PrepareConnection(service, full_uri): + """Opens a connection to the server based on the full URI. + + This method is deprecated, instead use atom.http.HttpClient.request. + + Examines the target URI and the proxy settings, which are set as + environment variables, to open a connection with the server. This + connection is used to make an HTTP request. + + Args: + service: atom.AtomService or a subclass. It must have a server string which + represents the server host to which the request should be made. It may also + have a dictionary of additional_headers to send in the HTTP request. + full_uri: str Which is the target relative (lacks protocol and host) or + absolute URL to be opened. Example: + 'https://www.google.com/accounts/ClientLogin' or + 'base/feeds/snippets' where the server is set to www.google.com. + + Returns: + A tuple containing the httplib.HTTPConnection and the full_uri for the + request. 
+ """ + deprecation('calling deprecated function PrepareConnection') + (server, port, ssl, partial_uri) = ProcessUrl(service, full_uri) + if ssl: + # destination is https + proxy = os.environ.get('https_proxy') + if proxy: + (p_server, p_port, p_ssl, p_uri) = ProcessUrl(service, proxy, True) + proxy_username = os.environ.get('proxy-username') + if not proxy_username: + proxy_username = os.environ.get('proxy_username') + proxy_password = os.environ.get('proxy-password') + if not proxy_password: + proxy_password = os.environ.get('proxy_password') + if proxy_username: + user_auth = base64.encodestring('%s:%s' % (proxy_username, + proxy_password)) + proxy_authorization = ('Proxy-authorization: Basic %s\r\n' % ( + user_auth.strip())) + else: + proxy_authorization = '' + proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (server, port) + user_agent = 'User-Agent: %s\r\n' % ( + service.additional_headers['User-Agent']) + proxy_pieces = (proxy_connect + proxy_authorization + user_agent + + '\r\n') + + #now connect, very simple recv and error checking + p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM) + p_sock.connect((p_server,p_port)) + p_sock.sendall(proxy_pieces) + response = '' + + # Wait for the full response. + while response.find("\r\n\r\n") == -1: + response += p_sock.recv(8192) + + p_status=response.split()[1] + if p_status!=str(200): + raise 'Error status=',str(p_status) + + # Trivial setup for ssl socket. + ssl = socket.ssl(p_sock, None, None) + fake_sock = httplib.FakeSocket(p_sock, ssl) + + # Initalize httplib and replace with the proxy socket. 
+ connection = httplib.HTTPConnection(server) + connection.sock=fake_sock + full_uri = partial_uri + + else: + connection = httplib.HTTPSConnection(server, port) + full_uri = partial_uri + + else: + # destination is http + proxy = os.environ.get('http_proxy') + if proxy: + (p_server, p_port, p_ssl, p_uri) = ProcessUrl(service.server, proxy, True) + proxy_username = os.environ.get('proxy-username') + if not proxy_username: + proxy_username = os.environ.get('proxy_username') + proxy_password = os.environ.get('proxy-password') + if not proxy_password: + proxy_password = os.environ.get('proxy_password') + if proxy_username: + UseBasicAuth(service, proxy_username, proxy_password, True) + connection = httplib.HTTPConnection(p_server, p_port) + if not full_uri.startswith("http://"): + if full_uri.startswith("/"): + full_uri = "http://%s%s" % (service.server, full_uri) + else: + full_uri = "http://%s/%s" % (service.server, full_uri) + else: + connection = httplib.HTTPConnection(server, port) + full_uri = partial_uri + + return (connection, full_uri) + + +def UseBasicAuth(service, username, password, for_proxy=False): + """Sets an Authenticaiton: Basic HTTP header containing plaintext. + + Deprecated, use AtomService.use_basic_auth insread. + + The username and password are base64 encoded and added to an HTTP header + which will be included in each request. Note that your username and + password are sent in plaintext. The auth header is added to the + additional_headers dictionary in the service object. + + Args: + service: atom.AtomService or a subclass which has an + additional_headers dict as a member. 
+ username: str + password: str + """ + deprecation('calling deprecated function UseBasicAuth') + base_64_string = base64.encodestring('%s:%s' % (username, password)) + base_64_string = base_64_string.strip() + if for_proxy: + header_name = 'Proxy-Authorization' + else: + header_name = 'Authorization' + service.additional_headers[header_name] = 'Basic %s' % (base_64_string,) + + +def ProcessUrl(service, url, for_proxy=False): + """Processes a passed URL. If the URL does not begin with https?, then + the default value for server is used + + This method is deprecated, use atom.url.parse_url instead. + """ + if not isinstance(url, atom.url.Url): + url = atom.url.parse_url(url) + + server = url.host + ssl = False + port = 80 + + if not server: + if hasattr(service, 'server'): + server = service.server + else: + server = service + if not url.protocol and hasattr(service, 'ssl'): + ssl = service.ssl + if hasattr(service, 'port'): + port = service.port + else: + if url.protocol == 'https': + ssl = True + elif url.protocol == 'http': + ssl = False + if url.port: + port = int(url.port) + elif port == 80 and ssl: + port = 443 + + return (server, port, ssl, url.get_request_uri()) + +def DictionaryToParamList(url_parameters, escape_params=True): + """Convert a dictionary of URL arguments into a URL parameter string. + + This function is deprcated, use atom.url.Url instead. + + Args: + url_parameters: The dictionaty of key-value pairs which will be converted + into URL parameters. For example, + {'dry-run': 'true', 'foo': 'bar'} + will become ['dry-run=true', 'foo=bar']. + + Returns: + A list which contains a string for each key-value pair. The strings are + ready to be incorporated into a URL by using '&'.join([] + parameter_list) + """ + # Choose which function to use when modifying the query and parameters. + # Use quote_plus when escape_params is true. 
+ transform_op = [str, urllib.quote_plus][bool(escape_params)] + # Create a list of tuples containing the escaped version of the + # parameter-value pairs. + parameter_tuples = [(transform_op(param), transform_op(value)) + for param, value in (url_parameters or {}).items()] + # Turn parameter-value tuples into a list of strings in the form + # 'PARAMETER=VALUE'. + return ['='.join(x) for x in parameter_tuples] + + +def BuildUri(uri, url_params=None, escape_params=True): + """Converts a uri string and a collection of parameters into a URI. + + This function is deprcated, use atom.url.Url instead. + + Args: + uri: string + url_params: dict (optional) + escape_params: boolean (optional) + uri: string The start of the desired URI. This string can alrady contain + URL parameters. Examples: '/base/feeds/snippets', + '/base/feeds/snippets?bq=digital+camera' + url_parameters: dict (optional) Additional URL parameters to be included + in the query. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + + Returns: + string The URI consisting of the escaped URL parameters appended to the + initial uri string. + """ + # Prepare URL parameters for inclusion into the GET request. + parameter_list = DictionaryToParamList(url_params, escape_params) + + # Append the URL parameters to the URL. + if parameter_list: + if uri.find('?') != -1: + # If there are already URL parameters in the uri string, add the + # parameters after a new & character. + full_uri = '&'.join([uri] + parameter_list) + else: + # The uri string did not have any URL parameters (no ? character) + # so put a ? between the uri and URL parameters. 
+ full_uri = '%s%s' % (uri, '?%s' % ('&'.join([] + parameter_list))) + else: + full_uri = uri + + return full_uri + + +def HttpRequest(service, operation, data, uri, extra_headers=None, + url_params=None, escape_params=True, content_type='application/atom+xml'): + """Performs an HTTP call to the server, supports GET, POST, PUT, and DELETE. + + This method is deprecated, use atom.http.HttpClient.request instead. + + Usage example, perform and HTTP GET on http://www.google.com/: + import atom.service + client = atom.service.AtomService() + http_response = client.Get('http://www.google.com/') + or you could set the client.server to 'www.google.com' and use the + following: + client.server = 'www.google.com' + http_response = client.Get('/') + + Args: + service: atom.AtomService object which contains some of the parameters + needed to make the request. The following members are used to + construct the HTTP call: server (str), additional_headers (dict), + port (int), and ssl (bool). + operation: str The HTTP operation to be performed. This is usually one of + 'GET', 'POST', 'PUT', or 'DELETE' + data: ElementTree, filestream, list of parts, or other object which can be + converted to a string. + Should be set to None when performing a GET or PUT. + If data is a file-like object which can be read, this method will read + a chunk of 100K bytes at a time and send them. + If the data is a list of parts to be sent, each part will be evaluated + and sent. + uri: The beginning of the URL to which the request should be sent. + Examples: '/', '/base/feeds/snippets', + '/m8/feeds/contacts/default/base' + extra_headers: dict of strings. HTTP headers which should be sent + in the request. These headers are in addition to those stored in + service.additional_headers. + url_params: dict of strings. Key value pairs to be added to the URL as + URL parameters. For example {'foo':'bar', 'test':'param'} will + become ?foo=bar&test=param. + escape_params: bool default True. 
If true, the keys and values in + url_params will be URL escaped when the form is constructed + (Special characters converted to %XX form.) + content_type: str The MIME type for the data being sent. Defaults to + 'application/atom+xml', this is only used if data is set. + """ + deprecation('call to deprecated function HttpRequest') + full_uri = BuildUri(uri, url_params, escape_params) + (connection, full_uri) = PrepareConnection(service, full_uri) + + if extra_headers is None: + extra_headers = {} + + # Turn on debug mode if the debug member is set. + if service.debug: + connection.debuglevel = 1 + + connection.putrequest(operation, full_uri) + + # If the list of headers does not include a Content-Length, attempt to + # calculate it based on the data object. + if (data and not service.additional_headers.has_key('Content-Length') and + not extra_headers.has_key('Content-Length')): + content_length = CalculateDataLength(data) + if content_length: + extra_headers['Content-Length'] = str(content_length) + + if content_type: + extra_headers['Content-Type'] = content_type + + # Send the HTTP headers. + if isinstance(service.additional_headers, dict): + for header in service.additional_headers: + connection.putheader(header, service.additional_headers[header]) + if isinstance(extra_headers, dict): + for header in extra_headers: + connection.putheader(header, extra_headers[header]) + connection.endheaders() + + # If there is data, send it in the request. + if data: + if isinstance(data, list): + for data_part in data: + __SendDataPart(data_part, connection) + else: + __SendDataPart(data, connection) + + # Return the HTTP Response from the server. + return connection.getresponse() + + +def __SendDataPart(data, connection): + """This method is deprecated, use atom.http._send_data_part""" + deprecated('call to deprecated function __SendDataPart') + if isinstance(data, str): + #TODO add handling for unicode. 
+ connection.send(data) + return + elif ElementTree.iselement(data): + connection.send(ElementTree.tostring(data)) + return + # Check to see if data is a file-like object that has a read method. + elif hasattr(data, 'read'): + # Read the file and send it a chunk at a time. + while 1: + binarydata = data.read(100000) + if binarydata == '': break + connection.send(binarydata) + return + else: + # The data object was not a file. + # Try to convert to a string and send the data. + connection.send(str(data)) + return + + +def CalculateDataLength(data): + """Attempts to determine the length of the data to send. + + This method will respond with a length only if the data is a string or + and ElementTree element. + + Args: + data: object If this is not a string or ElementTree element this funtion + will return None. + """ + if isinstance(data, str): + return len(data) + elif isinstance(data, list): + return None + elif ElementTree.iselement(data): + return len(ElementTree.tostring(data)) + elif hasattr(data, 'read'): + # If this is a file-like object, don't try to guess the length. + return None + else: + return len(str(data)) + + +def deprecation(message): + warnings.warn(message, DeprecationWarning, stacklevel=2) diff --git a/gdata.py-1.2.3/src/atom/token_store.py b/gdata.py-1.2.3/src/atom/token_store.py new file mode 100644 index 0000000..d618965 --- /dev/null +++ b/gdata.py-1.2.3/src/atom/token_store.py @@ -0,0 +1,117 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""This module provides a TokenStore class which is designed to manage +auth tokens required for different services. + +Each token is valid for a set of scopes which is the start of a URL. An HTTP +client will use a token store to find a valid Authorization header to send +in requests to the specified URL. If the HTTP client determines that a token +has expired or been revoked, it can remove the token from the store so that +it will not be used in future requests. +""" + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import atom.http_interface +import atom.url + + +SCOPE_ALL = 'http' + + +class TokenStore(object): + """Manages Authorization tokens which will be sent in HTTP headers.""" + def __init__(self, scoped_tokens=None): + self._tokens = scoped_tokens or {} + + def add_token(self, token): + """Adds a new token to the store (replaces tokens with the same scope). + + Args: + token: A subclass of http_interface.GenericToken. The token object is + responsible for adding the Authorization header to the HTTP request. + The scopes defined in the token are used to determine if the token + is valid for a requested scope when find_token is called. + + Returns: + True if the token was added, False if the token was not added becase + no scopes were provided. + """ + if not hasattr(token, 'scopes') or not token.scopes: + return False + + for scope in token.scopes: + self._tokens[str(scope)] = token + return True + + def find_token(self, url): + """Selects an Authorization header token which can be used for the URL. + + Args: + url: str or atom.url.Url or a list containing the same. + The URL which is going to be requested. All + tokens are examined to see if any scopes begin match the beginning + of the URL. The first match found is returned. + + Returns: + The token object which should execute the HTTP request. 
If there was + no token for the url (the url did not begin with any of the token + scopes available), then the atom.http_interface.GenericToken will be + returned because the GenericToken calls through to the http client + without adding an Authorization header. + """ + if url is None: + return None + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + if url in self._tokens: + token = self._tokens[url] + if token.valid_for_scope(url): + return token + else: + del self._tokens[url] + for scope, token in self._tokens.iteritems(): + if token.valid_for_scope(url): + return token + return atom.http_interface.GenericToken() + + def remove_token(self, token): + """Removes the token from the token_store. + + This method is used when a token is determined to be invalid. If the + token was found by find_token, but resulted in a 401 or 403 error stating + that the token was invlid, then the token should be removed to prevent + future use. + + Returns: + True if a token was found and then removed from the token + store. False if the token was not in the TokenStore. + """ + token_found = False + scopes_to_delete = [] + for scope, stored_token in self._tokens.iteritems(): + if stored_token == token: + scopes_to_delete.append(scope) + token_found = True + for scope in scopes_to_delete: + del self._tokens[scope] + return token_found + + def remove_all_tokens(self): + self._tokens = {} diff --git a/gdata.py-1.2.3/src/atom/url.py b/gdata.py-1.2.3/src/atom/url.py new file mode 100644 index 0000000..0effa10 --- /dev/null +++ b/gdata.py-1.2.3/src/atom/url.py @@ -0,0 +1,139 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import urlparse +import urllib + + +DEFAULT_PROTOCOL = 'http' +DEFAULT_PORT = 80 + + +def parse_url(url_string): + """Creates a Url object which corresponds to the URL string. + + This method can accept partial URLs, but it will leave missing + members of the Url unset. + """ + parts = urlparse.urlparse(url_string) + url = Url() + if parts[0]: + url.protocol = parts[0] + if parts[1]: + host_parts = parts[1].split(':') + if host_parts[0]: + url.host = host_parts[0] + if len(host_parts) > 1: + url.port = host_parts[1] + if parts[2]: + url.path = parts[2] + if parts[4]: + param_pairs = parts[4].split('&') + for pair in param_pairs: + pair_parts = pair.split('=') + if len(pair_parts) > 1: + url.params[urllib.unquote_plus(pair_parts[0])] = ( + urllib.unquote_plus(pair_parts[1])) + elif len(pair_parts) == 1: + url.params[urllib.unquote_plus(pair_parts[0])] = None + return url + +class Url(object): + """Represents a URL and implements comparison logic. + + URL strings which are not identical can still be equivalent, so this object + provides a better interface for comparing and manipulating URLs than + strings. URL parameters are represented as a dictionary of strings, and + defaults are used for the protocol (http) and port (80) if not provided. 
+ """ + def __init__(self, protocol=None, host=None, port=None, path=None, + params=None): + self.protocol = protocol + self.host = host + self.port = port + self.path = path + self.params = params or {} + + def to_string(self): + url_parts = ['', '', '', '', '', ''] + if self.protocol: + url_parts[0] = self.protocol + if self.host: + if self.port: + url_parts[1] = ':'.join((self.host, str(self.port))) + else: + url_parts[1] = self.host + if self.path: + url_parts[2] = self.path + if self.params: + url_parts[4] = self.get_param_string() + return urlparse.urlunparse(url_parts) + + def get_param_string(self): + param_pairs = [] + for key, value in self.params.iteritems(): + param_pairs.append('='.join((urllib.quote_plus(key), + urllib.quote_plus(str(value))))) + return '&'.join(param_pairs) + + def get_request_uri(self): + """Returns the path with the parameters escaped and appended.""" + param_string = self.get_param_string() + if param_string: + return '?'.join([self.path, param_string]) + else: + return self.path + + def __cmp__(self, other): + if not isinstance(other, Url): + return cmp(self.to_string(), str(other)) + difference = 0 + # Compare the protocol + if self.protocol and other.protocol: + difference = cmp(self.protocol, other.protocol) + elif self.protocol and not other.protocol: + difference = cmp(self.protocol, DEFAULT_PROTOCOL) + elif not self.protocol and other.protocol: + difference = cmp(DEFAULT_PROTOCOL, other.protocol) + if difference != 0: + return difference + # Compare the host + difference = cmp(self.host, other.host) + if difference != 0: + return difference + # Compare the port + if self.port and other.port: + difference = cmp(self.port, other.port) + elif self.port and not other.port: + difference = cmp(self.port, DEFAULT_PORT) + elif not self.port and other.port: + difference = cmp(DEFAULT_PORT, other.port) + if difference != 0: + return difference + # Compare the path + difference = cmp(self.path, other.path) + if difference != 0: + 
return difference + # Compare the parameters + return cmp(self.params, other.params) + + def __str__(self): + return self.to_string() + diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Cipher/AES.pyd b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/AES.pyd new file mode 100755 index 0000000..707b36f Binary files /dev/null and b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/AES.pyd differ diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Cipher/ARC2.pyd b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/ARC2.pyd new file mode 100755 index 0000000..a9dfbf6 Binary files /dev/null and b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/ARC2.pyd differ diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Cipher/ARC4.pyd b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/ARC4.pyd new file mode 100755 index 0000000..4cd51f2 Binary files /dev/null and b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/ARC4.pyd differ diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Cipher/Blowfish.pyd b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/Blowfish.pyd new file mode 100755 index 0000000..cbdc881 Binary files /dev/null and b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/Blowfish.pyd differ diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Cipher/CAST.pyd b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/CAST.pyd new file mode 100755 index 0000000..c3e1fa6 Binary files /dev/null and b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/CAST.pyd differ diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Cipher/DES.pyd b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/DES.pyd new file mode 100755 index 0000000..b9967dc Binary files /dev/null and b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/DES.pyd differ diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Cipher/DES3.pyd b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/DES3.pyd new file mode 100755 index 0000000..6768126 Binary files /dev/null and b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/DES3.pyd differ diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Cipher/IDEA.pyd b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/IDEA.pyd new file mode 100755 index 0000000..860c121 Binary files /dev/null and 
b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/IDEA.pyd differ diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Cipher/RC5.pyd b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/RC5.pyd new file mode 100755 index 0000000..ef39b3e Binary files /dev/null and b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/RC5.pyd differ diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Cipher/XOR.pyd b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/XOR.pyd new file mode 100755 index 0000000..fb53d53 Binary files /dev/null and b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/XOR.pyd differ diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Cipher/__init__.py b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/__init__.py new file mode 100755 index 0000000..3b2f855 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/Cipher/__init__.py @@ -0,0 +1,33 @@ +"""Secret-key encryption algorithms. + +Secret-key encryption algorithms transform plaintext in some way that +is dependent on a key, producing ciphertext. This transformation can +easily be reversed, if (and, hopefully, only if) one knows the key. + +The encryption modules here all support the interface described in PEP +272, "API for Block Encryption Algorithms". + +If you don't know which algorithm to choose, use AES because it's +standard and has undergone a fair bit of examination. + +Crypto.Cipher.AES Advanced Encryption Standard +Crypto.Cipher.ARC2 Alleged RC2 +Crypto.Cipher.ARC4 Alleged RC4 +Crypto.Cipher.Blowfish +Crypto.Cipher.CAST +Crypto.Cipher.DES The Data Encryption Standard. Very commonly used + in the past, but today its 56-bit keys are too small. +Crypto.Cipher.DES3 Triple DES. +Crypto.Cipher.IDEA +Crypto.Cipher.RC5 +Crypto.Cipher.XOR The simple XOR cipher. 
+""" + +__all__ = ['AES', 'ARC2', 'ARC4', + 'Blowfish', 'CAST', 'DES', 'DES3', 'IDEA', 'RC5', + 'XOR' + ] + +__revision__ = "$Id: __init__.py,v 1.7 2003/02/28 15:28:35 akuchling Exp $" + + diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Hash/HMAC.py b/gdata.py-1.2.3/src/gdata/Crypto/Hash/HMAC.py new file mode 100755 index 0000000..eeb5782 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/Hash/HMAC.py @@ -0,0 +1,108 @@ +"""HMAC (Keyed-Hashing for Message Authentication) Python module. + +Implements the HMAC algorithm as described by RFC 2104. + +This is just a copy of the Python 2.2 HMAC module, modified to work when +used on versions of Python before 2.2. +""" + +__revision__ = "$Id: HMAC.py,v 1.5 2002/07/25 17:19:02 z3p Exp $" + +import string + +def _strxor(s1, s2): + """Utility method. XOR the two strings s1 and s2 (must have same length). + """ + return "".join(map(lambda x, y: chr(ord(x) ^ ord(y)), s1, s2)) + +# The size of the digests returned by HMAC depends on the underlying +# hashing module used. +digest_size = None + +class HMAC: + """RFC2104 HMAC class. + + This supports the API for Cryptographic Hash Functions (PEP 247). + """ + + def __init__(self, key, msg = None, digestmod = None): + """Create a new HMAC object. + + key: key for the keyed hash object. + msg: Initial input for the hash, if provided. + digestmod: A module supporting PEP 247. Defaults to the md5 module. 
+ """ + if digestmod == None: + import md5 + digestmod = md5 + + self.digestmod = digestmod + self.outer = digestmod.new() + self.inner = digestmod.new() + try: + self.digest_size = digestmod.digest_size + except AttributeError: + self.digest_size = len(self.outer.digest()) + + blocksize = 64 + ipad = "\x36" * blocksize + opad = "\x5C" * blocksize + + if len(key) > blocksize: + key = digestmod.new(key).digest() + + key = key + chr(0) * (blocksize - len(key)) + self.outer.update(_strxor(key, opad)) + self.inner.update(_strxor(key, ipad)) + if (msg): + self.update(msg) + +## def clear(self): +## raise NotImplementedError, "clear() method not available in HMAC." + + def update(self, msg): + """Update this hashing object with the string msg. + """ + self.inner.update(msg) + + def copy(self): + """Return a separate copy of this hashing object. + + An update to this copy won't affect the original object. + """ + other = HMAC("") + other.digestmod = self.digestmod + other.inner = self.inner.copy() + other.outer = self.outer.copy() + return other + + def digest(self): + """Return the hash value of this hashing object. + + This returns a string containing 8-bit data. The object is + not altered in any way by this function; you can continue + updating the object after calling this function. + """ + h = self.outer.copy() + h.update(self.inner.digest()) + return h.digest() + + def hexdigest(self): + """Like digest(), but returns a string of hexadecimal digits instead. + """ + return "".join([string.zfill(hex(ord(x))[2:], 2) + for x in tuple(self.digest())]) + +def new(key, msg = None, digestmod = None): + """Create a new hashing object and return it. + + key: The starting key for the hash. + msg: if available, will immediately be hashed into the object's starting + state. + + You can now feed arbitrary strings into the object using its update() + method, and can ask for the hash value at any time by calling its digest() + method. 
+ """ + return HMAC(key, msg, digestmod) + diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Hash/MD2.pyd b/gdata.py-1.2.3/src/gdata/Crypto/Hash/MD2.pyd new file mode 100755 index 0000000..d117062 Binary files /dev/null and b/gdata.py-1.2.3/src/gdata/Crypto/Hash/MD2.pyd differ diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Hash/MD4.pyd b/gdata.py-1.2.3/src/gdata/Crypto/Hash/MD4.pyd new file mode 100755 index 0000000..4ba243e Binary files /dev/null and b/gdata.py-1.2.3/src/gdata/Crypto/Hash/MD4.pyd differ diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Hash/MD5.py b/gdata.py-1.2.3/src/gdata/Crypto/Hash/MD5.py new file mode 100755 index 0000000..b0eba39 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/Hash/MD5.py @@ -0,0 +1,13 @@ + +# Just use the MD5 module from the Python standard library + +__revision__ = "$Id: MD5.py,v 1.4 2002/07/11 14:31:19 akuchling Exp $" + +from md5 import * + +import md5 +if hasattr(md5, 'digestsize'): + digest_size = digestsize + del digestsize +del md5 + diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Hash/RIPEMD.pyd b/gdata.py-1.2.3/src/gdata/Crypto/Hash/RIPEMD.pyd new file mode 100755 index 0000000..a418e75 Binary files /dev/null and b/gdata.py-1.2.3/src/gdata/Crypto/Hash/RIPEMD.pyd differ diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Hash/SHA.py b/gdata.py-1.2.3/src/gdata/Crypto/Hash/SHA.py new file mode 100755 index 0000000..ea3c6a3 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/Hash/SHA.py @@ -0,0 +1,11 @@ + +# Just use the SHA module from the Python standard library + +__revision__ = "$Id: SHA.py,v 1.4 2002/07/11 14:31:19 akuchling Exp $" + +from sha import * +import sha +if hasattr(sha, 'digestsize'): + digest_size = digestsize + del digestsize +del sha diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Hash/SHA256.pyd b/gdata.py-1.2.3/src/gdata/Crypto/Hash/SHA256.pyd new file mode 100755 index 0000000..865a16c Binary files /dev/null and b/gdata.py-1.2.3/src/gdata/Crypto/Hash/SHA256.pyd differ diff --git 
a/gdata.py-1.2.3/src/gdata/Crypto/Hash/__init__.py b/gdata.py-1.2.3/src/gdata/Crypto/Hash/__init__.py new file mode 100755 index 0000000..920fe74 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/Hash/__init__.py @@ -0,0 +1,24 @@ +"""Hashing algorithms + +Hash functions take arbitrary strings as input, and produce an output +of fixed size that is dependent on the input; it should never be +possible to derive the input data given only the hash function's +output. Hash functions can be used simply as a checksum, or, in +association with a public-key algorithm, can be used to implement +digital signatures. + +The hashing modules here all support the interface described in PEP +247, "API for Cryptographic Hash Functions". + +Submodules: +Crypto.Hash.HMAC RFC 2104: Keyed-Hashing for Message Authentication +Crypto.Hash.MD2 +Crypto.Hash.MD4 +Crypto.Hash.MD5 +Crypto.Hash.RIPEMD +Crypto.Hash.SHA +""" + +__all__ = ['HMAC', 'MD2', 'MD4', 'MD5', 'RIPEMD', 'SHA', 'SHA256'] +__revision__ = "$Id: __init__.py,v 1.6 2003/12/19 14:24:25 akuchling Exp $" + diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Protocol/AllOrNothing.py b/gdata.py-1.2.3/src/gdata/Crypto/Protocol/AllOrNothing.py new file mode 100755 index 0000000..6f3505d --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/Protocol/AllOrNothing.py @@ -0,0 +1,295 @@ +"""This file implements all-or-nothing package transformations. + +An all-or-nothing package transformation is one in which some text is +transformed into message blocks, such that all blocks must be obtained before +the reverse transformation can be applied. Thus, if any blocks are corrupted +or lost, the original message cannot be reproduced. + +An all-or-nothing package transformation is not encryption, although a block +cipher algorithm is used. The encryption key is randomly generated and is +extractable from the message blocks. + +This class implements the All-Or-Nothing package transformation algorithm +described in: + +Ronald L. Rivest. 
"All-Or-Nothing Encryption and The Package Transform" +http://theory.lcs.mit.edu/~rivest/fusion.pdf + +""" + +__revision__ = "$Id: AllOrNothing.py,v 1.8 2003/02/28 15:23:20 akuchling Exp $" + +import operator +import string +from Crypto.Util.number import bytes_to_long, long_to_bytes + + + +class AllOrNothing: + """Class implementing the All-or-Nothing package transform. + + Methods for subclassing: + + _inventkey(key_size): + Returns a randomly generated key. Subclasses can use this to + implement better random key generating algorithms. The default + algorithm is probably not very cryptographically secure. + + """ + + def __init__(self, ciphermodule, mode=None, IV=None): + """AllOrNothing(ciphermodule, mode=None, IV=None) + + ciphermodule is a module implementing the cipher algorithm to + use. It must provide the PEP272 interface. + + Note that the encryption key is randomly generated + automatically when needed. Optional arguments mode and IV are + passed directly through to the ciphermodule.new() method; they + are the feedback mode and initialization vector to use. All + three arguments must be the same for the object used to create + the digest, and to undigest'ify the message blocks. + """ + + self.__ciphermodule = ciphermodule + self.__mode = mode + self.__IV = IV + self.__key_size = ciphermodule.key_size + if self.__key_size == 0: + self.__key_size = 16 + + __K0digit = chr(0x69) + + def digest(self, text): + """digest(text:string) : [string] + + Perform the All-or-Nothing package transform on the given + string. Output is a list of message blocks describing the + transformed text, where each block is a string of bit length equal + to the ciphermodule's block_size. + """ + + # generate a random session key and K0, the key used to encrypt the + # hash blocks. Rivest calls this a fixed, publically-known encryption + # key, but says nothing about the security implications of this key or + # how to choose it. 
+ key = self._inventkey(self.__key_size) + K0 = self.__K0digit * self.__key_size + + # we need two cipher objects here, one that is used to encrypt the + # message blocks and one that is used to encrypt the hashes. The + # former uses the randomly generated key, while the latter uses the + # well-known key. + mcipher = self.__newcipher(key) + hcipher = self.__newcipher(K0) + + # Pad the text so that its length is a multiple of the cipher's + # block_size. Pad with trailing spaces, which will be eliminated in + # the undigest() step. + block_size = self.__ciphermodule.block_size + padbytes = block_size - (len(text) % block_size) + text = text + ' ' * padbytes + + # Run through the algorithm: + # s: number of message blocks (size of text / block_size) + # input sequence: m1, m2, ... ms + # random key K' (`key' in the code) + # Compute output sequence: m'1, m'2, ... m's' for s' = s + 1 + # Let m'i = mi ^ E(K', i) for i = 1, 2, 3, ..., s + # Let m's' = K' ^ h1 ^ h2 ^ ... hs + # where hi = E(K0, m'i ^ i) for i = 1, 2, ... 
s + # + # The one complication I add is that the last message block is hard + # coded to the number of padbytes added, so that these can be stripped + # during the undigest() step + s = len(text) / block_size + blocks = [] + hashes = [] + for i in range(1, s+1): + start = (i-1) * block_size + end = start + block_size + mi = text[start:end] + assert len(mi) == block_size + cipherblock = mcipher.encrypt(long_to_bytes(i, block_size)) + mticki = bytes_to_long(mi) ^ bytes_to_long(cipherblock) + blocks.append(mticki) + # calculate the hash block for this block + hi = hcipher.encrypt(long_to_bytes(mticki ^ i, block_size)) + hashes.append(bytes_to_long(hi)) + + # Add the padbytes length as a message block + i = i + 1 + cipherblock = mcipher.encrypt(long_to_bytes(i, block_size)) + mticki = padbytes ^ bytes_to_long(cipherblock) + blocks.append(mticki) + + # calculate this block's hash + hi = hcipher.encrypt(long_to_bytes(mticki ^ i, block_size)) + hashes.append(bytes_to_long(hi)) + + # Now calculate the last message block of the sequence 1..s'. This + # will contain the random session key XOR'd with all the hash blocks, + # so that for undigest(), once all the hash blocks are calculated, the + # session key can be trivially extracted. Calculating all the hash + # blocks requires that all the message blocks be received, thus the + # All-or-Nothing algorithm succeeds. + mtick_stick = bytes_to_long(key) ^ reduce(operator.xor, hashes) + blocks.append(mtick_stick) + + # we convert the blocks to strings since in Python, byte sequences are + # always represented as strings. This is more consistent with the + # model that encryption and hash algorithms always operate on strings. + return map(long_to_bytes, blocks) + + + def undigest(self, blocks): + """undigest(blocks : [string]) : string + + Perform the reverse package transformation on a list of message + blocks. Note that the ciphermodule used for both transformations + must be the same. 
blocks is a list of strings of bit length + equal to the ciphermodule's block_size. + """ + + # better have at least 2 blocks, for the padbytes package and the hash + # block accumulator + if len(blocks) < 2: + raise ValueError, "List must be at least length 2." + + # blocks is a list of strings. We need to deal with them as long + # integers + blocks = map(bytes_to_long, blocks) + + # Calculate the well-known key, to which the hash blocks are + # encrypted, and create the hash cipher. + K0 = self.__K0digit * self.__key_size + hcipher = self.__newcipher(K0) + + # Since we have all the blocks (or this method would have been called + # prematurely), we can calcualte all the hash blocks. + hashes = [] + for i in range(1, len(blocks)): + mticki = blocks[i-1] ^ i + hi = hcipher.encrypt(long_to_bytes(mticki)) + hashes.append(bytes_to_long(hi)) + + # now we can calculate K' (key). remember the last block contains + # m's' which we don't include here + key = blocks[-1] ^ reduce(operator.xor, hashes) + + # and now we can create the cipher object + mcipher = self.__newcipher(long_to_bytes(key)) + block_size = self.__ciphermodule.block_size + + # And we can now decode the original message blocks + parts = [] + for i in range(1, len(blocks)): + cipherblock = mcipher.encrypt(long_to_bytes(i, block_size)) + mi = blocks[i-1] ^ bytes_to_long(cipherblock) + parts.append(mi) + + # The last message block contains the number of pad bytes appended to + # the original text string, such that its length was an even multiple + # of the cipher's block_size. This number should be small enough that + # the conversion from long integer to integer should never overflow + padbytes = int(parts[-1]) + text = string.join(map(long_to_bytes, parts[:-1]), '') + return text[:-padbytes] + + def _inventkey(self, key_size): + # TBD: Not a very secure algorithm. 
Eventually, I'd like to use JHy's + # kernelrand module + import time + from Crypto.Util import randpool + # TBD: key_size * 2 to work around possible bug in RandomPool? + pool = randpool.RandomPool(key_size * 2) + while key_size > pool.entropy: + pool.add_event() + + # we now have enough entropy in the pool to get a key_size'd key + return pool.get_bytes(key_size) + + def __newcipher(self, key): + if self.__mode is None and self.__IV is None: + return self.__ciphermodule.new(key) + elif self.__IV is None: + return self.__ciphermodule.new(key, self.__mode) + else: + return self.__ciphermodule.new(key, self.__mode, self.__IV) + + + +if __name__ == '__main__': + import sys + import getopt + import base64 + + usagemsg = '''\ +Test module usage: %(program)s [-c cipher] [-l] [-h] + +Where: + --cipher module + -c module + Cipher module to use. Default: %(ciphermodule)s + + --aslong + -l + Print the encoded message blocks as long integers instead of base64 + encoded strings + + --help + -h + Print this help message +''' + + ciphermodule = 'AES' + aslong = 0 + + def usage(code, msg=None): + if msg: + print msg + print usagemsg % {'program': sys.argv[0], + 'ciphermodule': ciphermodule} + sys.exit(code) + + try: + opts, args = getopt.getopt(sys.argv[1:], + 'c:l', ['cipher=', 'aslong']) + except getopt.error, msg: + usage(1, msg) + + if args: + usage(1, 'Too many arguments') + + for opt, arg in opts: + if opt in ('-h', '--help'): + usage(0) + elif opt in ('-c', '--cipher'): + ciphermodule = arg + elif opt in ('-l', '--aslong'): + aslong = 1 + + # ugly hack to force __import__ to give us the end-path module + module = __import__('Crypto.Cipher.'+ciphermodule, None, None, ['new']) + + a = AllOrNothing(module) + print 'Original text:\n==========' + print __doc__ + print '==========' + msgblocks = a.digest(__doc__) + print 'message blocks:' + for i, blk in map(None, range(len(msgblocks)), msgblocks): + # base64 adds a trailing newline + print ' %3d' % i, + if aslong: + print 
bytes_to_long(blk) + else: + print base64.encodestring(blk)[:-1] + # + # get a new undigest-only object so there's no leakage + b = AllOrNothing(module) + text = b.undigest(msgblocks) + if text == __doc__: + print 'They match!' + else: + print 'They differ!' diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Protocol/Chaffing.py b/gdata.py-1.2.3/src/gdata/Crypto/Protocol/Chaffing.py new file mode 100755 index 0000000..fdfb82d --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/Protocol/Chaffing.py @@ -0,0 +1,229 @@ +"""This file implements the chaffing algorithm. + +Winnowing and chaffing is a technique for enhancing privacy without requiring +strong encryption. In short, the technique takes a set of authenticated +message blocks (the wheat) and adds a number of chaff blocks which have +randomly chosen data and MAC fields. This means that to an adversary, the +chaff blocks look as valid as the wheat blocks, and so the authentication +would have to be performed on every block. By tailoring the number of chaff +blocks added to the message, the sender can make breaking the message +computationally infeasible. There are many other interesting properties of +the winnow/chaff technique. + +For example, say Alice is sending a message to Bob. She packetizes the +message and performs an all-or-nothing transformation on the packets. Then +she authenticates each packet with a message authentication code (MAC). The +MAC is a hash of the data packet, and there is a secret key which she must +share with Bob (key distribution is an exercise left to the reader). She then +adds a serial number to each packet, and sends the packets to Bob. + +Bob receives the packets, and using the shared secret authentication key, +authenticates the MACs for each packet. Those packets that have bad MACs are +simply discarded. The remainder are sorted by serial number, and passed +through the reverse all-or-nothing transform. 
The transform means that an +eavesdropper (say Eve) must acquire all the packets before any of the data can +be read. If even one packet is missing, the data is useless. + +There's one twist: by adding chaff packets, Alice and Bob can make Eve's job +much harder, since Eve now has to break the shared secret key, or try every +combination of wheat and chaff packet to read any of the message. The cool +thing is that Bob doesn't need to add any additional code; the chaff packets +are already filtered out because their MACs don't match (in all likelihood -- +since the data and MACs for the chaff packets are randomly chosen it is +possible, but very unlikely that a chaff MAC will match the chaff data). And +Alice need not even be the party adding the chaff! She could be completely +unaware that a third party, say Charles, is adding chaff packets to her +messages as they are transmitted. + +For more information on winnowing and chaffing see this paper: + +Ronald L. Rivest, "Chaffing and Winnowing: Confidentiality without Encryption" +http://theory.lcs.mit.edu/~rivest/chaffing.txt + +""" + +__revision__ = "$Id: Chaffing.py,v 1.7 2003/02/28 15:23:21 akuchling Exp $" + +from Crypto.Util.number import bytes_to_long + +class Chaff: + """Class implementing the chaff adding algorithm. + + Methods for subclasses: + + _randnum(size): + Returns a randomly generated number with a byte-length equal + to size. Subclasses can use this to implement better random + data and MAC generating algorithms. The default algorithm is + probably not very cryptographically secure. It is most + important that the chaff data does not contain any patterns + that can be used to discern it from wheat data without running + the MAC. + + """ + + def __init__(self, factor=1.0, blocksper=1): + """Chaff(factor:float, blocksper:int) + + factor is the number of message blocks to add chaff to, + expressed as a percentage between 0.0 and 1.0. 
blocksper is + the number of chaff blocks to include for each block being + chaffed. Thus the defaults add one chaff block to every + message block. By changing the defaults, you can adjust how + computationally difficult it could be for an adversary to + brute-force crack the message. The difficulty is expressed + as: + + pow(blocksper, int(factor * number-of-blocks)) + + For ease of implementation, when factor < 1.0, only the first + int(factor*number-of-blocks) message blocks are chaffed. + """ + + if not (0.0<=factor<=1.0): + raise ValueError, "'factor' must be between 0.0 and 1.0" + if blocksper < 0: + raise ValueError, "'blocksper' must be zero or more" + + self.__factor = factor + self.__blocksper = blocksper + + + def chaff(self, blocks): + """chaff( [(serial-number:int, data:string, MAC:string)] ) + : [(int, string, string)] + + Add chaff to message blocks. blocks is a list of 3-tuples of the + form (serial-number, data, MAC). + + Chaff is created by choosing a random number of the same + byte-length as data, and another random number of the same + byte-length as MAC. The message block's serial number is + placed on the chaff block and all the packet's chaff blocks + are randomly interspersed with the single wheat block. This + method then returns a list of 3-tuples of the same form. + Chaffed blocks will contain multiple instances of 3-tuples + with the same serial number, but the only way to figure out + which blocks are wheat and which are chaff is to perform the + MAC hash and compare values. + """ + + chaffedblocks = [] + + # count is the number of blocks to add chaff to. blocksper is the + # number of chaff blocks to add per message block that is being + # chaffed. 
+ count = len(blocks) * self.__factor + blocksper = range(self.__blocksper) + for i, wheat in map(None, range(len(blocks)), blocks): + # it shouldn't matter which of the n blocks we add chaff to, so for + # ease of implementation, we'll just add them to the first count + # blocks + if i < count: + serial, data, mac = wheat + datasize = len(data) + macsize = len(mac) + addwheat = 1 + # add chaff to this block + for j in blocksper: + import sys + chaffdata = self._randnum(datasize) + chaffmac = self._randnum(macsize) + chaff = (serial, chaffdata, chaffmac) + # mix up the order, if the 5th bit is on then put the + # wheat on the list + if addwheat and bytes_to_long(self._randnum(16)) & 0x40: + chaffedblocks.append(wheat) + addwheat = 0 + chaffedblocks.append(chaff) + if addwheat: + chaffedblocks.append(wheat) + else: + # just add the wheat + chaffedblocks.append(wheat) + return chaffedblocks + + def _randnum(self, size): + # TBD: Not a very secure algorithm. + # TBD: size * 2 to work around possible bug in RandomPool + from Crypto.Util import randpool + import time + pool = randpool.RandomPool(size * 2) + while size > pool.entropy: + pass + + # we now have enough entropy in the pool to get size bytes of random + # data... well, probably + return pool.get_bytes(size) + + + +if __name__ == '__main__': + text = """\ +We hold these truths to be self-evident, that all men are created equal, that +they are endowed by their Creator with certain unalienable Rights, that among +these are Life, Liberty, and the pursuit of Happiness. That to secure these +rights, Governments are instituted among Men, deriving their just powers from +the consent of the governed. That whenever any Form of Government becomes +destructive of these ends, it is the Right of the People to alter or to +abolish it, and to institute new Government, laying its foundation on such +principles and organizing its powers in such form, as to them shall seem most +likely to effect their Safety and Happiness. 
+""" + print 'Original text:\n==========' + print text + print '==========' + + # first transform the text into packets + blocks = [] ; size = 40 + for i in range(0, len(text), size): + blocks.append( text[i:i+size] ) + + # now get MACs for all the text blocks. The key is obvious... + print 'Calculating MACs...' + from Crypto.Hash import HMAC, SHA + key = 'Jefferson' + macs = [HMAC.new(key, block, digestmod=SHA).digest() + for block in blocks] + + assert len(blocks) == len(macs) + + # put these into a form acceptable as input to the chaffing procedure + source = [] + m = map(None, range(len(blocks)), blocks, macs) + print m + for i, data, mac in m: + source.append((i, data, mac)) + + # now chaff these + print 'Adding chaff...' + c = Chaff(factor=0.5, blocksper=2) + chaffed = c.chaff(source) + + from base64 import encodestring + + # print the chaffed message blocks. meanwhile, separate the wheat from + # the chaff + + wheat = [] + print 'chaffed message blocks:' + for i, data, mac in chaffed: + # do the authentication + h = HMAC.new(key, data, digestmod=SHA) + pmac = h.digest() + if pmac == mac: + tag = '-->' + wheat.append(data) + else: + tag = ' ' + # base64 adds a trailing newline + print tag, '%3d' % i, \ + repr(data), encodestring(mac)[:-1] + + # now decode the message packets and check it against the original text + print 'Undigesting wheat...' + newtext = "".join(wheat) + if newtext == text: + print 'They match!' + else: + print 'They differ!' diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Protocol/__init__.py b/gdata.py-1.2.3/src/gdata/Crypto/Protocol/__init__.py new file mode 100755 index 0000000..a6d68bc --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/Protocol/__init__.py @@ -0,0 +1,17 @@ + +"""Cryptographic protocols + +Implements various cryptographic protocols. (Don't expect to find +network protocols here.) 
+ +Crypto.Protocol.AllOrNothing Transforms a message into a set of message + blocks, such that the blocks can be + recombined to get the message back. + +Crypto.Protocol.Chaffing Takes a set of authenticated message blocks + (the wheat) and adds a number of + randomly generated blocks (the chaff). +""" + +__all__ = ['AllOrNothing', 'Chaffing'] +__revision__ = "$Id: __init__.py,v 1.4 2003/02/28 15:23:21 akuchling Exp $" diff --git a/gdata.py-1.2.3/src/gdata/Crypto/PublicKey/DSA.py b/gdata.py-1.2.3/src/gdata/Crypto/PublicKey/DSA.py new file mode 100755 index 0000000..7947b6f --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/PublicKey/DSA.py @@ -0,0 +1,238 @@ + +# +# DSA.py : Digital Signature Algorithm +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. 
+# + +__revision__ = "$Id: DSA.py,v 1.16 2004/05/06 12:52:54 akuchling Exp $" + +from Crypto.PublicKey.pubkey import * +from Crypto.Util import number +from Crypto.Util.number import bytes_to_long, long_to_bytes +from Crypto.Hash import SHA + +try: + from Crypto.PublicKey import _fastmath +except ImportError: + _fastmath = None + +class error (Exception): + pass + +def generateQ(randfunc): + S=randfunc(20) + hash1=SHA.new(S).digest() + hash2=SHA.new(long_to_bytes(bytes_to_long(S)+1)).digest() + q = bignum(0) + for i in range(0,20): + c=ord(hash1[i])^ord(hash2[i]) + if i==0: + c=c | 128 + if i==19: + c= c | 1 + q=q*256+c + while (not isPrime(q)): + q=q+2 + if pow(2,159L) < q < pow(2,160L): + return S, q + raise error, 'Bad q value generated' + +def generate(bits, randfunc, progress_func=None): + """generate(bits:int, randfunc:callable, progress_func:callable) + + Generate a DSA key of length 'bits', using 'randfunc' to get + random data and 'progress_func', if present, to display + the progress of the key generation. 
+ """ + + if bits<160: + raise error, 'Key length <160 bits' + obj=DSAobj() + # Generate string S and prime q + if progress_func: + progress_func('p,q\n') + while (1): + S, obj.q = generateQ(randfunc) + n=(bits-1)/160 + C, N, V = 0, 2, {} + b=(obj.q >> 5) & 15 + powb=pow(bignum(2), b) + powL1=pow(bignum(2), bits-1) + while C<4096: + for k in range(0, n+1): + V[k]=bytes_to_long(SHA.new(S+str(N)+str(k)).digest()) + W=V[n] % powb + for k in range(n-1, -1, -1): + W=(W<<160L)+V[k] + X=W+powL1 + p=X-(X%(2*obj.q)-1) + if powL1<=p and isPrime(p): + break + C, N = C+1, N+n+1 + if C<4096: + break + if progress_func: + progress_func('4096 multiples failed\n') + + obj.p = p + power=(p-1)/obj.q + if progress_func: + progress_func('h,g\n') + while (1): + h=bytes_to_long(randfunc(bits)) % (p-1) + g=pow(h, power, p) + if 11: + break + obj.g=g + if progress_func: + progress_func('x,y\n') + while (1): + x=bytes_to_long(randfunc(20)) + if 0 < x < obj.q: + break + obj.x, obj.y = x, pow(g, x, p) + return obj + +def construct(tuple): + """construct(tuple:(long,long,long,long)|(long,long,long,long,long)):DSAobj + Construct a DSA object from a 4- or 5-tuple of numbers. 
+ """ + obj=DSAobj() + if len(tuple) not in [4,5]: + raise error, 'argument for construct() wrong length' + for i in range(len(tuple)): + field = obj.keydata[i] + setattr(obj, field, tuple[i]) + return obj + +class DSAobj(pubkey): + keydata=['y', 'g', 'p', 'q', 'x'] + + def _encrypt(self, s, Kstr): + raise error, 'DSA algorithm cannot encrypt data' + + def _decrypt(self, s): + raise error, 'DSA algorithm cannot decrypt data' + + def _sign(self, M, K): + if (K<2 or self.q<=K): + raise error, 'K is not between 2 and q' + r=pow(self.g, K, self.p) % self.q + s=(inverse(K, self.q)*(M+self.x*r)) % self.q + return (r,s) + + def _verify(self, M, sig): + r, s = sig + if r<=0 or r>=self.q or s<=0 or s>=self.q: + return 0 + w=inverse(s, self.q) + u1, u2 = (M*w) % self.q, (r*w) % self.q + v1 = pow(self.g, u1, self.p) + v2 = pow(self.y, u2, self.p) + v = ((v1*v2) % self.p) + v = v % self.q + if v==r: + return 1 + return 0 + + def size(self): + "Return the maximum number of bits that can be handled by this key." 
+ return number.size(self.p) - 1 + + def has_private(self): + """Return a Boolean denoting whether the object contains + private components.""" + if hasattr(self, 'x'): + return 1 + else: + return 0 + + def can_sign(self): + """Return a Boolean value recording whether this algorithm can generate signatures.""" + return 1 + + def can_encrypt(self): + """Return a Boolean value recording whether this algorithm can encrypt data.""" + return 0 + + def publickey(self): + """Return a new key object containing only the public information.""" + return construct((self.y, self.g, self.p, self.q)) + +object=DSAobj + +generate_py = generate +construct_py = construct + +class DSAobj_c(pubkey): + keydata = ['y', 'g', 'p', 'q', 'x'] + + def __init__(self, key): + self.key = key + + def __getattr__(self, attr): + if attr in self.keydata: + return getattr(self.key, attr) + else: + if self.__dict__.has_key(attr): + self.__dict__[attr] + else: + raise AttributeError, '%s instance has no attribute %s' % (self.__class__, attr) + + def __getstate__(self): + d = {} + for k in self.keydata: + if hasattr(self.key, k): + d[k]=getattr(self.key, k) + return d + + def __setstate__(self, state): + y,g,p,q = state['y'], state['g'], state['p'], state['q'] + if not state.has_key('x'): + self.key = _fastmath.dsa_construct(y,g,p,q) + else: + x = state['x'] + self.key = _fastmath.dsa_construct(y,g,p,q,x) + + def _sign(self, M, K): + return self.key._sign(M, K) + + def _verify(self, M, (r, s)): + return self.key._verify(M, r, s) + + def size(self): + return self.key.size() + + def has_private(self): + return self.key.has_private() + + def publickey(self): + return construct_c((self.key.y, self.key.g, self.key.p, self.key.q)) + + def can_sign(self): + return 1 + + def can_encrypt(self): + return 0 + +def generate_c(bits, randfunc, progress_func=None): + obj = generate_py(bits, randfunc, progress_func) + y,g,p,q,x = obj.y, obj.g, obj.p, obj.q, obj.x + return construct_c((y,g,p,q,x)) + +def 
construct_c(tuple): + key = apply(_fastmath.dsa_construct, tuple) + return DSAobj_c(key) + +if _fastmath: + #print "using C version of DSA" + generate = generate_c + construct = construct_c + error = _fastmath.error diff --git a/gdata.py-1.2.3/src/gdata/Crypto/PublicKey/ElGamal.py b/gdata.py-1.2.3/src/gdata/Crypto/PublicKey/ElGamal.py new file mode 100755 index 0000000..026881c --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/PublicKey/ElGamal.py @@ -0,0 +1,132 @@ +# +# ElGamal.py : ElGamal encryption/decryption and signatures +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: ElGamal.py,v 1.9 2003/04/04 19:44:26 akuchling Exp $" + +from Crypto.PublicKey.pubkey import * +from Crypto.Util import number + +class error (Exception): + pass + +# Generate an ElGamal key with N bits +def generate(bits, randfunc, progress_func=None): + """generate(bits:int, randfunc:callable, progress_func:callable) + + Generate an ElGamal key of length 'bits', using 'randfunc' to get + random data and 'progress_func', if present, to display + the progress of the key generation. 
+ """ + obj=ElGamalobj() + # Generate prime p + if progress_func: + progress_func('p\n') + obj.p=bignum(getPrime(bits, randfunc)) + # Generate random number g + if progress_func: + progress_func('g\n') + size=bits-1-(ord(randfunc(1)) & 63) # g will be from 1--64 bits smaller than p + if size<1: + size=bits-1 + while (1): + obj.g=bignum(getPrime(size, randfunc)) + if obj.g < obj.p: + break + size=(size+1) % bits + if size==0: + size=4 + # Generate random number x + if progress_func: + progress_func('x\n') + while (1): + size=bits-1-ord(randfunc(1)) # x will be from 1 to 256 bits smaller than p + if size>2: + break + while (1): + obj.x=bignum(getPrime(size, randfunc)) + if obj.x < obj.p: + break + size = (size+1) % bits + if size==0: + size=4 + if progress_func: + progress_func('y\n') + obj.y = pow(obj.g, obj.x, obj.p) + return obj + +def construct(tuple): + """construct(tuple:(long,long,long,long)|(long,long,long,long,long))) + : ElGamalobj + Construct an ElGamal key from a 3- or 4-tuple of numbers. 
+ """ + + obj=ElGamalobj() + if len(tuple) not in [3,4]: + raise error, 'argument for construct() wrong length' + for i in range(len(tuple)): + field = obj.keydata[i] + setattr(obj, field, tuple[i]) + return obj + +class ElGamalobj(pubkey): + keydata=['p', 'g', 'y', 'x'] + + def _encrypt(self, M, K): + a=pow(self.g, K, self.p) + b=( M*pow(self.y, K, self.p) ) % self.p + return ( a,b ) + + def _decrypt(self, M): + if (not hasattr(self, 'x')): + raise error, 'Private key not available in this object' + ax=pow(M[0], self.x, self.p) + plaintext=(M[1] * inverse(ax, self.p ) ) % self.p + return plaintext + + def _sign(self, M, K): + if (not hasattr(self, 'x')): + raise error, 'Private key not available in this object' + p1=self.p-1 + if (GCD(K, p1)!=1): + raise error, 'Bad K value: GCD(K,p-1)!=1' + a=pow(self.g, K, self.p) + t=(M-self.x*a) % p1 + while t<0: t=t+p1 + b=(t*inverse(K, p1)) % p1 + return (a, b) + + def _verify(self, M, sig): + v1=pow(self.y, sig[0], self.p) + v1=(v1*pow(sig[0], sig[1], self.p)) % self.p + v2=pow(self.g, M, self.p) + if v1==v2: + return 1 + return 0 + + def size(self): + "Return the maximum number of bits that can be handled by this key." 
+ return number.size(self.p) - 1 + + def has_private(self): + """Return a Boolean denoting whether the object contains + private components.""" + if hasattr(self, 'x'): + return 1 + else: + return 0 + + def publickey(self): + """Return a new key object containing only the public information.""" + return construct((self.p, self.g, self.y)) + + +object=ElGamalobj diff --git a/gdata.py-1.2.3/src/gdata/Crypto/PublicKey/RSA.py b/gdata.py-1.2.3/src/gdata/Crypto/PublicKey/RSA.py new file mode 100755 index 0000000..e0e877e --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/PublicKey/RSA.py @@ -0,0 +1,256 @@ +# +# RSA.py : RSA encryption/decryption +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: RSA.py,v 1.20 2004/05/06 12:52:54 akuchling Exp $" + +from Crypto.PublicKey import pubkey +from Crypto.Util import number + +try: + from Crypto.PublicKey import _fastmath +except ImportError: + _fastmath = None + +class error (Exception): + pass + +def generate(bits, randfunc, progress_func=None): + """generate(bits:int, randfunc:callable, progress_func:callable) + + Generate an RSA key of length 'bits', using 'randfunc' to get + random data and 'progress_func', if present, to display + the progress of the key generation. 
+ """ + obj=RSAobj() + + # Generate the prime factors of n + if progress_func: + progress_func('p,q\n') + p = q = 1L + while number.size(p*q) < bits: + p = pubkey.getPrime(bits/2, randfunc) + q = pubkey.getPrime(bits/2, randfunc) + + # p shall be smaller than q (for calc of u) + if p > q: + (p, q)=(q, p) + obj.p = p + obj.q = q + + if progress_func: + progress_func('u\n') + obj.u = pubkey.inverse(obj.p, obj.q) + obj.n = obj.p*obj.q + + obj.e = 65537L + if progress_func: + progress_func('d\n') + obj.d=pubkey.inverse(obj.e, (obj.p-1)*(obj.q-1)) + + assert bits <= 1+obj.size(), "Generated key is too small" + + return obj + +def construct(tuple): + """construct(tuple:(long,) : RSAobj + Construct an RSA object from a 2-, 3-, 5-, or 6-tuple of numbers. + """ + + obj=RSAobj() + if len(tuple) not in [2,3,5,6]: + raise error, 'argument for construct() wrong length' + for i in range(len(tuple)): + field = obj.keydata[i] + setattr(obj, field, tuple[i]) + if len(tuple) >= 5: + # Ensure p is smaller than q + if obj.p>obj.q: + (obj.p, obj.q)=(obj.q, obj.p) + + if len(tuple) == 5: + # u not supplied, so we're going to have to compute it. 
+ obj.u=pubkey.inverse(obj.p, obj.q) + + return obj + +class RSAobj(pubkey.pubkey): + keydata = ['n', 'e', 'd', 'p', 'q', 'u'] + def _encrypt(self, plaintext, K=''): + if self.n<=plaintext: + raise error, 'Plaintext too large' + return (pow(plaintext, self.e, self.n),) + + def _decrypt(self, ciphertext): + if (not hasattr(self, 'd')): + raise error, 'Private key not available in this object' + if self.n<=ciphertext[0]: + raise error, 'Ciphertext too large' + return pow(ciphertext[0], self.d, self.n) + + def _sign(self, M, K=''): + return (self._decrypt((M,)),) + + def _verify(self, M, sig): + m2=self._encrypt(sig[0]) + if m2[0]==M: + return 1 + else: return 0 + + def _blind(self, M, B): + tmp = pow(B, self.e, self.n) + return (M * tmp) % self.n + + def _unblind(self, M, B): + tmp = pubkey.inverse(B, self.n) + return (M * tmp) % self.n + + def can_blind (self): + """can_blind() : bool + Return a Boolean value recording whether this algorithm can + blind data. (This does not imply that this + particular key object has the private information required to + to blind a message.) + """ + return 1 + + def size(self): + """size() : int + Return the maximum number of bits that can be handled by this key. + """ + return number.size(self.n) - 1 + + def has_private(self): + """has_private() : bool + Return a Boolean denoting whether the object contains + private components. + """ + if hasattr(self, 'd'): + return 1 + else: return 0 + + def publickey(self): + """publickey(): RSAobj + Return a new key object containing only the public key information. 
+ """ + return construct((self.n, self.e)) + +class RSAobj_c(pubkey.pubkey): + keydata = ['n', 'e', 'd', 'p', 'q', 'u'] + + def __init__(self, key): + self.key = key + + def __getattr__(self, attr): + if attr in self.keydata: + return getattr(self.key, attr) + else: + if self.__dict__.has_key(attr): + self.__dict__[attr] + else: + raise AttributeError, '%s instance has no attribute %s' % (self.__class__, attr) + + def __getstate__(self): + d = {} + for k in self.keydata: + if hasattr(self.key, k): + d[k]=getattr(self.key, k) + return d + + def __setstate__(self, state): + n,e = state['n'], state['e'] + if not state.has_key('d'): + self.key = _fastmath.rsa_construct(n,e) + else: + d = state['d'] + if not state.has_key('q'): + self.key = _fastmath.rsa_construct(n,e,d) + else: + p, q, u = state['p'], state['q'], state['u'] + self.key = _fastmath.rsa_construct(n,e,d,p,q,u) + + def _encrypt(self, plain, K): + return (self.key._encrypt(plain),) + + def _decrypt(self, cipher): + return self.key._decrypt(cipher[0]) + + def _sign(self, M, K): + return (self.key._sign(M),) + + def _verify(self, M, sig): + return self.key._verify(M, sig[0]) + + def _blind(self, M, B): + return self.key._blind(M, B) + + def _unblind(self, M, B): + return self.key._unblind(M, B) + + def can_blind (self): + return 1 + + def size(self): + return self.key.size() + + def has_private(self): + return self.key.has_private() + + def publickey(self): + return construct_c((self.key.n, self.key.e)) + +def generate_c(bits, randfunc, progress_func = None): + # Generate the prime factors of n + if progress_func: + progress_func('p,q\n') + + p = q = 1L + while number.size(p*q) < bits: + p = pubkey.getPrime(bits/2, randfunc) + q = pubkey.getPrime(bits/2, randfunc) + + # p shall be smaller than q (for calc of u) + if p > q: + (p, q)=(q, p) + if progress_func: + progress_func('u\n') + u=pubkey.inverse(p, q) + n=p*q + + e = 65537L + if progress_func: + progress_func('d\n') + d=pubkey.inverse(e, (p-1)*(q-1)) + key 
= _fastmath.rsa_construct(n,e,d,p,q,u) + obj = RSAobj_c(key) + +## print p +## print q +## print number.size(p), number.size(q), number.size(q*p), +## print obj.size(), bits + assert bits <= 1+obj.size(), "Generated key is too small" + return obj + + +def construct_c(tuple): + key = apply(_fastmath.rsa_construct, tuple) + return RSAobj_c(key) + +object = RSAobj + +generate_py = generate +construct_py = construct + +if _fastmath: + #print "using C version of RSA" + generate = generate_c + construct = construct_c + error = _fastmath.error diff --git a/gdata.py-1.2.3/src/gdata/Crypto/PublicKey/__init__.py b/gdata.py-1.2.3/src/gdata/Crypto/PublicKey/__init__.py new file mode 100755 index 0000000..ad1c80c --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/PublicKey/__init__.py @@ -0,0 +1,17 @@ +"""Public-key encryption and signature algorithms. + +Public-key encryption uses two different keys, one for encryption and +one for decryption. The encryption key can be made public, and the +decryption key is kept private. Many public-key algorithms can also +be used to sign messages, and some can *only* be used for signatures. + +Crypto.PublicKey.DSA Digital Signature Algorithm. 
(Signature only) +Crypto.PublicKey.ElGamal (Signing and encryption) +Crypto.PublicKey.RSA (Signing, encryption, and blinding) +Crypto.PublicKey.qNEW (Signature only) + +""" + +__all__ = ['RSA', 'DSA', 'ElGamal', 'qNEW'] +__revision__ = "$Id: __init__.py,v 1.4 2003/04/03 20:27:13 akuchling Exp $" + diff --git a/gdata.py-1.2.3/src/gdata/Crypto/PublicKey/pubkey.py b/gdata.py-1.2.3/src/gdata/Crypto/PublicKey/pubkey.py new file mode 100755 index 0000000..5c75c3e --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/PublicKey/pubkey.py @@ -0,0 +1,172 @@ +# +# pubkey.py : Internal functions for public key operations +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: pubkey.py,v 1.11 2003/04/03 20:36:14 akuchling Exp $" + +import types, warnings +from Crypto.Util.number import * + +# Basic public key class +class pubkey: + def __init__(self): + pass + + def __getstate__(self): + """To keep key objects platform-independent, the key data is + converted to standard Python long integers before being + written out. It will then be reconverted as necessary on + restoration.""" + d=self.__dict__ + for key in self.keydata: + if d.has_key(key): d[key]=long(d[key]) + return d + + def __setstate__(self, d): + """On unpickling a key object, the key data is converted to the big +number representation being used, whether that is Python long +integers, MPZ objects, or whatever.""" + for key in self.keydata: + if d.has_key(key): self.__dict__[key]=bignum(d[key]) + + def encrypt(self, plaintext, K): + """encrypt(plaintext:string|long, K:string|long) : tuple + Encrypt the string or integer plaintext. 
K is a random + parameter required by some algorithms. + """ + wasString=0 + if isinstance(plaintext, types.StringType): + plaintext=bytes_to_long(plaintext) ; wasString=1 + if isinstance(K, types.StringType): + K=bytes_to_long(K) + ciphertext=self._encrypt(plaintext, K) + if wasString: return tuple(map(long_to_bytes, ciphertext)) + else: return ciphertext + + def decrypt(self, ciphertext): + """decrypt(ciphertext:tuple|string|long): string + Decrypt 'ciphertext' using this key. + """ + wasString=0 + if not isinstance(ciphertext, types.TupleType): + ciphertext=(ciphertext,) + if isinstance(ciphertext[0], types.StringType): + ciphertext=tuple(map(bytes_to_long, ciphertext)) ; wasString=1 + plaintext=self._decrypt(ciphertext) + if wasString: return long_to_bytes(plaintext) + else: return plaintext + + def sign(self, M, K): + """sign(M : string|long, K:string|long) : tuple + Return a tuple containing the signature for the message M. + K is a random parameter required by some algorithms. + """ + if (not self.has_private()): + raise error, 'Private key not available in this object' + if isinstance(M, types.StringType): M=bytes_to_long(M) + if isinstance(K, types.StringType): K=bytes_to_long(K) + return self._sign(M, K) + + def verify (self, M, signature): + """verify(M:string|long, signature:tuple) : bool + Verify that the signature is valid for the message M; + returns true if the signature checks out. + """ + if isinstance(M, types.StringType): M=bytes_to_long(M) + return self._verify(M, signature) + + # alias to compensate for the old validate() name + def validate (self, M, signature): + warnings.warn("validate() method name is obsolete; use verify()", + DeprecationWarning) + + def blind(self, M, B): + """blind(M : string|long, B : string|long) : string|long + Blind message M using blinding factor B. 
+ """ + wasString=0 + if isinstance(M, types.StringType): + M=bytes_to_long(M) ; wasString=1 + if isinstance(B, types.StringType): B=bytes_to_long(B) + blindedmessage=self._blind(M, B) + if wasString: return long_to_bytes(blindedmessage) + else: return blindedmessage + + def unblind(self, M, B): + """unblind(M : string|long, B : string|long) : string|long + Unblind message M using blinding factor B. + """ + wasString=0 + if isinstance(M, types.StringType): + M=bytes_to_long(M) ; wasString=1 + if isinstance(B, types.StringType): B=bytes_to_long(B) + unblindedmessage=self._unblind(M, B) + if wasString: return long_to_bytes(unblindedmessage) + else: return unblindedmessage + + + # The following methods will usually be left alone, except for + # signature-only algorithms. They both return Boolean values + # recording whether this key's algorithm can sign and encrypt. + def can_sign (self): + """can_sign() : bool + Return a Boolean value recording whether this algorithm can + generate signatures. (This does not imply that this + particular key object has the private information required to + to generate a signature.) + """ + return 1 + + def can_encrypt (self): + """can_encrypt() : bool + Return a Boolean value recording whether this algorithm can + encrypt data. (This does not imply that this + particular key object has the private information required to + to decrypt a message.) + """ + return 1 + + def can_blind (self): + """can_blind() : bool + Return a Boolean value recording whether this algorithm can + blind data. (This does not imply that this + particular key object has the private information required to + to blind a message.) + """ + return 0 + + # The following methods will certainly be overridden by + # subclasses. + + def size (self): + """size() : int + Return the maximum number of bits that can be handled by this key. 
+ """ + return 0 + + def has_private (self): + """has_private() : bool + Return a Boolean denoting whether the object contains + private components. + """ + return 0 + + def publickey (self): + """publickey(): object + Return a new key object containing only the public information. + """ + return self + + def __eq__ (self, other): + """__eq__(other): 0, 1 + Compare us to other for equality. + """ + return self.__getstate__() == other.__getstate__() diff --git a/gdata.py-1.2.3/src/gdata/Crypto/PublicKey/qNEW.py b/gdata.py-1.2.3/src/gdata/Crypto/PublicKey/qNEW.py new file mode 100755 index 0000000..65f8ae3 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/PublicKey/qNEW.py @@ -0,0 +1,170 @@ +# +# qNEW.py : The q-NEW signature algorithm. +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: qNEW.py,v 1.8 2003/04/04 15:13:35 akuchling Exp $" + +from Crypto.PublicKey import pubkey +from Crypto.Util.number import * +from Crypto.Hash import SHA + +class error (Exception): + pass + +HASHBITS = 160 # Size of SHA digests + +def generate(bits, randfunc, progress_func=None): + """generate(bits:int, randfunc:callable, progress_func:callable) + + Generate a qNEW key of length 'bits', using 'randfunc' to get + random data and 'progress_func', if present, to display + the progress of the key generation. + """ + obj=qNEWobj() + + # Generate prime numbers p and q. q is a 160-bit prime + # number. p is another prime number (the modulus) whose bit + # size is chosen by the caller, and is generated so that p-1 + # is a multiple of q. 
+ # + # Note that only a single seed is used to + # generate p and q; if someone generates a key for you, you can + # use the seed to duplicate the key generation. This can + # protect you from someone generating values of p,q that have + # some special form that's easy to break. + if progress_func: + progress_func('p,q\n') + while (1): + obj.q = getPrime(160, randfunc) + # assert pow(2, 159L)1. g is kept; h can be discarded. + if progress_func: + progress_func('h,g\n') + while (1): + h=bytes_to_long(randfunc(bits)) % (p-1) + g=pow(h, power, p) + if 11: + break + obj.g=g + + # x is the private key information, and is + # just a random number between 0 and q. + # y=g**x mod p, and is part of the public information. + if progress_func: + progress_func('x,y\n') + while (1): + x=bytes_to_long(randfunc(20)) + if 0 < x < obj.q: + break + obj.x, obj.y=x, pow(g, x, p) + + return obj + +# Construct a qNEW object +def construct(tuple): + """construct(tuple:(long,long,long,long)|(long,long,long,long,long) + Construct a qNEW object from a 4- or 5-tuple of numbers. 
+ """ + obj=qNEWobj() + if len(tuple) not in [4,5]: + raise error, 'argument for construct() wrong length' + for i in range(len(tuple)): + field = obj.keydata[i] + setattr(obj, field, tuple[i]) + return obj + +class qNEWobj(pubkey.pubkey): + keydata=['p', 'q', 'g', 'y', 'x'] + + def _sign(self, M, K=''): + if (self.q<=K): + raise error, 'K is greater than q' + if M<0: + raise error, 'Illegal value of M (<0)' + if M>=pow(2,161L): + raise error, 'Illegal value of M (too large)' + r=pow(self.g, K, self.p) % self.q + s=(K- (r*M*self.x % self.q)) % self.q + return (r,s) + def _verify(self, M, sig): + r, s = sig + if r<=0 or r>=self.q or s<=0 or s>=self.q: + return 0 + if M<0: + raise error, 'Illegal value of M (<0)' + if M<=0 or M>=pow(2,161L): + return 0 + v1 = pow(self.g, s, self.p) + v2 = pow(self.y, M*r, self.p) + v = ((v1*v2) % self.p) + v = v % self.q + if v==r: + return 1 + return 0 + + def size(self): + "Return the maximum number of bits that can be handled by this key." + return 160 + + def has_private(self): + """Return a Boolean denoting whether the object contains + private components.""" + return hasattr(self, 'x') + + def can_sign(self): + """Return a Boolean value recording whether this algorithm can generate signatures.""" + return 1 + + def can_encrypt(self): + """Return a Boolean value recording whether this algorithm can encrypt data.""" + return 0 + + def publickey(self): + """Return a new key object containing only the public information.""" + return construct((self.p, self.q, self.g, self.y)) + +object = qNEWobj + diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Util/RFC1751.py b/gdata.py-1.2.3/src/gdata/Crypto/Util/RFC1751.py new file mode 100755 index 0000000..0a47952 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/Util/RFC1751.py @@ -0,0 +1,342 @@ +#!/usr/local/bin/python +# rfc1751.py : Converts between 128-bit strings and a human-readable +# sequence of words, as defined in RFC1751: "A Convention for +# Human-Readable 128-bit Keys", by Daniel 
L. McDonald. + +__revision__ = "$Id: RFC1751.py,v 1.6 2003/04/04 15:15:10 akuchling Exp $" + + +import string, binascii + +binary={0:'0000', 1:'0001', 2:'0010', 3:'0011', 4:'0100', 5:'0101', + 6:'0110', 7:'0111', 8:'1000', 9:'1001', 10:'1010', 11:'1011', + 12:'1100', 13:'1101', 14:'1110', 15:'1111'} + +def _key2bin(s): + "Convert a key into a string of binary digits" + kl=map(lambda x: ord(x), s) + kl=map(lambda x: binary[x/16]+binary[x&15], kl) + return ''.join(kl) + +def _extract(key, start, length): + """Extract a bitstring from a string of binary digits, and return its + numeric value.""" + k=key[start:start+length] + return reduce(lambda x,y: x*2+ord(y)-48, k, 0) + +def key_to_english (key): + """key_to_english(key:string) : string + Transform an arbitrary key into a string containing English words. + The key length must be a multiple of 8. + """ + english='' + for index in range(0, len(key), 8): # Loop over 8-byte subkeys + subkey=key[index:index+8] + # Compute the parity of the key + skbin=_key2bin(subkey) ; p=0 + for i in range(0, 64, 2): p=p+_extract(skbin, i, 2) + # Append parity bits to the subkey + skbin=_key2bin(subkey+chr((p<<6) & 255)) + for i in range(0, 64, 11): + english=english+wordlist[_extract(skbin, i, 11)]+' ' + + return english[:-1] # Remove the trailing space + +def english_to_key (str): + """english_to_key(string):string + Transform a string into a corresponding key. + The string must contain words separated by whitespace; the number + of words must be a multiple of 6. 
+ """ + + L=string.split(string.upper(str)) ; key='' + for index in range(0, len(L), 6): + sublist=L[index:index+6] ; char=9*[0] ; bits=0 + for i in sublist: + index = wordlist.index(i) + shift = (8-(bits+11)%8) %8 + y = index << shift + cl, cc, cr = (y>>16), (y>>8)&0xff, y & 0xff + if (shift>5): + char[bits/8] = char[bits/8] | cl + char[bits/8+1] = char[bits/8+1] | cc + char[bits/8+2] = char[bits/8+2] | cr + elif shift>-3: + char[bits/8] = char[bits/8] | cc + char[bits/8+1] = char[bits/8+1] | cr + else: char[bits/8] = char[bits/8] | cr + bits=bits+11 + subkey=reduce(lambda x,y:x+chr(y), char, '') + + # Check the parity of the resulting key + skbin=_key2bin(subkey) + p=0 + for i in range(0, 64, 2): p=p+_extract(skbin, i, 2) + if (p&3) != _extract(skbin, 64, 2): + raise ValueError, "Parity error in resulting key" + key=key+subkey[0:8] + return key + +wordlist=[ "A", "ABE", "ACE", "ACT", "AD", "ADA", "ADD", + "AGO", "AID", "AIM", "AIR", "ALL", "ALP", "AM", "AMY", "AN", "ANA", + "AND", "ANN", "ANT", "ANY", "APE", "APS", "APT", "ARC", "ARE", "ARK", + "ARM", "ART", "AS", "ASH", "ASK", "AT", "ATE", "AUG", "AUK", "AVE", + "AWE", "AWK", "AWL", "AWN", "AX", "AYE", "BAD", "BAG", "BAH", "BAM", + "BAN", "BAR", "BAT", "BAY", "BE", "BED", "BEE", "BEG", "BEN", "BET", + "BEY", "BIB", "BID", "BIG", "BIN", "BIT", "BOB", "BOG", "BON", "BOO", + "BOP", "BOW", "BOY", "BUB", "BUD", "BUG", "BUM", "BUN", "BUS", "BUT", + "BUY", "BY", "BYE", "CAB", "CAL", "CAM", "CAN", "CAP", "CAR", "CAT", + "CAW", "COD", "COG", "COL", "CON", "COO", "COP", "COT", "COW", "COY", + "CRY", "CUB", "CUE", "CUP", "CUR", "CUT", "DAB", "DAD", "DAM", "DAN", + "DAR", "DAY", "DEE", "DEL", "DEN", "DES", "DEW", "DID", "DIE", "DIG", + "DIN", "DIP", "DO", "DOE", "DOG", "DON", "DOT", "DOW", "DRY", "DUB", + "DUD", "DUE", "DUG", "DUN", "EAR", "EAT", "ED", "EEL", "EGG", "EGO", + "ELI", "ELK", "ELM", "ELY", "EM", "END", "EST", "ETC", "EVA", "EVE", + "EWE", "EYE", "FAD", "FAN", "FAR", "FAT", "FAY", "FED", "FEE", "FEW", + "FIB", 
"FIG", "FIN", "FIR", "FIT", "FLO", "FLY", "FOE", "FOG", "FOR", + "FRY", "FUM", "FUN", "FUR", "GAB", "GAD", "GAG", "GAL", "GAM", "GAP", + "GAS", "GAY", "GEE", "GEL", "GEM", "GET", "GIG", "GIL", "GIN", "GO", + "GOT", "GUM", "GUN", "GUS", "GUT", "GUY", "GYM", "GYP", "HA", "HAD", + "HAL", "HAM", "HAN", "HAP", "HAS", "HAT", "HAW", "HAY", "HE", "HEM", + "HEN", "HER", "HEW", "HEY", "HI", "HID", "HIM", "HIP", "HIS", "HIT", + "HO", "HOB", "HOC", "HOE", "HOG", "HOP", "HOT", "HOW", "HUB", "HUE", + "HUG", "HUH", "HUM", "HUT", "I", "ICY", "IDA", "IF", "IKE", "ILL", + "INK", "INN", "IO", "ION", "IQ", "IRA", "IRE", "IRK", "IS", "IT", + "ITS", "IVY", "JAB", "JAG", "JAM", "JAN", "JAR", "JAW", "JAY", "JET", + "JIG", "JIM", "JO", "JOB", "JOE", "JOG", "JOT", "JOY", "JUG", "JUT", + "KAY", "KEG", "KEN", "KEY", "KID", "KIM", "KIN", "KIT", "LA", "LAB", + "LAC", "LAD", "LAG", "LAM", "LAP", "LAW", "LAY", "LEA", "LED", "LEE", + "LEG", "LEN", "LEO", "LET", "LEW", "LID", "LIE", "LIN", "LIP", "LIT", + "LO", "LOB", "LOG", "LOP", "LOS", "LOT", "LOU", "LOW", "LOY", "LUG", + "LYE", "MA", "MAC", "MAD", "MAE", "MAN", "MAO", "MAP", "MAT", "MAW", + "MAY", "ME", "MEG", "MEL", "MEN", "MET", "MEW", "MID", "MIN", "MIT", + "MOB", "MOD", "MOE", "MOO", "MOP", "MOS", "MOT", "MOW", "MUD", "MUG", + "MUM", "MY", "NAB", "NAG", "NAN", "NAP", "NAT", "NAY", "NE", "NED", + "NEE", "NET", "NEW", "NIB", "NIL", "NIP", "NIT", "NO", "NOB", "NOD", + "NON", "NOR", "NOT", "NOV", "NOW", "NU", "NUN", "NUT", "O", "OAF", + "OAK", "OAR", "OAT", "ODD", "ODE", "OF", "OFF", "OFT", "OH", "OIL", + "OK", "OLD", "ON", "ONE", "OR", "ORB", "ORE", "ORR", "OS", "OTT", + "OUR", "OUT", "OVA", "OW", "OWE", "OWL", "OWN", "OX", "PA", "PAD", + "PAL", "PAM", "PAN", "PAP", "PAR", "PAT", "PAW", "PAY", "PEA", "PEG", + "PEN", "PEP", "PER", "PET", "PEW", "PHI", "PI", "PIE", "PIN", "PIT", + "PLY", "PO", "POD", "POE", "POP", "POT", "POW", "PRO", "PRY", "PUB", + "PUG", "PUN", "PUP", "PUT", "QUO", "RAG", "RAM", "RAN", "RAP", "RAT", + "RAW", "RAY", "REB", 
"RED", "REP", "RET", "RIB", "RID", "RIG", "RIM", + "RIO", "RIP", "ROB", "ROD", "ROE", "RON", "ROT", "ROW", "ROY", "RUB", + "RUE", "RUG", "RUM", "RUN", "RYE", "SAC", "SAD", "SAG", "SAL", "SAM", + "SAN", "SAP", "SAT", "SAW", "SAY", "SEA", "SEC", "SEE", "SEN", "SET", + "SEW", "SHE", "SHY", "SIN", "SIP", "SIR", "SIS", "SIT", "SKI", "SKY", + "SLY", "SO", "SOB", "SOD", "SON", "SOP", "SOW", "SOY", "SPA", "SPY", + "SUB", "SUD", "SUE", "SUM", "SUN", "SUP", "TAB", "TAD", "TAG", "TAN", + "TAP", "TAR", "TEA", "TED", "TEE", "TEN", "THE", "THY", "TIC", "TIE", + "TIM", "TIN", "TIP", "TO", "TOE", "TOG", "TOM", "TON", "TOO", "TOP", + "TOW", "TOY", "TRY", "TUB", "TUG", "TUM", "TUN", "TWO", "UN", "UP", + "US", "USE", "VAN", "VAT", "VET", "VIE", "WAD", "WAG", "WAR", "WAS", + "WAY", "WE", "WEB", "WED", "WEE", "WET", "WHO", "WHY", "WIN", "WIT", + "WOK", "WON", "WOO", "WOW", "WRY", "WU", "YAM", "YAP", "YAW", "YE", + "YEA", "YES", "YET", "YOU", "ABED", "ABEL", "ABET", "ABLE", "ABUT", + "ACHE", "ACID", "ACME", "ACRE", "ACTA", "ACTS", "ADAM", "ADDS", + "ADEN", "AFAR", "AFRO", "AGEE", "AHEM", "AHOY", "AIDA", "AIDE", + "AIDS", "AIRY", "AJAR", "AKIN", "ALAN", "ALEC", "ALGA", "ALIA", + "ALLY", "ALMA", "ALOE", "ALSO", "ALTO", "ALUM", "ALVA", "AMEN", + "AMES", "AMID", "AMMO", "AMOK", "AMOS", "AMRA", "ANDY", "ANEW", + "ANNA", "ANNE", "ANTE", "ANTI", "AQUA", "ARAB", "ARCH", "AREA", + "ARGO", "ARID", "ARMY", "ARTS", "ARTY", "ASIA", "ASKS", "ATOM", + "AUNT", "AURA", "AUTO", "AVER", "AVID", "AVIS", "AVON", "AVOW", + "AWAY", "AWRY", "BABE", "BABY", "BACH", "BACK", "BADE", "BAIL", + "BAIT", "BAKE", "BALD", "BALE", "BALI", "BALK", "BALL", "BALM", + "BAND", "BANE", "BANG", "BANK", "BARB", "BARD", "BARE", "BARK", + "BARN", "BARR", "BASE", "BASH", "BASK", "BASS", "BATE", "BATH", + "BAWD", "BAWL", "BEAD", "BEAK", "BEAM", "BEAN", "BEAR", "BEAT", + "BEAU", "BECK", "BEEF", "BEEN", "BEER", + "BEET", "BELA", "BELL", "BELT", "BEND", "BENT", "BERG", "BERN", + "BERT", "BESS", "BEST", "BETA", "BETH", "BHOY", "BIAS", 
"BIDE", + "BIEN", "BILE", "BILK", "BILL", "BIND", "BING", "BIRD", "BITE", + "BITS", "BLAB", "BLAT", "BLED", "BLEW", "BLOB", "BLOC", "BLOT", + "BLOW", "BLUE", "BLUM", "BLUR", "BOAR", "BOAT", "BOCA", "BOCK", + "BODE", "BODY", "BOGY", "BOHR", "BOIL", "BOLD", "BOLO", "BOLT", + "BOMB", "BONA", "BOND", "BONE", "BONG", "BONN", "BONY", "BOOK", + "BOOM", "BOON", "BOOT", "BORE", "BORG", "BORN", "BOSE", "BOSS", + "BOTH", "BOUT", "BOWL", "BOYD", "BRAD", "BRAE", "BRAG", "BRAN", + "BRAY", "BRED", "BREW", "BRIG", "BRIM", "BROW", "BUCK", "BUDD", + "BUFF", "BULB", "BULK", "BULL", "BUNK", "BUNT", "BUOY", "BURG", + "BURL", "BURN", "BURR", "BURT", "BURY", "BUSH", "BUSS", "BUST", + "BUSY", "BYTE", "CADY", "CAFE", "CAGE", "CAIN", "CAKE", "CALF", + "CALL", "CALM", "CAME", "CANE", "CANT", "CARD", "CARE", "CARL", + "CARR", "CART", "CASE", "CASH", "CASK", "CAST", "CAVE", "CEIL", + "CELL", "CENT", "CERN", "CHAD", "CHAR", "CHAT", "CHAW", "CHEF", + "CHEN", "CHEW", "CHIC", "CHIN", "CHOU", "CHOW", "CHUB", "CHUG", + "CHUM", "CITE", "CITY", "CLAD", "CLAM", "CLAN", "CLAW", "CLAY", + "CLOD", "CLOG", "CLOT", "CLUB", "CLUE", "COAL", "COAT", "COCA", + "COCK", "COCO", "CODA", "CODE", "CODY", "COED", "COIL", "COIN", + "COKE", "COLA", "COLD", "COLT", "COMA", "COMB", "COME", "COOK", + "COOL", "COON", "COOT", "CORD", "CORE", "CORK", "CORN", "COST", + "COVE", "COWL", "CRAB", "CRAG", "CRAM", "CRAY", "CREW", "CRIB", + "CROW", "CRUD", "CUBA", "CUBE", "CUFF", "CULL", "CULT", "CUNY", + "CURB", "CURD", "CURE", "CURL", "CURT", "CUTS", "DADE", "DALE", + "DAME", "DANA", "DANE", "DANG", "DANK", "DARE", "DARK", "DARN", + "DART", "DASH", "DATA", "DATE", "DAVE", "DAVY", "DAWN", "DAYS", + "DEAD", "DEAF", "DEAL", "DEAN", "DEAR", "DEBT", "DECK", "DEED", + "DEEM", "DEER", "DEFT", "DEFY", "DELL", "DENT", "DENY", "DESK", + "DIAL", "DICE", "DIED", "DIET", "DIME", "DINE", "DING", "DINT", + "DIRE", "DIRT", "DISC", "DISH", "DISK", "DIVE", "DOCK", "DOES", + "DOLE", "DOLL", "DOLT", "DOME", "DONE", "DOOM", "DOOR", "DORA", + "DOSE", 
"DOTE", "DOUG", "DOUR", "DOVE", "DOWN", "DRAB", "DRAG", + "DRAM", "DRAW", "DREW", "DRUB", "DRUG", "DRUM", "DUAL", "DUCK", + "DUCT", "DUEL", "DUET", "DUKE", "DULL", "DUMB", "DUNE", "DUNK", + "DUSK", "DUST", "DUTY", "EACH", "EARL", "EARN", "EASE", "EAST", + "EASY", "EBEN", "ECHO", "EDDY", "EDEN", "EDGE", "EDGY", "EDIT", + "EDNA", "EGAN", "ELAN", "ELBA", "ELLA", "ELSE", "EMIL", "EMIT", + "EMMA", "ENDS", "ERIC", "EROS", "EVEN", "EVER", "EVIL", "EYED", + "FACE", "FACT", "FADE", "FAIL", "FAIN", "FAIR", "FAKE", "FALL", + "FAME", "FANG", "FARM", "FAST", "FATE", "FAWN", "FEAR", "FEAT", + "FEED", "FEEL", "FEET", "FELL", "FELT", "FEND", "FERN", "FEST", + "FEUD", "FIEF", "FIGS", "FILE", "FILL", "FILM", "FIND", "FINE", + "FINK", "FIRE", "FIRM", "FISH", "FISK", "FIST", "FITS", "FIVE", + "FLAG", "FLAK", "FLAM", "FLAT", "FLAW", "FLEA", "FLED", "FLEW", + "FLIT", "FLOC", "FLOG", "FLOW", "FLUB", "FLUE", "FOAL", "FOAM", + "FOGY", "FOIL", "FOLD", "FOLK", "FOND", "FONT", "FOOD", "FOOL", + "FOOT", "FORD", "FORE", "FORK", "FORM", "FORT", "FOSS", "FOUL", + "FOUR", "FOWL", "FRAU", "FRAY", "FRED", "FREE", "FRET", "FREY", + "FROG", "FROM", "FUEL", "FULL", "FUME", "FUND", "FUNK", "FURY", + "FUSE", "FUSS", "GAFF", "GAGE", "GAIL", "GAIN", "GAIT", "GALA", + "GALE", "GALL", "GALT", "GAME", "GANG", "GARB", "GARY", "GASH", + "GATE", "GAUL", "GAUR", "GAVE", "GAWK", "GEAR", "GELD", "GENE", + "GENT", "GERM", "GETS", "GIBE", "GIFT", "GILD", "GILL", "GILT", + "GINA", "GIRD", "GIRL", "GIST", "GIVE", "GLAD", "GLEE", "GLEN", + "GLIB", "GLOB", "GLOM", "GLOW", "GLUE", "GLUM", "GLUT", "GOAD", + "GOAL", "GOAT", "GOER", "GOES", "GOLD", "GOLF", "GONE", "GONG", + "GOOD", "GOOF", "GORE", "GORY", "GOSH", "GOUT", "GOWN", "GRAB", + "GRAD", "GRAY", "GREG", "GREW", "GREY", "GRID", "GRIM", "GRIN", + "GRIT", "GROW", "GRUB", "GULF", "GULL", "GUNK", "GURU", "GUSH", + "GUST", "GWEN", "GWYN", "HAAG", "HAAS", "HACK", "HAIL", "HAIR", + "HALE", "HALF", "HALL", "HALO", "HALT", "HAND", "HANG", "HANK", + "HANS", "HARD", "HARK", 
"HARM", "HART", "HASH", "HAST", "HATE", + "HATH", "HAUL", "HAVE", "HAWK", "HAYS", "HEAD", "HEAL", "HEAR", + "HEAT", "HEBE", "HECK", "HEED", "HEEL", "HEFT", "HELD", "HELL", + "HELM", "HERB", "HERD", "HERE", "HERO", "HERS", "HESS", "HEWN", + "HICK", "HIDE", "HIGH", "HIKE", "HILL", "HILT", "HIND", "HINT", + "HIRE", "HISS", "HIVE", "HOBO", "HOCK", "HOFF", "HOLD", "HOLE", + "HOLM", "HOLT", "HOME", "HONE", "HONK", "HOOD", "HOOF", "HOOK", + "HOOT", "HORN", "HOSE", "HOST", "HOUR", "HOVE", "HOWE", "HOWL", + "HOYT", "HUCK", "HUED", "HUFF", "HUGE", "HUGH", "HUGO", "HULK", + "HULL", "HUNK", "HUNT", "HURD", "HURL", "HURT", "HUSH", "HYDE", + "HYMN", "IBIS", "ICON", "IDEA", "IDLE", "IFFY", "INCA", "INCH", + "INTO", "IONS", "IOTA", "IOWA", "IRIS", "IRMA", "IRON", "ISLE", + "ITCH", "ITEM", "IVAN", "JACK", "JADE", "JAIL", "JAKE", "JANE", + "JAVA", "JEAN", "JEFF", "JERK", "JESS", "JEST", "JIBE", "JILL", + "JILT", "JIVE", "JOAN", "JOBS", "JOCK", "JOEL", "JOEY", "JOHN", + "JOIN", "JOKE", "JOLT", "JOVE", "JUDD", "JUDE", "JUDO", "JUDY", + "JUJU", "JUKE", "JULY", "JUNE", "JUNK", "JUNO", "JURY", "JUST", + "JUTE", "KAHN", "KALE", "KANE", "KANT", "KARL", "KATE", "KEEL", + "KEEN", "KENO", "KENT", "KERN", "KERR", "KEYS", "KICK", "KILL", + "KIND", "KING", "KIRK", "KISS", "KITE", "KLAN", "KNEE", "KNEW", + "KNIT", "KNOB", "KNOT", "KNOW", "KOCH", "KONG", "KUDO", "KURD", + "KURT", "KYLE", "LACE", "LACK", "LACY", "LADY", "LAID", "LAIN", + "LAIR", "LAKE", "LAMB", "LAME", "LAND", "LANE", "LANG", "LARD", + "LARK", "LASS", "LAST", "LATE", "LAUD", "LAVA", "LAWN", "LAWS", + "LAYS", "LEAD", "LEAF", "LEAK", "LEAN", "LEAR", "LEEK", "LEER", + "LEFT", "LEND", "LENS", "LENT", "LEON", "LESK", "LESS", "LEST", + "LETS", "LIAR", "LICE", "LICK", "LIED", "LIEN", "LIES", "LIEU", + "LIFE", "LIFT", "LIKE", "LILA", "LILT", "LILY", "LIMA", "LIMB", + "LIME", "LIND", "LINE", "LINK", "LINT", "LION", "LISA", "LIST", + "LIVE", "LOAD", "LOAF", "LOAM", "LOAN", "LOCK", "LOFT", "LOGE", + "LOIS", "LOLA", "LONE", "LONG", "LOOK", 
"LOON", "LOOT", "LORD", + "LORE", "LOSE", "LOSS", "LOST", "LOUD", "LOVE", "LOWE", "LUCK", + "LUCY", "LUGE", "LUKE", "LULU", "LUND", "LUNG", "LURA", "LURE", + "LURK", "LUSH", "LUST", "LYLE", "LYNN", "LYON", "LYRA", "MACE", + "MADE", "MAGI", "MAID", "MAIL", "MAIN", "MAKE", "MALE", "MALI", + "MALL", "MALT", "MANA", "MANN", "MANY", "MARC", "MARE", "MARK", + "MARS", "MART", "MARY", "MASH", "MASK", "MASS", "MAST", "MATE", + "MATH", "MAUL", "MAYO", "MEAD", "MEAL", "MEAN", "MEAT", "MEEK", + "MEET", "MELD", "MELT", "MEMO", "MEND", "MENU", "MERT", "MESH", + "MESS", "MICE", "MIKE", "MILD", "MILE", "MILK", "MILL", "MILT", + "MIMI", "MIND", "MINE", "MINI", "MINK", "MINT", "MIRE", "MISS", + "MIST", "MITE", "MITT", "MOAN", "MOAT", "MOCK", "MODE", "MOLD", + "MOLE", "MOLL", "MOLT", "MONA", "MONK", "MONT", "MOOD", "MOON", + "MOOR", "MOOT", "MORE", "MORN", "MORT", "MOSS", "MOST", "MOTH", + "MOVE", "MUCH", "MUCK", "MUDD", "MUFF", "MULE", "MULL", "MURK", + "MUSH", "MUST", "MUTE", "MUTT", "MYRA", "MYTH", "NAGY", "NAIL", + "NAIR", "NAME", "NARY", "NASH", "NAVE", "NAVY", "NEAL", "NEAR", + "NEAT", "NECK", "NEED", "NEIL", "NELL", "NEON", "NERO", "NESS", + "NEST", "NEWS", "NEWT", "NIBS", "NICE", "NICK", "NILE", "NINA", + "NINE", "NOAH", "NODE", "NOEL", "NOLL", "NONE", "NOOK", "NOON", + "NORM", "NOSE", "NOTE", "NOUN", "NOVA", "NUDE", "NULL", "NUMB", + "OATH", "OBEY", "OBOE", "ODIN", "OHIO", "OILY", "OINT", "OKAY", + "OLAF", "OLDY", "OLGA", "OLIN", "OMAN", "OMEN", "OMIT", "ONCE", + "ONES", "ONLY", "ONTO", "ONUS", "ORAL", "ORGY", "OSLO", "OTIS", + "OTTO", "OUCH", "OUST", "OUTS", "OVAL", "OVEN", "OVER", "OWLY", + "OWNS", "QUAD", "QUIT", "QUOD", "RACE", "RACK", "RACY", "RAFT", + "RAGE", "RAID", "RAIL", "RAIN", "RAKE", "RANK", "RANT", "RARE", + "RASH", "RATE", "RAVE", "RAYS", "READ", "REAL", "REAM", "REAR", + "RECK", "REED", "REEF", "REEK", "REEL", "REID", "REIN", "RENA", + "REND", "RENT", "REST", "RICE", "RICH", "RICK", "RIDE", "RIFT", + "RILL", "RIME", "RING", "RINK", "RISE", "RISK", "RITE", 
"ROAD", + "ROAM", "ROAR", "ROBE", "ROCK", "RODE", "ROIL", "ROLL", "ROME", + "ROOD", "ROOF", "ROOK", "ROOM", "ROOT", "ROSA", "ROSE", "ROSS", + "ROSY", "ROTH", "ROUT", "ROVE", "ROWE", "ROWS", "RUBE", "RUBY", + "RUDE", "RUDY", "RUIN", "RULE", "RUNG", "RUNS", "RUNT", "RUSE", + "RUSH", "RUSK", "RUSS", "RUST", "RUTH", "SACK", "SAFE", "SAGE", + "SAID", "SAIL", "SALE", "SALK", "SALT", "SAME", "SAND", "SANE", + "SANG", "SANK", "SARA", "SAUL", "SAVE", "SAYS", "SCAN", "SCAR", + "SCAT", "SCOT", "SEAL", "SEAM", "SEAR", "SEAT", "SEED", "SEEK", + "SEEM", "SEEN", "SEES", "SELF", "SELL", "SEND", "SENT", "SETS", + "SEWN", "SHAG", "SHAM", "SHAW", "SHAY", "SHED", "SHIM", "SHIN", + "SHOD", "SHOE", "SHOT", "SHOW", "SHUN", "SHUT", "SICK", "SIDE", + "SIFT", "SIGH", "SIGN", "SILK", "SILL", "SILO", "SILT", "SINE", + "SING", "SINK", "SIRE", "SITE", "SITS", "SITU", "SKAT", "SKEW", + "SKID", "SKIM", "SKIN", "SKIT", "SLAB", "SLAM", "SLAT", "SLAY", + "SLED", "SLEW", "SLID", "SLIM", "SLIT", "SLOB", "SLOG", "SLOT", + "SLOW", "SLUG", "SLUM", "SLUR", "SMOG", "SMUG", "SNAG", "SNOB", + "SNOW", "SNUB", "SNUG", "SOAK", "SOAR", "SOCK", "SODA", "SOFA", + "SOFT", "SOIL", "SOLD", "SOME", "SONG", "SOON", "SOOT", "SORE", + "SORT", "SOUL", "SOUR", "SOWN", "STAB", "STAG", "STAN", "STAR", + "STAY", "STEM", "STEW", "STIR", "STOW", "STUB", "STUN", "SUCH", + "SUDS", "SUIT", "SULK", "SUMS", "SUNG", "SUNK", "SURE", "SURF", + "SWAB", "SWAG", "SWAM", "SWAN", "SWAT", "SWAY", "SWIM", "SWUM", + "TACK", "TACT", "TAIL", "TAKE", "TALE", "TALK", "TALL", "TANK", + "TASK", "TATE", "TAUT", "TEAL", "TEAM", "TEAR", "TECH", "TEEM", + "TEEN", "TEET", "TELL", "TEND", "TENT", "TERM", "TERN", "TESS", + "TEST", "THAN", "THAT", "THEE", "THEM", "THEN", "THEY", "THIN", + "THIS", "THUD", "THUG", "TICK", "TIDE", "TIDY", "TIED", "TIER", + "TILE", "TILL", "TILT", "TIME", "TINA", "TINE", "TINT", "TINY", + "TIRE", "TOAD", "TOGO", "TOIL", "TOLD", "TOLL", "TONE", "TONG", + "TONY", "TOOK", "TOOL", "TOOT", "TORE", "TORN", "TOTE", "TOUR", + "TOUT", 
"TOWN", "TRAG", "TRAM", "TRAY", "TREE", "TREK", "TRIG", + "TRIM", "TRIO", "TROD", "TROT", "TROY", "TRUE", "TUBA", "TUBE", + "TUCK", "TUFT", "TUNA", "TUNE", "TUNG", "TURF", "TURN", "TUSK", + "TWIG", "TWIN", "TWIT", "ULAN", "UNIT", "URGE", "USED", "USER", + "USES", "UTAH", "VAIL", "VAIN", "VALE", "VARY", "VASE", "VAST", + "VEAL", "VEDA", "VEIL", "VEIN", "VEND", "VENT", "VERB", "VERY", + "VETO", "VICE", "VIEW", "VINE", "VISE", "VOID", "VOLT", "VOTE", + "WACK", "WADE", "WAGE", "WAIL", "WAIT", "WAKE", "WALE", "WALK", + "WALL", "WALT", "WAND", "WANE", "WANG", "WANT", "WARD", "WARM", + "WARN", "WART", "WASH", "WAST", "WATS", "WATT", "WAVE", "WAVY", + "WAYS", "WEAK", "WEAL", "WEAN", "WEAR", "WEED", "WEEK", "WEIR", + "WELD", "WELL", "WELT", "WENT", "WERE", "WERT", "WEST", "WHAM", + "WHAT", "WHEE", "WHEN", "WHET", "WHOA", "WHOM", "WICK", "WIFE", + "WILD", "WILL", "WIND", "WINE", "WING", "WINK", "WINO", "WIRE", + "WISE", "WISH", "WITH", "WOLF", "WONT", "WOOD", "WOOL", "WORD", + "WORE", "WORK", "WORM", "WORN", "WOVE", "WRIT", "WYNN", "YALE", + "YANG", "YANK", "YARD", "YARN", "YAWL", "YAWN", "YEAH", "YEAR", + "YELL", "YOGA", "YOKE" ] + +if __name__=='__main__': + data = [('EB33F77EE73D4053', 'TIDE ITCH SLOW REIN RULE MOT'), + ('CCAC2AED591056BE4F90FD441C534766', + 'RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE'), + ('EFF81F9BFBC65350920CDD7416DE8009', + 'TROD MUTE TAIL WARM CHAR KONG HAAG CITY BORE O TEAL AWL') + ] + + for key, words in data: + print 'Trying key', key + key=binascii.a2b_hex(key) + w2=key_to_english(key) + if w2!=words: + print 'key_to_english fails on key', repr(key), ', producing', str(w2) + k2=english_to_key(words) + if k2!=key: + print 'english_to_key fails on key', repr(key), ', producing', repr(k2) + + diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Util/__init__.py b/gdata.py-1.2.3/src/gdata/Crypto/Util/__init__.py new file mode 100755 index 0000000..0d14768 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/Util/__init__.py @@ -0,0 +1,16 @@ 
+"""Miscellaneous modules + +Contains useful modules that don't belong into any of the +other Crypto.* subpackages. + +Crypto.Util.number Number-theoretic functions (primality testing, etc.) +Crypto.Util.randpool Random number generation +Crypto.Util.RFC1751 Converts between 128-bit keys and human-readable + strings of words. + +""" + +__all__ = ['randpool', 'RFC1751', 'number'] + +__revision__ = "$Id: __init__.py,v 1.4 2003/02/28 15:26:00 akuchling Exp $" + diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Util/number.py b/gdata.py-1.2.3/src/gdata/Crypto/Util/number.py new file mode 100755 index 0000000..9d50563 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/Util/number.py @@ -0,0 +1,201 @@ +# +# number.py : Number-theoretic functions +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: number.py,v 1.13 2003/04/04 18:21:07 akuchling Exp $" + +bignum = long +try: + from Crypto.PublicKey import _fastmath +except ImportError: + _fastmath = None + +# Commented out and replaced with faster versions below +## def long2str(n): +## s='' +## while n>0: +## s=chr(n & 255)+s +## n=n>>8 +## return s + +## import types +## def str2long(s): +## if type(s)!=types.StringType: return s # Integers will be left alone +## return reduce(lambda x,y : x*256+ord(y), s, 0L) + +def size (N): + """size(N:long) : int + Returns the size of the number N in bits. 
+ """ + bits, power = 0,1L + while N >= power: + bits += 1 + power = power << 1 + return bits + +def getRandomNumber(N, randfunc): + """getRandomNumber(N:int, randfunc:callable):long + Return an N-bit random number.""" + + S = randfunc(N/8) + odd_bits = N % 8 + if odd_bits != 0: + char = ord(randfunc(1)) >> (8-odd_bits) + S = chr(char) + S + value = bytes_to_long(S) + value |= 2L ** (N-1) # Ensure high bit is set + assert size(value) >= N + return value + +def GCD(x,y): + """GCD(x:long, y:long): long + Return the GCD of x and y. + """ + x = abs(x) ; y = abs(y) + while x > 0: + x, y = y % x, x + return y + +def inverse(u, v): + """inverse(u:long, u:long):long + Return the inverse of u mod v. + """ + u3, v3 = long(u), long(v) + u1, v1 = 1L, 0L + while v3 > 0: + q=u3 / v3 + u1, v1 = v1, u1 - v1*q + u3, v3 = v3, u3 - v3*q + while u1<0: + u1 = u1 + v + return u1 + +# Given a number of bits to generate and a random generation function, +# find a prime number of the appropriate size. + +def getPrime(N, randfunc): + """getPrime(N:int, randfunc:callable):long + Return a random N-bit prime number. + """ + + number=getRandomNumber(N, randfunc) | 1 + while (not isPrime(number)): + number=number+2 + return number + +def isPrime(N): + """isPrime(N:long):bool + Return true if N is prime. + """ + if N == 1: + return 0 + if N in sieve: + return 1 + for i in sieve: + if (N % i)==0: + return 0 + + # Use the accelerator if available + if _fastmath is not None: + return _fastmath.isPrime(N) + + # Compute the highest bit that's set in N + N1 = N - 1L + n = 1L + while (n> 1L + + # Rabin-Miller test + for c in sieve[:7]: + a=long(c) ; d=1L ; t=n + while (t): # Iterate over the bits in N1 + x=(d*d) % N + if x==1L and d!=1L and d!=N1: + return 0 # Square root of 1 found + if N1 & t: + d=(x*a) % N + else: + d=x + t = t >> 1L + if d!=1L: + return 0 + return 1 + +# Small primes used for checking primality; these are all the primes +# less than 256. 
This should be enough to eliminate most of the odd +# numbers before needing to do a Rabin-Miller test at all. + +sieve=[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, + 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, + 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, + 197, 199, 211, 223, 227, 229, 233, 239, 241, 251] + +# Improved conversion functions contributed by Barry Warsaw, after +# careful benchmarking + +import struct + +def long_to_bytes(n, blocksize=0): + """long_to_bytes(n:long, blocksize:int) : string + Convert a long integer to a byte string. + + If optional blocksize is given and greater than zero, pad the front of the + byte string with binary zeros so that the length is a multiple of + blocksize. + """ + # after much testing, this algorithm was deemed to be the fastest + s = '' + n = long(n) + pack = struct.pack + while n > 0: + s = pack('>I', n & 0xffffffffL) + s + n = n >> 32 + # strip off leading zeros + for i in range(len(s)): + if s[i] != '\000': + break + else: + # only happens when n == 0 + s = '\000' + i = 0 + s = s[i:] + # add back some pad bytes. this could be done more efficiently w.r.t. the + # de-padding being done above, but sigh... + if blocksize > 0 and len(s) % blocksize: + s = (blocksize - len(s) % blocksize) * '\000' + s + return s + +def bytes_to_long(s): + """bytes_to_long(string) : long + Convert a byte string to a long integer. + + This is (essentially) the inverse of long_to_bytes(). + """ + acc = 0L + unpack = struct.unpack + length = len(s) + if length % 4: + extra = (4 - length % 4) + s = '\000' * extra + s + length = length + extra + for i in range(0, length, 4): + acc = (acc << 32) + unpack('>I', s[i:i+4])[0] + return acc + +# For backwards compatibility... 
+import warnings +def long2str(n, blocksize=0): + warnings.warn("long2str() has been replaced by long_to_bytes()") + return long_to_bytes(n, blocksize) +def str2long(s): + warnings.warn("str2long() has been replaced by bytes_to_long()") + return bytes_to_long(s) diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Util/randpool.py b/gdata.py-1.2.3/src/gdata/Crypto/Util/randpool.py new file mode 100755 index 0000000..467501c --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/Util/randpool.py @@ -0,0 +1,421 @@ +# +# randpool.py : Cryptographically strong random number generation +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: randpool.py,v 1.14 2004/05/06 12:56:54 akuchling Exp $" + +import time, array, types, warnings, os.path +from Crypto.Util.number import long_to_bytes +try: + import Crypto.Util.winrandom as winrandom +except: + winrandom = None + +STIRNUM = 3 + +class RandomPool: + """randpool.py : Cryptographically strong random number generation. + + The implementation here is similar to the one in PGP. To be + cryptographically strong, it must be difficult to determine the RNG's + output, whether in the future or the past. This is done by using + a cryptographic hash function to "stir" the random data. + + Entropy is gathered in the same fashion as PGP; the highest-resolution + clock around is read and the data is added to the random number pool. + A conservative estimate of the entropy is then kept. + + If a cryptographically secure random source is available (/dev/urandom + on many Unixes, Windows CryptGenRandom on most Windows), then use + it. 
+ + Instance Attributes: + bits : int + Maximum size of pool in bits + bytes : int + Maximum size of pool in bytes + entropy : int + Number of bits of entropy in this pool. + + Methods: + add_event([s]) : add some entropy to the pool + get_bytes(int) : get N bytes of random data + randomize([N]) : get N bytes of randomness from external source + """ + + + def __init__(self, numbytes = 160, cipher=None, hash=None): + if hash is None: + from Crypto.Hash import SHA as hash + + # The cipher argument is vestigial; it was removed from + # version 1.1 so RandomPool would work even in the limited + # exportable subset of the code + if cipher is not None: + warnings.warn("'cipher' parameter is no longer used") + + if isinstance(hash, types.StringType): + # ugly hack to force __import__ to give us the end-path module + hash = __import__('Crypto.Hash.'+hash, + None, None, ['new']) + warnings.warn("'hash' parameter should now be a hashing module") + + self.bytes = numbytes + self.bits = self.bytes*8 + self.entropy = 0 + self._hash = hash + + # Construct an array to hold the random pool, + # initializing it to 0. + self._randpool = array.array('B', [0]*self.bytes) + + self._event1 = self._event2 = 0 + self._addPos = 0 + self._getPos = hash.digest_size + self._lastcounter=time.time() + self.__counter = 0 + + self._measureTickSize() # Estimate timer resolution + self._randomize() + + def _updateEntropyEstimate(self, nbits): + self.entropy += nbits + if self.entropy < 0: + self.entropy = 0 + elif self.entropy > self.bits: + self.entropy = self.bits + + def _randomize(self, N = 0, devname = '/dev/urandom'): + """_randomize(N, DEVNAME:device-filepath) + collects N bits of randomness from some entropy source (e.g., + /dev/urandom on Unixes that have it, Windows CryptoAPI + CryptGenRandom, etc) + DEVNAME is optional, defaults to /dev/urandom. You can change it + to /dev/random if you want to block till you get enough + entropy. 
+ """ + data = '' + if N <= 0: + nbytes = int((self.bits - self.entropy)/8+0.5) + else: + nbytes = int(N/8+0.5) + if winrandom: + # Windows CryptGenRandom provides random data. + data = winrandom.new().get_bytes(nbytes) + elif os.path.exists(devname): + # Many OSes support a /dev/urandom device + try: + f=open(devname) + data=f.read(nbytes) + f.close() + except IOError, (num, msg): + if num!=2: raise IOError, (num, msg) + # If the file wasn't found, ignore the error + if data: + self._addBytes(data) + # Entropy estimate: The number of bits of + # data obtained from the random source. + self._updateEntropyEstimate(8*len(data)) + self.stir_n() # Wash the random pool + + def randomize(self, N=0): + """randomize(N:int) + use the class entropy source to get some entropy data. + This is overridden by KeyboardRandomize(). + """ + return self._randomize(N) + + def stir_n(self, N = STIRNUM): + """stir_n(N) + stirs the random pool N times + """ + for i in xrange(N): + self.stir() + + def stir (self, s = ''): + """stir(s:string) + Mix up the randomness pool. This will call add_event() twice, + but out of paranoia the entropy attribute will not be + increased. The optional 's' parameter is a string that will + be hashed with the randomness pool. + """ + + entropy=self.entropy # Save inital entropy value + self.add_event() + + # Loop over the randomness pool: hash its contents + # along with a counter, and add the resulting digest + # back into the pool. + for i in range(self.bytes / self._hash.digest_size): + h = self._hash.new(self._randpool) + h.update(str(self.__counter) + str(i) + str(self._addPos) + s) + self._addBytes( h.digest() ) + self.__counter = (self.__counter + 1) & 0xFFFFffffL + + self._addPos, self._getPos = 0, self._hash.digest_size + self.add_event() + + # Restore the old value of the entropy. + self.entropy=entropy + + + def get_bytes (self, N): + """get_bytes(N:int) : string + Return N bytes of random data. 
+ """ + + s='' + i, pool = self._getPos, self._randpool + h=self._hash.new() + dsize = self._hash.digest_size + num = N + while num > 0: + h.update( self._randpool[i:i+dsize] ) + s = s + h.digest() + num = num - dsize + i = (i + dsize) % self.bytes + if i>1, bits+1 + if bits>8: bits=8 + + self._event1, self._event2 = event, self._event1 + + self._updateEntropyEstimate(bits) + return bits + + # Private functions + def _noise(self): + # Adds a bit of noise to the random pool, by adding in the + # current time and CPU usage of this process. + # The difference from the previous call to _noise() is taken + # in an effort to estimate the entropy. + t=time.time() + delta = (t - self._lastcounter)/self._ticksize*1e6 + self._lastcounter = t + self._addBytes(long_to_bytes(long(1000*time.time()))) + self._addBytes(long_to_bytes(long(1000*time.clock()))) + self._addBytes(long_to_bytes(long(1000*time.time()))) + self._addBytes(long_to_bytes(long(delta))) + + # Reduce delta to a maximum of 8 bits so we don't add too much + # entropy as a result of this call. + delta=delta % 0xff + return int(delta) + + + def _measureTickSize(self): + # _measureTickSize() tries to estimate a rough average of the + # resolution of time that you can see from Python. It does + # this by measuring the time 100 times, computing the delay + # between measurements, and taking the median of the resulting + # list. 
(We also hash all the times and add them to the pool) + interval = [None] * 100 + h = self._hash.new(`(id(self),id(interval))`) + + # Compute 100 differences + t=time.time() + h.update(`t`) + i = 0 + j = 0 + while i < 100: + t2=time.time() + h.update(`(i,j,t2)`) + j += 1 + delta=int((t2-t)*1e6) + if delta: + interval[i] = delta + i += 1 + t=t2 + + # Take the median of the array of intervals + interval.sort() + self._ticksize=interval[len(interval)/2] + h.update(`(interval,self._ticksize)`) + # mix in the measurement times and wash the random pool + self.stir(h.digest()) + + def _addBytes(self, s): + "XOR the contents of the string S into the random pool" + i, pool = self._addPos, self._randpool + for j in range(0, len(s)): + pool[i]=pool[i] ^ ord(s[j]) + i=(i+1) % self.bytes + self._addPos = i + + # Deprecated method names: remove in PCT 2.1 or later. + def getBytes(self, N): + warnings.warn("getBytes() method replaced by get_bytes()", + DeprecationWarning) + return self.get_bytes(N) + + def addEvent (self, event, s=""): + warnings.warn("addEvent() method replaced by add_event()", + DeprecationWarning) + return self.add_event(s + str(event)) + +class PersistentRandomPool (RandomPool): + def __init__ (self, filename=None, *args, **kwargs): + RandomPool.__init__(self, *args, **kwargs) + self.filename = filename + if filename: + try: + # the time taken to open and read the file might have + # a little disk variability, modulo disk/kernel caching... + f=open(filename, 'rb') + self.add_event() + data = f.read() + self.add_event() + # mix in the data from the file and wash the random pool + self.stir(data) + f.close() + except IOError: + # Oh, well; the file doesn't exist or is unreadable, so + # we'll just ignore it. + pass + + def save(self): + if self.filename == "": + raise ValueError, "No filename set for this object" + # wash the random pool before save, provides some forward secrecy for + # old values of the pool. 
+ self.stir_n() + f=open(self.filename, 'wb') + self.add_event() + f.write(self._randpool.tostring()) + f.close() + self.add_event() + # wash the pool again, provide some protection for future values + self.stir() + +# non-echoing Windows keyboard entry +_kb = 0 +if not _kb: + try: + import msvcrt + class KeyboardEntry: + def getch(self): + c = msvcrt.getch() + if c in ('\000', '\xe0'): + # function key + c += msvcrt.getch() + return c + def close(self, delay = 0): + if delay: + time.sleep(delay) + while msvcrt.kbhit(): + msvcrt.getch() + _kb = 1 + except: + pass + +# non-echoing Posix keyboard entry +if not _kb: + try: + import termios + class KeyboardEntry: + def __init__(self, fd = 0): + self._fd = fd + self._old = termios.tcgetattr(fd) + new = termios.tcgetattr(fd) + new[3]=new[3] & ~termios.ICANON & ~termios.ECHO + termios.tcsetattr(fd, termios.TCSANOW, new) + def getch(self): + termios.tcflush(0, termios.TCIFLUSH) # XXX Leave this in? + return os.read(self._fd, 1) + def close(self, delay = 0): + if delay: + time.sleep(delay) + termios.tcflush(self._fd, termios.TCIFLUSH) + termios.tcsetattr(self._fd, termios.TCSAFLUSH, self._old) + _kb = 1 + except: + pass + +class KeyboardRandomPool (PersistentRandomPool): + def __init__(self, *args, **kwargs): + PersistentRandomPool.__init__(self, *args, **kwargs) + + def randomize(self, N = 0): + "Adds N bits of entropy to random pool. If N is 0, fill up pool." + import os, string, time + if N <= 0: + bits = self.bits - self.entropy + else: + bits = N*8 + if bits == 0: + return + print bits,'bits of entropy are now required. Please type on the keyboard' + print 'until enough randomness has been accumulated.' + kb = KeyboardEntry() + s='' # We'll save the characters typed and add them to the pool. 
+ hash = self._hash + e = 0 + try: + while e < bits: + temp=str(bits-e).rjust(6) + os.write(1, temp) + s=s+kb.getch() + e += self.add_event(s) + os.write(1, 6*chr(8)) + self.add_event(s+hash.new(s).digest() ) + finally: + kb.close() + print '\n\007 Enough. Please wait a moment.\n' + self.stir_n() # wash the random pool. + kb.close(4) + +if __name__ == '__main__': + pool = RandomPool() + print 'random pool entropy', pool.entropy, 'bits' + pool.add_event('something') + print `pool.get_bytes(100)` + import tempfile, os + fname = tempfile.mktemp() + pool = KeyboardRandomPool(filename=fname) + print 'keyboard random pool entropy', pool.entropy, 'bits' + pool.randomize() + print 'keyboard random pool entropy', pool.entropy, 'bits' + pool.randomize(128) + pool.save() + saved = open(fname, 'rb').read() + print 'saved', `saved` + print 'pool ', `pool._randpool.tostring()` + newpool = PersistentRandomPool(fname) + print 'persistent random pool entropy', pool.entropy, 'bits' + os.remove(fname) diff --git a/gdata.py-1.2.3/src/gdata/Crypto/Util/test.py b/gdata.py-1.2.3/src/gdata/Crypto/Util/test.py new file mode 100755 index 0000000..7b23e9f --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/Util/test.py @@ -0,0 +1,453 @@ +# +# test.py : Functions used for testing the modules +# +# Part of the Python Cryptography Toolkit +# +# Distribute and use freely; there are no restrictions on further +# dissemination and usage except those imposed by the laws of your +# country of residence. This software is provided "as is" without +# warranty of fitness for use or suitability for any purpose, express +# or implied. Use at your own risk or not at all. +# + +__revision__ = "$Id: test.py,v 1.16 2004/08/13 22:24:18 akuchling Exp $" + +import binascii +import string +import testdata + +from Crypto.Cipher import * + +def die(string): + import sys + print '***ERROR: ', string +# sys.exit(0) # Will default to continuing onward... 
+ +def print_timing (size, delta, verbose): + if verbose: + if delta == 0: + print 'Unable to measure time -- elapsed time too small' + else: + print '%.2f K/sec' % (size/delta) + +def exerciseBlockCipher(cipher, verbose): + import string, time + try: + ciph = eval(cipher) + except NameError: + print cipher, 'module not available' + return None + print cipher+ ':' + str='1' # Build 128K of test data + for i in xrange(0, 17): + str=str+str + if ciph.key_size==0: ciph.key_size=16 + password = 'password12345678Extra text for password'[0:ciph.key_size] + IV = 'Test IV Test IV Test IV Test'[0:ciph.block_size] + + if verbose: print ' ECB mode:', + obj=ciph.new(password, ciph.MODE_ECB) + if obj.block_size != ciph.block_size: + die("Module and cipher object block_size don't match") + + text='1234567812345678'[0:ciph.block_size] + c=obj.encrypt(text) + if (obj.decrypt(c)!=text): die('Error encrypting "'+text+'"') + text='KuchlingKuchling'[0:ciph.block_size] + c=obj.encrypt(text) + if (obj.decrypt(c)!=text): die('Error encrypting "'+text+'"') + text='NotTodayNotEver!'[0:ciph.block_size] + c=obj.encrypt(text) + if (obj.decrypt(c)!=text): die('Error encrypting "'+text+'"') + + start=time.time() + s=obj.encrypt(str) + s2=obj.decrypt(s) + end=time.time() + if (str!=s2): + die('Error in resulting plaintext from ECB mode') + print_timing(256, end-start, verbose) + del obj + + if verbose: print ' CFB mode:', + obj1=ciph.new(password, ciph.MODE_CFB, IV) + obj2=ciph.new(password, ciph.MODE_CFB, IV) + start=time.time() + ciphertext=obj1.encrypt(str[0:65536]) + plaintext=obj2.decrypt(ciphertext) + end=time.time() + if (plaintext!=str[0:65536]): + die('Error in resulting plaintext from CFB mode') + print_timing(64, end-start, verbose) + del obj1, obj2 + + if verbose: print ' CBC mode:', + obj1=ciph.new(password, ciph.MODE_CBC, IV) + obj2=ciph.new(password, ciph.MODE_CBC, IV) + start=time.time() + ciphertext=obj1.encrypt(str) + plaintext=obj2.decrypt(ciphertext) + end=time.time() + if 
(plaintext!=str): + die('Error in resulting plaintext from CBC mode') + print_timing(256, end-start, verbose) + del obj1, obj2 + + if verbose: print ' PGP mode:', + obj1=ciph.new(password, ciph.MODE_PGP, IV) + obj2=ciph.new(password, ciph.MODE_PGP, IV) + start=time.time() + ciphertext=obj1.encrypt(str) + plaintext=obj2.decrypt(ciphertext) + end=time.time() + if (plaintext!=str): + die('Error in resulting plaintext from PGP mode') + print_timing(256, end-start, verbose) + del obj1, obj2 + + if verbose: print ' OFB mode:', + obj1=ciph.new(password, ciph.MODE_OFB, IV) + obj2=ciph.new(password, ciph.MODE_OFB, IV) + start=time.time() + ciphertext=obj1.encrypt(str) + plaintext=obj2.decrypt(ciphertext) + end=time.time() + if (plaintext!=str): + die('Error in resulting plaintext from OFB mode') + print_timing(256, end-start, verbose) + del obj1, obj2 + + def counter(length=ciph.block_size): + return length * 'a' + + if verbose: print ' CTR mode:', + obj1=ciph.new(password, ciph.MODE_CTR, counter=counter) + obj2=ciph.new(password, ciph.MODE_CTR, counter=counter) + start=time.time() + ciphertext=obj1.encrypt(str) + plaintext=obj2.decrypt(ciphertext) + end=time.time() + if (plaintext!=str): + die('Error in resulting plaintext from CTR mode') + print_timing(256, end-start, verbose) + del obj1, obj2 + + # Test the IV handling + if verbose: print ' Testing IV handling' + obj1=ciph.new(password, ciph.MODE_CBC, IV) + plaintext='Test'*(ciph.block_size/4)*3 + ciphertext1=obj1.encrypt(plaintext) + obj1.IV=IV + ciphertext2=obj1.encrypt(plaintext) + if ciphertext1!=ciphertext2: + die('Error in setting IV') + + # Test keyword arguments + obj1=ciph.new(key=password) + obj1=ciph.new(password, mode=ciph.MODE_CBC) + obj1=ciph.new(mode=ciph.MODE_CBC, key=password) + obj1=ciph.new(IV=IV, mode=ciph.MODE_CBC, key=password) + + return ciph + +def exerciseStreamCipher(cipher, verbose): + import string, time + try: + ciph = eval(cipher) + except (NameError): + print cipher, 'module not available' 
+ return None + print cipher + ':', + str='1' # Build 128K of test data + for i in xrange(0, 17): + str=str+str + key_size = ciph.key_size or 16 + password = 'password12345678Extra text for password'[0:key_size] + + obj1=ciph.new(password) + obj2=ciph.new(password) + if obj1.block_size != ciph.block_size: + die("Module and cipher object block_size don't match") + if obj1.key_size != ciph.key_size: + die("Module and cipher object key_size don't match") + + text='1234567812345678Python' + c=obj1.encrypt(text) + if (obj2.decrypt(c)!=text): die('Error encrypting "'+text+'"') + text='B1FF I2 A R3A11Y |<00L D00D!!!!!' + c=obj1.encrypt(text) + if (obj2.decrypt(c)!=text): die('Error encrypting "'+text+'"') + text='SpamSpamSpamSpamSpamSpamSpamSpamSpam' + c=obj1.encrypt(text) + if (obj2.decrypt(c)!=text): die('Error encrypting "'+text+'"') + + start=time.time() + s=obj1.encrypt(str) + str=obj2.decrypt(s) + end=time.time() + print_timing(256, end-start, verbose) + del obj1, obj2 + + return ciph + +def TestStreamModules(args=['arc4', 'XOR'], verbose=1): + import sys, string + args=map(string.lower, args) + + if 'arc4' in args: + # Test ARC4 stream cipher + arc4=exerciseStreamCipher('ARC4', verbose) + if (arc4!=None): + for entry in testdata.arc4: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=arc4.new(key) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('ARC4 failed on entry '+`entry`) + + if 'xor' in args: + # Test XOR stream cipher + XOR=exerciseStreamCipher('XOR', verbose) + if (XOR!=None): + for entry in testdata.xor: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=XOR.new(key) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('XOR failed on entry '+`entry`) + + +def TestBlockModules(args=['aes', 'arc2', 'des', 'blowfish', 'cast', 'des3', + 'idea', 'rc5'], + verbose=1): + import string + 
args=map(string.lower, args) + if 'aes' in args: + ciph=exerciseBlockCipher('AES', verbose) # AES + if (ciph!=None): + if verbose: print ' Verifying against test suite...' + for entry in testdata.aes: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('AES failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + + for entry in testdata.aes_modes: + mode, key, plain, cipher, kw = entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, mode, **kw) + obj2=ciph.new(key, mode, **kw) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('AES encrypt failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + + plain2=obj2.decrypt(ciphertext) + if plain2!=plain: + die('AES decrypt failed on entry '+`entry`) + for i in plain2: + if verbose: print hex(ord(i)), + if verbose: print + + + if 'arc2' in args: + ciph=exerciseBlockCipher('ARC2', verbose) # Alleged RC2 + if (ciph!=None): + if verbose: print ' Verifying against test suite...' + for entry in testdata.arc2: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('ARC2 failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + print + + if 'blowfish' in args: + ciph=exerciseBlockCipher('Blowfish',verbose)# Bruce Schneier's Blowfish cipher + if (ciph!=None): + if verbose: print ' Verifying against test suite...' 
+ for entry in testdata.blowfish: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('Blowfish failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + + if 'cast' in args: + ciph=exerciseBlockCipher('CAST', verbose) # CAST-128 + if (ciph!=None): + if verbose: print ' Verifying against test suite...' + for entry in testdata.cast: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('CAST failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + + if 0: + # The full-maintenance test; it requires 4 million encryptions, + # and correspondingly is quite time-consuming. I've disabled + # it; it's faster to compile block/cast.c with -DTEST and run + # the resulting program. 
+ a = b = '\x01\x23\x45\x67\x12\x34\x56\x78\x23\x45\x67\x89\x34\x56\x78\x9A' + + for i in range(0, 1000000): + obj = cast.new(b, cast.MODE_ECB) + a = obj.encrypt(a[:8]) + obj.encrypt(a[-8:]) + obj = cast.new(a, cast.MODE_ECB) + b = obj.encrypt(b[:8]) + obj.encrypt(b[-8:]) + + if a!="\xEE\xA9\xD0\xA2\x49\xFD\x3B\xA6\xB3\x43\x6F\xB8\x9D\x6D\xCA\x92": + if verbose: print 'CAST test failed: value of "a" doesn\'t match' + if b!="\xB2\xC9\x5E\xB0\x0C\x31\xAD\x71\x80\xAC\x05\xB8\xE8\x3D\x69\x6E": + if verbose: print 'CAST test failed: value of "b" doesn\'t match' + + if 'des' in args: + # Test/benchmark DES block cipher + des=exerciseBlockCipher('DES', verbose) + if (des!=None): + # Various tests taken from the DES library packaged with Kerberos V4 + obj=des.new(binascii.a2b_hex('0123456789abcdef'), des.MODE_ECB) + s=obj.encrypt('Now is t') + if (s!=binascii.a2b_hex('3fa40e8a984d4815')): + die('DES fails test 1') + obj=des.new(binascii.a2b_hex('08192a3b4c5d6e7f'), des.MODE_ECB) + s=obj.encrypt('\000\000\000\000\000\000\000\000') + if (s!=binascii.a2b_hex('25ddac3e96176467')): + die('DES fails test 2') + obj=des.new(binascii.a2b_hex('0123456789abcdef'), des.MODE_CBC, + binascii.a2b_hex('1234567890abcdef')) + s=obj.encrypt("Now is the time for all ") + if (s!=binascii.a2b_hex('e5c7cdde872bf27c43e934008c389c0f683788499a7c05f6')): + die('DES fails test 3') + obj=des.new(binascii.a2b_hex('0123456789abcdef'), des.MODE_CBC, + binascii.a2b_hex('fedcba9876543210')) + s=obj.encrypt("7654321 Now is the time for \000\000\000\000") + if (s!=binascii.a2b_hex("ccd173ffab2039f4acd8aefddfd8a1eb468e91157888ba681d269397f7fe62b4")): + die('DES fails test 4') + del obj,s + + # R. 
Rivest's test: see http://theory.lcs.mit.edu/~rivest/destest.txt + x=binascii.a2b_hex('9474B8E8C73BCA7D') + for i in range(0, 16): + obj=des.new(x, des.MODE_ECB) + if (i & 1): x=obj.decrypt(x) + else: x=obj.encrypt(x) + if x!=binascii.a2b_hex('1B1A2DDB4C642438'): + die("DES fails Rivest's test") + + if verbose: print ' Verifying against test suite...' + for entry in testdata.des: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=des.new(key, des.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('DES failed on entry '+`entry`) + for entry in testdata.des_cbc: + key, iv, plain, cipher=entry + key, iv, cipher=binascii.a2b_hex(key),binascii.a2b_hex(iv),binascii.a2b_hex(cipher) + obj1=des.new(key, des.MODE_CBC, iv) + obj2=des.new(key, des.MODE_CBC, iv) + ciphertext=obj1.encrypt(plain) + if (ciphertext!=cipher): + die('DES CBC mode failed on entry '+`entry`) + + if 'des3' in args: + ciph=exerciseBlockCipher('DES3', verbose) # Triple DES + if (ciph!=None): + if verbose: print ' Verifying against test suite...' + for entry in testdata.des3: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('DES3 failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + for entry in testdata.des3_cbc: + key, iv, plain, cipher=entry + key, iv, cipher=binascii.a2b_hex(key),binascii.a2b_hex(iv),binascii.a2b_hex(cipher) + obj1=ciph.new(key, ciph.MODE_CBC, iv) + obj2=ciph.new(key, ciph.MODE_CBC, iv) + ciphertext=obj1.encrypt(plain) + if (ciphertext!=cipher): + die('DES3 CBC mode failed on entry '+`entry`) + + if 'idea' in args: + ciph=exerciseBlockCipher('IDEA', verbose) # IDEA block cipher + if (ciph!=None): + if verbose: print ' Verifying against test suite...' 
+ for entry in testdata.idea: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key, ciph.MODE_ECB) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('IDEA failed on entry '+`entry`) + + if 'rc5' in args: + # Ronald Rivest's RC5 algorithm + ciph=exerciseBlockCipher('RC5', verbose) + if (ciph!=None): + if verbose: print ' Verifying against test suite...' + for entry in testdata.rc5: + key,plain,cipher=entry + key=binascii.a2b_hex(key) + plain=binascii.a2b_hex(plain) + cipher=binascii.a2b_hex(cipher) + obj=ciph.new(key[4:], ciph.MODE_ECB, + version =ord(key[0]), + word_size=ord(key[1]), + rounds =ord(key[2]) ) + ciphertext=obj.encrypt(plain) + if (ciphertext!=cipher): + die('RC5 failed on entry '+`entry`) + for i in ciphertext: + if verbose: print hex(ord(i)), + if verbose: print + + + diff --git a/gdata.py-1.2.3/src/gdata/Crypto/__init__.py b/gdata.py-1.2.3/src/gdata/Crypto/__init__.py new file mode 100755 index 0000000..2324ae8 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/__init__.py @@ -0,0 +1,25 @@ + +"""Python Cryptography Toolkit + +A collection of cryptographic modules implementing various algorithms +and protocols. + +Subpackages: +Crypto.Cipher Secret-key encryption algorithms (AES, DES, ARC4) +Crypto.Hash Hashing algorithms (MD5, SHA, HMAC) +Crypto.Protocol Cryptographic protocols (Chaffing, all-or-nothing + transform). This package does not contain any + network protocols. 
+Crypto.PublicKey Public-key encryption and signature algorithms + (RSA, DSA) +Crypto.Util Various useful modules and functions (long-to-string + conversion, random number generation, number + theoretic functions) +""" + +__all__ = ['Cipher', 'Hash', 'Protocol', 'PublicKey', 'Util'] + +__version__ = '2.0.1' +__revision__ = "$Id: __init__.py,v 1.12 2005/06/14 01:20:22 akuchling Exp $" + + diff --git a/gdata.py-1.2.3/src/gdata/Crypto/test.py b/gdata.py-1.2.3/src/gdata/Crypto/test.py new file mode 100755 index 0000000..c5ed061 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/Crypto/test.py @@ -0,0 +1,38 @@ +# +# Test script for the Python Cryptography Toolkit. +# + +__revision__ = "$Id: test.py,v 1.7 2002/07/11 14:31:19 akuchling Exp $" + +import os, sys + + +# Add the build directory to the front of sys.path +from distutils.util import get_platform +s = "build/lib.%s-%.3s" % (get_platform(), sys.version) +s = os.path.join(os.getcwd(), s) +sys.path.insert(0, s) +s = os.path.join(os.getcwd(), 'test') +sys.path.insert(0, s) + +from Crypto.Util import test + +args = sys.argv[1:] +quiet = "--quiet" in args +if quiet: args.remove('--quiet') + +if not quiet: + print '\nStream Ciphers:' + print '===============' + +if args: test.TestStreamModules(args, verbose= not quiet) +else: test.TestStreamModules(verbose= not quiet) + +if not quiet: + print '\nBlock Ciphers:' + print '==============' + +if args: test.TestBlockModules(args, verbose= not quiet) +else: test.TestBlockModules(verbose= not quiet) + + diff --git a/gdata.py-1.2.3/src/gdata/__init__.py b/gdata.py-1.2.3/src/gdata/__init__.py new file mode 100755 index 0000000..2335b2a --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/__init__.py @@ -0,0 +1,835 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Contains classes representing Google Data elements. + + Extends Atom classes to add Google Data specific elements. +""" + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + +import os +import atom +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree + + +# XML namespaces which are often used in GData entities. +GDATA_NAMESPACE = 'http://schemas.google.com/g/2005' +GDATA_TEMPLATE = '{http://schemas.google.com/g/2005}%s' +OPENSEARCH_NAMESPACE = 'http://a9.com/-/spec/opensearchrss/1.0/' +OPENSEARCH_TEMPLATE = '{http://a9.com/-/spec/opensearchrss/1.0/}%s' +BATCH_NAMESPACE = 'http://schemas.google.com/gdata/batch' +GACL_NAMESPACE = 'http://schemas.google.com/acl/2007' +GACL_TEMPLATE = '{http://schemas.google.com/acl/2007}%s' + + +# Labels used in batch request entries to specify the desired CRUD operation. +BATCH_INSERT = 'insert' +BATCH_UPDATE = 'update' +BATCH_DELETE = 'delete' +BATCH_QUERY = 'query' + +class Error(Exception): + pass + + +class MissingRequiredParameters(Error): + pass + + +class MediaSource(object): + """GData Entries can refer to media sources, so this class provides a + place to store references to these objects along with some metadata. + """ + + def __init__(self, file_handle=None, content_type=None, content_length=None, + file_path=None, file_name=None): + """Creates an object of type MediaSource. 
+ + Args: + file_handle: A file handle pointing to the file to be encapsulated in the + MediaSource + content_type: string The MIME type of the file. Required if a file_handle + is given. + content_length: int The size of the file. Required if a file_handle is + given. + file_path: string (optional) A full path name to the file. Used in + place of a file_handle. + file_name: string The name of the file without any path information. + Required if a file_handle is given. + """ + self.file_handle = file_handle + self.content_type = content_type + self.content_length = content_length + self.file_name = file_name + + if (file_handle is None and content_type is not None and + file_path is not None): + self.setFile(file_path, content_type) + + def setFile(self, file_name, content_type): + """A helper function which can create a file handle from a given filename + and set the content type and length all at once. + + Args: + file_name: string The path and file name to the file containing the media + content_type: string A MIME type representing the type of the media + """ + + self.file_handle = open(file_name, 'rb') + self.content_type = content_type + self.content_length = os.path.getsize(file_name) + self.file_name = os.path.basename(file_name) + + +class LinkFinder(atom.LinkFinder): + """An "interface" providing methods to find link elements + + GData Entry elements often contain multiple links which differ in the rel + attribute or content type. Often, developers are interested in a specific + type of link so this class provides methods to find specific classes of + links. + + This class is used as a mixin in GData entries. 
+ """ + + def GetSelfLink(self): + """Find the first link with rel set to 'self' + + Returns: + An atom.Link or none if none of the links had rel equal to 'self' + """ + + for a_link in self.link: + if a_link.rel == 'self': + return a_link + return None + + def GetEditLink(self): + for a_link in self.link: + if a_link.rel == 'edit': + return a_link + return None + + def GetEditMediaLink(self): + """The Picasa API mistakenly returns media-edit rather than edit-media, but + this may change soon. + """ + for a_link in self.link: + if a_link.rel == 'edit-media': + return a_link + if a_link.rel == 'media-edit': + return a_link + return None + + def GetHtmlLink(self): + """Find the first link with rel of alternate and type of text/html + + Returns: + An atom.Link or None if no links matched + """ + for a_link in self.link: + if a_link.rel == 'alternate' and a_link.type == 'text/html': + return a_link + return None + + def GetPostLink(self): + """Get a link containing the POST target URL. + + The POST target URL is used to insert new entries. + + Returns: + A link object with a rel matching the POST type. 
+ """ + for a_link in self.link: + if a_link.rel == 'http://schemas.google.com/g/2005#post': + return a_link + return None + + def GetAclLink(self): + for a_link in self.link: + if a_link.rel == 'http://schemas.google.com/acl/2007#accessControlList': + return a_link + return None + + def GetFeedLink(self): + for a_link in self.link: + if a_link.rel == 'http://schemas.google.com/g/2005#feed': + return a_link + return None + + def GetNextLink(self): + for a_link in self.link: + if a_link.rel == 'next': + return a_link + return None + + def GetPrevLink(self): + for a_link in self.link: + if a_link.rel == 'previous': + return a_link + return None + + +class TotalResults(atom.AtomBase): + """opensearch:TotalResults for a GData feed""" + + _tag = 'totalResults' + _namespace = OPENSEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def TotalResultsFromString(xml_string): + return atom.CreateClassFromXMLString(TotalResults, xml_string) + + +class StartIndex(atom.AtomBase): + """The opensearch:startIndex element in GData feed""" + + _tag = 'startIndex' + _namespace = OPENSEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def StartIndexFromString(xml_string): + return atom.CreateClassFromXMLString(StartIndex, xml_string) + + +class ItemsPerPage(atom.AtomBase): + """The opensearch:itemsPerPage element in GData feed""" + + _tag = 'itemsPerPage' + _namespace = OPENSEARCH_NAMESPACE + _children = 
atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def ItemsPerPageFromString(xml_string): + return atom.CreateClassFromXMLString(ItemsPerPage, xml_string) + + +class ExtendedProperty(atom.AtomBase): + """The Google Data extendedProperty element. + + Used to store arbitrary key-value information specific to your + application. The value can either be a text string stored as an XML + attribute (.value), or an XML node (XmlBlob) as a child element. + + This element is used in the Google Calendar data API and the Google + Contacts data API. + """ + + _tag = 'extendedProperty' + _namespace = GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + _attributes['value'] = 'value' + + def __init__(self, name=None, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + def GetXmlBlobExtensionElement(self): + """Returns the XML blob as an atom.ExtensionElement. + + Returns: + An atom.ExtensionElement representing the blob's XML, or None if no + blob was set. + """ + if len(self.extension_elements) < 1: + return None + else: + return self.extension_elements[0] + + def GetXmlBlobString(self): + """Returns the XML blob as a string. + + Returns: + A string containing the blob's XML, or None if no blob was set. + """ + blob = self.GetXmlBlobExtensionElement() + if blob: + return blob.ToString() + return None + + def SetXmlBlob(self, blob): + """Sets the contents of the extendedProperty to XML as a child node. 
+ + Since the extendedProperty is only allowed one child element as an XML + blob, setting the XML blob will erase any preexisting extension elements + in this object. + + Args: + blob: str, ElementTree Element or atom.ExtensionElement representing + the XML blob stored in the extendedProperty. + """ + # Erase any existing extension_elements, clears the child nodes from the + # extendedProperty. + self.extension_elements = [] + if isinstance(blob, atom.ExtensionElement): + self.extension_elements.append(blob) + elif ElementTree.iselement(blob): + self.extension_elements.append(atom._ExtensionElementFromElementTree( + blob)) + else: + self.extension_elements.append(atom.ExtensionElementFromString(blob)) + + +def ExtendedPropertyFromString(xml_string): + return atom.CreateClassFromXMLString(ExtendedProperty, xml_string) + + +class GDataEntry(atom.Entry, LinkFinder): + """Extends Atom Entry to provide data processing""" + + _tag = atom.Entry._tag + _namespace = atom.Entry._namespace + _children = atom.Entry._children.copy() + _attributes = atom.Entry._attributes.copy() + + def __GetId(self): + return self.__id + + # This method was created to strip the unwanted whitespace from the id's + # text node. + def __SetId(self, id): + self.__id = id + if id is not None and id.text is not None: + self.__id.text = id.text.strip() + + id = property(__GetId, __SetId) + + def IsMedia(self): + """Determines whether or not an entry is a GData Media entry. + """ + if (self.GetEditMediaLink()): + return True + else: + return False + + def GetMediaURL(self): + """Returns the URL to the media content, if the entry is a media entry. + Otherwise returns None. 
+ """ + if not self.IsMedia(): + return None + else: + return self.content.src + + +def GDataEntryFromString(xml_string): + """Creates a new GDataEntry instance given a string of XML.""" + return atom.CreateClassFromXMLString(GDataEntry, xml_string) + + +class GDataFeed(atom.Feed, LinkFinder): + """A Feed from a GData service""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = atom.Feed._children.copy() + _attributes = atom.Feed._attributes.copy() + _children['{%s}totalResults' % OPENSEARCH_NAMESPACE] = ('total_results', + TotalResults) + _children['{%s}startIndex' % OPENSEARCH_NAMESPACE] = ('start_index', + StartIndex) + _children['{%s}itemsPerPage' % OPENSEARCH_NAMESPACE] = ('items_per_page', + ItemsPerPage) + # Add a conversion rule for atom:entry to make it into a GData + # Entry. + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GDataEntry]) + + def __GetId(self): + return self.__id + + def __SetId(self, id): + self.__id = id + if id is not None and id.text is not None: + self.__id.text = id.text.strip() + + id = property(__GetId, __SetId) + + def __GetGenerator(self): + return self.__generator + + def __SetGenerator(self, generator): + self.__generator = generator + if generator is not None: + self.__generator.text = generator.text.strip() + + generator = property(__GetGenerator, __SetGenerator) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, entry=None, + total_results=None, start_index=None, items_per_page=None, + extension_elements=None, extension_attributes=None, text=None): + """Constructor for Source + + Args: + author: list (optional) A list of Author instances which belong to this + class. 
+ category: list (optional) A list of Category instances + contributor: list (optional) A list on Contributor instances + generator: Generator (optional) + icon: Icon (optional) + id: Id (optional) The entry's Id element + link: list (optional) A list of Link instances + logo: Logo (optional) + rights: Rights (optional) The entry's Rights element + subtitle: Subtitle (optional) The entry's subtitle element + title: Title (optional) the entry's title element + updated: Updated (optional) the entry's updated element + entry: list (optional) A list of the Entry instances contained in the + feed. + text: String (optional) The text contents of the element. This is the + contents of the Entry's XML text node. + (Example: This is the text) + extension_elements: list (optional) A list of ExtensionElement instances + which are children of this element. + extension_attributes: dict (optional) A dictionary of strings which are + the values for additional XML attributes of this element. + """ + + self.author = author or [] + self.category = category or [] + self.contributor = contributor or [] + self.generator = generator + self.icon = icon + self.id = atom_id + self.link = link or [] + self.logo = logo + self.rights = rights + self.subtitle = subtitle + self.title = title + self.updated = updated + self.entry = entry or [] + self.total_results = total_results + self.start_index = start_index + self.items_per_page = items_per_page + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def GDataFeedFromString(xml_string): + return atom.CreateClassFromXMLString(GDataFeed, xml_string) + + +class BatchId(atom.AtomBase): + _tag = 'id' + _namespace = BATCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + +def BatchIdFromString(xml_string): + return atom.CreateClassFromXMLString(BatchId, xml_string) + + +class BatchOperation(atom.AtomBase): + _tag 
= 'operation' + _namespace = BATCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['type'] = 'type' + + def __init__(self, op_type=None, extension_elements=None, + extension_attributes=None, + text=None): + self.type = op_type + atom.AtomBase.__init__(self, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def BatchOperationFromString(xml_string): + return atom.CreateClassFromXMLString(BatchOperation, xml_string) + + +class BatchStatus(atom.AtomBase): + """The batch:status element present in a batch response entry. + + A status element contains the code (HTTP response code) and + reason as elements. In a single request these fields would + be part of the HTTP response, but in a batch request each + Entry operation has a corresponding Entry in the response + feed which includes status information. + + See http://code.google.com/apis/gdata/batch.html#Handling_Errors + """ + + _tag = 'status' + _namespace = BATCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['code'] = 'code' + _attributes['reason'] = 'reason' + _attributes['content-type'] = 'content_type' + + def __init__(self, code=None, reason=None, content_type=None, + extension_elements=None, extension_attributes=None, text=None): + self.code = code + self.reason = reason + self.content_type = content_type + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def BatchStatusFromString(xml_string): + return atom.CreateClassFromXMLString(BatchStatus, xml_string) + + +class BatchEntry(GDataEntry): + """An atom:entry for use in batch requests. + + The BatchEntry contains additional members to specify the operation to be + performed on this entry and a batch ID so that the server can reference + individual operations in the response feed. 
For more information, see: + http://code.google.com/apis/gdata/batch.html + """ + + _tag = GDataEntry._tag + _namespace = GDataEntry._namespace + _children = GDataEntry._children.copy() + _children['{%s}operation' % BATCH_NAMESPACE] = ('batch_operation', BatchOperation) + _children['{%s}id' % BATCH_NAMESPACE] = ('batch_id', BatchId) + _children['{%s}status' % BATCH_NAMESPACE] = ('batch_status', BatchStatus) + _attributes = GDataEntry._attributes.copy() + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, control=None, title=None, updated=None, + batch_operation=None, batch_id=None, batch_status=None, + extension_elements=None, extension_attributes=None, text=None): + self.batch_operation = batch_operation + self.batch_id = batch_id + self.batch_status = batch_status + GDataEntry.__init__(self, author=author, category=category, + content=content, contributor=contributor, atom_id=atom_id, link=link, + published=published, rights=rights, source=source, summary=summary, + control=control, title=title, updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +def BatchEntryFromString(xml_string): + return atom.CreateClassFromXMLString(BatchEntry, xml_string) + + +class BatchInterrupted(atom.AtomBase): + """The batch:interrupted element sent if batch request was interrupted. + + Only appears in a feed if some of the batch entries could not be processed. 
+ See: http://code.google.com/apis/gdata/batch.html#Handling_Errors + """ + + _tag = 'interrupted' + _namespace = BATCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['reason'] = 'reason' + _attributes['success'] = 'success' + _attributes['failures'] = 'failures' + _attributes['parsed'] = 'parsed' + + def __init__(self, reason=None, success=None, failures=None, parsed=None, + extension_elements=None, extension_attributes=None, text=None): + self.reason = reason + self.success = success + self.failures = failures + self.parsed = parsed + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def BatchInterruptedFromString(xml_string): + return atom.CreateClassFromXMLString(BatchInterrupted, xml_string) + + +class BatchFeed(GDataFeed): + """A feed containing a list of batch request entries.""" + + _tag = GDataFeed._tag + _namespace = GDataFeed._namespace + _children = GDataFeed._children.copy() + _attributes = GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BatchEntry]) + _children['{%s}interrupted' % BATCH_NAMESPACE] = ('interrupted', BatchInterrupted) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, entry=None, + total_results=None, start_index=None, items_per_page=None, + interrupted=None, + extension_elements=None, extension_attributes=None, text=None): + self.interrupted = interrupted + GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, start_index=start_index, + items_per_page=items_per_page, + 
extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + def AddBatchEntry(self, entry=None, id_url_string=None, + batch_id_string=None, operation_string=None): + """Logic for populating members of a BatchEntry and adding to the feed. + + + If the entry is not a BatchEntry, it is converted to a BatchEntry so + that the batch specific members will be present. + + The id_url_string can be used in place of an entry if the batch operation + applies to a URL. For example query and delete operations require just + the URL of an entry, no body is sent in the HTTP request. If an + id_url_string is sent instead of an entry, a BatchEntry is created and + added to the feed. + + This method also assigns the desired batch id to the entry so that it + can be referenced in the server's response. If the batch_id_string is + None, this method will assign a batch_id to be the index at which this + entry will be in the feed's entry list. + + Args: + entry: BatchEntry, atom.Entry, or another Entry flavor (optional) The + entry which will be sent to the server as part of the batch request. + The item must have a valid atom id so that the server knows which + entry this request references. + id_url_string: str (optional) The URL of the entry to be acted on. You + can find this URL in the text member of the atom id for an entry. + If an entry is not sent, this id will be used to construct a new + BatchEntry which will be added to the request feed. + batch_id_string: str (optional) The batch ID to be used to reference + this batch operation in the results feed. If this parameter is None, + the current length of the feed's entry array will be used as a + count. Note that batch_ids should either always be specified or + never, mixing could potentially result in duplicate batch ids. + operation_string: str (optional) The desired batch operation which will + set the batch_operation.type member of the entry. 
Options are + 'insert', 'update', 'delete', and 'query' + + Raises: + MissingRequiredParameters: Raised if neither an id_ url_string nor an + entry are provided in the request. + + Returns: + The added entry. + """ + if entry is None and id_url_string is None: + raise MissingRequiredParameters('supply either an entry or URL string') + if entry is None and id_url_string is not None: + entry = BatchEntry(atom_id=atom.Id(text=id_url_string)) + # TODO: handle cases in which the entry lacks batch_... members. + #if not isinstance(entry, BatchEntry): + # Convert the entry to a batch entry. + if batch_id_string is not None: + entry.batch_id = BatchId(text=batch_id_string) + elif entry.batch_id is None or entry.batch_id.text is None: + entry.batch_id = BatchId(text=str(len(self.entry))) + if operation_string is not None: + entry.batch_operation = BatchOperation(op_type=operation_string) + self.entry.append(entry) + return entry + + def AddInsert(self, entry, batch_id_string=None): + """Add an insert request to the operations in this batch request feed. + + If the entry doesn't yet have an operation or a batch id, these will + be set to the insert operation and a batch_id specified as a parameter. + + Args: + entry: BatchEntry The entry which will be sent in the batch feed as an + insert request. + batch_id_string: str (optional) The batch ID to be used to reference + this batch operation in the results feed. If this parameter is None, + the current length of the feed's entry array will be used as a + count. Note that batch_ids should either always be specified or + never, mixing could potentially result in duplicate batch ids. + """ + entry = self.AddBatchEntry(entry=entry, batch_id_string=batch_id_string, + operation_string=BATCH_INSERT) + + def AddUpdate(self, entry, batch_id_string=None): + """Add an update request to the list of batch operations in this feed. 
+ + Sets the operation type of the entry to insert if it is not already set + and assigns the desired batch id to the entry so that it can be + referenced in the server's response. + + Args: + entry: BatchEntry The entry which will be sent to the server as an + update (HTTP PUT) request. The item must have a valid atom id + so that the server knows which entry to replace. + batch_id_string: str (optional) The batch ID to be used to reference + this batch operation in the results feed. If this parameter is None, + the current length of the feed's entry array will be used as a + count. See also comments for AddInsert. + """ + entry = self.AddBatchEntry(entry=entry, batch_id_string=batch_id_string, + operation_string=BATCH_UPDATE) + + def AddDelete(self, url_string=None, entry=None, batch_id_string=None): + """Adds a delete request to the batch request feed. + + This method takes either the url_string which is the atom id of the item + to be deleted, or the entry itself. The atom id of the entry must be + present so that the server knows which entry should be deleted. + + Args: + url_string: str (optional) The URL of the entry to be deleted. You can + find this URL in the text member of the atom id for an entry. + entry: BatchEntry (optional) The entry to be deleted. + batch_id_string: str (optional) + + Raises: + MissingRequiredParameters: Raised if neither a url_string nor an entry + are provided in the request. + """ + entry = self.AddBatchEntry(entry=entry, id_url_string=url_string, + batch_id_string=batch_id_string, + operation_string=BATCH_DELETE) + + def AddQuery(self, url_string=None, entry=None, batch_id_string=None): + """Adds a query request to the batch request feed. + + This method takes either the url_string which is the query URL + whose results will be added to the result feed. The query URL will + be encapsulated in a BatchEntry, and you may pass in the BatchEntry + with a query URL instead of sending a url_string. 
+ + Args: + url_string: str (optional) + entry: BatchEntry (optional) + batch_id_string: str (optional) + + Raises: + MissingRequiredParameters + """ + entry = self.AddBatchEntry(entry=entry, id_url_string=url_string, + batch_id_string=batch_id_string, + operation_string=BATCH_QUERY) + + def GetBatchLink(self): + for link in self.link: + if link.rel == 'http://schemas.google.com/g/2005#batch': + return link + return None + + +def BatchFeedFromString(xml_string): + return atom.CreateClassFromXMLString(BatchFeed, xml_string) + + +class EntryLink(atom.AtomBase): + """The gd:entryLink element""" + + _tag = 'entryLink' + _namespace = GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + # The entry used to be an atom.Entry, now it is a GDataEntry. + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', GDataEntry) + _attributes['rel'] = 'rel' + _attributes['readOnly'] = 'read_only' + _attributes['href'] = 'href' + + def __init__(self, href=None, read_only=None, rel=None, + entry=None, extension_elements=None, + extension_attributes=None, text=None): + self.href = href + self.read_only = read_only + self.rel = rel + self.entry = entry + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def EntryLinkFromString(xml_string): + return atom.CreateClassFromXMLString(EntryLink, xml_string) + + +class FeedLink(atom.AtomBase): + """The gd:feedLink element""" + + _tag = 'feedLink' + _namespace = GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}feed' % atom.ATOM_NAMESPACE] = ('feed', GDataFeed) + _attributes['rel'] = 'rel' + _attributes['readOnly'] = 'read_only' + _attributes['countHint'] = 'count_hint' + _attributes['href'] = 'href' + + def __init__(self, count_hint=None, href=None, read_only=None, rel=None, + feed=None, extension_elements=None, 
extension_attributes=None, + text=None): + self.count_hint = count_hint + self.href = href + self.read_only = read_only + self.rel = rel + self.feed = feed + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def FeedLinkFromString(xml_string): + return atom.CreateClassFromXMLString(EntryLink, xml_string) diff --git a/gdata.py-1.2.3/src/gdata/alt/__init__.py b/gdata.py-1.2.3/src/gdata/alt/__init__.py new file mode 100644 index 0000000..742980e --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/alt/__init__.py @@ -0,0 +1,20 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""This package's modules adapt the gdata library to run in other environments + +The first example is the appengine module which contains functions and +classes which modify a GDataService object to run on Google App Engine. +""" diff --git a/gdata.py-1.2.3/src/gdata/alt/appengine.py b/gdata.py-1.2.3/src/gdata/alt/appengine.py new file mode 100644 index 0000000..54a620f --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/alt/appengine.py @@ -0,0 +1,302 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Provides HTTP functions for gdata.service to use on Google App Engine + +AppEngineHttpClient: Provides an HTTP request method which uses App Engine's + urlfetch API. Set the http_client member of a GDataService object to an + instance of an AppEngineHttpClient to allow the gdata library to run on + Google App Engine. + +run_on_appengine: Function which will modify an existing GDataService object + to allow it to run on App Engine. It works by creating a new instance of + the AppEngineHttpClient and replacing the GDataService object's + http_client. +""" + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import StringIO +import pickle +import atom.http_interface +import atom.token_store +from google.appengine.api import urlfetch +from google.appengine.ext import db +from google.appengine.api import users +from google.appengine.api import memcache + + +def run_on_appengine(gdata_service, store_tokens=True, + single_user_mode=False): + """Modifies a GDataService object to allow it to run on App Engine. + + Args: + gdata_service: An instance of AtomService, GDataService, or any + of their subclasses which has an http_client member and a + token_store member. + store_tokens: Boolean, defaults to True. If True, the gdata_service + will attempt to add each token to it's token_store when + SetClientLoginToken or SetAuthSubToken is called. If False + the tokens will not automatically be added to the + token_store. + single_user_mode: Boolean, defaults to False. 
 If True, the current_token
+ member of gdata_service will be set when
+ SetClientLoginToken or SetAuthSubToken is called. If set
+ to True, the current_token is set in the gdata_service
+ and anyone who accesses the object will use the same
+ token.
+
+ Note: If store_tokens is set to False and
+ single_user_mode is set to False, all tokens will be
+ ignored, since the library assumes the tokens should not
+ be stored in the datastore and they should not be stored
+ in the gdata_service object. This will make it
+ impossible to make requests which require authorization.
+ """
+ gdata_service.http_client = AppEngineHttpClient()
+ gdata_service.token_store = AppEngineTokenStore()
+ gdata_service.auto_store_tokens = store_tokens
+ gdata_service.auto_set_current_token = single_user_mode
+ return gdata_service
+
+
+class AppEngineHttpClient(atom.http_interface.GenericHttpClient):
+ def __init__(self, headers=None):
+ self.debug = False
+ self.headers = headers or {}
+
+ def request(self, operation, url, data=None, headers=None):
+ """Performs an HTTP call to the server, supports GET, POST, PUT, and
+ DELETE.
+
+ Usage example, perform an HTTP GET on http://www.google.com/:
+ import atom.http
+ client = atom.http.HttpClient()
+ http_response = client.request('GET', 'http://www.google.com/')
+
+ Args:
+ operation: str The HTTP operation to be performed. This is usually one
+ of 'GET', 'POST', 'PUT', or 'DELETE'
+ data: filestream, list of parts, or other object which can be converted
+ to a string. Should be set to None when performing a GET or DELETE.
+ If data is a file-like object which can be read, this method will
+ read a chunk of 100K bytes at a time and send them.
+ If the data is a list of parts to be sent, each part will be
+ evaluated and sent.
+ url: The full URL to which the request should be sent. Can be a string
+ or atom.url.Url.
+ headers: dict of strings. HTTP headers which should be sent
+ in the request. 
+ """
+ all_headers = self.headers.copy()
+ if headers:
+ all_headers.update(headers)
+
+ # Construct the full payload.
+ # Assume that data is None or a string.
+ data_str = data
+ if data:
+ if isinstance(data, list):
+ # If data is a list of different objects, convert them all to strings
+ # and join them together.
+ converted_parts = [_convert_data_part(x) for x in data]
+ data_str = ''.join(converted_parts)
+ else:
+ data_str = _convert_data_part(data)
+
+ # If the list of headers does not include a Content-Length, attempt to
+ # calculate it based on the data object.
+ if data and 'Content-Length' not in all_headers:
+ all_headers['Content-Length'] = str(len(data_str))
+
+ # Set the content type to the default value if none was set.
+ if 'Content-Type' not in all_headers:
+ all_headers['Content-Type'] = 'application/atom+xml'
+
+ # Lookup the urlfetch operation which corresponds to the desired HTTP verb.
+ if operation == 'GET':
+ method = urlfetch.GET
+ elif operation == 'POST':
+ method = urlfetch.POST
+ elif operation == 'PUT':
+ method = urlfetch.PUT
+ elif operation == 'DELETE':
+ method = urlfetch.DELETE
+ else:
+ method = None
+ return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
+ method=method, headers=all_headers, follow_redirects=False))
+
+
+def _convert_data_part(data):
+ if not data or isinstance(data, str):
+ return data
+ elif hasattr(data, 'read'):
+ # data is a file like object, so read it completely.
+ return data.read()
+ # The data object was not a file.
+ # Try to convert to a string and send the data.
+ return str(data)
+
+
+class HttpResponse(object):
+ """Translates a urlfetch response to look like an httplib response.
+
+ Used to allow the response from HttpRequest to be usable by gdata.service
+ methods. 
+ """ + + def __init__(self, urlfetch_response): + self.body = StringIO.StringIO(urlfetch_response.content) + self.headers = urlfetch_response.headers + self.status = urlfetch_response.status_code + self.reason = '' + + def read(self, length=None): + if not length: + return self.body.read() + else: + return self.body.read(length) + + def getheader(self, name): + if not self.headers.has_key(name): + return self.headers[name.lower()] + return self.headers[name] + + +class TokenCollection(db.Model): + """Datastore Model which associates auth tokens with the current user.""" + user = db.UserProperty() + pickled_tokens = db.BlobProperty() + + +class AppEngineTokenStore(atom.token_store.TokenStore): + """Stores the user's auth tokens in the App Engine datastore. + + Tokens are only written to the datastore if a user is signed in (if + users.get_current_user() returns a user object). + """ + def __init__(self): + pass + + def add_token(self, token): + """Associates the token with the current user and stores it. + + If there is no current user, the token will not be stored. + + Returns: + False if the token was not stored. + """ + tokens = load_auth_tokens() + if not hasattr(token, 'scopes') or not token.scopes: + return False + for scope in token.scopes: + tokens[str(scope)] = token + key = save_auth_tokens(tokens) + if key: + return True + return False + + def find_token(self, url): + """Searches the current user's collection of token for a token which can + be used for a request to the url. + + Returns: + The stored token which belongs to the current user and is valid for the + desired URL. If there is no current user, or there is no valid user + token in the datastore, a atom.http_interface.GenericToken is returned. 
+ """
+ if url is None:
+ return None
+ if isinstance(url, (str, unicode)):
+ url = atom.url.parse_url(url)
+ tokens = load_auth_tokens()
+ if url in tokens:
+ token = tokens[url]
+ if token.valid_for_scope(url):
+ return token
+ else:
+ del tokens[url]
+ save_auth_tokens(tokens)
+ for scope, token in tokens.iteritems():
+ if token.valid_for_scope(url):
+ return token
+ return atom.http_interface.GenericToken()
+
+ def remove_token(self, token):
+ """Removes the token from the current user's collection in the datastore.
+
+ Returns:
+ False if the token was not removed, this could be because the token was
+ not in the datastore, or because there is no current user.
+ """
+ token_found = False
+ scopes_to_delete = []
+ tokens = load_auth_tokens()
+ for scope, stored_token in tokens.iteritems():
+ if stored_token == token:
+ scopes_to_delete.append(scope)
+ token_found = True
+ for scope in scopes_to_delete:
+ del tokens[scope]
+ if token_found:
+ save_auth_tokens(tokens)
+ return token_found
+
+ def remove_all_tokens(self):
+ """Removes all of the current user's tokens from the datastore."""
+ save_auth_tokens({})
+
+
+def save_auth_tokens(token_dict):
+ """Associates the tokens with the current user and writes to the datastore.
+
+ If there is no current user, the tokens are not written and this function
+ returns None.
+
+ Returns:
+ The key of the datastore entity containing the user's tokens, or None if
+ there was no current user.
+ """
+ if users.get_current_user() is None:
+ return None
+ user_tokens = TokenCollection.all().filter('user =', users.get_current_user()).get()
+ if user_tokens:
+ user_tokens.pickled_tokens = pickle.dumps(token_dict)
+ return user_tokens.put()
+ else:
+ user_tokens = TokenCollection(
+ user=users.get_current_user(),
+ pickled_tokens=pickle.dumps(token_dict))
+ return user_tokens.put()
+
+
+def load_auth_tokens():
+ """Reads a dictionary of the current user's tokens from the datastore. 
+ + If there is no current user (a user is not signed in to the app) or the user + does not have any tokens, an empty dictionary is returned. + """ + if users.get_current_user() is None: + return {} + user_tokens = TokenCollection.all().filter('user =', users.get_current_user()).get() + if user_tokens: + return pickle.loads(user_tokens.pickled_tokens) + return {} + diff --git a/gdata.py-1.2.3/src/gdata/apps/__init__.py b/gdata.py-1.2.3/src/gdata/apps/__init__.py new file mode 100755 index 0000000..682e7c8 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/apps/__init__.py @@ -0,0 +1,496 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 SIOS Technology, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains objects used with Google Apps.""" + +__author__ = 'tmatsuo@sios.com (Takashi MATSUO)' + + +import atom +import gdata + + +# XML namespaces which are often used in Google Apps entity. 
+APPS_NAMESPACE = 'http://schemas.google.com/apps/2006' +APPS_TEMPLATE = '{http://schemas.google.com/apps/2006}%s' + + +class EmailList(atom.AtomBase): + """The Google Apps EmailList element""" + + _tag = 'emailList' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +def EmailListFromString(xml_string): + return atom.CreateClassFromXMLString(EmailList, xml_string) + + +class Who(atom.AtomBase): + """The Google Apps Who element""" + + _tag = 'who' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['rel'] = 'rel' + _attributes['email'] = 'email' + + def __init__(self, rel=None, email=None, extension_elements=None, + extension_attributes=None, text=None): + self.rel = rel + self.email = email + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +def WhoFromString(xml_string): + return atom.CreateClassFromXMLString(Who, xml_string) + + +class Login(atom.AtomBase): + """The Google Apps Login element""" + + _tag = 'login' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['userName'] = 'user_name' + _attributes['password'] = 'password' + _attributes['suspended'] = 'suspended' + _attributes['admin'] = 'admin' + _attributes['changePasswordAtNextLogin'] = 'change_password' + _attributes['agreedToTerms'] = 'agreed_to_terms' + _attributes['ipWhitelisted'] = 'ip_whitelisted' + _attributes['hashFunctionName'] = 'hash_function_name' + + def __init__(self, user_name=None, 
password=None, suspended=None, + ip_whitelisted=None, hash_function_name=None, + admin=None, change_password=None, agreed_to_terms=None, + extension_elements=None, extension_attributes=None, + text=None): + self.user_name = user_name + self.password = password + self.suspended = suspended + self.admin = admin + self.change_password = change_password + self.agreed_to_terms = agreed_to_terms + self.ip_whitelisted = ip_whitelisted + self.hash_function_name = hash_function_name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def LoginFromString(xml_string): + return atom.CreateClassFromXMLString(Login, xml_string) + + +class Quota(atom.AtomBase): + """The Google Apps Quota element""" + + _tag = 'quota' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['limit'] = 'limit' + + def __init__(self, limit=None, extension_elements=None, + extension_attributes=None, text=None): + self.limit = limit + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def QuotaFromString(xml_string): + return atom.CreateClassFromXMLString(Quota, xml_string) + + +class Name(atom.AtomBase): + """The Google Apps Name element""" + + _tag = 'name' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['familyName'] = 'family_name' + _attributes['givenName'] = 'given_name' + + def __init__(self, family_name=None, given_name=None, + extension_elements=None, extension_attributes=None, text=None): + self.family_name = family_name + self.given_name = given_name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def NameFromString(xml_string): + return atom.CreateClassFromXMLString(Name, 
xml_string) + + +class Nickname(atom.AtomBase): + """The Google Apps Nickname element""" + + _tag = 'nickname' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + + def __init__(self, name=None, + extension_elements=None, extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def NicknameFromString(xml_string): + return atom.CreateClassFromXMLString(Nickname, xml_string) + + +class NicknameEntry(gdata.GDataEntry): + """A Google Apps flavor of an Atom Entry for Nickname""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}login' % APPS_NAMESPACE] = ('login', Login) + _children['{%s}nickname' % APPS_NAMESPACE] = ('nickname', Nickname) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + login=None, nickname=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.login = login + self.nickname = nickname + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def NicknameEntryFromString(xml_string): + return atom.CreateClassFromXMLString(NicknameEntry, xml_string) + + +class NicknameFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Apps Nickname feed flavor of an Atom Feed""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + 
_attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [NicknameEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def NicknameFeedFromString(xml_string): + return atom.CreateClassFromXMLString(NicknameFeed, xml_string) + + +class UserEntry(gdata.GDataEntry): + """A Google Apps flavor of an Atom Entry""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}login' % APPS_NAMESPACE] = ('login', Login) + _children['{%s}name' % APPS_NAMESPACE] = ('name', Name) + _children['{%s}quota' % APPS_NAMESPACE] = ('quota', Quota) + # This child may already be defined in GDataEntry, confirm before removing. 
+ _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', Who) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + login=None, name=None, quota=None, who=None, feed_link=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.login = login + self.name = name + self.quota = quota + self.who = who + self.feed_link = feed_link or [] + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def UserEntryFromString(xml_string): + return atom.CreateClassFromXMLString(UserEntry, xml_string) + + +class UserFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Apps User feed flavor of an Atom Feed""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [UserEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + 
items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def UserFeedFromString(xml_string): + return atom.CreateClassFromXMLString(UserFeed, xml_string) + + +class EmailListEntry(gdata.GDataEntry): + """A Google Apps EmailList flavor of an Atom Entry""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}emailList' % APPS_NAMESPACE] = ('email_list', EmailList) + # Might be able to remove this _children entry. + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + email_list=None, feed_link=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.email_list = email_list + self.feed_link = feed_link or [] + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def EmailListEntryFromString(xml_string): + return atom.CreateClassFromXMLString(EmailListEntry, xml_string) + + +class EmailListFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Apps EmailList feed flavor of an Atom Feed""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [EmailListEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, 
subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def EmailListFeedFromString(xml_string): + return atom.CreateClassFromXMLString(EmailListFeed, xml_string) + + +class EmailListRecipientEntry(gdata.GDataEntry): + """A Google Apps EmailListRecipient flavor of an Atom Entry""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', Who) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + who=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.who = who + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def EmailListRecipientEntryFromString(xml_string): + return atom.CreateClassFromXMLString(EmailListRecipientEntry, xml_string) + + +class EmailListRecipientFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Apps EmailListRecipient feed flavor of an Atom Feed""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE 
+ _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [EmailListRecipientEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def EmailListRecipientFeedFromString(xml_string): + return atom.CreateClassFromXMLString(EmailListRecipientFeed, xml_string) + + +class Property(atom.AtomBase): + """The Google Apps Property element""" + + _tag = 'property' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + _attributes['value'] = 'value' + + def __init__(self, name=None, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def PropertyFromString(xml_string): + return atom.CreateClassFromXMLString(Property, xml_string) + + +class PropertyEntry(gdata.GDataEntry): + """A Google Apps Property flavor of an Atom Entry""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = 
gdata.GDataEntry._attributes.copy() + _children['{%s}property' % APPS_NAMESPACE] = ('property', [Property]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + property=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.property = property + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def PropertyEntryFromString(xml_string): + return atom.CreateClassFromXMLString(PropertyEntry, xml_string) diff --git a/gdata.py-1.2.3/src/gdata/apps/emailsettings/__init__.py b/gdata.py-1.2.3/src/gdata/apps/emailsettings/__init__.py new file mode 100644 index 0000000..275c6a0 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/apps/emailsettings/__init__.py @@ -0,0 +1,15 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/gdata.py-1.2.3/src/gdata/apps/emailsettings/service.py b/gdata.py-1.2.3/src/gdata/apps/emailsettings/service.py new file mode 100644 index 0000000..1bf3b0d --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/apps/emailsettings/service.py @@ -0,0 +1,250 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Allow Google Apps domain administrators to set users' email settings. + + EmailSettingsService: Set various email settings. +""" + +__author__ = 'google-apps-apis@googlegroups.com' + + +import gdata.apps +import gdata.apps.service +import gdata.service + + +API_VER='2.0' +# Forwarding and POP3 options +KEEP='KEEP' +ARCHIVE='ARCHIVE' +DELETE='DELETE' +ALL_MAIL='ALL_MAIL' +MAIL_FROM_NOW_ON='MAIL_FROM_NOW_ON' + + +class EmailSettingsService(gdata.apps.service.PropertyService): + """Client for the Google Apps Email Settings service.""" + + def _serviceUrl(self, setting_id, username, domain=None): + if domain is None: + domain = self.domain + return '/a/feeds/emailsettings/%s/%s/%s/%s' % (API_VER, domain, username, + setting_id) + + def _bool2str(self, b): + if b is None: + return None + return str(b is True).lower() + + def CreateLabel(self, username, label): + """Create a label. + + Args: + username: User to create label for. + label: Label to create. + + Returns: + A dict containing the result of the create operation. 
+ """ + uri = self._serviceUrl('label', username) + properties = {'label': label} + return self._PostProperties(uri, properties) + + def CreateFilter(self, username, from_=None, to=None, subject=None, + has_the_word=None, does_not_have_the_word=None, + has_attachment=None, label=None, should_mark_as_read=None, + should_archive=None): + """Create a filter. + + Args: + username: User to create filter for. + from_: Filter from string. + to: Filter to string. + subject: Filter subject. + has_the_word: Words to filter in. + does_not_have_the_word: Words to filter out. + has_attachment: Boolean for message having attachment. + label: Label to apply. + should_mark_as_read: Boolean for marking message as read. + should_archive: Boolean for archiving message. + + Returns: + A dict containing the result of the create operation. + """ + uri = self._serviceUrl('filter', username) + properties = {} + properties['from'] = from_ + properties['to'] = to + properties['subject'] = subject + properties['hasTheWord'] = has_the_word + properties['doesNotHaveTheWord'] = does_not_have_the_word + properties['hasAttachment'] = self._bool2str(has_attachment) + properties['label'] = label + properties['shouldMarkAsRead'] = self._bool2str(should_mark_as_read) + properties['shouldArchive'] = self._bool2str(should_archive) + return self._PostProperties(uri, properties) + + def CreateSendAsAlias(self, username, name, address, reply_to=None, + make_default=None): + """Create alias to send mail as. + + Args: + username: User to create alias for. + name: Name of alias. + address: Email address to send from. + reply_to: Email address to reply to. + make_default: Boolean for whether this is the new default sending alias. + + Returns: + A dict containing the result of the create operation. 
+ """ + uri = self._serviceUrl('sendas', username) + properties = {} + properties['name'] = name + properties['address'] = address + properties['replyTo'] = reply_to + properties['makeDefault'] = self._bool2str(make_default) + return self._PostProperties(uri, properties) + + def UpdateForwarding(self, username, enable, forward_to=None, action=None): + """Update forwarding settings. + + Args: + username: User to update forwarding for. + enable: Boolean whether to enable this forwarding rule. + forward_to: Email address to forward to. + action: Action to take after forwarding. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('forwarding', username) + properties = {} + properties['enable'] = self._bool2str(enable) + if enable is True: + properties['forwardTo'] = forward_to + properties['action'] = action + return self._PutProperties(uri, properties) + + def UpdatePop(self, username, enable, enable_for=None, action=None): + """Update POP3 settings. + + Args: + username: User to update POP3 settings for. + enable: Boolean whether to enable POP3. + enable_for: Which messages to make available via POP3. + action: Action to take after user retrieves email via POP3. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('pop', username) + properties = {} + properties['enable'] = self._bool2str(enable) + if enable is True: + properties['enableFor'] = enable_for + properties['action'] = action + return self._PutProperties(uri, properties) + + def UpdateImap(self, username, enable): + """Update IMAP settings. + + Args: + username: User to update IMAP settings for. + enable: Boolean whether to enable IMAP. + + Returns: + A dict containing the result of the update operation. 
+ """ + uri = self._serviceUrl('imap', username) + properties = {'enable': self._bool2str(enable)} + return self._PutProperties(uri, properties) + + def UpdateVacation(self, username, enable, subject=None, message=None, + contacts_only=None): + """Update vacation settings. + + Args: + username: User to update vacation settings for. + enable: Boolean whether to enable vacation responses. + subject: Vacation message subject. + message: Vacation message body. + contacts_only: Boolean whether to send message only to contacts. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('vacation', username) + properties = {} + properties['enable'] = self._bool2str(enable) + if enable is True: + properties['subject'] = subject + properties['message'] = message + properties['contactsOnly'] = self._bool2str(contacts_only) + return self._PutProperties(uri, properties) + + def UpdateSignature(self, username, signature): + """Update signature. + + Args: + username: User to update signature for. + signature: Signature string. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('signature', username) + properties = {'signature': signature} + return self._PutProperties(uri, properties) + + def UpdateLanguage(self, username, language): + """Update user interface language. + + Args: + username: User to update language for. + language: Language code. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('language', username) + properties = {'language': language} + return self._PutProperties(uri, properties) + + def UpdateGeneral(self, username, page_size=None, shortcuts=None, arrows=None, + snippets=None, unicode=None): + """Update general settings. + + Args: + username: User to update general settings for. + page_size: Number of messages to show. + shortcuts: Boolean whether shortcuts are enabled. + arrows: Boolean whether arrows are enabled. 
+ snippets: Boolean whether snippets are enabled. + unicode: Wheter unicode is enabled. + + Returns: + A dict containing the result of the update operation. + """ + uri = self._serviceUrl('general', username) + properties = {} + properties['pageSize'] = str(page_size) + properties['shortcuts'] = self._bool2str(shortcuts) + properties['arrows'] = self._bool2str(arrows) + properties['snippets'] = self._bool2str(snippets) + properties['unicode'] = self._bool2str(unicode) + return self._PutProperties(uri, properties) diff --git a/gdata.py-1.2.3/src/gdata/apps/migration/__init__.py b/gdata.py-1.2.3/src/gdata/apps/migration/__init__.py new file mode 100644 index 0000000..9892671 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/apps/migration/__init__.py @@ -0,0 +1,212 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains objects used with Google Apps.""" + +__author__ = 'google-apps-apis@googlegroups.com' + + +import atom +import gdata + + +# XML namespaces which are often used in Google Apps entity. 
+APPS_NAMESPACE = 'http://schemas.google.com/apps/2006' +APPS_TEMPLATE = '{http://schemas.google.com/apps/2006}%s' + + +class Rfc822Msg(atom.AtomBase): + """The Migration rfc822Msg element.""" + + _tag = 'rfc822Msg' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['encoding'] = 'encoding' + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.encoding = 'base64' + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def Rfc822MsgFromString(xml_string): + """Parse in the Rrc822 message from the XML definition.""" + + return atom.CreateClassFromXMLString(Rfc822Msg, xml_string) + + +class MailItemProperty(atom.AtomBase): + """The Migration mailItemProperty element.""" + + _tag = 'mailItemProperty' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def MailItemPropertyFromString(xml_string): + """Parse in the MailItemProperiy from the XML definition.""" + + return atom.CreateClassFromXMLString(MailItemProperty, xml_string) + + +class Label(atom.AtomBase): + """The Migration label element.""" + + _tag = 'label' + _namespace = APPS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['labelName'] = 'label_name' + + def __init__(self, label_name=None, + extension_elements=None, extension_attributes=None, + text=None): + self.label_name = label_name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = 
extension_attributes or {} + + +def LabelFromString(xml_string): + """Parse in the mailItemProperty from the XML definition.""" + + return atom.CreateClassFromXMLString(Label, xml_string) + + +class MailEntry(gdata.GDataEntry): + """A Google Migration flavor of an Atom Entry.""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}rfc822Msg' % APPS_NAMESPACE] = ('rfc822_msg', Rfc822Msg) + _children['{%s}mailItemProperty' % APPS_NAMESPACE] = ('mail_item_property', + [MailItemProperty]) + _children['{%s}label' % APPS_NAMESPACE] = ('label', [Label]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + rfc822_msg=None, mail_item_property=None, label=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated) + self.rfc822_msg = rfc822_msg + self.mail_item_property = mail_item_property + self.label = label + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def MailEntryFromString(xml_string): + """Parse in the MailEntry from the XML definition.""" + + return atom.CreateClassFromXMLString(MailEntry, xml_string) + + +class BatchMailEntry(gdata.BatchEntry): + """A Google Migration flavor of an Atom Entry.""" + + _tag = gdata.BatchEntry._tag + _namespace = gdata.BatchEntry._namespace + _children = gdata.BatchEntry._children.copy() + _attributes = gdata.BatchEntry._attributes.copy() + _children['{%s}rfc822Msg' % APPS_NAMESPACE] = ('rfc822_msg', Rfc822Msg) + _children['{%s}mailItemProperty' % APPS_NAMESPACE] = ('mail_item_property', + 
[MailItemProperty]) + _children['{%s}label' % APPS_NAMESPACE] = ('label', [Label]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + rfc822_msg=None, mail_item_property=None, label=None, + batch_operation=None, batch_id=None, batch_status=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.BatchEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + batch_operation=batch_operation, + batch_id=batch_id, batch_status=batch_status, + title=title, updated=updated) + self.rfc822_msg = rfc822_msg or None + self.mail_item_property = mail_item_property or [] + self.label = label or [] + self.extended_property = extended_property or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def BatchMailEntryFromString(xml_string): + """Parse in the BatchMailEntry from the XML definition.""" + + return atom.CreateClassFromXMLString(BatchMailEntry, xml_string) + + +class BatchMailEventFeed(gdata.BatchFeed): + """A Migration event feed flavor of an Atom Feed.""" + + _tag = gdata.BatchFeed._tag + _namespace = gdata.BatchFeed._namespace + _children = gdata.BatchFeed._children.copy() + _attributes = gdata.BatchFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BatchMailEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, interrupted=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.BatchFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, 
atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + interrupted=interrupted, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def BatchMailEventFeedFromString(xml_string): + """Parse in the BatchMailEventFeed from the XML definition.""" + + return atom.CreateClassFromXMLString(BatchMailEventFeed, xml_string) diff --git a/gdata.py-1.2.3/src/gdata/apps/migration/service.py b/gdata.py-1.2.3/src/gdata/apps/migration/service.py new file mode 100644 index 0000000..6319995 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/apps/migration/service.py @@ -0,0 +1,129 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains the methods to import mail via Google Apps Email Migration API. + + MigrationService: Provides methids to import mail. +""" + +__author__ = 'google-apps-apis@googlegroups.com' + + +import base64 +import gdata +import gdata.apps.service +import gdata.service +from gdata.apps import migration + + +API_VER = '2.0' + + +class MigrationService(gdata.apps.service.AppsService): + """Client for the EMAPI migration service. Use either ImportMail to import + one message at a time, or AddBatchEntry and SubmitBatch to import a batch of + messages at a time. 
+ """ + def __init__(self, email=None, password=None, domain=None, source=None, + server='apps-apis.google.com', additional_headers=None): + gdata.apps.service.AppsService.__init__( + self, email=email, password=password, domain=domain, source=source, + server=server, additional_headers=additional_headers) + self.mail_batch = migration.BatchMailEventFeed() + + def _BaseURL(self): + return '/a/feeds/migration/%s/%s' % (API_VER, self.domain) + + def ImportMail(self, user_name, mail_message, mail_item_properties, + mail_labels): + """Import a single mail message. + + Args: + user_name: The username to import messages to. + mail_message: An RFC822 format email message. + mail_item_properties: A list of Gmail properties to apply to the message. + mail_labels: A list of labels to apply to the message. + + Returns: + A MailEntry representing the successfully imported message. + + Raises: + AppsForYourDomainException: An error occurred importing the message. + """ + uri = '%s/%s/mail' % (self._BaseURL(), user_name) + + mail_entry = migration.MailEntry() + mail_entry.rfc822_msg = migration.Rfc822Msg(text=(base64.b64encode( + mail_message))) + mail_entry.rfc822_msg.encoding = 'base64' + mail_entry.mail_item_property = map( + lambda x: migration.MailItemProperty(value=x), mail_item_properties) + mail_entry.label = map(lambda x: migration.Label(label_name=x), + mail_labels) + + try: + return migration.MailEntryFromString(str(self.Post(mail_entry, uri))) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + + def AddBatchEntry(self, mail_message, mail_item_properties, + mail_labels): + """Add a message to the current batch that you later will submit. + + Args: + mail_message: An RFC822 format email message. + mail_item_properties: A list of Gmail properties to apply to the message. + mail_labels: A list of labels to apply to the message. + + Returns: + The length of the MailEntry representing the message. 
+ """ + mail_entry = migration.BatchMailEntry() + mail_entry.rfc822_msg = migration.Rfc822Msg(text=(base64.b64encode( + mail_message))) + mail_entry.rfc822_msg.encoding = 'base64' + mail_entry.mail_item_property = map( + lambda x: migration.MailItemProperty(value=x), mail_item_properties) + mail_entry.label = map(lambda x: migration.Label(label_name=x), + mail_labels) + + self.mail_batch.AddBatchEntry(mail_entry) + + return len(str(mail_entry)) + + def SubmitBatch(self, user_name): + """Send a all the mail items you have added to the batch to the server. + + Args: + user_name: The username to import messages to. + + Returns: + A HTTPResponse from the web service call. + + Raises: + AppsForYourDomainException: An error occurred importing the batch. + """ + uri = '%s/%s/mail/batch' % (self._BaseURL(), user_name) + + try: + self.result = self.Post(self.mail_batch, uri, + converter=migration.BatchMailEventFeedFromString) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + + self.mail_batch = migration.BatchMailEventFeed() + + return self.result diff --git a/gdata.py-1.2.3/src/gdata/apps/service.py b/gdata.py-1.2.3/src/gdata/apps/service.py new file mode 100644 index 0000000..fdfb712 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/apps/service.py @@ -0,0 +1,436 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 SIOS Technology, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__author__ = 'tmatsuo@sios.com (Takashi MATSUO)' + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import urllib +import gdata +import atom.service +import gdata.service +import gdata.apps +import atom + +API_VER="2.0" +HTTP_OK=200 + +UNKOWN_ERROR=1000 +USER_DELETED_RECENTLY=1100 +USER_SUSPENDED=1101 +DOMAIN_USER_LIMIT_EXCEEDED=1200 +DOMAIN_ALIAS_LIMIT_EXCEEDED=1201 +DOMAIN_SUSPENDED=1202 +DOMAIN_FEATURE_UNAVAILABLE=1203 +ENTITY_EXISTS=1300 +ENTITY_DOES_NOT_EXIST=1301 +ENTITY_NAME_IS_RESERVED=1302 +ENTITY_NAME_NOT_VALID=1303 +INVALID_GIVEN_NAME=1400 +INVALID_FAMILY_NAME=1401 +INVALID_PASSWORD=1402 +INVALID_USERNAME=1403 +INVALID_HASH_FUNCTION_NAME=1404 +INVALID_HASH_DIGGEST_LENGTH=1405 +INVALID_EMAIL_ADDRESS=1406 +INVALID_QUERY_PARAMETER_VALUE=1407 +TOO_MANY_RECIPIENTS_ON_EMAIL_LIST=1500 + +DEFAULT_QUOTA_LIMIT='2048' + +class Error(Exception): + pass + +class AppsForYourDomainException(Error): + + def __init__(self, response): + + Error.__init__(self, response) + try: + self.element_tree = ElementTree.fromstring(response['body']) + self.error_code = int(self.element_tree[0].attrib['errorCode']) + self.reason = self.element_tree[0].attrib['reason'] + self.invalidInput = self.element_tree[0].attrib['invalidInput'] + except: + self.error_code = UNKOWN_ERROR + +class AppsService(gdata.service.GDataService): + """Client for the Google Apps Provisioning service.""" + + def __init__(self, email=None, password=None, domain=None, source=None, + server='apps-apis.google.com', additional_headers=None): + gdata.service.GDataService.__init__(self, email=email, password=password, + service='apps', source=source, + server=server, + additional_headers=additional_headers) + self.ssl = True + self.port = 443 + self.domain = domain + + def _baseURL(self): + return "/a/feeds/%s" % 
self.domain + + def GetGeneratorFromLinkFinder(self, link_finder, func): + """returns a generator for pagination""" + yield link_finder + next = link_finder.GetNextLink() + while next is not None: + next_feed = func(str(self.Get(next.href))) + yield next_feed + next = next_feed.GetNextLink() + + def AddAllElementsFromAllPages(self, link_finder, func): + """retrieve all pages and add all elements""" + next = link_finder.GetNextLink() + while next is not None: + next_feed = func(str(self.Get(next.href))) + for a_entry in next_feed.entry: + link_finder.entry.append(a_entry) + next = next_feed.GetNextLink() + return link_finder + + def RetrievePageOfEmailLists(self, start_email_list_name=None): + """Retrieve one page of email list""" + + uri = "%s/emailList/%s" % (self._baseURL(), API_VER) + if start_email_list_name is not None: + uri += "?startEmailListName=%s" % start_email_list_name + try: + return gdata.apps.EmailListFeedFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrieveAllEmailLists(self): + """Retrieve all email list of a domain.""" + + ret = self.RetrievePageOfEmailLists() + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.EmailListFeedFromString) + + def RetrieveEmailList(self, list_name): + """Retreive a single email list by the list's name.""" + + uri = "%s/emailList/%s/%s" % ( + self._baseURL(), API_VER, list_name) + try: + return self.Get(uri, converter=gdata.apps.EmailListEntryFromString) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrieveEmailLists(self, recipient): + """Retrieve All Email List Subscriptions for an Email Address.""" + + uri = "%s/emailList/%s?recipient=%s" % ( + self._baseURL(), API_VER, recipient) + try: + ret = gdata.apps.EmailListFeedFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + # pagination + return 
self.AddAllElementsFromAllPages( + ret, gdata.apps.EmailListFeedFromString) + + def RemoveRecipientFromEmailList(self, recipient, list_name): + """Remove recipient from email list.""" + + uri = "%s/emailList/%s/%s/recipient/%s" % ( + self._baseURL(), API_VER, list_name, recipient) + try: + self.Delete(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrievePageOfRecipients(self, list_name, start_recipient=None): + """Retrieve one page of recipient of an email list. """ + + uri = "%s/emailList/%s/%s/recipient" % ( + self._baseURL(), API_VER, list_name) + + if start_recipient is not None: + uri += "?startRecipient=%s" % start_recipient + try: + return gdata.apps.EmailListRecipientFeedFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrieveAllRecipients(self, list_name): + """Retrieve all recipient of an email list.""" + + ret = self.RetrievePageOfRecipients(list_name) + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.EmailListRecipientFeedFromString) + + def AddRecipientToEmailList(self, recipient, list_name): + """Add a recipient to a email list.""" + + uri = "%s/emailList/%s/%s/recipient" % ( + self._baseURL(), API_VER, list_name) + recipient_entry = gdata.apps.EmailListRecipientEntry() + recipient_entry.who = gdata.apps.Who(email=recipient) + + try: + return gdata.apps.EmailListRecipientEntryFromString( + str(self.Post(recipient_entry, uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def DeleteEmailList(self, list_name): + """Delete a email list""" + + uri = "%s/emailList/%s/%s" % (self._baseURL(), API_VER, list_name) + try: + self.Delete(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def CreateEmailList(self, list_name): + """Create a email list. 
""" + + uri = "%s/emailList/%s" % (self._baseURL(), API_VER) + email_list_entry = gdata.apps.EmailListEntry() + email_list_entry.email_list = gdata.apps.EmailList(name=list_name) + try: + return gdata.apps.EmailListEntryFromString( + str(self.Post(email_list_entry, uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def DeleteNickname(self, nickname): + """Delete a nickname""" + + uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname) + try: + self.Delete(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrievePageOfNicknames(self, start_nickname=None): + """Retrieve one page of nicknames in the domain""" + + uri = "%s/nickname/%s" % (self._baseURL(), API_VER) + if start_nickname is not None: + uri += "?startNickname=%s" % start_nickname + try: + return gdata.apps.NicknameFeedFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrieveAllNicknames(self): + """Retrieve all nicknames in the domain""" + + ret = self.RetrievePageOfNicknames() + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.NicknameFeedFromString) + + def RetrieveNicknames(self, user_name): + """Retrieve nicknames of the user""" + + uri = "%s/nickname/%s?username=%s" % (self._baseURL(), API_VER, user_name) + try: + ret = gdata.apps.NicknameFeedFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.NicknameFeedFromString) + + def RetrieveNickname(self, nickname): + """Retrieve a nickname. 
+ + Args: + nickname: string The nickname to retrieve + + Returns: + gdata.apps.NicknameEntry + """ + + uri = "%s/nickname/%s/%s" % (self._baseURL(), API_VER, nickname) + try: + return gdata.apps.NicknameEntryFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def CreateNickname(self, user_name, nickname): + """Create a nickname""" + + uri = "%s/nickname/%s" % (self._baseURL(), API_VER) + nickname_entry = gdata.apps.NicknameEntry() + nickname_entry.login = gdata.apps.Login(user_name=user_name) + nickname_entry.nickname = gdata.apps.Nickname(name=nickname) + + try: + return gdata.apps.NicknameEntryFromString( + str(self.Post(nickname_entry, uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def DeleteUser(self, user_name): + """Delete a user account""" + + uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name) + try: + return self.Delete(uri) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def UpdateUser(self, user_name, user_entry): + """Update a user account.""" + + uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name) + try: + return gdata.apps.UserEntryFromString(str(self.Put(user_entry, uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def CreateUser(self, user_name, family_name, given_name, password, + suspended='false', quota_limit=None, + password_hash_function=None): + """Create a user account. 
""" + + uri = "%s/user/%s" % (self._baseURL(), API_VER) + user_entry = gdata.apps.UserEntry() + user_entry.login = gdata.apps.Login( + user_name=user_name, password=password, suspended=suspended, + hash_function_name=password_hash_function) + user_entry.name = gdata.apps.Name(family_name=family_name, + given_name=given_name) + if quota_limit is not None: + user_entry.quota = gdata.apps.Quota(limit=str(quota_limit)) + + try: + return gdata.apps.UserEntryFromString(str(self.Post(user_entry, uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def SuspendUser(self, user_name): + user_entry = self.RetrieveUser(user_name) + if user_entry.login.suspended != 'true': + user_entry.login.suspended = 'true' + user_entry = self.UpdateUser(user_name, user_entry) + return user_entry + + def RestoreUser(self, user_name): + user_entry = self.RetrieveUser(user_name) + if user_entry.login.suspended != 'false': + user_entry.login.suspended = 'false' + user_entry = self.UpdateUser(user_name, user_entry) + return user_entry + + def RetrieveUser(self, user_name): + """Retrieve an user account. 
+ + Args: + user_name: string The user name to retrieve + + Returns: + gdata.apps.UserEntry + """ + + uri = "%s/user/%s/%s" % (self._baseURL(), API_VER, user_name) + try: + return gdata.apps.UserEntryFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def RetrievePageOfUsers(self, start_username=None): + """Retrieve one page of users in this domain.""" + + uri = "%s/user/%s" % (self._baseURL(), API_VER) + if start_username is not None: + uri += "?startUsername=%s" % start_username + try: + return gdata.apps.UserFeedFromString(str(self.Get(uri))) + except gdata.service.RequestError, e: + raise AppsForYourDomainException(e.args[0]) + + def GetGeneratorForAllUsers(self): + """Retrieve a generator for all users in this domain.""" + first_page = self.RetrievePageOfUsers() + return self.GetGeneratorFromLinkFinder(first_page, + gdata.apps.UserFeedFromString) + + def RetrieveAllUsers(self): + """Retrieve all users in this domain. 
OBSOLETE""" + + ret = self.RetrievePageOfUsers() + # pagination + return self.AddAllElementsFromAllPages( + ret, gdata.apps.UserFeedFromString) + + +class PropertyService(gdata.service.GDataService): + """Client for the Google Apps Property service.""" + + def __init__(self, email=None, password=None, domain=None, source=None, + server='apps-apis.google.com', additional_headers=None): + gdata.service.GDataService.__init__(self, email=email, password=password, + service='apps', source=source, + server=server, + additional_headers=additional_headers) + self.ssl = True + self.port = 443 + self.domain = domain + + def _GetPropertyEntry(self, properties): + property_entry = gdata.apps.PropertyEntry() + property = [] + for name, value in properties.iteritems(): + if name is not None and value is not None: + property.append(gdata.apps.Property(name=name, value=value)) + property_entry.property = property + return property_entry + + def _PropertyEntry2Dict(self, property_entry): + properties = {} + for i, property in enumerate(property_entry.property): + properties[property.name] = property.value + return properties + + def _GetProperties(self, uri): + try: + return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString( + str(self.Get(uri)))) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + + def _PostProperties(self, uri, properties): + property_entry = self._GetPropertyEntry(properties) + try: + return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString( + str(self.Post(property_entry, uri)))) + except gdata.service.RequestError, e: + raise gdata.apps.service.AppsForYourDomainException(e.args[0]) + + def _PutProperties(self, uri, properties): + property_entry = self._GetPropertyEntry(properties) + try: + return self._PropertyEntry2Dict(gdata.apps.PropertyEntryFromString( + str(self.Put(property_entry, uri)))) + except gdata.service.RequestError, e: + raise 
gdata.apps.service.AppsForYourDomainException(e.args[0]) diff --git a/gdata.py-1.2.3/src/gdata/auth.py b/gdata.py-1.2.3/src/gdata/auth.py new file mode 100644 index 0000000..803c247 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/auth.py @@ -0,0 +1,929 @@ +#!/usr/bin/python +# +# Copyright (C) 2007, 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import cgi +import math +import random +import re +import time +import types +import urllib +import atom.http_interface +import atom.token_store +import atom.url +import gdata.oauth as oauth +import gdata.oauth.rsa as oauth_rsa +import gdata.tlslite.utils.keyfactory as keyfactory +import gdata.tlslite.utils.cryptomath as cryptomath + +__author__ = 'api.jscudder (Jeff Scudder)' + + +PROGRAMMATIC_AUTH_LABEL = 'GoogleLogin auth=' +AUTHSUB_AUTH_LABEL = 'AuthSub token=' + + +"""This module provides functions and objects used with Google authentication. + +Details on Google authorization mechanisms used with the Google Data APIs can +be found here: +http://code.google.com/apis/gdata/auth.html +http://code.google.com/apis/accounts/ + +The essential functions are the following. +Related to ClientLogin: + generate_client_login_request_body: Constructs the body of an HTTP request to + obtain a ClientLogin token for a specific + service. + extract_client_login_token: Creates a ClientLoginToken with the token from a + success response to a ClientLogin request. 
+ get_captcha_challenge: If the server responded to the ClientLogin request + with a CAPTCHA challenge, this method extracts the + CAPTCHA URL and identifying CAPTCHA token. + +Related to AuthSub: + generate_auth_sub_url: Constructs a full URL for a AuthSub request. The + user's browser must be sent to this Google Accounts + URL and redirected back to the app to obtain the + AuthSub token. + extract_auth_sub_token_from_url: Once the user's browser has been + redirected back to the web app, use this + function to create an AuthSubToken with + the correct authorization token and scope. + token_from_http_body: Extracts the AuthSubToken value string from the + server's response to an AuthSub session token upgrade + request. +""" + +def generate_client_login_request_body(email, password, service, source, + account_type='HOSTED_OR_GOOGLE', captcha_token=None, + captcha_response=None): + """Creates the body of the autentication request + + See http://code.google.com/apis/accounts/AuthForInstalledApps.html#Request + for more details. + + Args: + email: str + password: str + service: str + source: str + account_type: str (optional) Defaul is 'HOSTED_OR_GOOGLE', other valid + values are 'GOOGLE' and 'HOSTED' + captcha_token: str (optional) + captcha_response: str (optional) + + Returns: + The HTTP body to send in a request for a client login token. + """ + # Create a POST body containing the user's credentials. + request_fields = {'Email': email, + 'Passwd': password, + 'accountType': account_type, + 'service': service, + 'source': source} + if captcha_token and captcha_response: + # Send the captcha token and response as part of the POST body if the + # user is responding to a captch challenge. 
+ request_fields['logintoken'] = captcha_token + request_fields['logincaptcha'] = captcha_response + return urllib.urlencode(request_fields) + + +GenerateClientLoginRequestBody = generate_client_login_request_body + + +def GenerateClientLoginAuthToken(http_body): + """Returns the token value to use in Authorization headers. + + Reads the token from the server's response to a Client Login request and + creates header value to use in requests. + + Args: + http_body: str The body of the server's HTTP response to a Client Login + request + + Returns: + The value half of an Authorization header. + """ + token = get_client_login_token(http_body) + if token: + return 'GoogleLogin auth=%s' % token + return None + + +def get_client_login_token(http_body): + """Returns the token value for a ClientLoginToken. + + Reads the token from the server's response to a Client Login request and + creates the token value string to use in requests. + + Args: + http_body: str The body of the server's HTTP response to a Client Login + request + + Returns: + The token value string for a ClientLoginToken. + """ + for response_line in http_body.splitlines(): + if response_line.startswith('Auth='): + # Strip off the leading Auth= and return the Authorization value. + return response_line[5:] + return None + + +def extract_client_login_token(http_body, scopes): + """Parses the server's response and returns a ClientLoginToken. + + Args: + http_body: str The body of the server's HTTP response to a Client Login + request. It is assumed that the login request was successful. + scopes: list containing atom.url.Urls or strs. The scopes list contains + all of the partial URLs under which the client login token is + valid. For example, if scopes contains ['http://example.com/foo'] + then the client login token would be valid for + http://example.com/foo/bar/baz + + Returns: + A ClientLoginToken which is valid for the specified scopes. 
+ """ + token_string = get_client_login_token(http_body) + token = ClientLoginToken(scopes=scopes) + token.set_token_string(token_string) + return token + + +def get_captcha_challenge(http_body, + captcha_base_url='http://www.google.com/accounts/'): + """Returns the URL and token for a CAPTCHA challenge issued by the server. + + Args: + http_body: str The body of the HTTP response from the server which + contains the CAPTCHA challenge. + captcha_base_url: str This function returns a full URL for viewing the + challenge image which is built from the server's response. This + base_url is used as the beginning of the URL because the server + only provides the end of the URL. For example the server provides + 'Captcha?ctoken=Hi...N' and the URL for the image is + 'http://www.google.com/accounts/Captcha?ctoken=Hi...N' + + Returns: + A dictionary containing the information needed to repond to the CAPTCHA + challenge, the image URL and the ID token of the challenge. The + dictionary is in the form: + {'token': string identifying the CAPTCHA image, + 'url': string containing the URL of the image} + Returns None if there was no CAPTCHA challenge in the response. 
+ """ + contains_captcha_challenge = False + captcha_parameters = {} + for response_line in http_body.splitlines(): + if response_line.startswith('Error=CaptchaRequired'): + contains_captcha_challenge = True + elif response_line.startswith('CaptchaToken='): + # Strip off the leading CaptchaToken= + captcha_parameters['token'] = response_line[13:] + elif response_line.startswith('CaptchaUrl='): + captcha_parameters['url'] = '%s%s' % (captcha_base_url, + response_line[11:]) + if contains_captcha_challenge: + return captcha_parameters + else: + return None + + +GetCaptchaChallenge = get_captcha_challenge + + +def GenerateOAuthRequestTokenUrl( + oauth_input_params, scopes, + request_token_url='https://www.google.com/accounts/OAuthGetRequestToken', + extra_parameters=None): + """Generate a URL at which a request for OAuth request token is to be sent. + + Args: + oauth_input_params: OAuthInputParams OAuth input parameters. + scopes: list of strings The URLs of the services to be accessed. + request_token_url: string The beginning of the request token URL. This is + normally 'https://www.google.com/accounts/OAuthGetRequestToken' or + '/accounts/OAuthGetRequestToken' + extra_parameters: dict (optional) key-value pairs as any additional + parameters to be included in the URL and signature while making a + request for fetching an OAuth request token. All the OAuth parameters + are added by default. But if provided through this argument, any + default parameters will be overwritten. For e.g. a default parameter + oauth_version 1.0 can be overwritten if + extra_parameters = {'oauth_version': '2.0'} + + Returns: + atom.url.Url OAuth request token URL. 
+ """ + scopes_string = ' '.join([str(scope) for scope in scopes]) + parameters = {'scope': scopes_string} + if extra_parameters: + parameters.update(extra_parameters) + oauth_request = oauth.OAuthRequest.from_consumer_and_token( + oauth_input_params.GetConsumer(), http_url=request_token_url, + parameters=parameters) + oauth_request.sign_request(oauth_input_params.GetSignatureMethod(), + oauth_input_params.GetConsumer(), None) + return atom.url.parse_url(oauth_request.to_url()) + + +def GenerateOAuthAuthorizationUrl( + request_token, + authorization_url='https://www.google.com/accounts/OAuthAuthorizeToken', + callback_url=None, extra_params=None, + include_scopes_in_callback=False, scopes_param_prefix='oauth_token_scope'): + """Generates URL at which user will login to authorize the request token. + + Args: + request_token: gdata.auth.OAuthToken OAuth request token. + authorization_url: string The beginning of the authorization URL. This is + normally 'https://www.google.com/accounts/OAuthAuthorizeToken' or + '/accounts/OAuthAuthorizeToken' + callback_url: string (optional) The URL user will be sent to after + logging in and granting access. + extra_params: dict (optional) Additional parameters to be sent. + include_scopes_in_callback: Boolean (default=False) if set to True, and + if 'callback_url' is present, the 'callback_url' will be modified to + include the scope(s) from the request token as a URL parameter. The + key for the 'callback' URL's scope parameter will be + OAUTH_SCOPE_URL_PARAM_NAME. The benefit of including the scope URL as + a parameter to the 'callback' URL, is that the page which receives + the OAuth token will be able to tell which URLs the token grants + access to. + scopes_param_prefix: string (default='oauth_token_scope') The URL + parameter key which maps to the list of valid scopes for the token. + This URL parameter will be included in the callback URL along with + the scopes of the token as value if include_scopes_in_callback=True. 
+ + Returns: + atom.url.Url OAuth authorization URL. + """ + scopes = request_token.scopes + if isinstance(scopes, list): + scopes = ' '.join(scopes) + if include_scopes_in_callback and callback_url: + if callback_url.find('?') > -1: + callback_url += '&' + else: + callback_url += '?' + callback_url += urllib.urlencode({scopes_param_prefix:scopes}) + oauth_token = oauth.OAuthToken(request_token.key, request_token.secret) + oauth_request = oauth.OAuthRequest.from_token_and_callback( + token=oauth_token, callback=callback_url, + http_url=authorization_url, parameters=extra_params) + return atom.url.parse_url(oauth_request.to_url()) + + +def GenerateOAuthAccessTokenUrl( + authorized_request_token, + oauth_input_params, + access_token_url='https://www.google.com/accounts/OAuthGetAccessToken', + oauth_version='1.0'): + """Generates URL at which user will login to authorize the request token. + + Args: + authorized_request_token: gdata.auth.OAuthToken OAuth authorized request + token. + oauth_input_params: OAuthInputParams OAuth input parameters. + access_token_url: string The beginning of the authorization URL. This is + normally 'https://www.google.com/accounts/OAuthGetAccessToken' or + '/accounts/OAuthGetAccessToken' + oauth_version: str (default='1.0') oauth_version parameter. + + Returns: + atom.url.Url OAuth access token URL. 
+ """ + oauth_token = oauth.OAuthToken(authorized_request_token.key, + authorized_request_token.secret) + oauth_request = oauth.OAuthRequest.from_consumer_and_token( + oauth_input_params.GetConsumer(), token=oauth_token, + http_url=access_token_url, parameters={'oauth_version': oauth_version}) + oauth_request.sign_request(oauth_input_params.GetSignatureMethod(), + oauth_input_params.GetConsumer(), oauth_token) + return atom.url.parse_url(oauth_request.to_url()) + + +def GenerateAuthSubUrl(next, scope, secure=False, session=True, + request_url='https://www.google.com/accounts/AuthSubRequest', + domain='default'): + """Generate a URL at which the user will login and be redirected back. + + Users enter their credentials on a Google login page and a token is sent + to the URL specified in next. See documentation for AuthSub login at: + http://code.google.com/apis/accounts/AuthForWebApps.html + + Args: + request_url: str The beginning of the request URL. This is normally + 'http://www.google.com/accounts/AuthSubRequest' or + '/accounts/AuthSubRequest' + next: string The URL user will be sent to after logging in. + scope: string The URL of the service to be accessed. + secure: boolean (optional) Determines whether or not the issued token + is a secure token. + session: boolean (optional) Determines whether or not the issued token + can be upgraded to a session token. + domain: str (optional) The Google Apps domain for this account. If this + is not a Google Apps account, use 'default' which is the default + value. + """ + # Translate True/False values for parameters into numeric values acceoted + # by the AuthSub service. 
+ if secure: + secure = 1 + else: + secure = 0 + + if session: + session = 1 + else: + session = 0 + + request_params = urllib.urlencode({'next': next, 'scope': scope, + 'secure': secure, 'session': session, + 'hd': domain}) + if request_url.find('?') == -1: + return '%s?%s' % (request_url, request_params) + else: + # The request URL already contained url parameters so we should add + # the parameters using the & seperator + return '%s&%s' % (request_url, request_params) + + +def generate_auth_sub_url(next, scopes, secure=False, session=True, + request_url='https://www.google.com/accounts/AuthSubRequest', + domain='default', scopes_param_prefix='auth_sub_scopes'): + """Constructs a URL string for requesting a multiscope AuthSub token. + + The generated token will contain a URL parameter to pass along the + requested scopes to the next URL. When the Google Accounts page + redirects the broswser to the 'next' URL, it appends the single use + AuthSub token value to the URL as a URL parameter with the key 'token'. + However, the information about which scopes were requested is not + included by Google Accounts. This method adds the scopes to the next + URL before making the request so that the redirect will be sent to + a page, and both the token value and the list of scopes can be + extracted from the request URL. + + Args: + next: atom.url.URL or string The URL user will be sent to after + authorizing this web application to access their data. + scopes: list containint strings The URLs of the services to be accessed. + secure: boolean (optional) Determines whether or not the issued token + is a secure token. + session: boolean (optional) Determines whether or not the issued token + can be upgraded to a session token. + request_url: atom.url.Url or str The beginning of the request URL. This + is normally 'http://www.google.com/accounts/AuthSubRequest' or + '/accounts/AuthSubRequest' + domain: The domain which the account is part of. 
This is used for Google + Apps accounts, the default value is 'default' which means that the + requested account is a Google Account (@gmail.com for example) + scopes_param_prefix: str (optional) The requested scopes are added as a + URL parameter to the next URL so that the page at the 'next' URL can + extract the token value and the valid scopes from the URL. The key + for the URL parameter defaults to 'auth_sub_scopes' + + Returns: + An atom.url.Url which the user's browser should be directed to in order + to authorize this application to access their information. + """ + if isinstance(next, (str, unicode)): + next = atom.url.parse_url(next) + scopes_string = ' '.join([str(scope) for scope in scopes]) + next.params[scopes_param_prefix] = scopes_string + + if isinstance(request_url, (str, unicode)): + request_url = atom.url.parse_url(request_url) + request_url.params['next'] = str(next) + request_url.params['scope'] = scopes_string + if session: + request_url.params['session'] = 1 + else: + request_url.params['session'] = 0 + if secure: + request_url.params['secure'] = 1 + else: + request_url.params['secure'] = 0 + request_url.params['hd'] = domain + return request_url + + +def AuthSubTokenFromUrl(url): + """Extracts the AuthSub token from the URL. + + Used after the AuthSub redirect has sent the user to the 'next' page and + appended the token to the URL. This function returns the value to be used + in the Authorization header. + + Args: + url: str The URL of the current page which contains the AuthSub token as + a URL parameter. + """ + token = TokenFromUrl(url) + if token: + return 'AuthSub token=%s' % token + return None + + +def TokenFromUrl(url): + """Extracts the AuthSub token from the URL. + + Returns the raw token value. + + Args: + url: str The URL or the query portion of the URL string (after the ?) of + the current page which contains the AuthSub token as a URL parameter. 
+ """ + if url.find('?') > -1: + query_params = url.split('?')[1] + else: + query_params = url + for pair in query_params.split('&'): + if pair.startswith('token='): + return pair[6:] + return None + + +def extract_auth_sub_token_from_url(url, + scopes_param_prefix='auth_sub_scopes', rsa_key=None): + """Creates an AuthSubToken and sets the token value and scopes from the URL. + + After the Google Accounts AuthSub pages redirect the user's broswer back to + the web application (using the 'next' URL from the request) the web app must + extract the token from the current page's URL. The token is provided as a + URL parameter named 'token' and if generate_auth_sub_url was used to create + the request, the token's valid scopes are included in a URL parameter whose + name is specified in scopes_param_prefix. + + Args: + url: atom.url.Url or str representing the current URL. The token value + and valid scopes should be included as URL parameters. + scopes_param_prefix: str (optional) The URL parameter key which maps to + the list of valid scopes for the token. + + Returns: + An AuthSubToken with the token value from the URL and set to be valid for + the scopes passed in on the URL. If no scopes were included in the URL, + the AuthSubToken defaults to being valid for no scopes. If there was no + 'token' parameter in the URL, this function returns None. + """ + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + if 'token' not in url.params: + return None + scopes = [] + if scopes_param_prefix in url.params: + scopes = url.params[scopes_param_prefix].split(' ') + token_value = url.params['token'] + if rsa_key: + token = SecureAuthSubToken(rsa_key, scopes=scopes) + else: + token = AuthSubToken(scopes=scopes) + token.set_token_string(token_value) + return token + + +def AuthSubTokenFromHttpBody(http_body): + """Extracts the AuthSub token from an HTTP body string. 
+ + Used to find the new session token after making a request to upgrade a + single use AuthSub token. + + Args: + http_body: str The repsonse from the server which contains the AuthSub + key. For example, this function would find the new session token + from the server's response to an upgrade token request. + + Returns: + The header value to use for Authorization which contains the AuthSub + token. + """ + token_value = token_from_http_body(http_body) + if token_value: + return '%s%s' % (AUTHSUB_AUTH_LABEL, token_value) + return None + + +def token_from_http_body(http_body): + """Extracts the AuthSub token from an HTTP body string. + + Used to find the new session token after making a request to upgrade a + single use AuthSub token. + + Args: + http_body: str The repsonse from the server which contains the AuthSub + key. For example, this function would find the new session token + from the server's response to an upgrade token request. + + Returns: + The raw token value to use in an AuthSubToken object. + """ + for response_line in http_body.splitlines(): + if response_line.startswith('Token='): + # Strip off Token= and return the token value string. + return response_line[6:] + return None + + +TokenFromHttpBody = token_from_http_body + + +def OAuthTokenFromUrl(url, scopes_param_prefix='oauth_token_scope'): + """Creates an OAuthToken and sets token key and scopes (if present) from URL. + + After the Google Accounts OAuth pages redirect the user's broswer back to + the web application (using the 'callback' URL from the request) the web app + can extract the token from the current page's URL. The token is same as the + request token, but it is either authorized (if user grants access) or + unauthorized (if user denies access). 
The token is provided as a + URL parameter named 'oauth_token' and if it was chosen to use + GenerateOAuthAuthorizationUrl with include_scopes_in_param=True, the token's + valid scopes are included in a URL parameter whose name is specified in + scopes_param_prefix. + + Args: + url: atom.url.Url or str representing the current URL. The token value + and valid scopes should be included as URL parameters. + scopes_param_prefix: str (optional) The URL parameter key which maps to + the list of valid scopes for the token. + + Returns: + An OAuthToken with the token key from the URL and set to be valid for + the scopes passed in on the URL. If no scopes were included in the URL, + the OAuthToken defaults to being valid for no scopes. If there was no + 'oauth_token' parameter in the URL, this function returns None. + """ + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + if 'oauth_token' not in url.params: + return None + scopes = [] + if scopes_param_prefix in url.params: + scopes = url.params[scopes_param_prefix].split(' ') + token_key = url.params['oauth_token'] + token = OAuthToken(key=token_key, scopes=scopes) + return token + + +def OAuthTokenFromHttpBody(http_body): + """Parses the HTTP response body and returns an OAuth token. + + The returned OAuth token will just have key and secret parameters set. + It won't have any knowledge about the scopes or oauth_input_params. It is + your responsibility to make it aware of the remaining parameters. + + Returns: + OAuthToken OAuth token. + """ + token = oauth.OAuthToken.from_string(http_body) + oauth_token = OAuthToken(key=token.key, secret=token.secret) + return oauth_token + + +class OAuthSignatureMethod(object): + """Holds valid OAuth signature methods. + + RSA_SHA1: Class to build signature according to RSA-SHA1 algorithm. + HMAC_SHA1: Class to build signature according to HMAC-SHA1 algorithm. 
+ """ + + HMAC_SHA1 = oauth.OAuthSignatureMethod_HMAC_SHA1 + + class RSA_SHA1(oauth_rsa.OAuthSignatureMethod_RSA_SHA1): + """Provides implementation for abstract methods to return RSA certs.""" + + def __init__(self, private_key, public_cert): + self.private_key = private_key + self.public_cert = public_cert + + def _fetch_public_cert(self, unused_oauth_request): + return self.public_cert + + def _fetch_private_cert(self, unused_oauth_request): + return self.private_key + + +class OAuthInputParams(object): + """Stores OAuth input parameters. + + This class is a store for OAuth input parameters viz. consumer key and secret, + signature method and RSA key. + """ + + def __init__(self, signature_method, consumer_key, consumer_secret=None, + rsa_key=None): + """Initializes object with parameters required for using OAuth mechanism. + + NOTE: Though consumer_secret and rsa_key are optional, either of the two + is required depending on the value of the signature_method. + + Args: + signature_method: class which provides implementation for strategy class + oauth.oauth.OAuthSignatureMethod. Signature method to be used for + signing each request. Valid implementations are provided as the + constants defined by gdata.auth.OAuthSignatureMethod. Currently + they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and + gdata.auth.OAuthSignatureMethod.HMAC_SHA1 + consumer_key: string Domain identifying third_party web application. + consumer_secret: string (optional) Secret generated during registration. + Required only for HMAC_SHA1 signature method. + rsa_key: string (optional) Private key required for RSA_SHA1 signature + method. + """ + if signature_method == OAuthSignatureMethod.RSA_SHA1: + self._signature_method = signature_method(rsa_key, None) + else: + self._signature_method = signature_method() + self._consumer = oauth.OAuthConsumer(consumer_key, consumer_secret) + + def GetSignatureMethod(self): + """Gets the OAuth signature method. 
+ + Returns: + object of supertype + """ + return self._signature_method + + def GetConsumer(self): + """Gets the OAuth consumer. + + Returns: + object of type + """ + return self._consumer + + +class ClientLoginToken(atom.http_interface.GenericToken): + """Stores the Authorization header in auth_header and adds to requests. + + This token will add it's Authorization header to an HTTP request + as it is made. Ths token class is simple but + some Token classes must calculate portions of the Authorization header + based on the request being made, which is why the token is responsible + for making requests via an http_client parameter. + + Args: + auth_header: str The value for the Authorization header. + scopes: list of str or atom.url.Url specifying the beginnings of URLs + for which this token can be used. For example, if scopes contains + 'http://example.com/foo', then this token can be used for a request to + 'http://example.com/foo/bar' but it cannot be used for a request to + 'http://example.com/baz' + """ + def __init__(self, auth_header=None, scopes=None): + self.auth_header = auth_header + self.scopes = scopes or [] + + def __str__(self): + return self.auth_header + + def perform_request(self, http_client, operation, url, data=None, + headers=None): + """Sets the Authorization header and makes the HTTP request.""" + if headers is None: + headers = {'Authorization':self.auth_header} + else: + headers['Authorization'] = self.auth_header + return http_client.request(operation, url, data=data, headers=headers) + + def get_token_string(self): + """Removes PROGRAMMATIC_AUTH_LABEL to give just the token value.""" + return self.auth_header[len(PROGRAMMATIC_AUTH_LABEL):] + + def set_token_string(self, token_string): + self.auth_header = '%s%s' % (PROGRAMMATIC_AUTH_LABEL, token_string) + + def valid_for_scope(self, url): + """Tells the caller if the token authorizes access to the desired URL. 
+ """ + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + for scope in self.scopes: + if scope == atom.token_store.SCOPE_ALL: + return True + if isinstance(scope, (str, unicode)): + scope = atom.url.parse_url(scope) + if scope == url: + return True + # Check the host and the path, but ignore the port and protocol. + elif scope.host == url.host and not scope.path: + return True + elif scope.host == url.host and scope.path and not url.path: + continue + elif scope.host == url.host and url.path.startswith(scope.path): + return True + return False + + +class AuthSubToken(ClientLoginToken): + def get_token_string(self): + """Removes AUTHSUB_AUTH_LABEL to give just the token value.""" + return self.auth_header[len(AUTHSUB_AUTH_LABEL):] + + def set_token_string(self, token_string): + self.auth_header = '%s%s' % (AUTHSUB_AUTH_LABEL, token_string) + + +class OAuthToken(atom.http_interface.GenericToken): + """Stores the token key, token secret and scopes for which token is valid. + + This token adds the authorization header to each request made. It + re-calculates authorization header for every request since the OAuth + signature to be added to the authorization header is dependent on the + request parameters. + + Attributes: + key: str The value for the OAuth token i.e. token key. + secret: str The value for the OAuth token secret. + scopes: list of str or atom.url.Url specifying the beginnings of URLs + for which this token can be used. For example, if scopes contains + 'http://example.com/foo', then this token can be used for a request to + 'http://example.com/foo/bar' but it cannot be used for a request to + 'http://example.com/baz' + oauth_input_params: OAuthInputParams OAuth input parameters. 
+ """ + + def __init__(self, key=None, secret=None, scopes=None, + oauth_input_params=None): + self.key = key + self.secret = secret + self.scopes = scopes or [] + self.oauth_input_params = oauth_input_params + + def __str__(self): + return self.get_token_string() + + def get_token_string(self): + """Returns the token string. + + The token string returned is of format + oauth_token=[0]&oauth_token_secret=[1], where [0] and [1] are some strings. + + Returns: + A token string of format oauth_token=[0]&oauth_token_secret=[1], + where [0] and [1] are some strings. If self.secret is absent, it just + returns oauth_token=[0]. If self.key is absent, it just returns + oauth_token_secret=[1]. If both are absent, it returns None. + """ + if self.key and self.secret: + return urllib.urlencode({'oauth_token': self.key, + 'oauth_token_secret': self.secret}) + elif self.key: + return 'oauth_token=%s' % self.key + elif self.secret: + return 'oauth_token_secret=%s' % self.secret + else: + return None + + def set_token_string(self, token_string): + """Sets the token key and secret from the token string. + + Args: + token_string: str Token string of form + oauth_token=[0]&oauth_token_secret=[1]. If oauth_token is not present, + self.key will be None. If oauth_token_secret is not present, + self.secret will be None. + """ + token_params = cgi.parse_qs(token_string, keep_blank_values=False) + if 'oauth_token' in token_params: + self.key = token_params['oauth_token'][0] + if 'oauth_token_secret' in token_params: + self.secret = token_params['oauth_token_secret'][0] + + def GetAuthHeader(self, http_method, http_url, realm=''): + """Get the authentication header. + + Args: + http_method: string HTTP method i.e. operation e.g. GET, POST, PUT, etc. + http_url: string or atom.url.Url HTTP URL to which request is made. + realm: string (default='') realm parameter to be included in the + authorization header. 
+ + Returns: + dict Header to be sent with every subsequent request after + authentication. + """ + if isinstance(http_url, types.StringTypes): + http_url = atom.url.parse_url(http_url) + header = None + token = None + if self.key or self.secret: + token = oauth.OAuthToken(self.key, self.secret) + oauth_request = oauth.OAuthRequest.from_consumer_and_token( + self.oauth_input_params.GetConsumer(), token=token, + http_url=str(http_url), http_method=http_method, + parameters=http_url.params) + oauth_request.sign_request(self.oauth_input_params.GetSignatureMethod(), + self.oauth_input_params.GetConsumer(), token) + header = oauth_request.to_header(realm=realm) + header['Authorization'] = header['Authorization'].replace('+', '%2B') + return header + + def perform_request(self, http_client, operation, url, data=None, + headers=None): + """Sets the Authorization header and makes the HTTP request.""" + if not headers: + headers = {} + headers.update(self.GetAuthHeader(operation, url)) + return http_client.request(operation, url, data=data, headers=headers) + + def valid_for_scope(self, url): + if isinstance(url, (str, unicode)): + url = atom.url.parse_url(url) + for scope in self.scopes: + if scope == atom.token_store.SCOPE_ALL: + return True + if isinstance(scope, (str, unicode)): + scope = atom.url.parse_url(scope) + if scope == url: + return True + # Check the host and the path, but ignore the port and protocol. + elif scope.host == url.host and not scope.path: + return True + elif scope.host == url.host and scope.path and not url.path: + continue + elif scope.host == url.host and url.path.startswith(scope.path): + return True + return False + + +class SecureAuthSubToken(AuthSubToken): + """Stores the rsa private key, token, and scopes for the secure AuthSub token. + + This token adds the authorization header to each request made. 
It + re-calculates authorization header for every request since the secure AuthSub + signature to be added to the authorization header is dependent on the + request parameters. + + Attributes: + rsa_key: string The RSA private key in PEM format that the token will + use to sign requests + token_string: string (optional) The value for the AuthSub token. + scopes: list of str or atom.url.Url specifying the beginnings of URLs + for which this token can be used. For example, if scopes contains + 'http://example.com/foo', then this token can be used for a request to + 'http://example.com/foo/bar' but it cannot be used for a request to + 'http://example.com/baz' + """ + + def __init__(self, rsa_key, token_string=None, scopes=None): + self.rsa_key = keyfactory.parsePEMKey(rsa_key) + self.token_string = token_string or '' + self.scopes = scopes or [] + + def __str__(self): + return self.get_token_string() + + def get_token_string(self): + return str(self.token_string) + + def set_token_string(self, token_string): + self.token_string = token_string + + def GetAuthHeader(self, http_method, http_url): + """Generates the Authorization header. + + The form of the secure AuthSub Authorization header is + Authorization: AuthSub token="token" sigalg="sigalg" data="data" sig="sig" + and data represents a string in the form + data = http_method http_url timestamp nonce + + Args: + http_method: string HTTP method i.e. operation e.g. GET, POST, PUT, etc. + http_url: string or atom.url.Url HTTP URL to which request is made. + + Returns: + dict Header to be sent with every subsequent request after authentication. 
+ """ + timestamp = int(math.floor(time.time())) + nonce = '%lu' % random.randrange(1, 2**64) + data = '%s %s %d %s' % (http_method, str(http_url), timestamp, nonce) + sig = cryptomath.bytesToBase64(self.rsa_key.hashAndSign(data)) + header = {'Authorization': '%s"%s" data="%s" sig="%s" sigalg="rsa-sha1"' % + (AUTHSUB_AUTH_LABEL, self.token_string, data, sig)} + return header + + def perform_request(self, http_client, operation, url, data=None, + headers=None): + """Sets the Authorization header and makes the HTTP request.""" + if not headers: + headers = {} + headers.update(self.GetAuthHeader(operation, url)) + return http_client.request(operation, url, data=data, headers=headers) diff --git a/gdata.py-1.2.3/src/gdata/base/__init__.py b/gdata.py-1.2.3/src/gdata/base/__init__.py new file mode 100755 index 0000000..c27114a --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/base/__init__.py @@ -0,0 +1,687 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains extensions to Atom objects used with Google Base.""" + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import atom +import gdata + + +# XML namespaces which are often used in Google Base entities. 
+GBASE_NAMESPACE = 'http://base.google.com/ns/1.0' +GBASE_TEMPLATE = '{http://base.google.com/ns/1.0}%s' +GMETA_NAMESPACE = 'http://base.google.com/ns-metadata/1.0' +GMETA_TEMPLATE = '{http://base.google.com/ns-metadata/1.0}%s' + + +class ItemAttributeContainer(object): + """Provides methods for finding Google Base Item attributes. + + Google Base item attributes are child nodes in the gbase namespace. Google + Base allows you to define your own item attributes and this class provides + methods to interact with the custom attributes. + """ + + def GetItemAttributes(self, name): + """Returns a list of all item attributes which have the desired name. + + Args: + name: str The tag of the desired base attributes. For example, calling + this method with 'rating' would return a list of ItemAttributes + represented by a 'g:rating' tag. + + Returns: + A list of matching ItemAttribute objects. + """ + result = [] + for attrib in self.item_attributes: + if attrib.name == name: + result.append(attrib) + return result + + def FindItemAttribute(self, name): + """Get the contents of the first Base item attribute which matches name. + + This method is deprecated, please use GetItemAttributes instead. + + Args: + name: str The tag of the desired base attribute. For example, calling + this method with name = 'rating' would search for a tag rating + in the GBase namespace in the item attributes. + + Returns: + The text contents of the item attribute, or none if the attribute was + not found. + """ + + for attrib in self.item_attributes: + if attrib.name == name: + return attrib.text + return None + + def AddItemAttribute(self, name, value, value_type=None, access=None): + """Adds a new item attribute tag containing the value. + + Creates a new extension element in the GBase namespace to represent a + Google Base item attribute. + + Args: + name: str The tag name for the new attribute. This must be a valid xml + tag name. The tag will be placed in the GBase namespace. 
+ value: str Contents for the item attribute + value_type: str (optional) The type of data in the vlaue, Examples: text + float + access: str (optional) Used to hide attributes. The attribute is not + exposed in the snippets feed if access is set to 'private'. + """ + + new_attribute = ItemAttribute(name, text=value, + text_type=value_type, access=access) + self.item_attributes.append(new_attribute) + + def SetItemAttribute(self, name, value): + """Changes an existing item attribute's value.""" + + for attrib in self.item_attributes: + if attrib.name == name: + attrib.text = value + return + + def RemoveItemAttribute(self, name): + """Deletes the first extension element which matches name. + + Deletes the first extension element which matches name. + """ + + for i in xrange(len(self.item_attributes)): + if self.item_attributes[i].name == name: + del self.item_attributes[i] + return + + # We need to overwrite _ConvertElementTreeToMember to add special logic to + # convert custom attributes to members + def _ConvertElementTreeToMember(self, child_tree): + # Find the element's tag in this class's list of child members + if self.__class__._children.has_key(child_tree.tag): + member_name = self.__class__._children[child_tree.tag][0] + member_class = self.__class__._children[child_tree.tag][1] + # If the class member is supposed to contain a list, make sure the + # matching member is set to a list, then append the new member + # instance to the list. + if isinstance(member_class, list): + if getattr(self, member_name) is None: + setattr(self, member_name, []) + getattr(self, member_name).append(atom._CreateClassFromElementTree( + member_class[0], child_tree)) + else: + setattr(self, member_name, + atom._CreateClassFromElementTree(member_class, child_tree)) + elif child_tree.tag.find('{%s}' % GBASE_NAMESPACE) == 0: + # If this is in the gbase namespace, make it into an extension element. 
+ name = child_tree.tag[child_tree.tag.index('}')+1:] + value = child_tree.text + if child_tree.attrib.has_key('type'): + value_type = child_tree.attrib['type'] + else: + value_type = None + self.AddItemAttribute(name, value, value_type) + else: + atom.ExtensionContainer._ConvertElementTreeToMember(self, child_tree) + + # We need to overwtite _AddMembersToElementTree to add special logic to + # convert custom members to XML nodes. + def _AddMembersToElementTree(self, tree): + # Convert the members of this class which are XML child nodes. + # This uses the class's _children dictionary to find the members which + # should become XML child nodes. + member_node_names = [values[0] for tag, values in + self.__class__._children.iteritems()] + for member_name in member_node_names: + member = getattr(self, member_name) + if member is None: + pass + elif isinstance(member, list): + for instance in member: + instance._BecomeChildElement(tree) + else: + member._BecomeChildElement(tree) + # Convert the members of this class which are XML attributes. + for xml_attribute, member_name in self.__class__._attributes.iteritems(): + member = getattr(self, member_name) + if member is not None: + tree.attrib[xml_attribute] = member + # Convert all special custom item attributes to nodes + for attribute in self.item_attributes: + attribute._BecomeChildElement(tree) + # Lastly, call the ExtensionContainers's _AddMembersToElementTree to + # convert any extension attributes. + atom.ExtensionContainer._AddMembersToElementTree(self, tree) + + +class ItemAttribute(atom.Text): + """An optional or user defined attribute for a GBase item. + + Google Base allows items to have custom attribute child nodes. These nodes + have contents and a type attribute which tells Google Base whether the + contents are text, a float value with units, etc. The Atom text class has + the same structure, so this class inherits from Text. 
+ """ + + _namespace = GBASE_NAMESPACE + _children = atom.Text._children.copy() + _attributes = atom.Text._attributes.copy() + _attributes['access'] = 'access' + + def __init__(self, name, text_type=None, access=None, text=None, + extension_elements=None, extension_attributes=None): + """Constructor for a GBase item attribute + + Args: + name: str The name of the attribute. Examples include + price, color, make, model, pages, salary, etc. + text_type: str (optional) The type associated with the text contents + access: str (optional) If the access attribute is set to 'private', the + attribute will not be included in the item's description in the + snippets feed + text: str (optional) The text data in the this element + extension_elements: list (optional) A list of ExtensionElement + instances + extension_attributes: dict (optional) A dictionary of attribute + value string pairs + """ + + self.name = name + self.type = text_type + self.access = access + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + def _BecomeChildElement(self, tree): + new_child = ElementTree.Element('') + tree.append(new_child) + new_child.tag = '{%s}%s' % (self.__class__._namespace, + self.name) + self._AddMembersToElementTree(new_child) + + def _ToElementTree(self): + new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace, + self.name)) + self._AddMembersToElementTree(new_tree) + return new_tree + + +def ItemAttributeFromString(xml_string): + element_tree = ElementTree.fromstring(xml_string) + return _ItemAttributeFromElementTree(element_tree) + + +def _ItemAttributeFromElementTree(element_tree): + if element_tree.tag.find(GBASE_TEMPLATE % '') == 0: + to_return = ItemAttribute('') + to_return._HarvestElementTree(element_tree) + to_return.name = element_tree.tag[element_tree.tag.index('}')+1:] + if to_return.name and to_return.name != '': + return to_return + return None + + +class 
Label(atom.AtomBase): + """The Google Base label element""" + + _tag = 'label' + _namespace = GBASE_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def LabelFromString(xml_string): + return atom.CreateClassFromXMLString(Label, xml_string) + + +class Thumbnail(atom.AtomBase): + """The Google Base thumbnail element""" + + _tag = 'thumbnail' + _namespace = GMETA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['width'] = 'width' + _attributes['height'] = 'height' + + def __init__(self, width=None, height=None, text=None, extension_elements=None, + extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.width = width + self.height = height + + +def ThumbnailFromString(xml_string): + return atom.CreateClassFromXMLString(Thumbnail, xml_string) + + +class ImageLink(atom.Text): + """The Google Base image_link element""" + + _tag = 'image_link' + _namespace = GBASE_NAMESPACE + _children = atom.Text._children.copy() + _attributes = atom.Text._attributes.copy() + _children['{%s}thumbnail' % GMETA_NAMESPACE] = ('thumbnail', [Thumbnail]) + + def __init__(self, thumbnail=None, text=None, extension_elements=None, + text_type=None, extension_attributes=None): + self.thumbnail = thumbnail or [] + self.text = text + self.type = text_type + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def ImageLinkFromString(xml_string): + return atom.CreateClassFromXMLString(ImageLink, xml_string) + + +class ItemType(atom.Text): + """The Google Base item_type element""" + + _tag = 
'item_type' + _namespace = GBASE_NAMESPACE + _children = atom.Text._children.copy() + _attributes = atom.Text._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + text_type=None, extension_attributes=None): + self.text = text + self.type = text_type + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def ItemTypeFromString(xml_string): + return atom.CreateClassFromXMLString(ItemType, xml_string) + + +class MetaItemType(ItemType): + """The Google Base item_type element""" + + _tag = 'item_type' + _namespace = GMETA_NAMESPACE + _children = ItemType._children.copy() + _attributes = ItemType._attributes.copy() + + +def MetaItemTypeFromString(xml_string): + return atom.CreateClassFromXMLString(MetaItemType, xml_string) + + +class Value(atom.AtomBase): + """Metadata about common values for a given attribute + + A value is a child of an attribute which comes from the attributes feed. + The value's text is a commonly used value paired with an attribute name + and the value's count tells how often this value appears for the given + attribute in the search results. + """ + + _tag = 'value' + _namespace = GMETA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['count'] = 'count' + + def __init__(self, count=None, text=None, extension_elements=None, + extension_attributes=None): + """Constructor for Attribute metadata element + + Args: + count: str (optional) The number of times the value in text is given + for the parent attribute. + text: str (optional) The value which appears in the search results. 
+ extension_elements: list (optional) A list of ExtensionElement + instances + extension_attributes: dict (optional) A dictionary of attribute value + string pairs + """ + + self.count = count + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def ValueFromString(xml_string): + return atom.CreateClassFromXMLString(Value, xml_string) + + +class Attribute(atom.Text): + """Metadata about an attribute from the attributes feed + + An entry from the attributes feed contains a list of attributes. Each + attribute describes the attribute's type and count of the items which + use the attribute. + """ + + _tag = 'attribute' + _namespace = GMETA_NAMESPACE + _children = atom.Text._children.copy() + _attributes = atom.Text._attributes.copy() + _children['{%s}value' % GMETA_NAMESPACE] = ('value', [Value]) + _attributes['count'] = 'count' + _attributes['name'] = 'name' + + def __init__(self, name=None, attribute_type=None, count=None, value=None, + text=None, extension_elements=None, extension_attributes=None): + """Constructor for Attribute metadata element + + Args: + name: str (optional) The name of the attribute + attribute_type: str (optional) The type for the attribute. Examples: + test, float, etc. + count: str (optional) The number of times this attribute appears in + the query results. + value: list (optional) The values which are often used for this + attirbute. + text: str (optional) The text contents of the XML for this attribute. 
+ extension_elements: list (optional) A list of ExtensionElement + instances + extension_attributes: dict (optional) A dictionary of attribute value + string pairs + """ + + self.name = name + self.type = attribute_type + self.count = count + self.value = value or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def AttributeFromString(xml_string): + return atom.CreateClassFromXMLString(Attribute, xml_string) + + +class Attributes(atom.AtomBase): + """A collection of Google Base metadata attributes""" + + _tag = 'attributes' + _namespace = GMETA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute]) + + def __init__(self, attribute=None, extension_elements=None, + extension_attributes=None, text=None): + self.attribute = attribute or [] + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.text = text + + +class GBaseItem(ItemAttributeContainer, gdata.BatchEntry): + """An Google Base flavor of an Atom Entry. + + Google Base items have required attributes, recommended attributes, and user + defined attributes. The required attributes are stored in this class as + members, and other attributes are stored as extension elements. You can + access the recommended and user defined attributes by using + AddItemAttribute, SetItemAttribute, FindItemAttribute, and + RemoveItemAttribute. 
+ + The Base Item + """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.BatchEntry._children.copy() + _attributes = gdata.BatchEntry._attributes.copy() + _children['{%s}label' % GBASE_NAMESPACE] = ('label', [Label]) + _children['{%s}item_type' % GBASE_NAMESPACE] = ('item_type', ItemType) + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, updated=None, control=None, + label=None, item_type=None, item_attributes=None, + batch_operation=None, batch_id=None, batch_status=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.title = title + self.updated = updated + self.control = control + self.label = label or [] + self.item_type = item_type + self.item_attributes = item_attributes or [] + self.batch_operation = batch_operation + self.batch_id = batch_id + self.batch_status = batch_status + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def GBaseItemFromString(xml_string): + return atom.CreateClassFromXMLString(GBaseItem, xml_string) + + +class GBaseSnippet(GBaseItem): + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = GBaseItem._children.copy() + _attributes = GBaseItem._attributes.copy() + + +def GBaseSnippetFromString(xml_string): + return atom.CreateClassFromXMLString(GBaseSnippet, xml_string) + + +class GBaseAttributeEntry(gdata.GDataEntry): + """An Atom Entry from the attributes feed""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = 
gdata.GDataEntry._attributes.copy() + _children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute]) + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, updated=None, label=None, + attribute=None, control=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.control = control + self.title = title + self.updated = updated + self.label = label or [] + self.attribute = attribute or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def GBaseAttributeEntryFromString(xml_string): + return atom.CreateClassFromXMLString(GBaseAttributeEntry, xml_string) + + +class GBaseItemTypeEntry(gdata.GDataEntry): + """An Atom entry from the item types feed + + These entries contain a list of attributes which are stored in one + XML node called attributes. This class simplifies the data structure + by treating attributes as a list of attribute instances. + + Note that the item_type for an item type entry is in the Google Base meta + namespace as opposed to item_types encountered in other feeds. 
+ """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}attributes' % GMETA_NAMESPACE] = ('attributes', Attributes) + _children['{%s}attribute' % GMETA_NAMESPACE] = ('attribute', [Attribute]) + _children['{%s}item_type' % GMETA_NAMESPACE] = ('item_type', MetaItemType) + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, updated=None, label=None, + item_type=None, control=None, attribute=None, attributes=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.title = title + self.updated = updated + self.control = control + self.label = label or [] + self.item_type = item_type + self.attributes = attributes + self.attribute = attribute or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def GBaseItemTypeEntryFromString(xml_string): + return atom.CreateClassFromXMLString(GBaseItemTypeEntry, xml_string) + + +class GBaseItemFeed(gdata.BatchFeed): + """A feed containing Google Base Items""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.BatchFeed._children.copy() + _attributes = gdata.BatchFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseItem]) + + +def GBaseItemFeedFromString(xml_string): + return atom.CreateClassFromXMLString(GBaseItemFeed, xml_string) + + +class GBaseSnippetFeed(gdata.GDataFeed): + """A feed containing Google Base Snippets""" + + _tag = 'feed' + _namespace = 
atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseSnippet]) + + +def GBaseSnippetFeedFromString(xml_string): + return atom.CreateClassFromXMLString(GBaseSnippetFeed, xml_string) + + +class GBaseAttributesFeed(gdata.GDataFeed): + """A feed containing Google Base Attributes + + A query sent to the attributes feed will return a feed of + attributes which are present in the items that match the + query. + """ + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [GBaseAttributeEntry]) + + +def GBaseAttributesFeedFromString(xml_string): + return atom.CreateClassFromXMLString(GBaseAttributesFeed, xml_string) + + +class GBaseLocalesFeed(gdata.GDataFeed): + """The locales feed from Google Base. + + This read-only feed defines the permitted locales for Google Base. The + locale value identifies the language, currency, and date formats used in a + feed. 
+ """ + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + + +def GBaseLocalesFeedFromString(xml_string): + return atom.CreateClassFromXMLString(GBaseLocalesFeed, xml_string) + + +class GBaseItemTypesFeed(gdata.GDataFeed): + """A feed from the Google Base item types feed""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GBaseItemTypeEntry]) + + +def GBaseItemTypesFeedFromString(xml_string): + return atom.CreateClassFromXMLString(GBaseItemTypesFeed, xml_string) diff --git a/gdata.py-1.2.3/src/gdata/base/service.py b/gdata.py-1.2.3/src/gdata/base/service.py new file mode 100755 index 0000000..dff3451 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/base/service.py @@ -0,0 +1,244 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""GBaseService extends the GDataService to streamline Google Base operations. + + GBaseService: Provides methods to query feeds and manipulate items. Extends + GDataService. + + DictionaryToParamList: Function which converts a dictionary into a list of + URL arguments (represented as strings). This is a + utility function used in CRUD operations. 
+""" + +__author__ = 'api.jscudder (Jeffrey Scudder)' + +import urllib +import gdata +import atom.service +import gdata.service +import gdata.base +import atom + + +# URL to which all batch requests are sent. +BASE_BATCH_URL = 'http://www.google.com/base/feeds/items/batch' + + +class Error(Exception): + pass + + +class RequestError(Error): + pass + + +class GBaseService(gdata.service.GDataService): + """Client for the Google Base service.""" + + def __init__(self, email=None, password=None, source=None, + server='base.google.com', api_key=None, + additional_headers=None, handler=None): + gdata.service.GDataService.__init__(self, email=email, password=password, + service='gbase', source=source, + server=server, + additional_headers=additional_headers, + handler=handler) + self.api_key = api_key + + def _SetAPIKey(self, api_key): + if not isinstance(self.additional_headers, dict): + self.additional_headers = {} + self.additional_headers['X-Google-Key'] = api_key + + def __SetAPIKey(self, api_key): + self._SetAPIKey(api_key) + + def _GetAPIKey(self): + if 'X-Google-Key' not in self.additional_headers: + return None + else: + return self.additional_headers['X-Google-Key'] + + def __GetAPIKey(self): + return self._GetAPIKey() + + api_key = property(__GetAPIKey, __SetAPIKey, + doc="""Get or set the API key to be included in all requests.""") + + def Query(self, uri, converter=None): + """Performs a style query and returns a resulting feed or entry. + + Args: + uri: string The full URI which be queried. Examples include + '/base/feeds/snippets?bq=digital+camera', + 'http://www.google.com/base/feeds/snippets?bq=digital+camera' + '/base/feeds/items' + I recommend creating a URI using a query class. + converter: func (optional) A function which will be executed on the + server's response. Examples include GBaseItemFromString, etc. + + Returns: + If converter was specified, returns the results of calling converter on + the server's response. 
If converter was not specified, and the result + was an Atom Entry, returns a GBaseItem, by default, the method returns + the result of calling gdata.service's Get method. + """ + + result = self.Get(uri, converter=converter) + if converter: + return result + elif isinstance(result, atom.Entry): + return gdata.base.GBaseItemFromString(result.ToString()) + return result + + def QuerySnippetsFeed(self, uri): + return self.Get(uri, converter=gdata.base.GBaseSnippetFeedFromString) + + def QueryItemsFeed(self, uri): + return self.Get(uri, converter=gdata.base.GBaseItemFeedFromString) + + def QueryAttributesFeed(self, uri): + return self.Get(uri, converter=gdata.base.GBaseAttributesFeedFromString) + + def QueryItemTypesFeed(self, uri): + return self.Get(uri, converter=gdata.base.GBaseItemTypesFeedFromString) + + def QueryLocalesFeed(self, uri): + return self.Get(uri, converter=gdata.base.GBaseLocalesFeedFromString) + + def GetItem(self, uri): + return self.Get(uri, converter=gdata.base.GBaseItemFromString) + + def GetSnippet(self, uri): + return self.Get(uri, converter=gdata.base.GBaseSnippetFromString) + + def GetAttribute(self, uri): + return self.Get(uri, converter=gdata.base.GBaseAttributeEntryFromString) + + def GetItemType(self, uri): + return self.Get(uri, converter=gdata.base.GBaseItemTypeEntryFromString) + + def GetLocale(self, uri): + return self.Get(uri, converter=gdata.base.GDataEntryFromString) + + def InsertItem(self, new_item, url_params=None, escape_params=True, + converter=None): + """Adds an item to Google Base. + + Args: + new_item: atom.Entry or subclass A new item which is to be added to + Google Base. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. 
Usually this is a function like + GBaseItemFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a GBaseItem. + """ + + response = self.Post(new_item, '/base/feeds/items', url_params=url_params, + escape_params=escape_params, converter=converter) + + if not converter and isinstance(response, atom.Entry): + return gdata.base.GBaseItemFromString(response.ToString()) + return response + + def DeleteItem(self, item_id, url_params=None, escape_params=True): + """Removes an item with the specified ID from Google Base. + + Args: + item_id: string The ID of the item to be deleted. Example: + 'http://www.google.com/base/feeds/items/13185446517496042648' + url_params: dict (optional) Additional URL parameters to be included + in the deletion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + True if the delete succeeded. + """ + + return self.Delete('%s' % (item_id[len('http://www.google.com'):],), + url_params=url_params, escape_params=escape_params) + + def UpdateItem(self, item_id, updated_item, url_params=None, + escape_params=True, + converter=gdata.base.GBaseItemFromString): + """Updates an existing item. + + Args: + item_id: string The ID of the item to be updated. Example: + 'http://www.google.com/base/feeds/items/13185446517496042648' + updated_item: atom.Entry, subclass, or string, containing + the Atom Entry which will replace the base item which is + stored at the item_id. + url_params: dict (optional) Additional URL parameters to be included + in the update request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. 
Usually this is a function like + GBaseItemFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a GBaseItem. + """ + + response = self.Put(updated_item, + item_id, url_params=url_params, escape_params=escape_params, + converter=converter) + if not converter and isinstance(response, atom.Entry): + return gdata.base.GBaseItemFromString(response.ToString()) + return response + + def ExecuteBatch(self, batch_feed, + converter=gdata.base.GBaseItemFeedFromString): + """Sends a batch request feed to the server. + + Args: + batch_feed: gdata.BatchFeed A feed containing BatchEntry elements which + contain the desired CRUD operation and any necessary entry data. + converter: Function (optional) Function to be executed on the server's + response. This function should take one string as a parameter. The + default value is GBaseItemFeedFromString which will turn the result + into a gdata.base.GBaseItem object. + + Returns: + A gdata.BatchFeed containing the results. + """ + + return self.Post(batch_feed, BASE_BATCH_URL, converter=converter) + + +class BaseQuery(gdata.service.Query): + + def _GetBaseQuery(self): + return self['bq'] + + def _SetBaseQuery(self, base_query): + self['bq'] = base_query + + bq = property(_GetBaseQuery, _SetBaseQuery, + doc="""The bq query parameter""") diff --git a/gdata.py-1.2.3/src/gdata/blogger/__init__.py b/gdata.py-1.2.3/src/gdata/blogger/__init__.py new file mode 100644 index 0000000..0e0417b --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/blogger/__init__.py @@ -0,0 +1,202 @@ +#!/usr/bin/python +# +# Copyright (C) 2007, 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Contains extensions to Atom objects used with Blogger.""" + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + + +import atom +import gdata +import re + + +LABEL_SCHEME = 'http://www.blogger.com/atom/ns#' +THR_NAMESPACE = 'http://purl.org/syndication/thread/1.0' + + +class BloggerEntry(gdata.GDataEntry): + """Adds convenience methods inherited by all Blogger entries.""" + + blog_name_pattern = re.compile('(http://)(\w*)') + blog_id_pattern = re.compile('(tag:blogger.com,1999:blog-)(\w*)') + blog_id2_pattern = re.compile('tag:blogger.com,1999:user-(\d+)\.blog-(\d+)') + + def GetBlogId(self): + """Extracts the Blogger id of this blog. + This method is useful when contructing URLs by hand. The blog id is + often used in blogger operation URLs. This should not be confused with + the id member of a BloggerBlog. The id element is the Atom id XML element. + The blog id which this method returns is a part of the Atom id. + + Returns: + The blog's unique id as a string. + """ + if self.id.text: + match = self.blog_id_pattern.match(self.id.text) + if match: + return match.group(2) + else: + return self.blog_id2_pattern.match(self.id.text).group(2) + return None + + def GetBlogName(self): + """Finds the name of this blog as used in the 'alternate' URL. + An alternate URL is in the form 'http://blogName.blogspot.com/'. For an + entry representing the above example, this method would return 'blogName'. + + Returns: + The blog's URL name component as a string. 
+ """ + for link in self.link: + if link.rel == 'alternate': + return self.blog_name_pattern.match(link.href).group(2) + return None + + +class BlogEntry(BloggerEntry): + """Describes a blog entry in the feed listing a user's blogs.""" + + +def BlogEntryFromString(xml_string): + return atom.CreateClassFromXMLString(BlogEntry, xml_string) + + +class BlogFeed(gdata.GDataFeed): + """Describes a feed of a user's blogs.""" + + _children = gdata.GDataFeed._children.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BlogEntry]) + + +def BlogFeedFromString(xml_string): + return atom.CreateClassFromXMLString(BlogFeed, xml_string) + + +class BlogPostEntry(BloggerEntry): + """Describes a blog post entry in the feed of a blog's posts.""" + + post_id_pattern = re.compile('(tag:blogger.com,1999:blog-)(\w*)(.post-)(\w*)') + + def AddLabel(self, label): + """Adds a label to the blog post. + + The label is represented by an Atom category element, so this method + is shorthand for appending a new atom.Category object. + + Args: + label: str + """ + self.category.append(atom.Category(scheme=LABEL_SCHEME, term=label)) + + def GetPostId(self): + """Extracts the postID string from the entry's Atom id. + + Returns: A string of digits which identify this post within the blog. 
+ """ + if self.id.text: + return self.post_id_pattern.match(self.id.text).group(4) + return None + + +def BlogPostEntryFromString(xml_string): + return atom.CreateClassFromXMLString(BlogPostEntry, xml_string) + + +class BlogPostFeed(gdata.GDataFeed): + """Describes a feed of a blog's posts.""" + + _children = gdata.GDataFeed._children.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [BlogPostEntry]) + + +def BlogPostFeedFromString(xml_string): + return atom.CreateClassFromXMLString(BlogPostFeed, xml_string) + + +class InReplyTo(atom.AtomBase): + _tag = 'in-reply-to' + _namespace = THR_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['href'] = 'href' + _attributes['ref'] = 'ref' + _attributes['source'] = 'source' + _attributes['type'] = 'type' + + def __init__(self, href=None, ref=None, source=None, type=None, + extension_elements=None, extension_attributes=None, text=None): + self.href = href + self.ref = ref + self.source = source + self.type = type + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + self.text = text + + +def InReplyToFromString(xml_string): + return atom.CreateClassFromXMLString(InReplyTo, xml_string) + + +class CommentEntry(BloggerEntry): + """Describes a blog post comment entry in the feed of a blog post's + comments.""" + + _children = BloggerEntry._children.copy() + _children['{%s}in-reply-to' % THR_NAMESPACE] = ('in_reply_to', InReplyTo) + + comment_id_pattern = re.compile('.*-(\w*)$') + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, control=None, title=None, updated=None, + in_reply_to=None, extension_elements=None, extension_attributes=None, + text=None): + BloggerEntry.__init__(self, author=author, category=category, + content=content, contributor=contributor, atom_id=atom_id, link=link, + published=published, 
rights=rights, source=source, summary=summary, + control=control, title=title, updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + self.in_reply_to = in_reply_to + + def GetCommentId(self): + """Extracts the commentID string from the entry's Atom id. + + Returns: A string of digits which identify this post within the blog. + """ + if self.id.text: + return self.comment_id_pattern.match(self.id.text).group(1) + return None + + +def CommentEntryFromString(xml_string): + return atom.CreateClassFromXMLString(CommentEntry, xml_string) + + +class CommentFeed(gdata.GDataFeed): + """Describes a feed of a blog post's comments.""" + + _children = gdata.GDataFeed._children.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CommentEntry]) + + +def CommentFeedFromString(xml_string): + return atom.CreateClassFromXMLString(CommentFeed, xml_string) + + diff --git a/gdata.py-1.2.3/src/gdata/blogger/service.py b/gdata.py-1.2.3/src/gdata/blogger/service.py new file mode 100644 index 0000000..50c27d2 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/blogger/service.py @@ -0,0 +1,138 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Classes to interact with the Blogger server.""" + +__author__ = 'api.jscudder (Jeffrey Scudder)' + +import gdata.service +import gdata.blogger + + +class BloggerService(gdata.service.GDataService): + + def __init__(self, email=None, password=None, source=None, + server=None, api_key=None, + additional_headers=None): + gdata.service.GDataService.__init__(self, email=email, password=password, + service='blogger', source=source, + server=server, + additional_headers=additional_headers) + self.accountType = 'GOOGLE' + + def GetBlogFeed(self, uri=None): + """Retrieve a list of the blogs to which the current user may manage.""" + if not uri: + uri = 'http://www.blogger.com/feeds/default/blogs' + return self.Get(uri, converter=gdata.blogger.BlogFeedFromString) + + def GetBlogCommentFeed(self, blog_id=None, uri=None): + """Retrieve a list of the comments for this blog.""" + if blog_id: + uri = 'http://www.blogger.com/feeds/%s/comments/default' % blog_id + return self.Get(uri, converter=gdata.blogger.CommentFeedFromString) + + def GetBlogPostFeed(self, blog_id=None, uri=None): + if blog_id: + uri = 'http://www.blogger.com/feeds/%s/posts/default' % blog_id + return self.Get(uri, converter=gdata.blogger.BlogPostFeedFromString) + + def GetPostCommentFeed(self, blog_id=None, post_id=None, uri=None): + """Retrieve a list of the comments for this particular blog post.""" + if blog_id and post_id: + uri = 'http://www.blogger.com/feeds/%s/%s/comments/default' % (blog_id, + post_id) + return self.Get(uri, converter=gdata.blogger.CommentFeedFromString) + + def AddPost(self, entry, blog_id=None, uri=None): + if blog_id: + uri = 'http://www.blogger.com/feeds/%s/posts/default' % blog_id + return self.Post(entry, uri, + converter=gdata.blogger.BlogPostEntryFromString) + + def UpdatePost(self, entry, uri=None): + if not uri: + uri = entry.GetEditLink().href + return self.Put(entry, uri, + converter=gdata.blogger.BlogPostEntryFromString) + + def DeletePost(self, entry=None, 
uri=None): + if not uri: + uri = entry.GetEditLink().href + return self.Delete(uri) + + def AddComment(self, comment_entry, blog_id=None, post_id=None, uri=None): + """Adds a new comment to the specified blog post.""" + if blog_id and post_id: + uri = 'http://www.blogger.com/feeds/%s/%s/comments/default' % ( + blog_id, post_id) + return self.Post(comment_entry, uri, + converter=gdata.blogger.CommentEntryFromString) + + def DeleteComment(self, entry=None, uri=None): + if not uri: + uri = entry.GetEditLink().href + return self.Delete(uri) + + +class BlogQuery(gdata.service.Query): + + def __init__(self, feed=None, params=None, categories=None, blog_id=None): + """Constructs a query object for the list of a user's Blogger blogs. + + Args: + feed: str (optional) The beginning of the URL to be queried. If the + feed is not set, and there is no blog_id passed in, the default + value is used ('http://www.blogger.com/feeds/default/blogs'). + params: dict (optional) + categories: list (optional) + blog_id: str (optional) + """ + if not feed and blog_id: + feed = 'http://www.blogger.com/feeds/default/blogs/%s' % blog_id + elif not feed: + feed = 'http://www.blogger.com/feeds/default/blogs' + gdata.service.Query.__init__(self, feed=feed, params=params, + categories=categories) + + +class BlogPostQuery(gdata.service.Query): + + def __init__(self, feed=None, params=None, categories=None, blog_id=None, + post_id=None): + if not feed and blog_id and post_id: + feed = 'http://www.blogger.com/feeds/%s/posts/default/%s' % (blog_id, + post_id) + elif not feed and blog_id: + feed = 'http://www.blogger.com/feeds/%s/posts/default' % blog_id + gdata.service.Query.__init__(self, feed=feed, params=params, + categories=categories) + + +class BlogCommentQuery(gdata.service.Query): + + def __init__(self, feed=None, params=None, categories=None, blog_id=None, + post_id=None, comment_id=None): + if not feed and blog_id and comment_id: + feed = 
'http://www.blogger.com/feeds/%s/comments/default/%s' % ( + blog_id, comment_id) + elif not feed and blog_id and post_id: + feed = 'http://www.blogger.com/feeds/%s/%s/comments/default' % ( + blog_id, post_id) + elif not feed and blog_id: + feed = 'http://www.blogger.com/feeds/%s/comments/default' % blog_id + gdata.service.Query.__init__(self, feed=feed, params=params, + categories=categories) diff --git a/gdata.py-1.2.3/src/gdata/calendar/__init__.py b/gdata.py-1.2.3/src/gdata/calendar/__init__.py new file mode 100755 index 0000000..cea1a03 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/calendar/__init__.py @@ -0,0 +1,907 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Contains extensions to ElementWrapper objects used with Google Calendar.""" + + +__author__ = 'api.vli (Vivian Li), api.rboyd (Ryan Boyd)' + + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import atom +import gdata + + +# XML namespaces which are often used in Google Calendar entities. 
+GCAL_NAMESPACE = 'http://schemas.google.com/gCal/2005' +GCAL_TEMPLATE = '{http://schemas.google.com/gCal/2005}%s' +WEB_CONTENT_LINK_REL = '%s/%s' % (GCAL_NAMESPACE, 'webContent') +GACL_NAMESPACE = gdata.GACL_NAMESPACE +GACL_TEMPLATE = gdata.GACL_TEMPLATE + + + +class ValueAttributeContainer(atom.AtomBase): + """A parent class for all Calendar classes which have a value attribute. + + Children include Color, AccessLevel, Hidden + """ + + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +class Color(ValueAttributeContainer): + """The Google Calendar color element""" + + _tag = 'color' + _namespace = GCAL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + + +class AccessLevel(ValueAttributeContainer): + """The Google Calendar accesslevel element""" + + _tag = 'accesslevel' + _namespace = GCAL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + +class Hidden(ValueAttributeContainer): + """The Google Calendar hidden element""" + + _tag = 'hidden' + _namespace = GCAL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + +class Selected(ValueAttributeContainer): + """The Google Calendar selected element""" + + _tag = 'selected' + _namespace = GCAL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + +class Timezone(ValueAttributeContainer): + """The Google Calendar timezone element""" + + _tag = 'timezone' + _namespace = GCAL_NAMESPACE + _children = 
ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + +class Where(atom.AtomBase): + """The Google Calendar Where element""" + + _tag = 'where' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['valueString'] = 'value_string' + + def __init__(self, value_string=None, extension_elements=None, + extension_attributes=None, text=None): + self.value_string = value_string + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class CalendarListEntry(gdata.GDataEntry, gdata.LinkFinder): + """A Google Calendar meta Entry flavor of an Atom Entry """ + + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}color' % GCAL_NAMESPACE] = ('color', Color) + _children['{%s}accesslevel' % GCAL_NAMESPACE] = ('access_level', + AccessLevel) + _children['{%s}hidden' % GCAL_NAMESPACE] = ('hidden', Hidden) + _children['{%s}selected' % GCAL_NAMESPACE] = ('selected', Selected) + _children['{%s}timezone' % GCAL_NAMESPACE] = ('timezone', Timezone) + _children['{%s}where' % gdata.GDATA_NAMESPACE] = ('where', Where) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + color=None, access_level=None, hidden=None, timezone=None, + selected=None, + where=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, + updated=updated, text=None) + + self.color = color + self.access_level = access_level + self.hidden = hidden + self.selected = selected + self.timezone = timezone + self.where = where + + 
+class CalendarListFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Calendar meta feed flavor of an Atom Feed""" + + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CalendarListEntry]) + + +class Scope(atom.AtomBase): + """The Google ACL scope element""" + + _tag = 'scope' + _namespace = GACL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + _attributes['type'] = 'type' + + def __init__(self, extension_elements=None, value=None, scope_type=None, + extension_attributes=None, text=None): + self.value = value + self.type = scope_type + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Role(ValueAttributeContainer): + """The Google Calendar timezone element""" + + _tag = 'role' + _namespace = GACL_NAMESPACE + _children = ValueAttributeContainer._children.copy() + _attributes = ValueAttributeContainer._attributes.copy() + + +class CalendarAclEntry(gdata.GDataEntry, gdata.LinkFinder): + """A Google Calendar ACL Entry flavor of an Atom Entry """ + + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}scope' % GACL_NAMESPACE] = ('scope', Scope) + _children['{%s}role' % GACL_NAMESPACE] = ('role', Role) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + scope=None, role=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, + 
updated=updated, text=None) + self.scope = scope + self.role = role + + +class CalendarAclFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Calendar ACL feed flavor of an Atom Feed""" + + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CalendarAclEntry]) + + +class CalendarEventCommentEntry(gdata.GDataEntry, gdata.LinkFinder): + """A Google Calendar event comments entry flavor of an Atom Entry""" + + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + +class CalendarEventCommentFeed(gdata.GDataFeed, gdata.LinkFinder): + """A Google Calendar event comments feed flavor of an Atom Feed""" + + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [CalendarEventCommentEntry]) + + +class ExtendedProperty(gdata.ExtendedProperty): + """A transparent subclass of gdata.ExtendedProperty added to this module + for backwards compatibility.""" + + +class Reminder(atom.AtomBase): + """The Google Calendar reminder element""" + + _tag = 'reminder' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['absoluteTime'] = 'absolute_time' + _attributes['days'] = 'days' + _attributes['hours'] = 'hours' + _attributes['minutes'] = 'minutes' + + def __init__(self, absolute_time=None, + days=None, hours=None, minutes=None, + extension_elements=None, + extension_attributes=None, text=None): + self.absolute_time = absolute_time + if days is not None: + self.days = str(days) + else: + self.days = None + if hours is not None: + 
self.hours = str(hours) + else: + self.hours = None + if minutes is not None: + self.minutes = str(minutes) + else: + self.minutes = None + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class When(atom.AtomBase): + """The Google Calendar When element""" + + _tag = 'when' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}reminder' % gdata.GDATA_NAMESPACE] = ('reminder', [Reminder]) + _attributes['startTime'] = 'start_time' + _attributes['endTime'] = 'end_time' + + def __init__(self, start_time=None, end_time=None, reminder=None, + extension_elements=None, extension_attributes=None, text=None): + self.start_time = start_time + self.end_time = end_time + self.reminder = reminder or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Recurrence(atom.AtomBase): + """The Google Calendar Recurrence element""" + + _tag = 'recurrence' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + +class UriEnumElement(atom.AtomBase): + + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, tag, enum_map, attrib_name='value', + extension_elements=None, extension_attributes=None, text=None): + self.tag=tag + self.enum_map=enum_map + self.attrib_name=attrib_name + self.value=None + self.text=text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + def findKey(self, value): + res=[item[0] for item in self.enum_map.items() if item[1] == value] + if res is None or len(res) == 0: + return None + return res[0] + + def _ConvertElementAttributeToMember(self, attribute, value): + # Special logic to use the enum_map to set 
the value of the object's value member. + if attribute == self.attrib_name and value != '': + self.value = self.enum_map[value] + return + # Find the attribute in this class's list of attributes. + if self.__class__._attributes.has_key(attribute): + # Find the member of this class which corresponds to the XML attribute + # (lookup in current_class._attributes) and set this member to the + # desired value (using self.__dict__). + setattr(self, self.__class__._attributes[attribute], value) + else: + # The current class doesn't map this attribute, so try to parent class. + atom.ExtensionContainer._ConvertElementAttributeToMember(self, + attribute, + value) + + def _AddMembersToElementTree(self, tree): + # Convert the members of this class which are XML child nodes. + # This uses the class's _children dictionary to find the members which + # should become XML child nodes. + member_node_names = [values[0] for tag, values in + self.__class__._children.iteritems()] + for member_name in member_node_names: + member = getattr(self, member_name) + if member is None: + pass + elif isinstance(member, list): + for instance in member: + instance._BecomeChildElement(tree) + else: + member._BecomeChildElement(tree) + # Special logic to set the desired XML attribute. + key = self.findKey(self.value) + if key is not None: + tree.attrib[self.attrib_name]=key + # Convert the members of this class which are XML attributes. + for xml_attribute, member_name in self.__class__._attributes.iteritems(): + member = getattr(self, member_name) + if member is not None: + tree.attrib[xml_attribute] = member + # Lastly, call the parent's _AddMembersToElementTree to get any + # extension elements. 
+ atom.ExtensionContainer._AddMembersToElementTree(self, tree) + + + +class AttendeeStatus(UriEnumElement): + """The Google Calendar attendeeStatus element""" + + _tag = 'attendeeStatus' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + + attendee_enum = { + 'http://schemas.google.com/g/2005#event.accepted' : 'ACCEPTED', + 'http://schemas.google.com/g/2005#event.declined' : 'DECLINED', + 'http://schemas.google.com/g/2005#event.invited' : 'INVITED', + 'http://schemas.google.com/g/2005#event.tentative' : 'TENTATIVE'} + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, 'attendeeStatus', AttendeeStatus.attendee_enum, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +class AttendeeType(UriEnumElement): + """The Google Calendar attendeeType element""" + + _tag = 'attendeeType' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + + attendee_type_enum = { + 'http://schemas.google.com/g/2005#event.optional' : 'OPTIONAL', + 'http://schemas.google.com/g/2005#event.required' : 'REQUIRED' } + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, 'attendeeType', + AttendeeType.attendee_type_enum, + extension_elements=extension_elements, + extension_attributes=extension_attributes,text=text) + + +class Visibility(UriEnumElement): + """The Google Calendar Visibility element""" + + _tag = 'visibility' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + + visibility_enum = { + 'http://schemas.google.com/g/2005#event.confidential' : 'CONFIDENTIAL', + 'http://schemas.google.com/g/2005#event.default' : 'DEFAULT', + 
'http://schemas.google.com/g/2005#event.private' : 'PRIVATE', + 'http://schemas.google.com/g/2005#event.public' : 'PUBLIC' } + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, 'visibility', Visibility.visibility_enum, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +class Transparency(UriEnumElement): + """The Google Calendar Transparency element""" + + _tag = 'transparency' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + + transparency_enum = { + 'http://schemas.google.com/g/2005#event.opaque' : 'OPAQUE', + 'http://schemas.google.com/g/2005#event.transparent' : 'TRANSPARENT' } + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, tag='transparency', + enum_map=Transparency.transparency_enum, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +class Comments(atom.AtomBase): + """The Google Calendar comments element""" + + _tag = 'comments' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + gdata.FeedLink) + _attributes['rel'] = 'rel' + + def __init__(self, rel=None, feed_link=None, extension_elements=None, + extension_attributes=None, text=None): + self.rel = rel + self.feed_link = feed_link + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class EventStatus(UriEnumElement): + """The Google Calendar eventStatus element""" + + _tag = 'eventStatus' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + + status_enum = { 
'http://schemas.google.com/g/2005#event.canceled' : 'CANCELED', + 'http://schemas.google.com/g/2005#event.confirmed' : 'CONFIRMED', + 'http://schemas.google.com/g/2005#event.tentative' : 'TENTATIVE'} + + def __init__(self, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, tag='eventStatus', + enum_map=EventStatus.status_enum, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + +class Who(UriEnumElement): + """The Google Calendar Who element""" + + _tag = 'who' + _namespace = gdata.GDATA_NAMESPACE + _children = UriEnumElement._children.copy() + _attributes = UriEnumElement._attributes.copy() + _children['{%s}attendeeStatus' % gdata.GDATA_NAMESPACE] = ( + 'attendee_status', AttendeeStatus) + _children['{%s}attendeeType' % gdata.GDATA_NAMESPACE] = ('attendee_type', + AttendeeType) + _attributes['valueString'] = 'name' + _attributes['email'] = 'email' + + relEnum = { 'http://schemas.google.com/g/2005#event.attendee' : 'ATTENDEE', + 'http://schemas.google.com/g/2005#event.organizer' : 'ORGANIZER', + 'http://schemas.google.com/g/2005#event.performer' : 'PERFORMER', + 'http://schemas.google.com/g/2005#event.speaker' : 'SPEAKER', + 'http://schemas.google.com/g/2005#message.bcc' : 'BCC', + 'http://schemas.google.com/g/2005#message.cc' : 'CC', + 'http://schemas.google.com/g/2005#message.from' : 'FROM', + 'http://schemas.google.com/g/2005#message.reply-to' : 'REPLY_TO', + 'http://schemas.google.com/g/2005#message.to' : 'TO' } + + def __init__(self, name=None, email=None, attendee_status=None, + attendee_type=None, rel=None, extension_elements=None, + extension_attributes=None, text=None): + UriEnumElement.__init__(self, 'who', Who.relEnum, attrib_name='rel', + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.name = name + self.email = email + self.attendee_status = attendee_status + self.attendee_type = attendee_type + 
self.rel = rel + + +class OriginalEvent(atom.AtomBase): + """The Google Calendar OriginalEvent element""" + + _tag = 'originalEvent' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + # TODO: The when tag used to map to a EntryLink, make sure it should really be a When. + _children['{%s}when' % gdata.GDATA_NAMESPACE] = ('when', When) + _attributes['id'] = 'id' + _attributes['href'] = 'href' + + def __init__(self, id=None, href=None, when=None, + extension_elements=None, extension_attributes=None, text=None): + self.id = id + self.href = href + self.when = when + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def GetCalendarEventEntryClass(): + return CalendarEventEntry + +# This class is not completely defined here, because of a circular reference +# in which CalendarEventEntryLink and CalendarEventEntry refer to one another. +class CalendarEventEntryLink(gdata.EntryLink): + """An entryLink which contains a calendar event entry + + Within an event's recurranceExceptions, an entry link + points to a calendar event entry. This class exists + to capture the calendar specific extensions in the entry. + """ + + _tag = 'entryLink' + _namespace = gdata.GDATA_NAMESPACE + _children = gdata.EntryLink._children.copy() + _attributes = gdata.EntryLink._attributes.copy() + # The CalendarEventEntryLink should like CalendarEventEntry as a child but + # that class hasn't been defined yet, so we will wait until after defining + # CalendarEventEntry to list it in _children. 
+ + +class RecurrenceException(atom.AtomBase): + """The Google Calendar RecurrenceException element""" + + _tag = 'recurrenceException' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}entryLink' % gdata.GDATA_NAMESPACE] = ('entry_link', + CalendarEventEntryLink) + _children['{%s}originalEvent' % gdata.GDATA_NAMESPACE] = ('original_event', + OriginalEvent) + _attributes['specialized'] = 'specialized' + + def __init__(self, specialized=None, entry_link=None, + original_event=None, extension_elements=None, + extension_attributes=None, text=None): + self.specialized = specialized + self.entry_link = entry_link + self.original_event = original_event + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class SendEventNotifications(atom.AtomBase): + """The Google Calendar sendEventNotifications element""" + + _tag = 'sendEventNotifications' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, extension_elements=None, + value=None, extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +class QuickAdd(atom.AtomBase): + """The Google Calendar quickadd element""" + + _tag = 'quickadd' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['value'] = 'value' + + def __init__(self, extension_elements=None, + value=None, extension_attributes=None, text=None): + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + def _TransferToElementTree(self, element_tree): + 
if self.value: + element_tree.attrib['value'] = self.value + element_tree.tag = GCAL_TEMPLATE % 'quickadd' + atom.AtomBase._TransferToElementTree(self, element_tree) + return element_tree + + def _TakeAttributeFromElementTree(self, attribute, element_tree): + if attribute == 'value': + self.value = element_tree.attrib[attribute] + del element_tree.attrib[attribute] + else: + atom.AtomBase._TakeAttributeFromElementTree(self, attribute, + element_tree) + + +class WebContentGadgetPref(atom.AtomBase): + + _tag = 'webContentGadgetPref' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + _attributes['value'] = 'value' + + """The Google Calendar Web Content Gadget Preferences element""" + + def __init__(self, name=None, value=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.value = value + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class WebContent(atom.AtomBase): + + _tag = 'webContent' + _namespace = GCAL_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}webContentGadgetPref' % GCAL_NAMESPACE] = ('gadget_pref', + [WebContentGadgetPref]) + _attributes['url'] = 'url' + _attributes['width'] = 'width' + _attributes['height'] = 'height' + + def __init__(self, url=None, width=None, height=None, text=None, + gadget_pref=None, extension_elements=None, extension_attributes=None): + self.url = url + self.width = width + self.height = height + self.text = text + self.gadget_pref = gadget_pref or [] + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class WebContentLink(atom.Link): + + _tag = 'link' + _namespace = atom.ATOM_NAMESPACE + _children = atom.Link._children.copy() + _attributes = 
atom.Link._attributes.copy() + _children['{%s}webContent' % GCAL_NAMESPACE] = ('web_content', WebContent) + + def __init__(self, title=None, href=None, link_type=None, + web_content=None): + atom.Link.__init__(self, rel=WEB_CONTENT_LINK_REL, title=title, href=href, + link_type=link_type) + self.web_content = web_content + + +class CalendarEventEntry(gdata.BatchEntry): + """A Google Calendar flavor of an Atom Entry """ + + _tag = gdata.BatchEntry._tag + _namespace = gdata.BatchEntry._namespace + _children = gdata.BatchEntry._children.copy() + _attributes = gdata.BatchEntry._attributes.copy() + # This class also contains WebContentLinks but converting those members + # is handled in a special version of _ConvertElementTreeToMember. + _children['{%s}where' % gdata.GDATA_NAMESPACE] = ('where', [Where]) + _children['{%s}when' % gdata.GDATA_NAMESPACE] = ('when', [When]) + _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', [Who]) + _children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = ( + 'extended_property', [ExtendedProperty]) + _children['{%s}visibility' % gdata.GDATA_NAMESPACE] = ('visibility', + Visibility) + _children['{%s}transparency' % gdata.GDATA_NAMESPACE] = ('transparency', + Transparency) + _children['{%s}eventStatus' % gdata.GDATA_NAMESPACE] = ('event_status', + EventStatus) + _children['{%s}recurrence' % gdata.GDATA_NAMESPACE] = ('recurrence', + Recurrence) + _children['{%s}recurrenceException' % gdata.GDATA_NAMESPACE] = ( + 'recurrence_exception', [RecurrenceException]) + _children['{%s}sendEventNotifications' % GCAL_NAMESPACE] = ( + 'send_event_notifications', SendEventNotifications) + _children['{%s}quickadd' % GCAL_NAMESPACE] = ('quick_add', QuickAdd) + _children['{%s}comments' % gdata.GDATA_NAMESPACE] = ('comments', Comments) + _children['{%s}originalEvent' % gdata.GDATA_NAMESPACE] = ('original_event', + OriginalEvent) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, 
updated=None, + transparency=None, comments=None, event_status=None, + send_event_notifications=None, visibility=None, + recurrence=None, recurrence_exception=None, + where=None, when=None, who=None, quick_add=None, + extended_property=None, original_event=None, + batch_operation=None, batch_id=None, batch_status=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.BatchEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + batch_operation=batch_operation, batch_id=batch_id, + batch_status=batch_status, + title=title, updated=updated) + + self.transparency = transparency + self.comments = comments + self.event_status = event_status + self.send_event_notifications = send_event_notifications + self.visibility = visibility + self.recurrence = recurrence + self.recurrence_exception = recurrence_exception or [] + self.where = where or [] + self.when = when or [] + self.who = who or [] + self.quick_add = quick_add + self.extended_property = extended_property or [] + self.original_event = original_event + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + # We needed to add special logic to _ConvertElementTreeToMember because we + # want to make links with a rel of WEB_CONTENT_LINK_REL into a + # WebContentLink + def _ConvertElementTreeToMember(self, child_tree): + # Special logic to handle Web Content links + if (child_tree.tag == '{%s}link' % atom.ATOM_NAMESPACE and + child_tree.attrib['rel'] == WEB_CONTENT_LINK_REL): + if self.link is None: + self.link = [] + self.link.append(atom._CreateClassFromElementTree(WebContentLink, + child_tree)) + return + # Find the element's tag in this class's list of child members + if self.__class__._children.has_key(child_tree.tag): + member_name = self.__class__._children[child_tree.tag][0] + member_class = self.__class__._children[child_tree.tag][1] + # If 
the class member is supposed to contain a list, make sure the + # matching member is set to a list, then append the new member + # instance to the list. + if isinstance(member_class, list): + if getattr(self, member_name) is None: + setattr(self, member_name, []) + getattr(self, member_name).append(atom._CreateClassFromElementTree( + member_class[0], child_tree)) + else: + setattr(self, member_name, + atom._CreateClassFromElementTree(member_class, child_tree)) + else: + atom.ExtensionContainer._ConvertElementTreeToMember(self, child_tree) + + + def GetWebContentLink(self): + """Finds the first link with rel set to WEB_CONTENT_REL + + Returns: + A gdata.calendar.WebContentLink or none if none of the links had rel + equal to WEB_CONTENT_REL + """ + + for a_link in self.link: + if a_link.rel == WEB_CONTENT_LINK_REL: + return a_link + return None + + +def CalendarEventEntryFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarEventEntry, xml_string) + + +def CalendarEventCommentEntryFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarEventCommentEntry, xml_string) + + +CalendarEventEntryLink._children = {'{%s}entry' % atom.ATOM_NAMESPACE: + ('entry', CalendarEventEntry)} + + +def CalendarEventEntryLinkFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarEventEntryLink, xml_string) + + +class CalendarEventFeed(gdata.BatchFeed, gdata.LinkFinder): + """A Google Calendar event feed flavor of an Atom Feed""" + + _tag = gdata.BatchFeed._tag + _namespace = gdata.BatchFeed._namespace + _children = gdata.BatchFeed._children.copy() + _attributes = gdata.BatchFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [CalendarEventEntry]) + _children['{%s}timezone' % GCAL_NAMESPACE] = ('timezone', Timezone) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, 
entry=None, + total_results=None, start_index=None, items_per_page=None, + interrupted=None, timezone=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.BatchFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + interrupted=interrupted, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.timezone = timezone + + +def CalendarListEntryFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarListEntry, xml_string) + + +def CalendarAclEntryFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarAclEntry, xml_string) + + +def CalendarListFeedFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarListFeed, xml_string) + + +def CalendarAclFeedFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarAclFeed, xml_string) + + +def CalendarEventFeedFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarEventFeed, xml_string) + + +def CalendarEventCommentFeedFromString(xml_string): + return atom.CreateClassFromXMLString(CalendarEventCommentFeed, xml_string) diff --git a/gdata.py-1.2.3/src/gdata/calendar/service.py b/gdata.py-1.2.3/src/gdata/calendar/service.py new file mode 100755 index 0000000..39745dd --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/calendar/service.py @@ -0,0 +1,585 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""CalendarService extends the GDataService to streamline Google Calendar operations. + + CalendarService: Provides methods to query feeds and manipulate items. Extends + GDataService. + + DictionaryToParamList: Function which converts a dictionary into a list of + URL arguments (represented as strings). This is a + utility function used in CRUD operations. +""" + + +__author__ = 'api.vli (Vivian Li)' + + +import urllib +import gdata +import atom.service +import gdata.service +import gdata.calendar +import atom + + +DEFAULT_BATCH_URL = ('http://www.google.com/calendar/feeds/default/private' + '/full/batch') + + +class Error(Exception): + pass + + +class RequestError(Error): + pass + + +class CalendarService(gdata.service.GDataService): + """Client for the Google Calendar service.""" + + def __init__(self, email=None, password=None, source=None, + server='www.google.com', + additional_headers=None): + gdata.service.GDataService.__init__(self, email=email, password=password, + service='cl', source=source, + server=server, + additional_headers=additional_headers) + + def GetCalendarEventFeed(self, uri='/calendar/feeds/default/private/full'): + return self.Get(uri, converter=gdata.calendar.CalendarEventFeedFromString) + + def GetCalendarEventEntry(self, uri): + return self.Get(uri, converter=gdata.calendar.CalendarEventEntryFromString) + + def GetCalendarListFeed(self, uri='/calendar/feeds/default/allcalendars/full'): + return self.Get(uri, converter=gdata.calendar.CalendarListFeedFromString) + + def GetAllCalendarsFeed(self, 
uri='/calendar/feeds/default/allcalendars/full'): + return self.Get(uri, converter=gdata.calendar.CalendarListFeedFromString) + + def GetOwnCalendarsFeed(self, uri='/calendar/feeds/default/owncalendars/full'): + return self.Get(uri, converter=gdata.calendar.CalendarListFeedFromString) + + def GetCalendarListEntry(self, uri): + return self.Get(uri, converter=gdata.calendar.CalendarListEntryFromString) + + def GetCalendarAclFeed(self, uri='/calendar/feeds/default/acl/full'): + return self.Get(uri, converter=gdata.calendar.CalendarAclFeedFromString) + + def GetCalendarAclEntry(self, uri): + return self.Get(uri, converter=gdata.calendar.CalendarAclEntryFromString) + + def GetCalendarEventCommentFeed(self, uri): + return self.Get(uri, converter=gdata.calendar.CalendarEventCommentFeedFromString) + + def GetCalendarEventCommentEntry(self, uri): + return self.Get(uri, converter=gdata.calendar.CalendarEventCommentEntryFromString) + + def Query(self, uri, converter=None): + """Performs a query and returns a resulting feed or entry. + + Args: + feed: string The feed which is to be queried + + Returns: + On success, a GDataFeed or Entry depending on which is sent from the + server. 
+ On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + if converter: + result = self.Get(uri, converter=converter) + else: + result = self.Get(uri) + return result + + def CalendarQuery(self, query): + if isinstance(query, CalendarEventQuery): + return self.Query(query.ToUri(), + converter=gdata.calendar.CalendarEventFeedFromString) + elif isinstance(query, CalendarListQuery): + return self.Query(query.ToUri(), + converter=gdata.calendar.CalendarListFeedFromString) + elif isinstance(query, CalendarEventCommentQuery): + return self.Query(query.ToUri(), + converter=gdata.calendar.CalendarEventCommentFeedFromString) + else: + return self.Query(query.ToUri()) + + def InsertEvent(self, new_event, insert_uri, url_params=None, + escape_params=True): + """Adds an event to Google Calendar. + + Args: + new_event: atom.Entry or subclass A new event which is to be added to + Google Calendar. + insert_uri: the URL to post new events to the feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the event created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + return self.Post(new_event, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarEventEntryFromString) + + def InsertCalendarSubscription(self, calendar, url_params=None, + escape_params=True): + """Subscribes the authenticated user to the provided calendar. + + Args: + calendar: The calendar to which the user should be subscribed. 
+ url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the subscription created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + insert_uri = '/calendar/feeds/default/allcalendars/full' + return self.Post(calendar, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarListEntryFromString) + + def InsertCalendar(self, new_calendar, url_params=None, + escape_params=True): + """Creates a new calendar. + + Args: + new_calendar: The calendar to be created + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the calendar created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + insert_uri = '/calendar/feeds/default/owncalendars/full' + response = self.Post(new_calendar, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarListEntryFromString) + return response + + def UpdateCalendar(self, calendar, url_params=None, + escape_params=True): + """Updates a calendar. + + Args: + calendar: The calendar which should be updated + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. 
+ + Returns: + On successful insert, an entry containing the calendar created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + update_uri = calendar.GetEditLink().href + response = self.Put(data=calendar, uri=update_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarListEntryFromString) + return response + + def InsertAclEntry(self, new_entry, insert_uri, url_params=None, + escape_params=True): + """Adds an ACL entry (rule) to Google Calendar. + + Args: + new_entry: atom.Entry or subclass A new ACL entry which is to be added to + Google Calendar. + insert_uri: the URL to post new entries to the ACL feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful insert, an entry containing the ACL entry created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + return self.Post(new_entry, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarAclEntryFromString) + + def InsertEventComment(self, new_entry, insert_uri, url_params=None, + escape_params=True): + """Adds an entry to Google Calendar. + + Args: + new_entry: atom.Entry or subclass A new entry which is to be added to + Google Calendar. + insert_uri: the URL to post new entrys to the feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. 
+ + Returns: + On successful insert, an entry containing the comment created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + return self.Post(new_entry, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarEventCommentEntryFromString) + + def DeleteEvent(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + """Removes an event with the specified ID from Google Calendar. + + Args: + edit_uri: string The edit URL of the entry to be deleted. Example: + 'http://www.google.com/calendar/feeds/default/private/full/abx' + url_params: dict (optional) Additional URL parameters to be included + in the deletion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful delete, a httplib.HTTPResponse containing the server's + response to the DELETE request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + url_prefix = 'http://%s/' % self.server + if edit_uri.startswith(url_prefix): + edit_uri = edit_uri[len(url_prefix):] + return self.Delete('/%s' % edit_uri, + url_params=url_params, escape_params=escape_params) + + def DeleteAclEntry(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + """Removes an ACL entry at the given edit_uri from Google Calendar. + + Args: + edit_uri: string The edit URL of the entry to be deleted. Example: + 'http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/default' + url_params: dict (optional) Additional URL parameters to be included + in the deletion request. 
+ escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful delete, a httplib.HTTPResponse containing the server's + response to the DELETE request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + url_prefix = 'http://%s/' % self.server + if edit_uri.startswith(url_prefix): + edit_uri = edit_uri[len(url_prefix):] + return self.Delete('/%s' % edit_uri, + url_params=url_params, escape_params=escape_params) + + def DeleteCalendarEntry(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + """Removes a calendar entry at the given edit_uri from Google Calendar. + + Args: + edit_uri: string The edit URL of the entry to be deleted. Example: + 'http://www.google.com/calendar/feeds/default/allcalendars/abcdef@group.calendar.google.com' + url_params: dict (optional) Additional URL parameters to be included + in the deletion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful delete, True is returned + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + + return self.Delete(edit_uri, url_params=url_params, + escape_params=escape_params) + + def UpdateEvent(self, edit_uri, updated_event, url_params=None, + escape_params=True): + """Updates an existing event. + + Args: + edit_uri: string The edit link URI for the element being updated + updated_event: string, atom.Entry, or subclass containing + the Atom Entry which will replace the event which is + stored at the edit_url + url_params: dict (optional) Additional URL parameters to be included + in the update request. 
+ escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful update, a httplib.HTTPResponse containing the server's + response to the PUT request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + url_prefix = 'http://%s/' % self.server + if edit_uri.startswith(url_prefix): + edit_uri = edit_uri[len(url_prefix):] + return self.Put(updated_event, '/%s' % edit_uri, + url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarEventEntryFromString) + + def UpdateAclEntry(self, edit_uri, updated_rule, url_params=None, + escape_params=True): + """Updates an existing ACL rule. + + Args: + edit_uri: string The edit link URI for the element being updated + updated_rule: string, atom.Entry, or subclass containing + the Atom Entry which will replace the event which is + stored at the edit_url + url_params: dict (optional) Additional URL parameters to be included + in the update request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful update, a httplib.HTTPResponse containing the server's + response to the PUT request. 
+ On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + url_prefix = 'http://%s/' % self.server + if edit_uri.startswith(url_prefix): + edit_uri = edit_uri[len(url_prefix):] + return self.Put(updated_rule, '/%s' % edit_uri, + url_params=url_params, + escape_params=escape_params, + converter=gdata.calendar.CalendarAclEntryFromString) + + def ExecuteBatch(self, batch_feed, url, + converter=gdata.calendar.CalendarEventFeedFromString): + """Sends a batch request feed to the server. + + The batch request needs to be sent to the batch URL for a particular + calendar. You can find the URL by calling GetBatchLink().href on the + CalendarEventFeed. + + Args: + batch_feed: gdata.calendar.CalendarEventFeed A feed containing batch + request entries. Each entry contains the operation to be performed + on the data contained in the entry. For example an entry with an + operation type of insert will be used as if the individual entry + had been inserted. + url: str The batch URL for the Calendar to which these operations should + be applied. + converter: Function (optional) The function used to convert the server's + response to an object. The default value is + CalendarEventFeedFromString. + + Returns: + The results of the batch request's execution on the server. If the + default converter is used, this is stored in a CalendarEventFeed. 
+ """ + return self.Post(batch_feed, url, converter=converter) + + +class CalendarEventQuery(gdata.service.Query): + + def __init__(self, user='default', visibility='private', projection='full', + text_query=None, params=None, categories=None): + gdata.service.Query.__init__(self, + feed='http://www.google.com/calendar/feeds/%s/%s/%s' % ( + urllib.quote(user), + urllib.quote(visibility), + urllib.quote(projection)), + text_query=text_query, params=params, categories=categories) + + def _GetStartMin(self): + if 'start-min' in self.keys(): + return self['start-min'] + else: + return None + + def _SetStartMin(self, val): + self['start-min'] = val + + start_min = property(_GetStartMin, _SetStartMin, + doc="""The start-min query parameter""") + + def _GetStartMax(self): + if 'start-max' in self.keys(): + return self['start-max'] + else: + return None + + def _SetStartMax(self, val): + self['start-max'] = val + + start_max = property(_GetStartMax, _SetStartMax, + doc="""The start-max query parameter""") + + def _GetOrderBy(self): + if 'orderby' in self.keys(): + return self['orderby'] + else: + return None + + def _SetOrderBy(self, val): + if val is not 'lastmodified' and val is not 'starttime': + raise Error, "Order By must be either 'lastmodified' or 'starttime'" + self['orderby'] = val + + orderby = property(_GetOrderBy, _SetOrderBy, + doc="""The orderby query parameter""") + + def _GetSortOrder(self): + if 'sortorder' in self.keys(): + return self['sortorder'] + else: + return None + + def _SetSortOrder(self, val): + if (val is not 'ascending' and val is not 'descending' + and val is not 'a' and val is not 'd' and val is not 'ascend' + and val is not 'descend'): + raise Error, "Sort order must be either ascending, ascend, " + ( + "a or descending, descend, or d") + self['sortorder'] = val + + sortorder = property(_GetSortOrder, _SetSortOrder, + doc="""The sortorder query parameter""") + + def _GetSingleEvents(self): + if 'singleevents' in self.keys(): + return 
self['singleevents'] + else: + return None + + def _SetSingleEvents(self, val): + self['singleevents'] = val + + singleevents = property(_GetSingleEvents, _SetSingleEvents, + doc="""The singleevents query parameter""") + + def _GetFutureEvents(self): + if 'futureevents' in self.keys(): + return self['futureevents'] + else: + return None + + def _SetFutureEvents(self, val): + self['futureevents'] = val + + futureevents = property(_GetFutureEvents, _SetFutureEvents, + doc="""The futureevents query parameter""") + + def _GetRecurrenceExpansionStart(self): + if 'recurrence-expansion-start' in self.keys(): + return self['recurrence-expansion-start'] + else: + return None + + def _SetRecurrenceExpansionStart(self, val): + self['recurrence-expansion-start'] = val + + recurrence_expansion_start = property(_GetRecurrenceExpansionStart, + _SetRecurrenceExpansionStart, + doc="""The recurrence-expansion-start query parameter""") + + def _GetRecurrenceExpansionEnd(self): + if 'recurrence-expansion-end' in self.keys(): + return self['recurrence-expansion-end'] + else: + return None + + def _SetRecurrenceExpansionEnd(self, val): + self['recurrence-expansion-end'] = val + + recurrence_expansion_end = property(_GetRecurrenceExpansionEnd, + _SetRecurrenceExpansionEnd, + doc="""The recurrence-expansion-end query parameter""") + + def _SetTimezone(self, val): + self['ctz'] = val + + def _GetTimezone(self): + if 'ctz' in self.keys(): + return self['ctz'] + else: + return None + + ctz = property(_GetTimezone, _SetTimezone, + doc="""The ctz query parameter which sets report time on the server.""") + + +class CalendarListQuery(gdata.service.Query): + """Queries the Google Calendar meta feed""" + + def __init__(self, userId=None, text_query=None, + params=None, categories=None): + if userId is None: + userId = 'default' + + gdata.service.Query.__init__(self, feed='http://www.google.com/calendar/feeds/' + +userId, + text_query=text_query, params=params, + categories=categories) + +class 
CalendarEventCommentQuery(gdata.service.Query): + """Queries the Google Calendar event comments feed""" + + def __init__(self, feed=None): + gdata.service.Query.__init__(self, feed=feed) diff --git a/gdata.py-1.2.3/src/gdata/client.py b/gdata.py-1.2.3/src/gdata/client.py new file mode 100644 index 0000000..62db08e --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/client.py @@ -0,0 +1,93 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import urllib +import urlparse +import gdata.auth +import gdata.service +import atom.service + + +SCOPE_URL_PARAM_NAME = gdata.service.SCOPE_URL_PARAM_NAME +# Maps the service names used in ClientLogin to scope URLs. +CLIENT_LOGIN_SCOPES = gdata.service.CLIENT_LOGIN_SCOPES + + +class AuthorizationRequired(gdata.service.Error): + pass + + +class GDataClient(gdata.service.GDataService): + """This class is deprecated. + + All functionality has been migrated to gdata.service.GDataService. 
+ """ + def __init__(self, application_name=None, tokens=None): + gdata.service.GDataService.__init__(self, source=application_name, + tokens=tokens) + + def ClientLogin(self, username, password, service_name, source=None, + account_type=None, auth_url=None, login_token=None, login_captcha=None): + gdata.service.GDataService.ClientLogin(self, username=username, + password=password, account_type=account_type, service=service_name, + auth_service_url=auth_url, source=source, captcha_token=login_token, + captcha_response=login_captcha) + + def Get(self, url, parser): + """Simplified interface for Get. + + Requires a parser function which takes the server response's body as + the only argument. + + Args: + url: A string or something that can be converted to a string using str. + The URL of the requested resource. + parser: A function which takes the HTTP body from the server as it's + only result. Common values would include str, + gdata.GDataEntryFromString, and gdata.GDataFeedFromString. + + Returns: The result of calling parser(http_response_body). + """ + return gdata.service.GDataService.Get(self, uri=url, converter=parser) + + def Post(self, data, url, parser, media_source=None): + """Streamlined version of Post. + + Requires a parser function which takes the server response's body as + the only argument. + """ + return gdata.service.GDataService.Post(self, data=data, uri=url, + media_source=media_source, converter=parser) + + def Put(self, data, url, parser, media_source=None): + """Streamlined version of Put. + + Requires a parser function which takes the server response's body as + the only argument. 
+ """ + return gdata.service.GDataService.Put(self, data=data, uri=url, + media_source=media_source, converter=parser) + + def Delete(self, url): + return gdata.service.GDataService.Delete(self, uri=url) + + +ExtractToken = gdata.service.ExtractToken +GenerateAuthSubRequestUrl = gdata.service.GenerateAuthSubRequestUrl diff --git a/gdata.py-1.2.3/src/gdata/codesearch/__init__.py b/gdata.py-1.2.3/src/gdata/codesearch/__init__.py new file mode 100644 index 0000000..fa23ef0 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/codesearch/__init__.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2007 Benoit Chesneau +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ + +"""Contains extensions to Atom objects used by Google Codesearch""" + +__author__ = 'Benoit Chesneau' + + +import atom +import gdata + + +CODESEARCH_NAMESPACE='http://schemas.google.com/codesearch/2006' +CODESEARCH_TEMPLATE='{http://shema.google.com/codesearch/2006}%s' + + +class Match(atom.AtomBase): + """ The Google Codesearch match element """ + _tag = 'match' + _namespace = CODESEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['lineNumber'] = 'line_number' + _attributes['type'] = 'type' + + def __init__(self, line_number=None, type=None, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.type = type + self.line_number = line_number + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class File(atom.AtomBase): + """ The Google Codesearch file element""" + _tag = 'file' + _namespace = CODESEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.name = name + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Package(atom.AtomBase): + """ The Google Codesearch package element""" + _tag = 'package' + _namespace = CODESEARCH_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + _attributes['uri'] = 'uri' + + def __init__(self, name=None, uri=None, extension_elements=None, + extension_attributes=None, text=None): + self.text = text + self.name = name + self.uri = uri + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class CodesearchEntry(gdata.GDataEntry): + """ 
Google codesearch atom entry""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + _children['{%s}file' % CODESEARCH_NAMESPACE] = ('file', File) + _children['{%s}package' % CODESEARCH_NAMESPACE] = ('package', Package) + _children['{%s}match' % CODESEARCH_NAMESPACE] = ('match', [Match]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + match=None, + extension_elements=None, extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, + updated=updated, text=None) + + self.match = match or [] + + +def CodesearchEntryFromString(xml_string): + """Converts an XML string into a CodesearchEntry object. + + Args: + xml_string: string The XML describing a Codesearch feed entry. + + Returns: + A CodesearchEntry object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(CodesearchEntry, xml_string) + + +class CodesearchFeed(gdata.GDataFeed): + """feed containing list of Google codesearch Items""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CodesearchEntry]) + + +def CodesearchFeedFromString(xml_string): + """Converts an XML string into a CodesearchFeed object. + Args: + xml_string: string The XML describing a Codesearch feed. + Returns: + A CodeseartchFeed object corresponding to the given XML. 
+ """ + return atom.CreateClassFromXMLString(CodesearchFeed, xml_string) diff --git a/gdata.py-1.2.3/src/gdata/codesearch/service.py b/gdata.py-1.2.3/src/gdata/codesearch/service.py new file mode 100644 index 0000000..e4210c7 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/codesearch/service.py @@ -0,0 +1,114 @@ +# -*- coding: utf-8 -*- +# +# Copyright (c) 2007 Benoit Chesneau +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + +"""CodesearchService extends GDataService to streamline Google Codesearch +operations""" + + +__author__ = 'Benoit Chesneau' + + +import atom +import gdata.service +import gdata.codesearch + + +class CodesearchService(gdata.service.GDataService): + """Client extension for Google codesearch service""" + + def __init__(self, email=None, password=None, source=None, + server='www.google.com', additional_headers=None): + """Constructor for the CodesearchService. + + Args: + email: string (optional) The e-mail address of the account to use for + authentication. + password: string (optional) The password of the account to use for + authentication. + source: string (optional) The name of the user's application. + server: string (optional) The server the feed is hosted on. 
+ additional_headers: dict (optional) Any additional HTTP headers to be + transmitted to the service in the form of key-value + pairs. + Yields: + A CodesearchService object used to communicate with the Google Codesearch + service. + """ + + gdata.service.GDataService.__init__(self, + email=email, password=password, service='codesearch', + source=source,server=server, + additional_headers=additional_headers) + + def Query(self, uri, converter=gdata.codesearch.CodesearchFeedFromString): + """Queries the Codesearch feed and returns the resulting feed of + entries. + + Args: + uri: string The full URI to be queried. This can contain query + parameters, a hostname, or simply the relative path to a Document + List feed. The DocumentQuery object is useful when constructing + query parameters. + converter: func (optional) A function which will be executed on the + retrieved item, generally to render it into a Python object. + By default the CodesearchFeedFromString function is used to + return a CodesearchFeed object. This is because most feed + queries will result in a feed and not a single entry. + + Returns : + A CodesearchFeed objects representing the feed returned by the server + """ + return self.Get(uri, converter=converter) + + def GetSnippetsFeed(self, text_query=None): + """Retrieve Codesearch feed for a keyword + + Args: + text_query : string (optional) The contents of the q query parameter. This + string is URL escaped upon conversion to a URI. + Returns: + A CodesearchFeed objects representing the feed returned by the server + """ + + query=gdata.codesearch.service.CodesearchQuery(text_query=text_query) + feed = self.Query(query.ToUri()) + return feed + + +class CodesearchQuery(gdata.service.Query): + """Object used to construct the query to the Google Codesearch feed. here only as a shorcut""" + + def __init__(self, feed='/codesearch/feeds/search', text_query=None, + params=None, categories=None): + """Constructor for Codesearch Query. 
+ + Args: + feed: string (optional) The path for the feed. (e.g. '/codesearch/feeds/search') + text_query: string (optional) The contents of the q query parameter. This + string is URL escaped upon conversion to a URI. + params: dict (optional) Parameter value string pairs which become URL + params when translated to a URI. These parameters are added to + the query's items. + categories: list (optional) List of category strings which should be + included as query categories. See gdata.service.Query for + additional documentation. + + Yelds: + A CodesearchQuery object to construct a URI based on Codesearch feed + """ + + gdata.service.Query.__init__(self, feed, text_query, params, categories) diff --git a/gdata.py-1.2.3/src/gdata/contacts/__init__.py b/gdata.py-1.2.3/src/gdata/contacts/__init__.py new file mode 100644 index 0000000..a03a730 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/contacts/__init__.py @@ -0,0 +1,355 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Contains extensions to ElementWrapper objects used with Google Contacts.""" + +__author__ = 'dbrattli (Dag Brattli)' + + +import atom +import gdata + + +## Constants from http://code.google.com/apis/gdata/elements.html ## +REL_HOME = 'http://schemas.google.com/g/2005#home' +REL_WORK = 'http://schemas.google.com/g/2005#work' +REL_OTHER = 'http://schemas.google.com/g/2005#other' + + +IM_AIM = 'http://schemas.google.com/g/2005#AIM' # AOL Instant Messenger protocol +IM_MSN = 'http://schemas.google.com/g/2005#MSN' # MSN Messenger protocol +IM_YAHOO = 'http://schemas.google.com/g/2005#YAHOO' # Yahoo Messenger protocol +IM_SKYPE = 'http://schemas.google.com/g/2005#SKYPE' # Skype protocol +IM_QQ = 'http://schemas.google.com/g/2005#QQ' # QQ protocol +# Google Talk protocol +IM_GOOGLE_TALK = 'http://schemas.google.com/g/2005#GOOGLE_TALK' +IM_ICQ = 'http://schemas.google.com/g/2005#ICQ' # ICQ protocol +IM_JABBER = 'http://schemas.google.com/g/2005#JABBER' # Jabber protocol + + +PHOTO_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#photo' +PHOTO_EDIT_LINK_REL = 'http://schemas.google.com/contacts/2008/rel#edit-photo' + + +PHONE_CAR = 'http://schemas.google.com/g/2005#car' # Number of a car phone. +PHONE_FAX = 'http://schemas.google.com/g/2005#fax' +# Unknown or unspecified type, such as a business phone number that doesn't +# belong to a particular person. +PHONE_GENERAL = 'http://schemas.google.com/g/2005#general' +PHONE_HOME = REL_HOME +PHONE_HOME_FAX = 'http://schemas.google.com/g/2005#home_fax' +# Phone number that makes sense only in a context known to the user (such as +# an enterprise PBX). +PHONE_INTERNAL = 'http://schemas.google.com/g/2005#internal-extension' +PHONE_MOBILE = 'http://schemas.google.com/g/2005#mobile' +# A special type of number for which no other rel value makes sense. +# For example, a TTY device. label can be used to indicate the actual type. 
+PHONE_OTHER = REL_OTHER +PHONE_PAGER = 'http://schemas.google.com/g/2005#pager' +PHONE_SATELLITE = 'http://schemas.google.com/g/2005#satellite' +PHONE_VOIP = 'http://schemas.google.com/g/2005#voip' +PHONE_WORK = REL_WORK +PHONE_WORK_FAX = 'http://schemas.google.com/g/2005#work_fax' + + +CONTACTS_NAMESPACE = 'http://schemas.google.com/contact/2008' + + +class OrgName(atom.AtomBase): + _tag = 'orgName' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, text=None, + extension_elements=None, extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class OrgTitle(atom.AtomBase): + _tag = 'orgTitle' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, text=None, + extension_elements=None, extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Organization(atom.AtomBase): + _tag = 'organization' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + _attributes['rel'] = 'rel' + _attributes['label'] = 'label' + _attributes['primary'] = 'primary' + + _children['{%s}orgName' % gdata.GDATA_NAMESPACE] = ('org_name', OrgName) + _children['{%s}orgTitle' % gdata.GDATA_NAMESPACE] = ('org_title', OrgTitle) + + def __init__(self, rel=None, primary='false', org_name=None, org_title=None, + label=None, text=None, extension_elements=None, + extension_attributes=None): + self.rel = rel or REL_OTHER + self.primary = primary + self.org_name = org_name + self.org_title = org_title + self.label = label + self.text = text + self.extension_elements = extension_elements or [] + 
self.extension_attributes = extension_attributes or {} + + +class PostalAddress(atom.AtomBase): + _tag = 'postalAddress' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + _attributes['primary'] = 'primary' + _attributes['rel'] = 'rel' + + def __init__(self, primary=None, rel=None, text=None, + extension_elements=None, extension_attributes=None): + self.primary = primary + self.rel = rel or REL_OTHER + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class IM(atom.AtomBase): + _tag = 'im' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + _attributes['address'] = 'address' + _attributes['primary'] = 'primary' + _attributes['protocol'] = 'protocol' + _attributes['label'] = 'label' + _attributes['rel'] = 'rel' + + def __init__(self, primary=None, rel=None, address=None, protocol=None, + label=None, text=None, extension_elements=None, + extension_attributes=None): + self.protocol = protocol + self.address = address + self.primary = primary + self.rel = rel or REL_OTHER + self.label = label + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Email(atom.AtomBase): + _tag = 'email' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + _attributes['address'] = 'address' + _attributes['primary'] = 'primary' + _attributes['rel'] = 'rel' + _attributes['label'] = 'label' + + def __init__(self, primary=None, rel=None, address=None, text=None, + label=None, extension_elements=None, extension_attributes=None): + self.address = address + self.primary = primary + self.rel = rel or REL_OTHER + self.label = label + self.text = text + self.extension_elements = 
extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class PhoneNumber(atom.AtomBase): + _tag = 'phoneNumber' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + _attributes['primary'] = 'primary' + _attributes['rel'] = 'rel' + + def __init__(self, primary=None, rel=None, text=None, + extension_elements=None, extension_attributes=None): + self.primary = primary + self.rel = rel or REL_OTHER + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Deleted(atom.AtomBase): + _tag = 'deleted' + _namespace = gdata.GDATA_NAMESPACE + + def __init__(self, text=None, + extension_elements=None, extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class GroupMembershipInfo(atom.AtomBase): + _tag = 'groupMembershipInfo' + _namespace = CONTACTS_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + + _attributes['deleted'] = 'deleted' + _attributes['href'] = 'href' + + def __init__(self, deleted=None, href=None, text=None, + extension_elements=None, extension_attributes=None): + self.deleted = deleted + self.href = href + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class ContactEntry(gdata.BatchEntry): + """A Google Contact flavor of an Atom Entry """ + + _children = gdata.BatchEntry._children.copy() + + _children['{%s}postalAddress' % gdata.GDATA_NAMESPACE] = ('postal_address', + [PostalAddress]) + _children['{%s}phoneNumber' % gdata.GDATA_NAMESPACE] = ('phone_number', + [PhoneNumber]) + _children['{%s}organization' % gdata.GDATA_NAMESPACE] = ('organization', + Organization) + _children['{%s}email' % gdata.GDATA_NAMESPACE] = ('email', [Email]) + _children['{%s}im' % 
gdata.GDATA_NAMESPACE] = ('im', [IM]) + _children['{%s}deleted' % gdata.GDATA_NAMESPACE] = ('deleted', Deleted) + _children['{%s}groupMembershipInfo' % CONTACTS_NAMESPACE] = ( + 'group_membership_info', [GroupMembershipInfo]) + _children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = ( + 'extended_property', [gdata.ExtendedProperty]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, email=None, postal_address=None, + deleted=None, organization=None, phone_number=None, im=None, + extended_property=None, group_membership_info=None, + batch_operation=None, batch_id=None, batch_status=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.BatchEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, published=published, + batch_operation=batch_operation, batch_id=batch_id, + batch_status=batch_status, title=title, updated=updated) + self.organization = organization + self.deleted = deleted + self.phone_number = phone_number or [] + self.postal_address = postal_address or [] + self.im = im or [] + self.extended_property = extended_property or [] + self.email = email or [] + self.group_membership_info = group_membership_info or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + def GetPhotoLink(self): + for a_link in self.link: + if a_link.rel == PHOTO_LINK_REL: + return a_link + return None + + def GetPhotoEditLink(self): + for a_link in self.link: + if a_link.rel == PHOTO_EDIT_LINK_REL: + return a_link + return None + + +def ContactEntryFromString(xml_string): + return atom.CreateClassFromXMLString(ContactEntry, xml_string) + + +class ContactsFeed(gdata.BatchFeed, gdata.LinkFinder): + """A Google Contacts feed flavor of an Atom Feed""" + + _children = gdata.BatchFeed._children.copy() + + _children['{%s}entry' % 
atom.ATOM_NAMESPACE] = ('entry', [ContactEntry]) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.BatchFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + +def ContactsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(ContactsFeed, xml_string) + + +class GroupEntry(gdata.BatchEntry): + """Represents a contact group.""" + _children = gdata.BatchEntry._children.copy() + _children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = ( + 'extended_property', [gdata.ExtendedProperty]) + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, control=None, title=None, updated=None, + extended_property=None, batch_operation=None, batch_id=None, + batch_status=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.BatchEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + batch_operation=batch_operation, batch_id=batch_id, + batch_status=batch_status, + title=title, updated=updated) + self.extended_property = extended_property or [] + + +def GroupEntryFromString(xml_string): + return atom.CreateClassFromXMLString(GroupEntry, xml_string) + + +class GroupsFeed(gdata.BatchFeed): + """A Google contact 
groups feed flavor of an Atom Feed""" + _children = gdata.BatchFeed._children.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [GroupEntry]) + + +def GroupsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(GroupsFeed, xml_string) diff --git a/gdata.py-1.2.3/src/gdata/contacts/service.py b/gdata.py-1.2.3/src/gdata/contacts/service.py new file mode 100644 index 0000000..17a76b0 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/contacts/service.py @@ -0,0 +1,295 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""ContactsService extends the GDataService to streamline Google Contacts operations. + + ContactsService: Provides methods to query feeds and manipulate items. Extends + GDataService. + + DictionaryToParamList: Function which converts a dictionary into a list of + URL arguments (represented as strings). This is a + utility function used in CRUD operations. 
+""" + +__author__ = 'dbrattli (Dag Brattli)' + + +import gdata +import atom.service +import gdata.service +import gdata.calendar +import atom + +DEFAULT_BATCH_URL = ('http://www.google.com/m8/feeds/contacts/default/full' + '/batch') + +class Error(Exception): + pass + +class RequestError(Error): + pass + +class ContactsService(gdata.service.GDataService): + """Client for the Google Contats service.""" + + def __init__(self, email=None, password=None, source=None, + server='www.google.com', + additional_headers=None): + gdata.service.GDataService.__init__(self, email=email, password=password, + service='cp', source=source, + server=server, + additional_headers=additional_headers) + + def GetContactsFeed(self, + uri='http://www.google.com/m8/feeds/contacts/default/full'): + return self.Get(uri, converter=gdata.contacts.ContactsFeedFromString) + + def GetContact(self, uri): + return self.Get(uri, converter=gdata.contacts.ContactEntryFromString) + + def CreateContact(self, new_contact, + insert_uri='/m8/feeds/contacts/default/full', url_params=None, + escape_params=True): + """Adds an new contact to Google Contacts. + + Args: + new_contact: atom.Entry or subclass A new contact which is to be added to + Google Contacts. + insert_uri: the URL to post new contacts to the feed + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. 
+ + Returns: + On successful insert, an entry containing the contact created + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + return self.Post(new_contact, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.contacts.ContactEntryFromString) + + + def UpdateContact(self, edit_uri, updated_contact, url_params=None, + escape_params=True): + """Updates an existing contact. + + Args: + edit_uri: string The edit link URI for the element being updated + updated_contact: string, atom.Entry or subclass containing + the Atom Entry which will replace the contact which is + stored at the edit_url + url_params: dict (optional) Additional URL parameters to be included + in the update request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful update, a httplib.HTTPResponse containing the server's + response to the PUT request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + url_prefix = 'http://%s/' % self.server + if edit_uri.startswith(url_prefix): + edit_uri = edit_uri[len(url_prefix):] + return self.Put(updated_contact, '/%s' % edit_uri, + url_params=url_params, + escape_params=escape_params, + converter=gdata.contacts.ContactEntryFromString) + + def DeleteContact(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + """Removes an contact with the specified ID from Google Contacts. + + Args: + edit_uri: string The edit URL of the entry to be deleted. Example: + 'http://www.google.com/m8/feeds/contacts/default/full/xxx/yyy' + url_params: dict (optional) Additional URL parameters to be included + in the deletion request. 
+ escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + On successful delete, a httplib.HTTPResponse containing the server's + response to the DELETE request. + On failure, a RequestError is raised of the form: + {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response} + """ + url_prefix = 'http://%s/' % self.server + if edit_uri.startswith(url_prefix): + edit_uri = edit_uri[len(url_prefix):] + return self.Delete('/%s' % edit_uri, + url_params=url_params, escape_params=escape_params) + + def GetGroupsFeed(self, + uri='http://www.google.com/m8/feeds/groups/default/full'): + return self.Get(uri, converter=gdata.contacts.GroupsFeedFromString) + + def CreateGroup(self, new_group, + insert_uri='/m8/feeds/groups/default/full', url_params=None, + escape_params=True): + return self.Post(new_group, insert_uri, url_params=url_params, + escape_params=escape_params, + converter=gdata.contacts.GroupEntryFromString) + + def UpdateGroup(self, edit_uri, updated_group, url_params=None, + escape_params=True): + url_prefix = 'http://%s/' % self.server + if edit_uri.startswith(url_prefix): + edit_uri = edit_uri[len(url_prefix):] + return self.Put(updated_group, '/%s' % edit_uri, + url_params=url_params, + escape_params=escape_params, + converter=gdata.contacts.GroupEntryFromString) + + def DeleteGroup(self, edit_uri, extra_headers=None, + url_params=None, escape_params=True): + url_prefix = 'http://%s/' % self.server + if edit_uri.startswith(url_prefix): + edit_uri = edit_uri[len(url_prefix):] + return self.Delete('/%s' % edit_uri, + url_params=url_params, escape_params=escape_params) + + def ChangePhoto(self, media, contact_entry_or_url, content_type=None, + content_length=None): + """Change the photo for the contact by uploading a new photo. + + Performs a PUT against the photo edit URL to send the binary data for the + photo. 
+ + Args: + media: filename, file-like-object, or a gdata.MediaSource object to send. + contact_entry_or_url: ContactEntry or str If it is a ContactEntry, this + method will search for an edit photo link URL and + perform a PUT to the URL. + content_type: str (optional) the mime type for the photo data. This is + necessary if media is a file or file name, but if media + is a MediaSource object then the media object can contain + the mime type. If media_type is set, it will override the + mime type in the media object. + content_length: int or str (optional) Specifying the content length is + only required if media is a file-like object. If media + is a filename, the length is determined using + os.path.getsize. If media is a MediaSource object, it is + assumed that it already contains the content length. + """ + if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry): + url = contact_entry_or_url.GetPhotoEditLink().href + else: + url = contact_entry_or_url + if isinstance(media, gdata.MediaSource): + payload = media + # If the media object is a file-like object, then use it as the file + # handle in the in the MediaSource. + elif hasattr(media, 'read'): + payload = gdata.MediaSource(file_handle=media, + content_type=content_type, content_length=content_length) + # Assume that the media object is a file name. + else: + payload = gdata.MediaSource(content_type=content_type, + content_length=content_length, file_path=media) + return self.Put(payload, url) + + def GetPhoto(self, contact_entry_or_url): + """Retrives the binary data for the contact's profile photo as a string. + + Args: + contact_entry_or_url: a gdata.contacts.ContactEntry objecr or a string + containing the photo link's URL. If the contact entry does not + contain a photo link, the image will not be fetched and this method + will return None. 
+ """ + # TODO: add the ability to write out the binary image data to a file, + # reading and writing a chunk at a time to avoid potentially using up + # large amounts of memory. + url = None + if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry): + photo_link = contact_entry_or_url.GetPhotoLink() + if photo_link: + url = photo_link.href + else: + url = contact_entry_or_url + if url: + return self.Get(url, converter=str) + else: + return None + + def DeletePhoto(self, contact_entry_or_url): + url = None + if isinstance(contact_entry_or_url, gdata.contacts.ContactEntry): + url = contact_entry_or_url.GetPhotoEditLink().href + else: + url = contact_entry_or_url + if url: + self.Delete(url) + + def ExecuteBatch(self, batch_feed, url, + converter=gdata.contacts.ContactsFeedFromString): + """Sends a batch request feed to the server. + + Args: + batch_feed: gdata.contacts.ContactFeed A feed containing batch + request entries. Each entry contains the operation to be performed + on the data contained in the entry. For example an entry with an + operation type of insert will be used as if the individual entry + had been inserted. + url: str The batch URL to which these operations should be applied. + converter: Function (optional) The function used to convert the server's + response to an object. The default value is ContactsFeedFromString. + + Returns: + The results of the batch request's execution on the server. If the + default converter is used, this is stored in a ContactsFeed. 
+ """ + return self.Post(batch_feed, url, converter=converter) + +class ContactsQuery(gdata.service.Query): + + def __init__(self, feed=None, text_query=None, params=None, + categories=None, group=None): + self.feed = feed or '/m8/feeds/contacts/default/full' + if group: + self._SetGroup(group) + gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query, + params=params, categories=categories) + + def _GetGroup(self): + if 'group' in self: + return self['group'] + else: + return None + + def _SetGroup(self, group_id): + self['group'] = group_id + + group = property(_GetGroup, _SetGroup, + doc='The group query parameter to find only contacts in this group') + +class GroupsQuery(gdata.service.Query): + + def __init__(self, feed=None, text_query=None, params=None, + categories=None): + self.feed = feed or '/m8/feeds/groups/default/full' + gdata.service.Query.__init__(self, feed=self.feed, text_query=text_query, + params=params, categories=categories) diff --git a/gdata.py-1.2.3/src/gdata/docs/__init__.py b/gdata.py-1.2.3/src/gdata/docs/__init__.py new file mode 100644 index 0000000..76afab0 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/docs/__init__.py @@ -0,0 +1,66 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Contains extensions to Atom objects used with Google Documents.""" + +__author__ = 'api.jfisher (Jeff Fisher)' + +import atom +import gdata + + +class DocumentListEntry(gdata.GDataEntry): + """The Google Documents version of an Atom Entry""" + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + +def DocumentListEntryFromString(xml_string): + """Converts an XML string into a DocumentListEntry object. + + Args: + xml_string: string The XML describing a Document List feed entry. + + Returns: + A DocumentListEntry object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(DocumentListEntry, xml_string) + + +class DocumentListFeed(gdata.GDataFeed): + """A feed containing a list of Google Documents Items""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [DocumentListEntry]) + + +def DocumentListFeedFromString(xml_string): + """Converts an XML string into a DocumentListFeed object. + + Args: + xml_string: string The XML describing a DocumentList feed. + + Returns: + A DocumentListFeed object corresponding to the given XML. + """ + return atom.CreateClassFromXMLString(DocumentListFeed, xml_string) diff --git a/gdata.py-1.2.3/src/gdata/docs/service.py b/gdata.py-1.2.3/src/gdata/docs/service.py new file mode 100644 index 0000000..2d35ee2 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/docs/service.py @@ -0,0 +1,292 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""DocsService extends the GDataService to streamline Google Documents + operations. + + DocsService: Provides methods to query feeds and manipulate items. + Extends GDataService. + + DocumentQuery: Queries a Google Document list feed. +""" + + +__author__ = 'api.jfisher (Jeff Fisher)' + + +import urllib +import atom +import gdata.service +import gdata.docs + + +# XML Namespaces used in Google Documents entities. +DATA_KIND_SCHEME = 'http://schemas.google.com/g/2005#kind' +DOCUMENT_KIND_TERM = 'http://schemas.google.com/docs/2007#document' +SPREADSHEET_KIND_TERM = 'http://schemas.google.com/docs/2007#spreadsheet' +PRESENTATION_KIND_TERM = 'http://schemas.google.com/docs/2007#presentation' +# File extensions of documents that are permitted to be uploaded. +SUPPORTED_FILETYPES = { + 'CSV': 'text/csv', + 'TSV': 'text/tab-separated-values', + 'TAB': 'text/tab-separated-values', + 'DOC': 'application/msword', + 'ODS': 'application/x-vnd.oasis.opendocument.spreadsheet', + 'ODT': 'application/vnd.oasis.opendocument.text', + 'RTF': 'application/rtf', + 'SXW': 'application/vnd.sun.xml.writer', + 'TXT': 'text/plain', + 'XLS': 'application/vnd.ms-excel', + 'PPT': 'application/vnd.ms-powerpoint', + 'PPS': 'application/vnd.ms-powerpoint', + 'HTM': 'text/html', + 'HTML' : 'text/html'} + + +class DocsService(gdata.service.GDataService): + + """Client extension for the Google Documents service Document List feed.""" + + def __init__(self, email=None, password=None, source=None, + server='docs.google.com', additional_headers=None): + """Constructor for the DocsService. 
+ + Args: + email: string (optional) The e-mail address of the account to use for + authentication. + password: string (optional) The password of the account to use for + authentication. + source: string (optional) The name of the user's application. + server: string (optional) The server the feed is hosted on. + additional_headers: dict (optional) Any additional HTTP headers to be + transmitted to the service in the form of key-value + pairs. + + Yields: + A DocsService object used to communicate with the Google Documents + service. + """ + gdata.service.GDataService.__init__(self, email=email, password=password, + service='writely', source=source, + server=server, + additional_headers=additional_headers) + + def Query(self, uri, converter=gdata.docs.DocumentListFeedFromString): + """Queries the Document List feed and returns the resulting feed of + entries. + + Args: + uri: string The full URI to be queried. This can contain query + parameters, a hostname, or simply the relative path to a Document + List feed. The DocumentQuery object is useful when constructing + query parameters. + converter: func (optional) A function which will be executed on the + retrieved item, generally to render it into a Python object. + By default the DocumentListFeedFromString function is used to + return a DocumentListFeed object. This is because most feed + queries will result in a feed and not a single entry. + """ + return self.Get(uri, converter=converter) + + def QueryDocumentListFeed(self, uri): + """Retrieves a DocumentListFeed by retrieving a URI based off the Document + List feed, including any query parameters. A DocumentQuery object can + be used to construct these parameters. + + Args: + uri: string The URI of the feed being retrieved possibly with query + parameters. + + Returns: + A DocumentListFeed object representing the feed returned by the server. 
+ """ + return self.Get(uri, converter=gdata.docs.DocumentListFeedFromString) + + def GetDocumentListEntry(self, uri): + """Retrieves a particular DocumentListEntry by its unique URI. + + Args: + uri: string The unique URI of an entry in a Document List feed. + + Returns: + A DocumentListEntry object representing the retrieved entry. + """ + return self.Get(uri, converter=gdata.docs.DocumentListEntryFromString) + + def GetDocumentListFeed(self): + """Retrieves a feed containing all of a user's documents.""" + q = gdata.docs.service.DocumentQuery(); + return self.QueryDocumentListFeed(q.ToUri()) + + def UploadPresentation(self, media_source, title): + """Uploads a presentation inside of a MediaSource object to the Document + List feed with the given title. + + Args: + media_source: MediaSource The MediaSource object containing a + presentation file to be uploaded. + title: string The title of the presentation on the server after being + uploaded. + + Returns: + A GDataEntry containing information about the presentation created on the + Google Documents service. + """ + category = atom.Category(scheme=DATA_KIND_SCHEME, + term=PRESENTATION_KIND_TERM) + return self._UploadFile(media_source, title, category) + + def UploadSpreadsheet(self, media_source, title): + """Uploads a spreadsheet inside of a MediaSource object to the Document + List feed with the given title. + + Args: + media_source: MediaSource The MediaSource object containing a spreadsheet + file to be uploaded. + title: string The title of the spreadsheet on the server after being + uploaded. + + Returns: + A GDataEntry containing information about the spreadsheet created on the + Google Documents service. + """ + category = atom.Category(scheme=DATA_KIND_SCHEME, + term=SPREADSHEET_KIND_TERM) + return self._UploadFile(media_source, title, category) + + def UploadDocument(self, media_source, title): + """Uploads a document inside of a MediaSource object to the Document List + feed with the given title. 
+ + Args: + media_source: MediaSource The gdata.MediaSource object containing a + document file to be uploaded. + title: string The title of the document on the server after being + uploaded. + + Returns: + A GDataEntry containing information about the document created on the + Google Documents service. + """ + category = atom.Category(scheme=DATA_KIND_SCHEME, + term=DOCUMENT_KIND_TERM) + return self._UploadFile(media_source, title, category) + + def _UploadFile(self, media_source, title, category): + """Uploads a file to the Document List feed. + + Args: + media_source: A gdata.MediaSource object containing the file to be + uploaded. + title: string The title of the document on the server after being + uploaded. + category: An atom.Category object specifying the appropriate document + type + Returns: + A GDataEntry containing information about the document created on + the Google Documents service. + """ + media_entry = gdata.GDataEntry() + media_entry.title = atom.Title(text=title) + media_entry.category.append(category) + media_entry = self.Post(media_entry, '/feeds/documents/private/full', + media_source = media_source, + extra_headers = {'Slug' : media_source.file_name }) + + return media_entry + + +class DocumentQuery(gdata.service.Query): + + """Object used to construct a URI to query the Google Document List feed""" + + def __init__(self, feed='/feeds/documents', visibility='private', + projection='full', text_query=None, params=None, + categories=None): + """Constructor for Document List Query + + Args: + feed: string (optional) The path for the feed. (e.g. '/feeds/documents') + visibility: string (optional) The visibility chosen for the current feed. + projection: string (optional) The projection chosen for the current feed. + text_query: string (optional) The contents of the q query parameter. This + string is URL escaped upon conversion to a URI. + params: dict (optional) Parameter value string pairs which become URL + params when translated to a URI. 
These parameters are added to + the query's items. + categories: list (optional) List of category strings which should be + included as query categories. See gdata.service.Query for + additional documentation. + + Yields: + A DocumentQuery object used to construct a URI based on the Document + List feed. + """ + self.visibility = visibility + self.projection = projection + gdata.service.Query.__init__(self, feed, text_query, params, categories) + + def ToUri(self): + """Generates a URI from the query parameters set in the object. + + Returns: + A string containing the URI used to retrieve entries from the Document + List feed. + """ + old_feed = self.feed + self.feed = '/'.join([old_feed, self.visibility, self.projection]) + new_feed = gdata.service.Query.ToUri(self) + self.feed = old_feed + return new_feed + + def AddNamedFolder(self, email, folder_name): + """Adds a named folder category, qualified by a schema. + + This function lets you query for documents that are contained inside a + named folder without fear of collision with other categories. + + Args: + email: string The email of the user who owns the folder. + folder_name: string The name of the folder. + + Returns: + The string of the category that was added to the object. + """ + + category = '{http://schemas.google.com/docs/2007/folders/' + category += email + '}' + folder_name + + self.categories.append(category) + + return category + + def RemoveNamedFolder(self, email, folder_name): + """Removes a named folder category, qualified by a schema. + + Args: + email: string The email of the user who owns the folder. + folder_name: string The name of the folder. + + Returns: + The string of the category that was removed to the object. 
+ """ + + category = '{http://schemas.google.com/docs/2007/folders/' + category += email + '}' + folder_name + + self.categories.remove(category) + + return category diff --git a/gdata.py-1.2.3/src/gdata/exif/__init__.py b/gdata.py-1.2.3/src/gdata/exif/__init__.py new file mode 100644 index 0000000..7f1f9c2 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/exif/__init__.py @@ -0,0 +1,217 @@ +# -*-*- encoding: utf-8 -*-*- +# +# This is gdata.photos.exif, implementing the exif namespace in gdata +# +# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $ +# +# Copyright 2007 Håvard Gulldahl +# Portions copyright 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This module maps elements from the {EXIF} namespace[1] to GData objects. +These elements describe image data, using exif attributes[2]. + +Picasa Web Albums uses the exif namespace to represent Exif data encoded +in a photo [3]. + +Picasa Web Albums uses the following exif elements: +exif:distance +exif:exposure +exif:flash +exif:focallength +exif:fstop +exif:imageUniqueID +exif:iso +exif:make +exif:model +exif:tags +exif:time + +[1]: http://schemas.google.com/photos/exif/2007. 
+[2]: http://en.wikipedia.org/wiki/Exif +[3]: http://code.google.com/apis/picasaweb/reference.html#exif_reference +""" + + +__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__ +__license__ = 'Apache License v2' + + +import atom +import gdata + +EXIF_NAMESPACE = 'http://schemas.google.com/photos/exif/2007' + +class ExifBaseElement(atom.AtomBase): + """Base class for elements in the EXIF_NAMESPACE (%s). To add new elements, you only need to add the element tag name to self._tag + """ % EXIF_NAMESPACE + + _tag = '' + _namespace = EXIF_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +class Distance(ExifBaseElement): + "(float) The distance to the subject, e.g. 0.0" + + _tag = 'distance' +def DistanceFromString(xml_string): + return atom.CreateClassFromXMLString(Distance, xml_string) + +class Exposure(ExifBaseElement): + "(float) The exposure time used, e.g. 0.025 or 8.0E4" + + _tag = 'exposure' +def ExposureFromString(xml_string): + return atom.CreateClassFromXMLString(Exposure, xml_string) + +class Flash(ExifBaseElement): + """(string) Boolean value indicating whether the flash was used. + The .text attribute will either be `true' or `false' + + As a convenience, this object's .bool method will return what you want, + so you can say: + + flash_used = bool(Flash) + + """ + + _tag = 'flash' + def __bool__(self): + if self.text.lower() in ('true','false'): + return self.text.lower() == 'true' +def FlashFromString(xml_string): + return atom.CreateClassFromXMLString(Flash, xml_string) + +class Focallength(ExifBaseElement): + "(float) The focal length used, e.g. 
23.7" + + _tag = 'focallength' +def FocallengthFromString(xml_string): + return atom.CreateClassFromXMLString(Focallength, xml_string) + +class Fstop(ExifBaseElement): + "(float) The fstop value used, e.g. 5.0" + + _tag = 'fstop' +def FstopFromString(xml_string): + return atom.CreateClassFromXMLString(Fstop, xml_string) + +class ImageUniqueID(ExifBaseElement): + "(string) The unique image ID for the photo. Generated by Google Photo servers" + + _tag = 'imageUniqueID' +def ImageUniqueIDFromString(xml_string): + return atom.CreateClassFromXMLString(ImageUniqueID, xml_string) + +class Iso(ExifBaseElement): + "(int) The iso equivalent value used, e.g. 200" + + _tag = 'iso' +def IsoFromString(xml_string): + return atom.CreateClassFromXMLString(Iso, xml_string) + +class Make(ExifBaseElement): + "(string) The make of the camera used, e.g. Fictitious Camera Company" + + _tag = 'make' +def MakeFromString(xml_string): + return atom.CreateClassFromXMLString(Make, xml_string) + +class Model(ExifBaseElement): + "(string) The model of the camera used,e.g AMAZING-100D" + + _tag = 'model' +def ModelFromString(xml_string): + return atom.CreateClassFromXMLString(Model, xml_string) + +class Time(ExifBaseElement): + """(int) The date/time the photo was taken, e.g. 1180294337000. + Represented as the number of milliseconds since January 1st, 1970. + + The value of this element will always be identical to the value + of the . + + Look at this object's .isoformat() for a human friendly datetime string: + + photo_epoch = Time.text # 1180294337000 + photo_isostring = Time.isoformat() # '2007-05-27T19:32:17.000Z' + + Alternatively: + photo_datetime = Time.datetime() # (requires python >= 2.3) + """ + + _tag = 'time' + def isoformat(self): + """(string) Return the timestamp as a ISO 8601 formatted string, + e.g. 
'2007-05-27T19:32:17.000Z' + """ + import time + epoch = float(self.text)/1000 + return time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime(epoch)) + + def datetime(self): + """(datetime.datetime) Return the timestamp as a datetime.datetime object + + Requires python 2.3 + """ + import datetime + epoch = float(self.text)/1000 + return datetime.datetime.fromtimestamp(epoch) + +def TimeFromString(xml_string): + return atom.CreateClassFromXMLString(Time, xml_string) + +class Tags(ExifBaseElement): + """The container for all exif elements. + The element can appear as a child of a photo entry. + """ + + _tag = 'tags' + _children = atom.AtomBase._children.copy() + _children['{%s}fstop' % EXIF_NAMESPACE] = ('fstop', Fstop) + _children['{%s}make' % EXIF_NAMESPACE] = ('make', Make) + _children['{%s}model' % EXIF_NAMESPACE] = ('model', Model) + _children['{%s}distance' % EXIF_NAMESPACE] = ('distance', Distance) + _children['{%s}exposure' % EXIF_NAMESPACE] = ('exposure', Exposure) + _children['{%s}flash' % EXIF_NAMESPACE] = ('flash', Flash) + _children['{%s}focallength' % EXIF_NAMESPACE] = ('focallength', Focallength) + _children['{%s}iso' % EXIF_NAMESPACE] = ('iso', Iso) + _children['{%s}time' % EXIF_NAMESPACE] = ('time', Time) + _children['{%s}imageUniqueID' % EXIF_NAMESPACE] = ('imageUniqueID', ImageUniqueID) + + def __init__(self, extension_elements=None, extension_attributes=None, text=None): + ExifBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.fstop=None + self.make=None + self.model=None + self.distance=None + self.exposure=None + self.flash=None + self.focallength=None + self.iso=None + self.time=None + self.imageUniqueID=None +def TagsFromString(xml_string): + return atom.CreateClassFromXMLString(Tags, xml_string) + diff --git a/gdata.py-1.2.3/src/gdata/geo/__init__.py b/gdata.py-1.2.3/src/gdata/geo/__init__.py new file mode 100644 index 0000000..1fcf604 --- /dev/null +++ 
b/gdata.py-1.2.3/src/gdata/geo/__init__.py @@ -0,0 +1,185 @@ +# -*-*- encoding: utf-8 -*-*- +# +# This is gdata.photos.geo, implementing geological positioning in gdata structures +# +# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $ +# +# Copyright 2007 Håvard Gulldahl +# Portions copyright 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Picasa Web Albums uses the georss and gml namespaces for +elements defined in the GeoRSS and Geography Markup Language specifications. + +Specifically, Picasa Web Albums uses the following elements: + +georss:where +gml:Point +gml:pos + +http://code.google.com/apis/picasaweb/reference.html#georss_reference + + +Picasa Web Albums also accepts geographic-location data in two other formats: +W3C format and plain-GeoRSS (without GML) format. +""" +# +#Over the wire, the Picasa Web Albums only accepts and sends the +#elements mentioned above, but this module will let you seamlessly convert +#between the different formats (TODO 2007-10-18 hg) + +__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: api chokes on non-ascii chars in __author__ +__license__ = 'Apache License v2' + + +import atom +import gdata + +GEO_NAMESPACE = 'http://www.w3.org/2003/01/geo/wgs84_pos#' +GML_NAMESPACE = 'http://www.opengis.net/gml' +GEORSS_NAMESPACE = 'http://www.georss.org/georss' + +class GeoBaseElement(atom.AtomBase): + """Base class for elements. 
+ + To add new elements, you only need to add the element tag name to self._tag + and the namespace to self._namespace + """ + + _tag = '' + _namespace = GML_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +class Pos(GeoBaseElement): + """(string) Specifies a latitude and longitude, separated by a space, + e.g. `35.669998 139.770004'""" + + _tag = 'pos' +def PosFromString(xml_string): + return atom.CreateClassFromXMLString(Pos, xml_string) + +class Point(GeoBaseElement): + """(container) Specifies a particular geographical point, by means of + a element.""" + + _tag = 'Point' + _children = atom.AtomBase._children.copy() + _children['{%s}pos' % GML_NAMESPACE] = ('pos', Pos) + def __init__(self, pos=None, extension_elements=None, extension_attributes=None, text=None): + GeoBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + if pos is None: + pos = Pos() + self.pos=pos +def PointFromString(xml_string): + return atom.CreateClassFromXMLString(Point, xml_string) + +class Where(GeoBaseElement): + """(container) Specifies a geographical location or region. + A container element, containing a single element. + (Not to be confused with .) + + Note that the (only) child attribute, .Point, is title-cased. + This reflects the names of elements in the xml stream + (principle of least surprise). + + As a convenience, you can get a tuple of (lat, lon) with Where.location(), + and set the same data with Where.setLocation( (lat, lon) ). + + Similarly, there are methods to set and get only latitude and longitude. 
+ """ + + _tag = 'where' + _namespace = GEORSS_NAMESPACE + _children = atom.AtomBase._children.copy() + _children['{%s}Point' % GML_NAMESPACE] = ('Point', Point) + def __init__(self, point=None, extension_elements=None, extension_attributes=None, text=None): + GeoBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + if point is None: + point = Point() + self.Point=point + def location(self): + "(float, float) Return Where.Point.pos.text as a (lat,lon) tuple" + try: + return tuple([float(z) for z in self.Point.pos.text.split(' ')]) + except AttributeError: + return tuple() + def set_location(self, latlon): + """(bool) Set Where.Point.pos.text from a (lat,lon) tuple. + + Arguments: + lat (float): The latitude in degrees, from -90.0 to 90.0 + lon (float): The longitude in degrees, from -180.0 to 180.0 + + Returns True on success. + + """ + + assert(isinstance(latlon[0], float)) + assert(isinstance(latlon[1], float)) + try: + self.Point.pos.text = "%s %s" % (latlon[0], latlon[1]) + return True + except AttributeError: + return False + def latitude(self): + "(float) Get the latitude value of the geo-tag. See also .location()" + lat, lon = self.location() + return lat + + def longitude(self): + "(float) Get the longtitude value of the geo-tag. See also .location()" + lat, lon = self.location() + return lon + + longtitude = longitude + + def set_latitude(self, lat): + """(bool) Set the latitude value of the geo-tag. + + Args: + lat (float): The new latitude value + + See also .set_location() + """ + _lat, lon = self.location() + return self.set_location(lat, lon) + + def set_longitude(self, lon): + """(bool) Set the longtitude value of the geo-tag. 
+ + Args: + lat (float): The new latitude value + + See also .set_location() + """ + lat, _lon = self.location() + return self.set_location(lat, lon) + + set_longtitude = set_longitude + +def WhereFromString(xml_string): + return atom.CreateClassFromXMLString(Where, xml_string) + diff --git a/gdata.py-1.2.3/src/gdata/media/__init__.py b/gdata.py-1.2.3/src/gdata/media/__init__.py new file mode 100644 index 0000000..fd2abc7 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/media/__init__.py @@ -0,0 +1,331 @@ +# -*-*- encoding: utf-8 -*-*- +# +# This is gdata.photos.media, implementing parts of the MediaRSS spec in gdata structures +# +# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $ +# +# Copyright 2007 Håvard Gulldahl +# Portions copyright 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Essential attributes of photos in Google Photos/Picasa Web Albums are +expressed using elements from the `media' namespace, defined in the +MediaRSS specification[1]. + +Due to copyright issues, the elements herein are documented sparingly, please +consult with the Google Photos API Reference Guide[2], alternatively the +official MediaRSS specification[1] for details. +(If there is a version conflict between the two sources, stick to the +Google Photos API). 
+ +[1]: http://search.yahoo.com/mrss (version 1.1.1) +[2]: http://code.google.com/apis/picasaweb/reference.html#media_reference + +Keep in mind that Google Photos only uses a subset of the MediaRSS elements +(and some of the attributes are trimmed down, too): + +media:content +media:credit +media:description +media:group +media:keywords +media:thumbnail +media:title +""" + +__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: api chokes on non-ascii chars in __author__ +__license__ = 'Apache License v2' + + +import atom +import gdata + +MEDIA_NAMESPACE = 'http://search.yahoo.com/mrss/' +YOUTUBE_NAMESPACE = 'http://gdata.youtube.com/schemas/2007' + +class MediaBaseElement(atom.AtomBase): + """Base class for elements in the MEDIA_NAMESPACE. + To add new elements, you only need to add the element tag name to self._tag + """ + + _tag = '' + _namespace = MEDIA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +class Content(MediaBaseElement): + """(attribute container) This element describes the original content, + e.g. an image or a video. There may be multiple Content elements + in a media:Group. + + For example, a video may have a + element that specifies a JPEG + representation of the video, and a + element that specifies the URL of the video itself. + + Attributes: + url: non-ambigous reference to online object + width: width of the object frame, in pixels + height: width of the object frame, in pixels + medium: one of `image' or `video', allowing the api user to quickly + determine the object's type + type: Internet media Type[1] (a.k.a. 
mime type) of the object -- a more + verbose way of determining the media type + (optional) fileSize: the size of the object, in bytes + + [1]: http://en.wikipedia.org/wiki/Internet_media_type + """ + + _tag = 'content' + _attributes = atom.AtomBase._attributes.copy() + _attributes['url'] = 'url' + _attributes['width'] = 'width' + _attributes['height'] = 'height' + _attributes['medium'] = 'medium' + _attributes['type'] = 'type' + _attributes['fileSize'] = 'fileSize' + + def __init__(self, url=None, width=None, height=None, + medium=None, content_type=None, fileSize=None, format=None, + extension_elements=None, extension_attributes=None, text=None): + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.url = url + self.width = width + self.height = height + self.medium = medium + self.type = content_type + self.fileSize = fileSize + +def ContentFromString(xml_string): + return atom.CreateClassFromXMLString(Content, xml_string) + +class Credit(MediaBaseElement): + """(string) Contains the nickname of the user who created the content, + e.g. `Liz Bennet'. + + This is a user-specified value that should be used when referring to + the user by name. + + Note that none of the attributes from the MediaRSS spec are supported. + """ + + _tag = 'credit' +def CreditFromString(xml_string): + return atom.CreateClassFromXMLString(Credit, xml_string) + +class Description(MediaBaseElement): + """(string) A description of the media object. + Either plain unicode text, or entity-encoded html (look at the `type' + attribute). + + E.g `A set of photographs I took while vacationing in Italy.' + + For `api' projections, the description is in plain text; + for `base' projections, the description is in HTML. + + Attributes: + type: either `text' or `html'. 
+ """ + + _tag = 'description' + _attributes = atom.AtomBase._attributes.copy() + _attributes['type'] = 'type' + def __init__(self, description_type=None, + extension_elements=None, extension_attributes=None, text=None): + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + self.type = description_type +def DescriptionFromString(xml_string): + return atom.CreateClassFromXMLString(Description, xml_string) + +class Keywords(MediaBaseElement): + """(string) Lists the tags associated with the entry, + e.g `italy, vacation, sunset'. + + Contains a comma-separated list of tags that have been added to the photo, or + all tags that have been added to photos in the album. + """ + + _tag = 'keywords' +def KeywordsFromString(xml_string): + return atom.CreateClassFromXMLString(Keywords, xml_string) + +class Thumbnail(MediaBaseElement): + """(attributes) Contains the URL of a thumbnail of a photo or album cover. + + There can be multiple elements for a given ; + for example, a given item may have multiple thumbnails at different sizes. + Photos generally have two thumbnails at different sizes; + albums generally have one cropped thumbnail. + + If the thumbsize parameter is set to the initial query, this element points + to thumbnails of the requested sizes; otherwise the thumbnails are the + default thumbnail size. + + This element must not be confused with the element. + + Attributes: + url: The URL of the thumbnail image. + height: The height of the thumbnail image, in pixels. + width: The width of the thumbnail image, in pixels. 
+ """ + + _tag = 'thumbnail' + _attributes = atom.AtomBase._attributes.copy() + _attributes['url'] = 'url' + _attributes['width'] = 'width' + _attributes['height'] = 'height' + def __init__(self, url=None, width=None, height=None, + extension_attributes=None, text=None, extension_elements=None): + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.url = url + self.width = width + self.height = height + + +def ThumbnailFromString(xml_string): + return atom.CreateClassFromXMLString(Thumbnail, xml_string) + +class Title(MediaBaseElement): + """(string) Contains the title of the entry's media content, in plain text. + + Attributes: + type: Always set to plain + """ + + _tag = 'title' + _attributes = atom.AtomBase._attributes.copy() + _attributes['type'] = 'type' + def __init__(self, title_type=None, + extension_attributes=None, text=None, extension_elements=None): + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.type = title_type +def TitleFromString(xml_string): + return atom.CreateClassFromXMLString(Title, xml_string) + +class Player(MediaBaseElement): + """(string) Contains the embeddable player URL for the entry's media content + if the media is a video. 
+ + Attributes: + url: Always set to plain + """ + + _tag = 'player' + _attributes = atom.AtomBase._attributes.copy() + _attributes['url'] = 'url' + + def __init__(self, player_url=None, + extension_attributes=None, extension_elements=None): + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.url= player_url + +class Private(atom.AtomBase): + """The YouTube Private element""" + _tag = 'private' + _namespace = YOUTUBE_NAMESPACE + +class Duration(atom.AtomBase): + """The YouTube Duration element""" + _tag = 'duration' + _namespace = YOUTUBE_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['seconds'] = 'seconds' + +class Category(MediaBaseElement): + """The mediagroup:category element""" + + _tag = 'category' + _attributes = atom.AtomBase._attributes.copy() + _attributes['term'] = 'term' + _attributes['scheme'] = 'scheme' + _attributes['label'] = 'label' + + def __init__(self, term=None, scheme=None, label=None, text=None, + extension_elements=None, extension_attributes=None): + """Constructor for Category + + Args: + term: str + scheme: str + label: str + text: str The text data in the this element + extension_elements: list A list of ExtensionElement instances + extension_attributes: dict A dictionary of attribute value string pairs + """ + + self.term = term + self.scheme = scheme + self.label = label + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +class Group(MediaBaseElement): + """Container element for all media elements. 
+ The element can appear as a child of an album, photo or + video entry.""" + + _tag = 'group' + _children = atom.AtomBase._children.copy() + _children['{%s}content' % MEDIA_NAMESPACE] = ('content', [Content,]) + _children['{%s}credit' % MEDIA_NAMESPACE] = ('credit', Credit) + _children['{%s}description' % MEDIA_NAMESPACE] = ('description', Description) + _children['{%s}keywords' % MEDIA_NAMESPACE] = ('keywords', Keywords) + _children['{%s}thumbnail' % MEDIA_NAMESPACE] = ('thumbnail', [Thumbnail,]) + _children['{%s}title' % MEDIA_NAMESPACE] = ('title', Title) + _children['{%s}category' % MEDIA_NAMESPACE] = ('category', [Category,]) + _children['{%s}duration' % YOUTUBE_NAMESPACE] = ('duration', Duration) + _children['{%s}private' % YOUTUBE_NAMESPACE] = ('private', Private) + _children['{%s}player' % MEDIA_NAMESPACE] = ('player', Player) + + def __init__(self, content=None, credit=None, description=None, keywords=None, + thumbnail=None, title=None, duration=None, private=None, + category=None, player=None, extension_elements=None, + extension_attributes=None, text=None): + + MediaBaseElement.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + self.content=content + self.credit=credit + self.description=description + self.keywords=keywords + self.thumbnail=thumbnail or [] + self.title=title + self.duration=duration + self.private=private + self.category=category or [] + self.player=player + +def GroupFromString(xml_string): + return atom.CreateClassFromXMLString(Group, xml_string) diff --git a/gdata.py-1.2.3/src/gdata/oauth/CHANGES.txt b/gdata.py-1.2.3/src/gdata/oauth/CHANGES.txt new file mode 100755 index 0000000..7c2b92c --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/oauth/CHANGES.txt @@ -0,0 +1,17 @@ +1. Moved oauth.py to __init__.py + +2. Refactored __init__.py for compatibility with python 2.2 (Issue 59) + +3. Refactored rsa.py for compatibility with python 2.2 (Issue 59) + +4. 
Refactored OAuthRequest.from_token_and_callback since the callback url was +getting double url-encoding the callback url in place of single. (Issue 43) + +5. Added build_signature_base_string method to rsa.py since it used the +implementation of this method from oauth.OAuthSignatureMethod_HMAC_SHA1 which +was incorrect since it enforced the presence of a consumer secret and a token +secret. Also, changed its super class from oauth.OAuthSignatureMethod_HMAC_SHA1 +to oauth.OAuthSignatureMethod (Issue 64) + +6. Refactored .to_header method since it returned non-oauth params +as well which was incorrect. (Issue 31) \ No newline at end of file diff --git a/gdata.py-1.2.3/src/gdata/oauth/__init__.py b/gdata.py-1.2.3/src/gdata/oauth/__init__.py new file mode 100755 index 0000000..baf543e --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/oauth/__init__.py @@ -0,0 +1,524 @@ +import cgi +import urllib +import time +import random +import urlparse +import hmac +import binascii + +VERSION = '1.0' # Hi Blaine! +HTTP_METHOD = 'GET' +SIGNATURE_METHOD = 'PLAINTEXT' + +# Generic exception class +class OAuthError(RuntimeError): + def __init__(self, message='OAuth error occured.'): + self.message = message + +# optional WWW-Authenticate header (401 error) +def build_authenticate_header(realm=''): + return {'WWW-Authenticate': 'OAuth realm="%s"' % realm} + +# url escape +def escape(s): + # escape '/' too + return urllib.quote(s, safe='~') + +# util function: current timestamp +# seconds since epoch (UTC) +def generate_timestamp(): + return int(time.time()) + +# util function: nonce +# pseudorandom number +def generate_nonce(length=8): + return ''.join([str(random.randint(0, 9)) for i in range(length)]) + +# OAuthConsumer is a data type that represents the identity of the Consumer +# via its shared secret with the Service Provider. 
+class OAuthConsumer(object): + key = None + secret = None + + def __init__(self, key, secret): + self.key = key + self.secret = secret + +# OAuthToken is a data type that represents an End User via either an access +# or request token. +class OAuthToken(object): + # access tokens and request tokens + key = None + secret = None + + ''' + key = the token + secret = the token secret + ''' + def __init__(self, key, secret): + self.key = key + self.secret = secret + + def to_string(self): + return urllib.urlencode({'oauth_token': self.key, 'oauth_token_secret': self.secret}) + + # return a token from something like: + # oauth_token_secret=digg&oauth_token=digg + def from_string(s): + params = cgi.parse_qs(s, keep_blank_values=False) + key = params['oauth_token'][0] + secret = params['oauth_token_secret'][0] + return OAuthToken(key, secret) + from_string = staticmethod(from_string) + + def __str__(self): + return self.to_string() + +# OAuthRequest represents the request and can be serialized +class OAuthRequest(object): + ''' + OAuth parameters: + - oauth_consumer_key + - oauth_token + - oauth_signature_method + - oauth_signature + - oauth_timestamp + - oauth_nonce + - oauth_version + ... any additional parameters, as defined by the Service Provider. 
+ ''' + parameters = None # oauth parameters + http_method = HTTP_METHOD + http_url = None + version = VERSION + + def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None): + self.http_method = http_method + self.http_url = http_url + self.parameters = parameters or {} + + def set_parameter(self, parameter, value): + self.parameters[parameter] = value + + def get_parameter(self, parameter): + try: + return self.parameters[parameter] + except: + raise OAuthError('Parameter not found: %s' % parameter) + + def _get_timestamp_nonce(self): + return self.get_parameter('oauth_timestamp'), self.get_parameter('oauth_nonce') + + # get any non-oauth parameters + def get_nonoauth_parameters(self): + parameters = {} + for k, v in self.parameters.iteritems(): + # ignore oauth parameters + if k.find('oauth_') < 0: + parameters[k] = v + return parameters + + # serialize as a header for an HTTPAuth request + def to_header(self, realm=''): + auth_header = 'OAuth realm="%s"' % realm + # add the oauth parameters + if self.parameters: + for k, v in self.parameters.iteritems(): + if k[:6] == 'oauth_': + auth_header += ', %s="%s"' % (k, escape(str(v))) + return {'Authorization': auth_header} + + # serialize as post data for a POST request + def to_postdata(self): + return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in self.parameters.iteritems()]) + + # serialize as a url for a GET request + def to_url(self): + return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata()) + + # return a string that consists of all the parameters that need to be signed + def get_normalized_parameters(self): + params = self.parameters + try: + # exclude the signature if it exists + del params['oauth_signature'] + except: + pass + key_values = params.items() + # sort lexicographically, first after key, then after value + key_values.sort() + # combine key value pairs in string and escape + return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) for k, v in 
key_values]) + + # just uppercases the http method + def get_normalized_http_method(self): + return self.http_method.upper() + + # parses the url and rebuilds it to be scheme://host/path + def get_normalized_http_url(self): + parts = urlparse.urlparse(self.http_url) + url_string = '%s://%s%s' % (parts[0], parts[1], parts[2]) # scheme, netloc, path + return url_string + + # set the signature parameter to the result of build_signature + def sign_request(self, signature_method, consumer, token): + # set the signature method + self.set_parameter('oauth_signature_method', signature_method.get_name()) + # set the signature + self.set_parameter('oauth_signature', self.build_signature(signature_method, consumer, token)) + + def build_signature(self, signature_method, consumer, token): + # call the build signature method within the signature method + return signature_method.build_signature(self, consumer, token) + + def from_request(http_method, http_url, headers=None, parameters=None, query_string=None): + # combine multiple parameter sources + if parameters is None: + parameters = {} + + # headers + if headers and 'Authorization' in headers: + auth_header = headers['Authorization'] + # check that the authorization header is OAuth + if auth_header.index('OAuth') > -1: + try: + # get the parameters from the header + header_params = OAuthRequest._split_header(auth_header) + parameters.update(header_params) + except: + raise OAuthError('Unable to parse OAuth parameters from Authorization header.') + + # GET or POST query string + if query_string: + query_params = OAuthRequest._split_url_string(query_string) + parameters.update(query_params) + + # URL parameters + param_str = urlparse.urlparse(http_url)[4] # query + url_params = OAuthRequest._split_url_string(param_str) + parameters.update(url_params) + + if parameters: + return OAuthRequest(http_method, http_url, parameters) + + return None + from_request = staticmethod(from_request) + + def 
from_consumer_and_token(oauth_consumer, token=None, http_method=HTTP_METHOD, http_url=None, parameters=None): + if not parameters: + parameters = {} + + defaults = { + 'oauth_consumer_key': oauth_consumer.key, + 'oauth_timestamp': generate_timestamp(), + 'oauth_nonce': generate_nonce(), + 'oauth_version': OAuthRequest.version, + } + + defaults.update(parameters) + parameters = defaults + + if token: + parameters['oauth_token'] = token.key + + return OAuthRequest(http_method, http_url, parameters) + from_consumer_and_token = staticmethod(from_consumer_and_token) + + def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD, http_url=None, parameters=None): + if not parameters: + parameters = {} + + parameters['oauth_token'] = token.key + + if callback: + parameters['oauth_callback'] = callback + + return OAuthRequest(http_method, http_url, parameters) + from_token_and_callback = staticmethod(from_token_and_callback) + + # util function: turn Authorization: header into parameters, has to do some unescaping + def _split_header(header): + params = {} + parts = header.split(',') + for param in parts: + # ignore realm parameter + if param.find('OAuth realm') > -1: + continue + # remove whitespace + param = param.strip() + # split key-value + param_parts = param.split('=', 1) + # remove quotes and unescape the value + params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"')) + return params + _split_header = staticmethod(_split_header) + + # util function: turn url string into parameters, has to do some unescaping + def _split_url_string(param_str): + parameters = cgi.parse_qs(param_str, keep_blank_values=False) + for k, v in parameters.iteritems(): + parameters[k] = urllib.unquote(v[0]) + return parameters + _split_url_string = staticmethod(_split_url_string) + +# OAuthServer is a worker to check a requests validity against a data store +class OAuthServer(object): + timestamp_threshold = 300 # in seconds, five minutes + version = VERSION + 
signature_methods = None + data_store = None + + def __init__(self, data_store=None, signature_methods=None): + self.data_store = data_store + self.signature_methods = signature_methods or {} + + def set_data_store(self, oauth_data_store): + self.data_store = data_store + + def get_data_store(self): + return self.data_store + + def add_signature_method(self, signature_method): + self.signature_methods[signature_method.get_name()] = signature_method + return self.signature_methods + + # process a request_token request + # returns the request token on success + def fetch_request_token(self, oauth_request): + try: + # get the request token for authorization + token = self._get_token(oauth_request, 'request') + except OAuthError: + # no token required for the initial token request + version = self._get_version(oauth_request) + consumer = self._get_consumer(oauth_request) + self._check_signature(oauth_request, consumer, None) + # fetch a new token + token = self.data_store.fetch_request_token(consumer) + return token + + # process an access_token request + # returns the access token on success + def fetch_access_token(self, oauth_request): + version = self._get_version(oauth_request) + consumer = self._get_consumer(oauth_request) + # get the request token + token = self._get_token(oauth_request, 'request') + self._check_signature(oauth_request, consumer, token) + new_token = self.data_store.fetch_access_token(consumer, token) + return new_token + + # verify an api call, checks all the parameters + def verify_request(self, oauth_request): + # -> consumer and token + version = self._get_version(oauth_request) + consumer = self._get_consumer(oauth_request) + # get the access token + token = self._get_token(oauth_request, 'access') + self._check_signature(oauth_request, consumer, token) + parameters = oauth_request.get_nonoauth_parameters() + return consumer, token, parameters + + # authorize a request token + def authorize_token(self, token, user): + return 
self.data_store.authorize_request_token(token, user) + + # get the callback url + def get_callback(self, oauth_request): + return oauth_request.get_parameter('oauth_callback') + + # optional support for the authenticate header + def build_authenticate_header(self, realm=''): + return {'WWW-Authenticate': 'OAuth realm="%s"' % realm} + + # verify the correct version request for this server + def _get_version(self, oauth_request): + try: + version = oauth_request.get_parameter('oauth_version') + except: + version = VERSION + if version and version != self.version: + raise OAuthError('OAuth version %s not supported.' % str(version)) + return version + + # figure out the signature with some defaults + def _get_signature_method(self, oauth_request): + try: + signature_method = oauth_request.get_parameter('oauth_signature_method') + except: + signature_method = SIGNATURE_METHOD + try: + # get the signature method object + signature_method = self.signature_methods[signature_method] + except: + signature_method_names = ', '.join(self.signature_methods.keys()) + raise OAuthError('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names)) + + return signature_method + + def _get_consumer(self, oauth_request): + consumer_key = oauth_request.get_parameter('oauth_consumer_key') + if not consumer_key: + raise OAuthError('Invalid consumer key.') + consumer = self.data_store.lookup_consumer(consumer_key) + if not consumer: + raise OAuthError('Invalid consumer.') + return consumer + + # try to find the token for the provided request token key + def _get_token(self, oauth_request, token_type='access'): + token_field = oauth_request.get_parameter('oauth_token') + token = self.data_store.lookup_token(token_type, token_field) + if not token: + raise OAuthError('Invalid %s token: %s' % (token_type, token_field)) + return token + + def _check_signature(self, oauth_request, consumer, token): + timestamp, nonce = 
oauth_request._get_timestamp_nonce() + self._check_timestamp(timestamp) + self._check_nonce(consumer, token, nonce) + signature_method = self._get_signature_method(oauth_request) + try: + signature = oauth_request.get_parameter('oauth_signature') + except: + raise OAuthError('Missing signature.') + # validate the signature + valid_sig = signature_method.check_signature(oauth_request, consumer, token, signature) + if not valid_sig: + key, base = signature_method.build_signature_base_string(oauth_request, consumer, token) + raise OAuthError('Invalid signature. Expected signature base string: %s' % base) + built = signature_method.build_signature(oauth_request, consumer, token) + + def _check_timestamp(self, timestamp): + # verify that timestamp is recentish + timestamp = int(timestamp) + now = int(time.time()) + lapsed = now - timestamp + if lapsed > self.timestamp_threshold: + raise OAuthError('Expired timestamp: given %d and now %s has a greater difference than threshold %d' % (timestamp, now, self.timestamp_threshold)) + + def _check_nonce(self, consumer, token, nonce): + # verify that the nonce is uniqueish + nonce = self.data_store.lookup_nonce(consumer, token, nonce) + if nonce: + raise OAuthError('Nonce already used: %s' % str(nonce)) + +# OAuthClient is a worker to attempt to execute a request +class OAuthClient(object): + consumer = None + token = None + + def __init__(self, oauth_consumer, oauth_token): + self.consumer = oauth_consumer + self.token = oauth_token + + def get_consumer(self): + return self.consumer + + def get_token(self): + return self.token + + def fetch_request_token(self, oauth_request): + # -> OAuthToken + raise NotImplementedError + + def fetch_access_token(self, oauth_request): + # -> OAuthToken + raise NotImplementedError + + def access_resource(self, oauth_request): + # -> some protected resource + raise NotImplementedError + +# OAuthDataStore is a database abstraction used to lookup consumers and tokens +class OAuthDataStore(object): 
+ + def lookup_consumer(self, key): + # -> OAuthConsumer + raise NotImplementedError + + def lookup_token(self, oauth_consumer, token_type, token_token): + # -> OAuthToken + raise NotImplementedError + + def lookup_nonce(self, oauth_consumer, oauth_token, nonce, timestamp): + # -> OAuthToken + raise NotImplementedError + + def fetch_request_token(self, oauth_consumer): + # -> OAuthToken + raise NotImplementedError + + def fetch_access_token(self, oauth_consumer, oauth_token): + # -> OAuthToken + raise NotImplementedError + + def authorize_request_token(self, oauth_token, user): + # -> OAuthToken + raise NotImplementedError + +# OAuthSignatureMethod is a strategy class that implements a signature method +class OAuthSignatureMethod(object): + def get_name(self): + # -> str + raise NotImplementedError + + def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token): + # -> str key, str raw + raise NotImplementedError + + def build_signature(self, oauth_request, oauth_consumer, oauth_token): + # -> str + raise NotImplementedError + + def check_signature(self, oauth_request, consumer, token, signature): + built = self.build_signature(oauth_request, consumer, token) + return built == signature + +class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod): + + def get_name(self): + return 'HMAC-SHA1' + + def build_signature_base_string(self, oauth_request, consumer, token): + sig = ( + escape(oauth_request.get_normalized_http_method()), + escape(oauth_request.get_normalized_http_url()), + escape(oauth_request.get_normalized_parameters()), + ) + + key = '%s&' % escape(consumer.secret) + if token: + key += escape(token.secret) + raw = '&'.join(sig) + return key, raw + + def build_signature(self, oauth_request, consumer, token): + # build the base signature string + key, raw = self.build_signature_base_string(oauth_request, consumer, token) + + # hmac object + try: + import hashlib # 2.5 + hashed = hmac.new(key, raw, hashlib.sha1) + except: + import sha 
# deprecated + hashed = hmac.new(key, raw, sha) + + # calculate the digest base 64 + return binascii.b2a_base64(hashed.digest())[:-1] + +class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod): + + def get_name(self): + return 'PLAINTEXT' + + def build_signature_base_string(self, oauth_request, consumer, token): + # concatenate the consumer key and secret + sig = escape(consumer.secret) + '&' + if token: + sig = sig + escape(token.secret) + return sig + + def build_signature(self, oauth_request, consumer, token): + return self.build_signature_base_string(oauth_request, consumer, token) diff --git a/gdata.py-1.2.3/src/gdata/oauth/rsa.py b/gdata.py-1.2.3/src/gdata/oauth/rsa.py new file mode 100755 index 0000000..f8d9b85 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/oauth/rsa.py @@ -0,0 +1,120 @@ +#!/usr/bin/python + +""" +requires tlslite - http://trevp.net/tlslite/ + +""" + +import binascii + +from gdata.tlslite.utils import keyfactory +from gdata.tlslite.utils import cryptomath + +# XXX andy: ugly local import due to module name, oauth.oauth +import gdata.oauth as oauth + +class OAuthSignatureMethod_RSA_SHA1(oauth.OAuthSignatureMethod): + def get_name(self): + return "RSA-SHA1" + + def _fetch_public_cert(self, oauth_request): + # not implemented yet, ideas are: + # (1) do a lookup in a table of trusted certs keyed off of consumer + # (2) fetch via http using a url provided by the requester + # (3) some sort of specific discovery code based on request + # + # either way should return a string representation of the certificate + raise NotImplementedError + + def _fetch_private_cert(self, oauth_request): + # not implemented yet, ideas are: + # (1) do a lookup in a table of trusted certs keyed off of consumer + # + # either way should return a string representation of the certificate + raise NotImplementedError + + def build_signature_base_string(self, oauth_request, consumer, token): + sig = ( + oauth.escape(oauth_request.get_normalized_http_method()), + 
oauth.escape(oauth_request.get_normalized_http_url()), + oauth.escape(oauth_request.get_normalized_parameters()), + ) + key = '' + raw = '&'.join(sig) + return key, raw + + def build_signature(self, oauth_request, consumer, token): + key, base_string = self.build_signature_base_string(oauth_request, + consumer, + token) + + # Fetch the private key cert based on the request + cert = self._fetch_private_cert(oauth_request) + + # Pull the private key from the certificate + privatekey = keyfactory.parsePrivateKey(cert) + + # Convert base_string to bytes + #base_string_bytes = cryptomath.createByteArraySequence(base_string) + + # Sign using the key + signed = privatekey.hashAndSign(base_string) + + return binascii.b2a_base64(signed)[:-1] + + def check_signature(self, oauth_request, consumer, token, signature): + decoded_sig = base64.b64decode(signature); + + key, base_string = self.build_signature_base_string(oauth_request, + consumer, + token) + + # Fetch the public key cert based on the request + cert = self._fetch_public_cert(oauth_request) + + # Pull the public key from the certificate + publickey = keyfactory.parsePEMKey(cert, public=True) + + # Check the signature + ok = publickey.hashAndVerify(decoded_sig, base_string) + + return ok + + +class TestOAuthSignatureMethod_RSA_SHA1(OAuthSignatureMethod_RSA_SHA1): + def _fetch_public_cert(self, oauth_request): + cert = """ +-----BEGIN CERTIFICATE----- +MIIBpjCCAQ+gAwIBAgIBATANBgkqhkiG9w0BAQUFADAZMRcwFQYDVQQDDA5UZXN0 +IFByaW5jaXBhbDAeFw03MDAxMDEwODAwMDBaFw0zODEyMzEwODAwMDBaMBkxFzAV +BgNVBAMMDlRlc3QgUHJpbmNpcGFsMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB +gQC0YjCwIfYoprq/FQO6lb3asXrxLlJFuCvtinTF5p0GxvQGu5O3gYytUvtC2JlY +zypSRjVxwxrsuRcP3e641SdASwfrmzyvIgP08N4S0IFzEURkV1wp/IpH7kH41Etb +mUmrXSwfNZsnQRE5SYSOhh+LcK2wyQkdgcMv11l4KoBkcwIDAQABMA0GCSqGSIb3 +DQEBBQUAA4GBAGZLPEuJ5SiJ2ryq+CmEGOXfvlTtEL2nuGtr9PewxkgnOjZpUy+d +4TvuXJbNQc8f4AMWL/tO9w0Fk80rWKp9ea8/df4qMq5qlFWlx6yOLQxumNOmECKb 
+WpkUQDIDJEoFUzKMVuJf4KO/FJ345+BNLGgbJ6WujreoM1X/gYfdnJ/J +-----END CERTIFICATE----- +""" + return cert + + def _fetch_private_cert(self, oauth_request): + cert = """ +-----BEGIN PRIVATE KEY----- +MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALRiMLAh9iimur8V +A7qVvdqxevEuUkW4K+2KdMXmnQbG9Aa7k7eBjK1S+0LYmVjPKlJGNXHDGuy5Fw/d +7rjVJ0BLB+ubPK8iA/Tw3hLQgXMRRGRXXCn8ikfuQfjUS1uZSatdLB81mydBETlJ +hI6GH4twrbDJCR2Bwy/XWXgqgGRzAgMBAAECgYBYWVtleUzavkbrPjy0T5FMou8H +X9u2AC2ry8vD/l7cqedtwMPp9k7TubgNFo+NGvKsl2ynyprOZR1xjQ7WgrgVB+mm +uScOM/5HVceFuGRDhYTCObE+y1kxRloNYXnx3ei1zbeYLPCHdhxRYW7T0qcynNmw +rn05/KO2RLjgQNalsQJBANeA3Q4Nugqy4QBUCEC09SqylT2K9FrrItqL2QKc9v0Z +zO2uwllCbg0dwpVuYPYXYvikNHHg+aCWF+VXsb9rpPsCQQDWR9TT4ORdzoj+Nccn +qkMsDmzt0EfNaAOwHOmVJ2RVBspPcxt5iN4HI7HNeG6U5YsFBb+/GZbgfBT3kpNG +WPTpAkBI+gFhjfJvRw38n3g/+UeAkwMI2TJQS4n8+hid0uus3/zOjDySH3XHCUno +cn1xOJAyZODBo47E+67R4jV1/gzbAkEAklJaspRPXP877NssM5nAZMU0/O/NGCZ+ +3jPgDUno6WbJn5cqm8MqWhW1xGkImgRk+fkDBquiq4gPiT898jusgQJAd5Zrr6Q8 +AO/0isr/3aa6O6NLQxISLKcPDk2NOccAfS/xOtfOz4sJYM3+Bs4Io9+dZGSDCA54 +Lw03eHTNQghS0A== +-----END PRIVATE KEY----- +""" + return cert diff --git a/gdata.py-1.2.3/src/gdata/photos/__init__.py b/gdata.py-1.2.3/src/gdata/photos/__init__.py new file mode 100644 index 0000000..1952135 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/photos/__init__.py @@ -0,0 +1,1112 @@ +# -*-*- encoding: utf-8 -*-*- +# +# This is the base file for the PicasaWeb python client. +# It is used for lower level operations. +# +# $Id: __init__.py 148 2007-10-28 15:09:19Z havard.gulldahl $ +# +# Copyright 2007 Håvard Gulldahl +# Portions (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This module provides a pythonic, gdata-centric interface to Google Photos +(a.k.a. Picasa Web Services. + +It is modelled after the gdata/* interfaces from the gdata-python-client +project[1] by Google. + +You'll find the user-friendly api in photos.service. Please see the +documentation or live help() system for available methods. + +[1]: http://gdata-python-client.googlecode.com/ + + """ + +__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__ +__license__ = 'Apache License v2' +__version__ = '$Revision: 164 $'[11:-2] + +import re +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import atom +import gdata + +# importing google photo submodules +import gdata.media as Media, gdata.exif as Exif, gdata.geo as Geo + +# XML namespaces which are often used in Google Photo elements +PHOTOS_NAMESPACE = 'http://schemas.google.com/photos/2007' +MEDIA_NAMESPACE = 'http://search.yahoo.com/mrss/' +EXIF_NAMESPACE = 'http://schemas.google.com/photos/exif/2007' +OPENSEARCH_NAMESPACE = 'http://a9.com/-/spec/opensearchrss/1.0/' +GEO_NAMESPACE = 'http://www.w3.org/2003/01/geo/wgs84_pos#' +GML_NAMESPACE = 'http://www.opengis.net/gml' +GEORSS_NAMESPACE = 'http://www.georss.org/georss' +PHEED_NAMESPACE = 'http://www.pheed.com/pheed/' +BATCH_NAMESPACE = 'http://schemas.google.com/gdata/batch' + + +class 
PhotosBaseElement(atom.AtomBase): + """Base class for elements in the PHOTO_NAMESPACE. To add new elements, + you only need to add the element tag name to self._tag + """ + + _tag = '' + _namespace = PHOTOS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, name=None, extension_elements=None, + extension_attributes=None, text=None): + self.name = name + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + #def __str__(self): + #return str(self.text) + #def __unicode__(self): + #return unicode(self.text) + def __int__(self): + return int(self.text) + def bool(self): + return self.text == 'true' + +class GPhotosBaseFeed(gdata.GDataFeed, gdata.LinkFinder): + "Base class for all Feeds in gdata.photos" + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _attributes = gdata.GDataFeed._attributes.copy() + _children = gdata.GDataFeed._children.copy() + # We deal with Entry elements ourselves + del _children['{%s}entry' % atom.ATOM_NAMESPACE] + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None): + gdata.GDataFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + def kind(self): + "(string) Returns the kind" + try: + return self.category[0].term.split('#')[1] + except IndexError: + 
return None + + def _feedUri(self, kind): + "Convenience method to return a uri to a feed of a special kind" + assert(kind in ('album', 'tag', 'photo', 'comment', 'user')) + here_href = self.GetSelfLink().href + if 'kind=%s' % kind in here_href: + return here_href + if not 'kind=' in here_href: + sep = '?' + if '?' in here_href: sep = '&' + return here_href + "%skind=%s" % (sep, kind) + rx = re.match('.*(kind=)(album|tag|photo|comment)', here_href) + return here_href[:rx.end(1)] + kind + here_href[rx.end(2):] + + def _ConvertElementTreeToMember(self, child_tree): + """Re-implementing the method from AtomBase, since we deal with + Entry elements specially""" + category = child_tree.find('{%s}category' % atom.ATOM_NAMESPACE) + if category is None: + return atom.AtomBase._ConvertElementTreeToMember(self, child_tree) + namespace, kind = category.get('term').split('#') + if namespace != PHOTOS_NAMESPACE: + return atom.AtomBase._ConvertElementTreeToMember(self, child_tree) + ## TODO: is it safe to use getattr on gdata.photos? 
+ entry_class = getattr(gdata.photos, '%sEntry' % kind.title()) + if not hasattr(self, 'entry') or self.entry is None: + self.entry = [] + self.entry.append(atom._CreateClassFromElementTree( + entry_class, child_tree)) + +class GPhotosBaseEntry(gdata.GDataEntry, gdata.LinkFinder): + "Base class for all Entry elements in gdata.photos" + _tag = 'entry' + _kind = '' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, + updated=updated, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + self.category.append( + atom.Category(scheme='http://schemas.google.com/g/2005#kind', + term = 'http://schemas.google.com/photos/2007#%s' % self._kind)) + + def kind(self): + "(string) Returns the kind" + try: + return self.category[0].term.split('#')[1] + except IndexError: + return None + + def _feedUri(self, kind): + "Convenience method to get the uri to this entry's feed of the some kind" + try: + href = self.GetFeedLink().href + except AttributeError: + return None + sep = '?' + if '?' in href: sep = '&' + return '%s%skind=%s' % (href, sep, kind) + + +class PhotosBaseEntry(GPhotosBaseEntry): + pass + +class PhotosBaseFeed(GPhotosBaseFeed): + pass + +class GPhotosBaseData(object): + pass + +class Access(PhotosBaseElement): + """The Google Photo `Access' element. + + The album's access level. Valid values are `public' or `private'. 
+ In documentation, access level is also referred to as `visibility.'""" + + _tag = 'access' +def AccessFromString(xml_string): + return atom.CreateClassFromXMLString(Access, xml_string) + +class Albumid(PhotosBaseElement): + "The Google Photo `Albumid' element" + + _tag = 'albumid' +def AlbumidFromString(xml_string): + return atom.CreateClassFromXMLString(Albumid, xml_string) + +class BytesUsed(PhotosBaseElement): + "The Google Photo `BytesUsed' element" + + _tag = 'bytesUsed' +def BytesUsedFromString(xml_string): + return atom.CreateClassFromXMLString(BytesUsed, xml_string) + +class Client(PhotosBaseElement): + "The Google Photo `Client' element" + + _tag = 'client' +def ClientFromString(xml_string): + return atom.CreateClassFromXMLString(Client, xml_string) + +class Checksum(PhotosBaseElement): + "The Google Photo `Checksum' element" + + _tag = 'checksum' +def ChecksumFromString(xml_string): + return atom.CreateClassFromXMLString(Checksum, xml_string) + +class CommentCount(PhotosBaseElement): + "The Google Photo `CommentCount' element" + + _tag = 'commentCount' +def CommentCountFromString(xml_string): + return atom.CreateClassFromXMLString(CommentCount, xml_string) + +class CommentingEnabled(PhotosBaseElement): + "The Google Photo `CommentingEnabled' element" + + _tag = 'commentingEnabled' +def CommentingEnabledFromString(xml_string): + return atom.CreateClassFromXMLString(CommentingEnabled, xml_string) + +class Height(PhotosBaseElement): + "The Google Photo `Height' element" + + _tag = 'height' +def HeightFromString(xml_string): + return atom.CreateClassFromXMLString(Height, xml_string) + +class Id(PhotosBaseElement): + "The Google Photo `Id' element" + + _tag = 'id' +def IdFromString(xml_string): + return atom.CreateClassFromXMLString(Id, xml_string) + +class Location(PhotosBaseElement): + "The Google Photo `Location' element" + + _tag = 'location' +def LocationFromString(xml_string): + return atom.CreateClassFromXMLString(Location, xml_string) + +class 
MaxPhotosPerAlbum(PhotosBaseElement): + "The Google Photo `MaxPhotosPerAlbum' element" + + _tag = 'maxPhotosPerAlbum' +def MaxPhotosPerAlbumFromString(xml_string): + return atom.CreateClassFromXMLString(MaxPhotosPerAlbum, xml_string) + +class Name(PhotosBaseElement): + "The Google Photo `Name' element" + + _tag = 'name' +def NameFromString(xml_string): + return atom.CreateClassFromXMLString(Name, xml_string) + +class Nickname(PhotosBaseElement): + "The Google Photo `Nickname' element" + + _tag = 'nickname' +def NicknameFromString(xml_string): + return atom.CreateClassFromXMLString(Nickname, xml_string) + +class Numphotos(PhotosBaseElement): + "The Google Photo `Numphotos' element" + + _tag = 'numphotos' +def NumphotosFromString(xml_string): + return atom.CreateClassFromXMLString(Numphotos, xml_string) + +class Numphotosremaining(PhotosBaseElement): + "The Google Photo `Numphotosremaining' element" + + _tag = 'numphotosremaining' +def NumphotosremainingFromString(xml_string): + return atom.CreateClassFromXMLString(Numphotosremaining, xml_string) + +class Position(PhotosBaseElement): + "The Google Photo `Position' element" + + _tag = 'position' +def PositionFromString(xml_string): + return atom.CreateClassFromXMLString(Position, xml_string) + +class Photoid(PhotosBaseElement): + "The Google Photo `Photoid' element" + + _tag = 'photoid' +def PhotoidFromString(xml_string): + return atom.CreateClassFromXMLString(Photoid, xml_string) + +class Quotacurrent(PhotosBaseElement): + "The Google Photo `Quotacurrent' element" + + _tag = 'quotacurrent' +def QuotacurrentFromString(xml_string): + return atom.CreateClassFromXMLString(Quotacurrent, xml_string) + +class Quotalimit(PhotosBaseElement): + "The Google Photo `Quotalimit' element" + + _tag = 'quotalimit' +def QuotalimitFromString(xml_string): + return atom.CreateClassFromXMLString(Quotalimit, xml_string) + +class Rotation(PhotosBaseElement): + "The Google Photo `Rotation' element" + + _tag = 'rotation' +def 
RotationFromString(xml_string): + return atom.CreateClassFromXMLString(Rotation, xml_string) + +class Size(PhotosBaseElement): + "The Google Photo `Size' element" + + _tag = 'size' +def SizeFromString(xml_string): + return atom.CreateClassFromXMLString(Size, xml_string) + +class Snippet(PhotosBaseElement): + """The Google Photo `snippet' element. + + When searching, the snippet element will contain a + string with the word you're looking for, highlighted in html markup + E.g. when your query is `hafjell', this element may contain: + `... here at Hafjell.' + + You'll find this element in searches -- that is, feeds that combine the + `kind=photo' and `q=yoursearch' parameters in the request. + + See also gphoto:truncated and gphoto:snippettype. + + """ + + _tag = 'snippet' +def SnippetFromString(xml_string): + return atom.CreateClassFromXMLString(Snippet, xml_string) + +class Snippettype(PhotosBaseElement): + """The Google Photo `Snippettype' element + + When searching, this element will tell you the type of element that matches. + + You'll find this element in searches -- that is, feeds that combine the + `kind=photo' and `q=yoursearch' parameters in the request. + + See also gphoto:snippet and gphoto:truncated. + + Possible values and their interpretation: + o ALBUM_TITLE - The album title matches + o PHOTO_TAGS - The match is a tag/keyword + o PHOTO_DESCRIPTION - The match is in the photo's description + + If you discover a value not listed here, please submit a patch to update this docstring. + + """ + + _tag = 'snippettype' +def SnippettypeFromString(xml_string): + return atom.CreateClassFromXMLString(Snippettype, xml_string) + +class Thumbnail(PhotosBaseElement): + """The Google Photo `Thumbnail' element + + Used to display user's photo thumbnail (hackergotchi). 
+ + (Not to be confused with the element, which gives you + small versions of the photo object.)""" + + _tag = 'thumbnail' +def ThumbnailFromString(xml_string): + return atom.CreateClassFromXMLString(Thumbnail, xml_string) + +class Timestamp(PhotosBaseElement): + """The Google Photo `Timestamp' element + Represented as the number of milliseconds since January 1st, 1970. + + + Take a look at the convenience methods .isoformat() and .datetime(): + + photo_epoch = Time.text # 1180294337000 + photo_isostring = Time.isoformat() # '2007-05-27T19:32:17.000Z' + + Alternatively: + photo_datetime = Time.datetime() # (requires python >= 2.3) + """ + + _tag = 'timestamp' + def isoformat(self): + """(string) Return the timestamp as a ISO 8601 formatted string, + e.g. '2007-05-27T19:32:17.000Z' + """ + import time + epoch = float(self.text)/1000 + return time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime(epoch)) + + def datetime(self): + """(datetime.datetime) Return the timestamp as a datetime.datetime object + + Requires python 2.3 + """ + import datetime + epoch = float(self.text)/1000 + return datetime.datetime.fromtimestamp(epoch) +def TimestampFromString(xml_string): + return atom.CreateClassFromXMLString(Timestamp, xml_string) + +class Truncated(PhotosBaseElement): + """The Google Photo `Truncated' element + + You'll find this element in searches -- that is, feeds that combine the + `kind=photo' and `q=yoursearch' parameters in the request. + + See also gphoto:snippet and gphoto:snippettype. 
+ + Possible values and their interpretation: + 0 -- unknown + """ + + _tag = 'Truncated' +def TruncatedFromString(xml_string): + return atom.CreateClassFromXMLString(Truncated, xml_string) + +class User(PhotosBaseElement): + "The Google Photo `User' element" + + _tag = 'user' +def UserFromString(xml_string): + return atom.CreateClassFromXMLString(User, xml_string) + +class Version(PhotosBaseElement): + "The Google Photo `Version' element" + + _tag = 'version' +def VersionFromString(xml_string): + return atom.CreateClassFromXMLString(Version, xml_string) + +class Width(PhotosBaseElement): + "The Google Photo `Width' element" + + _tag = 'width' +def WidthFromString(xml_string): + return atom.CreateClassFromXMLString(Width, xml_string) + +class Weight(PhotosBaseElement): + """The Google Photo `Weight' element. + + The weight of the tag is the number of times the tag + appears in the collection of tags currently being viewed. + The default weight is 1, in which case this tags is omitted.""" + _tag = 'weight' +def WeightFromString(xml_string): + return atom.CreateClassFromXMLString(Weight, xml_string) + +class CommentAuthor(atom.Author): + """The Atom `Author' element in CommentEntry entries is augmented to + contain elements from the PHOTOS_NAMESPACE + + http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38 + """ + _children = atom.Author._children.copy() + _children['{%s}user' % PHOTOS_NAMESPACE] = ('user', User) + _children['{%s}nickname' % PHOTOS_NAMESPACE] = ('nickname', Nickname) + _children['{%s}thumbnail' % PHOTOS_NAMESPACE] = ('thumbnail', Thumbnail) +def CommentAuthorFromString(xml_string): + return atom.CreateClassFromXMLString(CommentAuthor, xml_string) + +########################## ################################ + +class AlbumData(object): + _children = {} + _children['{%s}id' % PHOTOS_NAMESPACE] = ('gphoto_id', Id) + _children['{%s}name' % PHOTOS_NAMESPACE] = ('name', Name) + _children['{%s}location' % PHOTOS_NAMESPACE] = 
('location', Location) + _children['{%s}access' % PHOTOS_NAMESPACE] = ('access', Access) + _children['{%s}bytesUsed' % PHOTOS_NAMESPACE] = ('bytesUsed', BytesUsed) + _children['{%s}timestamp' % PHOTOS_NAMESPACE] = ('timestamp', Timestamp) + _children['{%s}numphotos' % PHOTOS_NAMESPACE] = ('numphotos', Numphotos) + _children['{%s}numphotosremaining' % PHOTOS_NAMESPACE] = \ + ('numphotosremaining', Numphotosremaining) + _children['{%s}user' % PHOTOS_NAMESPACE] = ('user', User) + _children['{%s}nickname' % PHOTOS_NAMESPACE] = ('nickname', Nickname) + _children['{%s}commentingEnabled' % PHOTOS_NAMESPACE] = \ + ('commentingEnabled', CommentingEnabled) + _children['{%s}commentCount' % PHOTOS_NAMESPACE] = \ + ('commentCount', CommentCount) + ## NOTE: storing media:group as self.media, to create a self-explaining api + gphoto_id = None + name = None + location = None + access = None + bytesUsed = None + timestamp = None + numphotos = None + numphotosremaining = None + user = None + nickname = None + commentingEnabled = None + commentCount = None + +class AlbumEntry(GPhotosBaseEntry, AlbumData): + """All metadata for a Google Photos Album + + Take a look at AlbumData for metadata accessible as attributes to this object. 
+ + Notes: + To avoid name clashes, and to create a more sensible api, some + objects have names that differ from the original elements: + + o media:group -> self.media, + o geo:where -> self.geo, + o photo:id -> self.gphoto_id + """ + + _kind = 'album' + _children = GPhotosBaseEntry._children.copy() + _children.update(AlbumData._children.copy()) + # child tags only for Album entries, not feeds + _children['{%s}where' % GEORSS_NAMESPACE] = ('geo', Geo.Where) + _children['{%s}group' % MEDIA_NAMESPACE] = ('media', Media.Group) + media = Media.Group() + geo = Geo.Where() + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + #GPHOTO NAMESPACE: + gphoto_id=None, name=None, location=None, access=None, + timestamp=None, numphotos=None, user=None, nickname=None, + commentingEnabled=None, commentCount=None, thumbnail=None, + # MEDIA NAMESPACE: + media=None, + # GEORSS NAMESPACE: + geo=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + GPhotosBaseEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, + updated=updated, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id + self.gphoto_id = gphoto_id + self.name = name + self.location = location + self.access = access + self.timestamp = timestamp + self.numphotos = numphotos + self.user = user + self.nickname = nickname + self.commentingEnabled = commentingEnabled + self.commentCount = commentCount + self.thumbnail = thumbnail + self.extended_property = extended_property or [] + self.text = text + ## NOTE: storing media:group as self.media, and geo:where as geo, + ## to create a self-explaining api + self.media = media or Media.Group() + self.geo = geo or Geo.Where() + + def 
GetAlbumId(self): + "Return the id of this album" + + return self.GetFeedLink().href.split('/')[-1] + + def GetPhotosUri(self): + "(string) Return the uri to this albums feed of the PhotoEntry kind" + return self._feedUri('photo') + + def GetCommentsUri(self): + "(string) Return the uri to this albums feed of the CommentEntry kind" + return self._feedUri('comment') + + def GetTagsUri(self): + "(string) Return the uri to this albums feed of the TagEntry kind" + return self._feedUri('tag') + +def AlbumEntryFromString(xml_string): + return atom.CreateClassFromXMLString(AlbumEntry, xml_string) + +class AlbumFeed(GPhotosBaseFeed, AlbumData): + """All metadata for a Google Photos Album, including its sub-elements + + This feed represents an album as the container for other objects. + + A Album feed contains entries of + PhotoEntry, CommentEntry or TagEntry, + depending on the `kind' parameter in the original query. + + Take a look at AlbumData for accessible attributes. + + """ + + _children = GPhotosBaseFeed._children.copy() + _children.update(AlbumData._children.copy()) + + def GetPhotosUri(self): + "(string) Return the uri to the same feed, but of the PhotoEntry kind" + + return self._feedUri('photo') + + def GetTagsUri(self): + "(string) Return the uri to the same feed, but of the TagEntry kind" + + return self._feedUri('tag') + + def GetCommentsUri(self): + "(string) Return the uri to the same feed, but of the CommentEntry kind" + + return self._feedUri('comment') + +def AlbumFeedFromString(xml_string): + return atom.CreateClassFromXMLString(AlbumFeed, xml_string) + + +class PhotoData(object): + _children = {} + ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id + _children['{%s}id' % PHOTOS_NAMESPACE] = ('gphoto_id', Id) + _children['{%s}albumid' % PHOTOS_NAMESPACE] = ('albumid', Albumid) + _children['{%s}checksum' % PHOTOS_NAMESPACE] = ('checksum', Checksum) + _children['{%s}client' % PHOTOS_NAMESPACE] = ('client', Client) + 
_children['{%s}height' % PHOTOS_NAMESPACE] = ('height', Height) + _children['{%s}position' % PHOTOS_NAMESPACE] = ('position', Position) + _children['{%s}rotation' % PHOTOS_NAMESPACE] = ('rotation', Rotation) + _children['{%s}size' % PHOTOS_NAMESPACE] = ('size', Size) + _children['{%s}timestamp' % PHOTOS_NAMESPACE] = ('timestamp', Timestamp) + _children['{%s}version' % PHOTOS_NAMESPACE] = ('version', Version) + _children['{%s}width' % PHOTOS_NAMESPACE] = ('width', Width) + _children['{%s}commentingEnabled' % PHOTOS_NAMESPACE] = \ + ('commentingEnabled', CommentingEnabled) + _children['{%s}commentCount' % PHOTOS_NAMESPACE] = \ + ('commentCount', CommentCount) + ## NOTE: storing media:group as self.media, exif:tags as self.exif, and + ## geo:where as self.geo, to create a self-explaining api + _children['{%s}tags' % EXIF_NAMESPACE] = ('exif', Exif.Tags) + _children['{%s}where' % GEORSS_NAMESPACE] = ('geo', Geo.Where) + _children['{%s}group' % MEDIA_NAMESPACE] = ('media', Media.Group) + # These elements show up in search feeds + _children['{%s}snippet' % PHOTOS_NAMESPACE] = ('snippet', Snippet) + _children['{%s}snippettype' % PHOTOS_NAMESPACE] = ('snippettype', Snippettype) + _children['{%s}truncated' % PHOTOS_NAMESPACE] = ('truncated', Truncated) + gphoto_id = None + albumid = None + checksum = None + client = None + height = None + position = None + rotation = None + size = None + timestamp = None + version = None + width = None + commentingEnabled = None + commentCount = None + snippet=None + snippettype=None + truncated=None + media = Media.Group() + geo = Geo.Where() + tags = Exif.Tags() + +class PhotoEntry(GPhotosBaseEntry, PhotoData): + """All metadata for a Google Photos Photo + + Take a look at PhotoData for metadata accessible as attributes to this object. 
+ + Notes: + To avoid name clashes, and to create a more sensible api, some + objects have names that differ from the original elements: + + o media:group -> self.media, + o exif:tags -> self.exif, + o geo:where -> self.geo, + o photo:id -> self.gphoto_id + """ + + _kind = 'photo' + _children = GPhotosBaseEntry._children.copy() + _children.update(PhotoData._children.copy()) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, text=None, + # GPHOTO NAMESPACE: + gphoto_id=None, albumid=None, checksum=None, client=None, height=None, + position=None, rotation=None, size=None, timestamp=None, version=None, + width=None, commentCount=None, commentingEnabled=None, + # MEDIARSS NAMESPACE: + media=None, + # EXIF_NAMESPACE: + exif=None, + # GEORSS NAMESPACE: + geo=None, + extension_elements=None, extension_attributes=None): + GPhotosBaseEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + + ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id + self.gphoto_id = gphoto_id + self.albumid = albumid + self.checksum = checksum + self.client = client + self.height = height + self.position = position + self.rotation = rotation + self.size = size + self.timestamp = timestamp + self.version = version + self.width = width + self.commentingEnabled = commentingEnabled + self.commentCount = commentCount + ## NOTE: storing media:group as self.media, to create a self-explaining api + self.media = media or Media.Group() + self.exif = exif or Exif.Tags() + self.geo = geo or Geo.Where() + + def GetPostLink(self): + "Return the uri to this photo's `POST' link (use it for updates of the object)" + + return self.GetFeedLink() + + def GetCommentsUri(self): + "Return the uri to this photo's 
feed of CommentEntry comments" + return self._feedUri('comment') + + def GetTagsUri(self): + "Return the uri to this photo's feed of TagEntry tags" + return self._feedUri('tag') + + def GetAlbumUri(self): + """Return the uri to the AlbumEntry containing this photo""" + + href = self.GetSelfLink().href + return href[:href.find('/photoid')] + +def PhotoEntryFromString(xml_string): + return atom.CreateClassFromXMLString(PhotoEntry, xml_string) + +class PhotoFeed(GPhotosBaseFeed, PhotoData): + """All metadata for a Google Photos Photo, including its sub-elements + + This feed represents a photo as the container for other objects. + + A Photo feed contains entries of + CommentEntry or TagEntry, + depending on the `kind' parameter in the original query. + + Take a look at PhotoData for metadata accessible as attributes to this object. + + """ + _children = GPhotosBaseFeed._children.copy() + _children.update(PhotoData._children.copy()) + + def GetTagsUri(self): + "(string) Return the uri to the same feed, but of the TagEntry kind" + + return self._feedUri('tag') + + def GetCommentsUri(self): + "(string) Return the uri to the same feed, but of the CommentEntry kind" + + return self._feedUri('comment') + +def PhotoFeedFromString(xml_string): + return atom.CreateClassFromXMLString(PhotoFeed, xml_string) + +class TagData(GPhotosBaseData): + _children = {} + _children['{%s}weight' % PHOTOS_NAMESPACE] = ('weight', Weight) + weight=None + +class TagEntry(GPhotosBaseEntry, TagData): + """All metadata for a Google Photos Tag + + The actual tag is stored in the .title.text attribute + + """ + + _kind = 'tag' + _children = GPhotosBaseEntry._children.copy() + _children.update(TagData._children.copy()) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + # GPHOTO NAMESPACE: + weight=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + 
GPhotosBaseEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated, text=text, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + self.weight = weight + + def GetAlbumUri(self): + """Return the uri to the AlbumEntry containing this tag""" + + href = self.GetSelfLink().href + pos = href.find('/photoid') + if pos == -1: + return None + return href[:pos] + + def GetPhotoUri(self): + """Return the uri to the PhotoEntry containing this tag""" + + href = self.GetSelfLink().href + pos = href.find('/tag') + if pos == -1: + return None + return href[:pos] + +def TagEntryFromString(xml_string): + return atom.CreateClassFromXMLString(TagEntry, xml_string) + + +class TagFeed(GPhotosBaseFeed, TagData): + """All metadata for a Google Photos Tag, including its sub-elements""" + + _children = GPhotosBaseFeed._children.copy() + _children.update(TagData._children.copy()) + +def TagFeedFromString(xml_string): + return atom.CreateClassFromXMLString(TagFeed, xml_string) + +class CommentData(GPhotosBaseData): + _children = {} + ## NOTE: storing photo:id as self.gphoto_id, to avoid name clash with atom:id + _children['{%s}id' % PHOTOS_NAMESPACE] = ('gphoto_id', Id) + _children['{%s}albumid' % PHOTOS_NAMESPACE] = ('albumid', Albumid) + _children['{%s}photoid' % PHOTOS_NAMESPACE] = ('photoid', Photoid) + _children['{%s}author' % atom.ATOM_NAMESPACE] = ('author', [CommentAuthor,]) + gphoto_id=None + albumid=None + photoid=None + author=None + +class CommentEntry(GPhotosBaseEntry, CommentData): + """All metadata for a Google Photos Comment + + The comment is stored in the .content.text attribute, + with a content type in .content.type. 
+ + + """ + + _kind = 'comment' + _children = GPhotosBaseEntry._children.copy() + _children.update(CommentData._children.copy()) + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + # GPHOTO NAMESPACE: + gphoto_id=None, albumid=None, photoid=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + GPhotosBaseEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + self.gphoto_id = gphoto_id + self.albumid = albumid + self.photoid = photoid + + def GetCommentId(self): + """Return the globally unique id of this comment""" + return self.GetSelfLink().href.split('/')[-1] + + def GetAlbumUri(self): + """Return the uri to the AlbumEntry containing this comment""" + + href = self.GetSelfLink().href + return href[:href.find('/photoid')] + + def GetPhotoUri(self): + """Return the uri to the PhotoEntry containing this comment""" + + href = self.GetSelfLink().href + return href[:href.find('/commentid')] + +def CommentEntryFromString(xml_string): + return atom.CreateClassFromXMLString(CommentEntry, xml_string) + +class CommentFeed(GPhotosBaseFeed, CommentData): + """All metadata for a Google Photos Comment, including its sub-elements""" + + _children = GPhotosBaseFeed._children.copy() + _children.update(CommentData._children.copy()) + +def CommentFeedFromString(xml_string): + return atom.CreateClassFromXMLString(CommentFeed, xml_string) + +class UserData(GPhotosBaseData): + _children = {} + _children['{%s}maxPhotosPerAlbum' % PHOTOS_NAMESPACE] = ('maxPhotosPerAlbum', MaxPhotosPerAlbum) + _children['{%s}nickname' % PHOTOS_NAMESPACE] = ('nickname', Nickname) + _children['{%s}quotalimit' % PHOTOS_NAMESPACE] = ('quotalimit', Quotalimit) + 
_children['{%s}quotacurrent' % PHOTOS_NAMESPACE] = ('quotacurrent', Quotacurrent) + _children['{%s}thumbnail' % PHOTOS_NAMESPACE] = ('thumbnail', Thumbnail) + _children['{%s}user' % PHOTOS_NAMESPACE] = ('user', User) + _children['{%s}id' % PHOTOS_NAMESPACE] = ('gphoto_id', Id) + + maxPhotosPerAlbum=None + nickname=None + quotalimit=None + quotacurrent=None + thumbnail=None + user=None + gphoto_id=None + + +class UserEntry(GPhotosBaseEntry, UserData): + """All metadata for a Google Photos User + + This entry represents an album owner and all appropriate metadata. + + Take a look at at the attributes of the UserData for metadata available. + """ + _children = GPhotosBaseEntry._children.copy() + _children.update(UserData._children.copy()) + _kind = 'user' + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, + title=None, updated=None, + # GPHOTO NAMESPACE: + gphoto_id=None, maxPhotosPerAlbum=None, nickname=None, quotalimit=None, + quotacurrent=None, thumbnail=None, user=None, + extended_property=None, + extension_elements=None, extension_attributes=None, text=None): + + GPhotosBaseEntry.__init__(self, author=author, category=category, + content=content, + atom_id=atom_id, link=link, published=published, + title=title, updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + + self.gphoto_id=gphoto_id + self.maxPhotosPerAlbum=maxPhotosPerAlbum + self.nickname=nickname + self.quotalimit=quotalimit + self.quotacurrent=quotacurrent + self.thumbnail=thumbnail + self.user=user + + def GetAlbumsUri(self): + "(string) Return the uri to this user's feed of the AlbumEntry kind" + return self._feedUri('album') + + def GetPhotosUri(self): + "(string) Return the uri to this user's feed of the PhotoEntry kind" + return self._feedUri('photo') + + def GetCommentsUri(self): + "(string) Return the uri to this user's feed of the CommentEntry kind" + return 
self._feedUri('comment') + + def GetTagsUri(self): + "(string) Return the uri to this user's feed of the TagEntry kind" + return self._feedUri('tag') + +def UserEntryFromString(xml_string): + return atom.CreateClassFromXMLString(UserEntry, xml_string) + +class UserFeed(GPhotosBaseFeed, UserData): + """Feed for a User in the google photos api. + + This feed represents a user as the container for other objects. + + A User feed contains entries of + AlbumEntry, PhotoEntry, CommentEntry, UserEntry or TagEntry, + depending on the `kind' parameter in the original query. + + The user feed itself also contains all of the metadata available + as part of a UserData object.""" + _children = GPhotosBaseFeed._children.copy() + _children.update(UserData._children.copy()) + + def GetAlbumsUri(self): + """Get the uri to this feed, but with entries of the AlbumEntry kind.""" + return self._feedUri('album') + + def GetTagsUri(self): + """Get the uri to this feed, but with entries of the TagEntry kind.""" + return self._feedUri('tag') + + def GetPhotosUri(self): + """Get the uri to this feed, but with entries of the PhotosEntry kind.""" + return self._feedUri('photo') + + def GetCommentsUri(self): + """Get the uri to this feed, but with entries of the CommentsEntry kind.""" + return self._feedUri('comment') + +def UserFeedFromString(xml_string): + return atom.CreateClassFromXMLString(UserFeed, xml_string) + + + +def AnyFeedFromString(xml_string): + """Creates an instance of the appropriate feed class from the + xml string contents. + + Args: + xml_string: str A string which contains valid XML. The root element + of the XML string should match the tag and namespace of the desired + class. + + Returns: + An instance of the target class with members assigned according to the + contents of the XML - or a basic gdata.GDataFeed instance if it is + impossible to determine the appropriate class (look for extra elements + in GDataFeed's .FindExtensions() and extension_elements[] ). 
+ """ + tree = ElementTree.fromstring(xml_string) + category = tree.find('{%s}category' % atom.ATOM_NAMESPACE) + if category is None: + # TODO: is this the best way to handle this? + return atom._CreateClassFromElementTree(GPhotosBaseFeed, tree) + namespace, kind = category.get('term').split('#') + if namespace != PHOTOS_NAMESPACE: + # TODO: is this the best way to handle this? + return atom._CreateClassFromElementTree(GPhotosBaseFeed, tree) + ## TODO: is getattr safe this way? + feed_class = getattr(gdata.photos, '%sFeed' % kind.title()) + return atom._CreateClassFromElementTree(feed_class, tree) + +def AnyEntryFromString(xml_string): + """Creates an instance of the appropriate entry class from the + xml string contents. + + Args: + xml_string: str A string which contains valid XML. The root element + of the XML string should match the tag and namespace of the desired + class. + + Returns: + An instance of the target class with members assigned according to the + contents of the XML - or a basic gdata.GDataEndry instance if it is + impossible to determine the appropriate class (look for extra elements + in GDataEntry's .FindExtensions() and extension_elements[] ). + """ + tree = ElementTree.fromstring(xml_string) + category = tree.find('{%s}category' % atom.ATOM_NAMESPACE) + if category is None: + # TODO: is this the best way to handle this? + return atom._CreateClassFromElementTree(GPhotosBaseEntry, tree) + namespace, kind = category.get('term').split('#') + if namespace != PHOTOS_NAMESPACE: + # TODO: is this the best way to handle this? + return atom._CreateClassFromElementTree(GPhotosBaseEntry, tree) + ## TODO: is getattr safe this way? 
+ feed_class = getattr(gdata.photos, '%sEntry' % kind.title()) + return atom._CreateClassFromElementTree(feed_class, tree) + diff --git a/gdata.py-1.2.3/src/gdata/photos/service.py b/gdata.py-1.2.3/src/gdata/photos/service.py new file mode 100755 index 0000000..fc8815b --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/photos/service.py @@ -0,0 +1,685 @@ +#!/usr/bin/env python +# -*-*- encoding: utf-8 -*-*- +# +# This is the service file for the Google Photo python client. +# It is used for higher level operations. +# +# $Id: service.py 144 2007-10-25 21:03:34Z havard.gulldahl $ +# +# Copyright 2007 Håvard Gulldahl +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google PhotoService provides a human-friendly interface to +Google Photo (a.k.a Picasa Web) services[1]. + +It extends gdata.service.GDataService and as such hides all the +nasty details about authenticating, parsing and communicating with +Google Photos. 
+ +[1]: http://code.google.com/apis/picasaweb/gdata.html + +Example: + import gdata.photos, gdata.photos.service + pws = gdata.photos.service.PhotosService() + pws.ClientLogin(username, password) + #Get all albums + albums = pws.GetUserFeed().entry + # Get all photos in second album + photos = pws.GetFeed(albums[1].GetPhotosUri()).entry + # Get all tags for photos in second album and print them + tags = pws.GetFeed(albums[1].GetTagsUri()).entry + print [ tag.summary.text for tag in tags ] + # Get all comments for the first photos in list and print them + comments = pws.GetCommentFeed(photos[0].GetCommentsUri()).entry + print [ c.summary.text for c in comments ] + + # Get a photo to work with + photo = photos[0] + # Update metadata + + # Attributes from the namespace + photo.summary.text = u'A nice view from my veranda' + photo.title.text = u'Verandaview.jpg' + + # Attributes from the namespace + photo.media.keywords.text = u'Home, Long-exposure, Sunset' # Comma-separated + + # Adding attributes to media object + + # Rotate 90 degrees clockwise + photo.rotation = gdata.photos.Rotation(text='90') + + # Submit modified photo object + photo = pws.UpdatePhotoMetadata(photo) + + # Make sure you only modify the newly returned object, else you'll get + # versioning errors. 
See Optimistic-concurrency + + # Add comment to a picture + comment = pws.InsertComment(photo, u'I wish the water always was this warm') + + # Remove comment because it was silly + print "*blush*" + pws.Delete(comment.GetEditLink().href) + +""" + +__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__ +__license__ = 'Apache License v2' +__version__ = '$Revision: 176 $'[11:-2] + + +import sys, os.path, StringIO +import time +import gdata.service +import gdata +import atom.service +import atom +import gdata.photos + +SUPPORTED_UPLOAD_TYPES = ('bmp', 'jpeg', 'jpg', 'gif', 'png') + +UNKOWN_ERROR=1000 +GPHOTOS_BAD_REQUEST=400 +GPHOTOS_CONFLICT=409 +GPHOTOS_INTERNAL_SERVER_ERROR=500 +GPHOTOS_INVALID_ARGUMENT=601 +GPHOTOS_INVALID_CONTENT_TYPE=602 +GPHOTOS_NOT_AN_IMAGE=603 +GPHOTOS_INVALID_KIND=604 + +class GooglePhotosException(Exception): + def __init__(self, response): + + self.error_code = response['status'] + self.reason = response['reason'].strip() + if '' in str(response['body']): #general html message, discard it + response['body'] = "" + self.body = response['body'].strip() + self.message = "(%(status)s) %(body)s -- %(reason)s" % response + + #return explicit error codes + error_map = { '(12) Not an image':GPHOTOS_NOT_AN_IMAGE, + 'kind: That is not one of the acceptable values': + GPHOTOS_INVALID_KIND, + + } + for msg, code in error_map.iteritems(): + if self.body == msg: + self.error_code = code + break + self.args = [self.error_code, self.reason, self.body] + +class PhotosService(gdata.service.GDataService): + userUri = '/data/feed/api/user/%s' + + def __init__(self, email=None, password=None, + source=None, server='picasaweb.google.com', additional_headers=None): + """ GooglePhotosService constructor. + + Arguments: + email: string (optional) The e-mail address of the account to use for + authentication. + password: string (optional) The password of the account to use for + authentication. 
+ source: string (optional) The name of the user's application. + server: string (optional) The server the feed is hosted on. + additional_headers: dict (optional) Any additional HTTP headers to be + transmitted to the service in the form of key-value + pairs. + + Returns: + A PhotosService object used to communicate with the Google Photos + service. + """ + self.email = email + self.client = source + gdata.service.GDataService.__init__(self, email=self.email, password=password, + service='lh2', source=source, + server=server, + additional_headers=additional_headers) + + def GetFeed(self, uri, limit=None, start_index=None): + """Get a feed. + + The results are ordered by the values of their `updated' elements, + with the most recently updated entry appearing first in the feed. + + Arguments: + uri: the uri to fetch + limit (optional): the maximum number of entries to return. Defaults to what + the server returns. + + Returns: + one of gdata.photos.AlbumFeed, + gdata.photos.UserFeed, + gdata.photos.PhotoFeed, + gdata.photos.CommentFeed, + gdata.photos.TagFeed, + depending on the results of the query. + Raises: + GooglePhotosException + + See: + http://code.google.com/apis/picasaweb/gdata.html#Get_Album_Feed_Manual + """ + if limit is not None: + uri += '&max-results=%s' % limit + if start_index is not None: + uri += '&start-index=%s' % start_index + try: + return self.Get(uri, converter=gdata.photos.AnyFeedFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def GetEntry(self, uri, limit=None, start_index=None): + """Get an Entry. + + Arguments: + uri: the uri to the entry + limit (optional): the maximum number of entries to return. Defaults to what + the server returns. + + Returns: + one of gdata.photos.AlbumEntry, + gdata.photos.UserEntry, + gdata.photos.PhotoEntry, + gdata.photos.CommentEntry, + gdata.photos.TagEntry, + depending on the results of the query. 
+ Raises: + GooglePhotosException + """ + if limit is not None: + uri += '&max-results=%s' % limit + if start_index is not None: + uri += '&start-index=%s' % start_index + try: + return self.Get(uri, converter=gdata.photos.AnyEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def GetUserFeed(self, kind='album', user='default', limit=None): + """Get user-based feed, containing albums, photos, comments or tags; + defaults to albums. + + The entries are ordered by the values of their `updated' elements, + with the most recently updated entry appearing first in the feed. + + Arguments: + kind: the kind of entries to get, either `album', `photo', + `comment' or `tag', or a python list of these. Defaults to `album'. + user (optional): whose albums we're querying. Defaults to current user. + limit (optional): the maximum number of entries to return. + Defaults to everything the server returns. + + + Returns: + gdata.photos.UserFeed, containing appropriate Entry elements + + See: + http://code.google.com/apis/picasaweb/gdata.html#Get_Album_Feed_Manual + http://googledataapis.blogspot.com/2007/07/picasa-web-albums-adds-new-api-features.html + """ + if isinstance(kind, (list, tuple) ): + kind = ",".join(kind) + + uri = '/data/feed/api/user/%s?kind=%s' % (user, kind) + return self.GetFeed(uri, limit=limit) + + def GetTaggedPhotos(self, tag, user='default', limit=None): + """Get all photos belonging to a specific user, tagged by the given keyword + + Arguments: + tag: The tag you're looking for, e.g. `dog' + user (optional): Whose images/videos you want to search, defaults + to current user + limit (optional): the maximum number of entries to return. + Defaults to everything the server returns. 
+ + Returns: + gdata.photos.UserFeed containing PhotoEntry elements + """ + # Lower-casing because of + # http://code.google.com/p/gdata-issues/issues/detail?id=194 + uri = '/data/feed/api/user/%s?kind=photo&tag=%s' % (user, tag.lower()) + return self.GetFeed(uri, limit) + + def SearchUserPhotos(self, query, user='default', limit=100): + """Search through all photos for a specific user and return a feed. + This will look for matches in file names and image tags (a.k.a. keywords) + + Arguments: + query: The string you're looking for, e.g. `vacation' + user (optional): The username of whose photos you want to search, defaults + to current user. + limit (optional): Don't return more than `limit' hits, defaults to 100 + + Only public photos are searched, unless you are authenticated and + searching through your own photos. + + Returns: + gdata.photos.UserFeed with PhotoEntry elements + """ + uri = '/data/feed/api/user/%s?kind=photo&q=%s' % (user, query) + return self.GetFeed(uri, limit=limit) + + def SearchCommunityPhotos(self, query, limit=100): + """Search through all public photos and return a feed. + This will look for matches in file names and image tags (a.k.a. keywords) + + Arguments: + query: The string you're looking for, e.g. 
`vacation' + limit (optional): Don't return more than `limit' hits, defaults to 100 + + Returns: + gdata.GDataFeed with PhotoEntry elements + """ + uri='/data/feed/api/all?q=%s' % query + return self.GetFeed(uri, limit=limit) + + def GetContacts(self, user='default', limit=None): + """Retrieve a feed that contains a list of your contacts + + Arguments: + user: Username of the user whose contacts you want + + Returns + gdata.photos.UserFeed, with UserEntry entries + + See: + http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38 + """ + uri = '/data/feed/api/user/%s/contacts?kind=user' % user + return self.GetFeed(uri, limit=limit) + + def SearchContactsPhotos(self, user='default', search=None, limit=None): + """Search over your contacts' photos and return a feed + + Arguments: + user: Username of the user whose contacts you want + search (optional): What to search for (photo title, description and keywords) + + Returns + gdata.photos.UserFeed, with PhotoEntry elements + + See: + http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38 + """ + + uri = '/data/feed/api/user/%s/contacts?kind=photo&q=%s' % (user, search) + return self.GetFeed(uri, limit=limit) + + def InsertAlbum(self, title, summary, location=None, access='public', + commenting_enabled='true', timestamp=None): + """Add an album. + + Needs authentication, see self.ClientLogin() + + Arguments: + title: Album title + summary: Album summary / description + access (optional): `private' or `public'. Public albums are searchable + by everyone on the internet. Defaults to `public' + commenting_enabled (optional): `true' or `false'. Defaults to `true'. + timestamp (optional): A date and time for the album, in milliseconds since + Unix epoch[1] UTC. Defaults to now. 
+ + Returns: + The newly created gdata.photos.AlbumEntry + + See: + http://code.google.com/apis/picasaweb/gdata.html#Add_Album_Manual_Installed + + [1]: http://en.wikipedia.org/wiki/Unix_epoch + """ + album = gdata.photos.AlbumEntry() + album.title = atom.Title(text=title, title_type='text') + album.summary = atom.Summary(text=summary, summary_type='text') + if location is not None: + album.location = gdata.photos.Location(text=location) + album.access = gdata.photos.Access(text=access) + if commenting_enabled in ('true', 'false'): + album.commentingEnabled = gdata.photos.CommentingEnabled(text=commenting_enabled) + if timestamp is None: + timestamp = '%i' % int(time.time() * 1000) + album.timestamp = gdata.photos.Timestamp(text=timestamp) + try: + return self.Post(album, uri=self.userUri % self.email, + converter=gdata.photos.AlbumEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def InsertPhoto(self, album_or_uri, photo, filename_or_handle, + content_type='image/jpeg'): + """Add a PhotoEntry + + Needs authentication, see self.ClientLogin() + + Arguments: + album_or_uri: AlbumFeed or uri of the album where the photo should go + photo: PhotoEntry to add + filename_or_handle: A file-like object or file name where the image/video + will be read from + content_type (optional): Internet media type (a.k.a. mime type) of + media object. Currently Google Photos supports these types: + o image/bmp + o image/gif + o image/jpeg + o image/png + + Images will be converted to jpeg on upload. 
Defaults to `image/jpeg' + + """ + + try: + assert(isinstance(photo, gdata.photos.PhotoEntry)) + except AssertionError: + raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT, + 'body':'`photo` must be a gdata.photos.PhotoEntry instance', + 'reason':'Found %s, not PhotoEntry' % type(photo) + }) + try: + majtype, mintype = content_type.split('/') + assert(mintype in SUPPORTED_UPLOAD_TYPES) + except (ValueError, AssertionError): + raise GooglePhotosException({'status':GPHOTOS_INVALID_CONTENT_TYPE, + 'body':'This is not a valid content type: %s' % content_type, + 'reason':'Accepted content types: %s' % \ + ['image/'+t for t in SUPPORTED_UPLOAD_TYPES] + }) + if isinstance(filename_or_handle, (str, unicode)) and \ + os.path.exists(filename_or_handle): # it's a file name + mediasource = gdata.MediaSource() + mediasource.setFile(filename_or_handle, content_type) + elif hasattr(filename_or_handle, 'read'):# it's a file-like resource + if hasattr(filename_or_handle, 'seek'): + filename_or_handle.seek(0) # rewind pointer to the start of the file + # gdata.MediaSource needs the content length, so read the whole image + file_handle = StringIO.StringIO(filename_or_handle.read()) + name = 'image' + if hasattr(filename_or_handle, 'name'): + name = filename_or_handle.name + mediasource = gdata.MediaSource(file_handle, content_type, + content_length=file_handle.len, file_name=name) + else: #filename_or_handle is not valid + raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT, + 'body':'`filename_or_handle` must be a path name or a file-like object', + 'reason':'Found %s, not path name or object with a .read() method' % \ + type(filename_or_handle) + }) + + if isinstance(album_or_uri, (str, unicode)): # it's a uri + feed_uri = album_or_uri + elif hasattr(album_or_uri, 'GetFeedLink'): # it's a AlbumFeed object + feed_uri = album_or_uri.GetFeedLink().href + + try: + return self.Post(photo, uri=feed_uri, media_source=mediasource, + 
converter=gdata.photos.PhotoEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def InsertPhotoSimple(self, album_or_uri, title, summary, filename_or_handle, + content_type='image/jpeg', keywords=None): + """Add a photo without constructing a PhotoEntry. + + Needs authentication, see self.ClientLogin() + + Arguments: + album_or_uri: AlbumFeed or uri of the album where the photo should go + title: Photo title + summary: Photo summary / description + filename_or_handle: A file-like object or file name where the image/video + will be read from + content_type (optional): Internet media type (a.k.a. mime type) of + media object. Currently Google Photos supports these types: + o image/bmp + o image/gif + o image/jpeg + o image/png + + Images will be converted to jpeg on upload. Defaults to `image/jpeg' + keywords (optional): a 1) comma separated string or 2) a python list() of + keywords (a.k.a. tags) to add to the image. + E.g. 1) `dog, vacation, happy' 2) ['dog', 'happy', 'vacation'] + + Returns: + The newly created gdata.photos.PhotoEntry or GooglePhotosException on errors + + See: + http://code.google.com/apis/picasaweb/gdata.html#Add_Album_Manual_Installed + [1]: http://en.wikipedia.org/wiki/Unix_epoch + """ + + metadata = gdata.photos.PhotoEntry() + metadata.title=atom.Title(text=title) + metadata.summary = atom.Summary(text=summary, summary_type='text') + if keywords is not None: + if isinstance(keywords, list): + keywords = ','.join(keywords) + metadata.media.keywords = gdata.media.Keywords(text=keywords) + return self.InsertPhoto(album_or_uri, metadata, filename_or_handle, + content_type) + + def UpdatePhotoMetadata(self, photo): + """Update a photo's metadata. 
+ + Needs authentication, see self.ClientLogin() + + You can update any or all of the following metadata properties: + * + * <media:description> + * <gphoto:checksum> + * <gphoto:client> + * <gphoto:rotation> + * <gphoto:timestamp> + * <gphoto:commentingEnabled> + + Arguments: + photo: a gdata.photos.PhotoEntry object with updated elements + + Returns: + The modified gdata.photos.PhotoEntry + + Example: + p = GetFeed(uri).entry[0] + p.title.text = u'My new text' + p.commentingEnabled.text = 'false' + p = UpdatePhotoMetadata(p) + + It is important that you don't keep the old object around, once + it has been updated. See + http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency + """ + try: + return self.Put(data=photo, uri=photo.GetEditLink().href, + converter=gdata.photos.PhotoEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + + def UpdatePhotoBlob(self, photo_or_uri, filename_or_handle, + content_type = 'image/jpeg'): + """Update a photo's binary data. + + Needs authentication, see self.ClientLogin() + + Arguments: + photo_or_uri: a gdata.photos.PhotoEntry that will be updated, or a + `edit-media' uri pointing to it + filename_or_handle: A file-like object or file name where the image/video + will be read from + content_type (optional): Internet media type (a.k.a. mime type) of + media object. Currently Google Photos supports these types: + o image/bmp + o image/gif + o image/jpeg + o image/png + Images will be converted to jpeg on upload. Defaults to `image/jpeg' + + Returns: + The modified gdata.photos.PhotoEntry + + Example: + p = GetFeed(PhotoUri) + p = UpdatePhotoBlob(p, '/tmp/newPic.jpg') + + It is important that you don't keep the old object around, once + it has been updated. 
See + http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency + """ + + try: + majtype, mintype = content_type.split('/') + assert(mintype in SUPPORTED_UPLOAD_TYPES) + except (ValueError, AssertionError): + raise GooglePhotosException({'status':GPHOTOS_INVALID_CONTENT_TYPE, + 'body':'This is not a valid content type: %s' % content_type, + 'reason':'Accepted content types: %s' % \ + ['image/'+t for t in SUPPORTED_UPLOAD_TYPES] + }) + + if isinstance(filename_or_handle, (str, unicode)) and \ + os.path.exists(filename_or_handle): # it's a file name + photoblob = gdata.MediaSource() + photoblob.setFile(filename_or_handle, content_type) + elif hasattr(filename_or_handle, 'read'):# it's a file-like resource + if hasattr(filename_or_handle, 'seek'): + filename_or_handle.seek(0) # rewind pointer to the start of the file + # gdata.MediaSource needs the content length, so read the whole image + file_handle = StringIO.StringIO(filename_or_handle.read()) + name = 'image' + if hasattr(filename_or_handle, 'name'): + name = filename_or_handle.name + mediasource = gdata.MediaSource(file_handle, content_type, + content_length=file_handle.len, file_name=name) + else: #filename_or_handle is not valid + raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT, + 'body':'`filename_or_handle` must be a path name or a file-like object', + 'reason':'Found %s, not path name or an object with .read() method' % \ + type(filename_or_handle) + }) + + if isinstance(photo_or_uri, (str, unicode)): + entry_uri = photo_or_uri # it's a uri + elif hasattr(photo_or_uri, 'GetEditMediaLink'): + entry_uri = photo_or_uri.GetEditMediaLink().href + try: + return self.Put(photoblob, entry_uri, + converter=gdata.photos.PhotoEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def InsertTag(self, photo_or_uri, tag): + """Add a tag (a.k.a. keyword) to a photo. 
+ + Needs authentication, see self.ClientLogin() + + Arguments: + photo_or_uri: a gdata.photos.PhotoEntry that will be tagged, or a + `post' uri pointing to it + (string) tag: The tag/keyword + + Returns: + The new gdata.photos.TagEntry + + Example: + p = GetFeed(PhotoUri) + tag = InsertTag(p, 'Beautiful sunsets') + + """ + tag = gdata.photos.TagEntry(title=atom.Title(text=tag)) + if isinstance(photo_or_uri, (str, unicode)): + post_uri = photo_or_uri # it's a uri + elif hasattr(photo_or_uri, 'GetEditMediaLink'): + post_uri = photo_or_uri.GetPostLink().href + try: + return self.Post(data=tag, uri=post_uri, + converter=gdata.photos.TagEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + + def InsertComment(self, photo_or_uri, comment): + """Add a comment to a photo. + + Needs authentication, see self.ClientLogin() + + Arguments: + photo_or_uri: a gdata.photos.PhotoEntry that is about to be commented + , or a `post' uri pointing to it + (string) comment: The actual comment + + Returns: + The new gdata.photos.CommentEntry + + Example: + p = GetFeed(PhotoUri) + tag = InsertComment(p, 'OOOH! I would have loved to be there. + Who's that in the back?') + + """ + comment = gdata.photos.CommentEntry(content=atom.Content(text=comment)) + if isinstance(photo_or_uri, (str, unicode)): + post_uri = photo_or_uri # it's a uri + elif hasattr(photo_or_uri, 'GetEditMediaLink'): + post_uri = photo_or_uri.GetPostLink().href + try: + return self.Post(data=comment, uri=post_uri, + converter=gdata.photos.CommentEntryFromString) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + + def Delete(self, object_or_uri, *args, **kwargs): + """Delete an object. + + Re-implementing the GDataService.Delete method, to add some + convenience. + + Arguments: + object_or_uri: Any object that has a GetEditLink() method that + returns a link, or a uri to that object. + + Returns: + ? 
or GooglePhotosException on errors + """ + try: + uri = object_or_uri.GetEditLink().href + except AttributeError: + uri = object_or_uri + try: + return gdata.service.GDataService.Delete(self, uri, *args, **kwargs) + except gdata.service.RequestError, e: + raise GooglePhotosException(e.args[0]) + +def GetSmallestThumbnail(media_thumbnail_list): + """Helper function to get the smallest thumbnail of a list of + gdata.media.Thumbnail. + Returns gdata.media.Thumbnail """ + r = {} + for thumb in media_thumbnail_list: + r[int(thumb.width)*int(thumb.height)] = thumb + keys = r.keys() + keys.sort() + return r[keys[0]] + +def ConvertAtomTimestampToEpoch(timestamp): + """Helper function to convert a timestamp string, for instance + from atom:updated or atom:published, to milliseconds since Unix epoch + (a.k.a. POSIX time). + + `2007-07-22T00:45:10.000Z' -> """ + return time.mktime(time.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.000Z')) + ## TODO: Timezone aware diff --git a/gdata.py-1.2.3/src/gdata/service.py b/gdata.py-1.2.3/src/gdata/service.py new file mode 100755 index 0000000..60b18a2 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/service.py @@ -0,0 +1,1616 @@ +#!/usr/bin/python +# +# Copyright (C) 2006,2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""GDataService provides CRUD ops. and programmatic login for GData services. + + Error: A base exception class for all exceptions in the gdata_client + module. 
+ + CaptchaRequired: This exception is thrown when a login attempt results in a + captcha challenge from the ClientLogin service. When this + exception is thrown, the captcha_token and captcha_url are + set to the values provided in the server's response. + + BadAuthentication: Raised when a login attempt is made with an incorrect + username or password. + + NotAuthenticated: Raised if an operation requiring authentication is called + before a user has authenticated. + + NonAuthSubToken: Raised if a method to modify an AuthSub token is used when + the user is either not authenticated or is authenticated + through another authentication mechanism. + + NonOAuthToken: Raised if a method to modify an OAuth token is used when the + user is either not authenticated or is authenticated through + another authentication mechanism. + + RequestError: Raised if a CRUD request returned a non-success code. + + UnexpectedReturnType: Raised if the response from the server was not of the + desired type. For example, this would be raised if the + server sent a feed when the client requested an entry. + + GDataService: Encapsulates user credentials needed to perform insert, update + and delete operations with the GData API. An instance can + perform user authentication, query, insertion, deletion, and + update. + + Query: Eases query URI creation by allowing URI parameters to be set as + dictionary attributes. For example a query with a feed of + '/base/feeds/snippets' and ['bq'] set to 'digital camera' will + produce '/base/feeds/snippets?bq=digital+camera' when .ToUri() is + called on it. 
+""" + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + + +import re +import urllib +import urlparse +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import atom.service +import gdata +import atom +import atom.http_interface +import atom.token_store +import gdata.auth + + +AUTH_SERVER_HOST = 'https://www.google.com' + + +# When requesting an AuthSub token, it is often helpful to track the scope +# which is being requested. One way to accomplish this is to add a URL +# parameter to the 'next' URL which contains the requested scope. This +# constant is the default name (AKA key) for the URL parameter. +SCOPE_URL_PARAM_NAME = 'authsub_token_scope' +# When requesting an OAuth access token or authorization of an existing OAuth +# request token, it is often helpful to track the scope(s) which is/are being +# requested. One way to accomplish this is to add a URL parameter to the +# 'callback' URL which contains the requested scope. This constant is the +# default name (AKA key) for the URL parameter. +OAUTH_SCOPE_URL_PARAM_NAME = 'oauth_token_scope' +# Maps the service names used in ClientLogin to scope URLs. 
+CLIENT_LOGIN_SCOPES = { + 'cl': [ # Google Calendar + 'https://www.google.com/calendar/feeds/', + 'http://www.google.com/calendar/feeds/'], + 'gbase': [ # Google Base + 'http://base.google.com/base/feeds/', + 'http://www.google.com/base/feeds/'], + 'blogger': [ # Blogger + 'http://www.blogger.com/feeds/'], + 'codesearch': [ # Google Code Search + 'http://www.google.com/codesearch/feeds/'], + 'cp': [ # Contacts API + 'https://www.google.com/m8/feeds/', + 'http://www.google.com/m8/feeds/'], + 'finance': [ # Google Finance + 'http://finance.google.com/finance/feeds/'], + 'health': [ # Google Health + 'https://www.google.com/health/feeds/'], + 'writely': [ # Documents List API + 'https://docs.google.com/feeds/', + 'http://docs.google.com/feeds/'], + 'lh2': [ # Picasa Web Albums + 'http://picasaweb.google.com/data/'], + 'apps': [ # Google Apps Provisioning API + 'http://www.google.com/a/feeds/', + 'https://www.google.com/a/feeds/', + 'http://apps-apis.google.com/a/feeds/', + 'https://apps-apis.google.com/a/feeds/'], + 'weaver': [ # Health H9 Sandbox + 'https://www.google.com/h9/'], + 'wise': [ # Spreadsheets Data API + 'https://spreadsheets.google.com/feeds/', + 'http://spreadsheets.google.com/feeds/'], + 'sitemaps': [ # Google Webmaster Tools + 'https://www.google.com/webmasters/tools/feeds/'], + 'youtube': [ # YouTube + 'http://gdata.youtube.com/feeds/api/', + 'http://uploads.gdata.youtube.com/feeds/api', + 'http://gdata.youtube.com/action/GetUploadToken']} + + +def lookup_scopes(service_name): + """Finds the scope URLs for the desired service. + + In some cases, an unknown service may be used, and in those cases this + function will return None. + """ + if service_name in CLIENT_LOGIN_SCOPES: + return CLIENT_LOGIN_SCOPES[service_name] + return None + + +# Module level variable specifies which module should be used by GDataService +# objects to make HttpRequests. This setting can be overridden on each +# instance of GDataService. 
+# This module level variable is deprecated. Reassign the http_client member +# of a GDataService object instead. +http_request_handler = atom.service + + +class Error(Exception): + pass + + +class CaptchaRequired(Error): + pass + + +class BadAuthentication(Error): + pass + + +class NotAuthenticated(Error): + pass + + +class NonAuthSubToken(Error): + pass + + +class NonOAuthToken(Error): + pass + + +class RequestError(Error): + pass + + +class UnexpectedReturnType(Error): + pass + + +class BadAuthenticationServiceURL(Error): + pass + + +class FetchingOAuthRequestTokenFailed(RequestError): + pass + + +class TokenUpgradeFailed(RequestError): + pass + + +class RevokingOAuthTokenFailed(RequestError): + pass + + +class AuthorizationRequired(Error): + pass + + +class TokenHadNoScope(Error): + pass + + +class GDataService(atom.service.AtomService): + """Contains elements needed for GData login and CRUD request headers. + + Maintains additional headers (tokens for example) needed for the GData + services to allow a user to perform inserts, updates, and deletes. + """ + # The hander member is deprecated, use http_client instead. + handler = None + # The auth_token member is deprecated, use the token_store instead. + auth_token = None + # The tokens dict is deprecated in favor of the token_store. + tokens = None + + def __init__(self, email=None, password=None, account_type='HOSTED_OR_GOOGLE', + service=None, auth_service_url=None, source=None, server=None, + additional_headers=None, handler=None, tokens=None, + http_client=None, token_store=None): + """Creates an object of type GDataService. + + Args: + email: string (optional) The user's email address, used for + authentication. + password: string (optional) The user's password. + account_type: string (optional) The type of account to use. 
Use + 'GOOGLE' for regular Google accounts or 'HOSTED' for Google + Apps accounts, or 'HOSTED_OR_GOOGLE' to try finding a HOSTED + account first and, if it doesn't exist, try finding a regular + GOOGLE account. Default value: 'HOSTED_OR_GOOGLE'. + service: string (optional) The desired service for which credentials + will be obtained. + auth_service_url: string (optional) User-defined auth token request URL + allows users to explicitly specify where to send auth token requests. + source: string (optional) The name of the user's application. + server: string (optional) The name of the server to which a connection + will be opened. Default value: 'base.google.com'. + additional_headers: dictionary (optional) Any additional headers which + should be included with CRUD operations. + handler: module (optional) This parameter is deprecated and has been + replaced by http_client. + tokens: This parameter is deprecated, calls should be made to + token_store instead. + http_client: An object responsible for making HTTP requests using a + request method. If none is provided, a new instance of + atom.http.ProxiedHttpClient will be used. + token_store: Keeps a collection of authorization tokens which can be + applied to requests for a specific URLs. Critical methods are + find_token based on a URL (atom.url.Url or a string), add_token, + and remove_token. 
+ """ + atom.service.AtomService.__init__(self, http_client=http_client, + token_store=token_store) + self.email = email + self.password = password + self.account_type = account_type + self.service = service + self.auth_service_url = auth_service_url + self.server = server + self.additional_headers = additional_headers or {} + self._oauth_input_params = None + self.__SetSource(source) + self.__captcha_token = None + self.__captcha_url = None + self.__gsessionid = None + + if http_request_handler.__name__ == 'gdata.urlfetch': + import gdata.alt.appengine + self.http_client = gdata.alt.appengine.AppEngineHttpClient() + + # Define properties for GDataService + def _SetAuthSubToken(self, auth_token, scopes=None): + """Deprecated, use SetAuthSubToken instead.""" + self.SetAuthSubToken(auth_token, scopes=scopes) + + def __SetAuthSubToken(self, auth_token, scopes=None): + """Deprecated, use SetAuthSubToken instead.""" + self._SetAuthSubToken(auth_token, scopes=scopes) + + def _GetAuthToken(self): + """Returns the auth token used for authenticating requests. + + Returns: + string + """ + current_scopes = lookup_scopes(self.service) + if current_scopes: + token = self.token_store.find_token(current_scopes[0]) + if hasattr(token, 'auth_header'): + return token.auth_header + return None + + def _GetCaptchaToken(self): + """Returns a captcha token if the most recent login attempt generated one. + + The captcha token is only set if the Programmatic Login attempt failed + because the Google service issued a captcha challenge. + + Returns: + string + """ + return self.__captcha_token + + def __GetCaptchaToken(self): + return self._GetCaptchaToken() + + captcha_token = property(__GetCaptchaToken, + doc="""Get the captcha token for a login request.""") + + def _GetCaptchaURL(self): + """Returns the URL of the captcha image if a login attempt generated one. 
  def __GetCaptchaURL(self):
    # Private wrapper referenced by the read-only property below.
    return self._GetCaptchaURL()

  captcha_url = property(__GetCaptchaURL,
      doc="""Get the captcha URL for a login request.""")

  def SetOAuthInputParameters(self, signature_method, consumer_key,
                              consumer_secret=None, rsa_key=None,
                              two_legged_oauth=False):
    """Sets parameters required for using OAuth authentication mechanism.

    NOTE: Though consumer_secret and rsa_key are optional, either of the two
    is required depending on the value of the signature_method.

    Args:
      signature_method: class which provides implementation for strategy class
          oauth.oauth.OAuthSignatureMethod. Signature method to be used for
          signing each request. Valid implementations are provided as the
          constants defined by gdata.auth.OAuthSignatureMethod. Currently
          they are gdata.auth.OAuthSignatureMethod.RSA_SHA1 and
          gdata.auth.OAuthSignatureMethod.HMAC_SHA1
      consumer_key: string Domain identifying third_party web application.
      consumer_secret: string (optional) Secret generated during registration.
          Required only for HMAC_SHA1 signature method.
      rsa_key: string (optional) Private key required for RSA_SHA1 signature
          method.
      two_legged_oauth: string (default=False) Enables two-legged OAuth process.
    """
    self._oauth_input_params = gdata.auth.OAuthInputParams(
        signature_method, consumer_key, consumer_secret=consumer_secret,
        rsa_key=rsa_key)
    if two_legged_oauth:
      # Two-legged OAuth has no user-authorization step, so a token built
      # purely from the input params can be installed immediately.
      oauth_token = gdata.auth.OAuthToken(
          oauth_input_params=self._oauth_input_params)
      self.SetOAuthToken(oauth_token)

  def FetchOAuthRequestToken(self, scopes=None, extra_parameters=None):
    """Fetches OAuth request token and returns it.

    Args:
      scopes: string or list of string base URL(s) of the service(s) to be
          accessed. If None, then this method tries to determine the
          scope(s) from the current service.
      extra_parameters: dict (optional) key-value pairs as any additional
          parameters to be included in the URL and signature while making a
          request for fetching an OAuth request token. All the OAuth parameters
          are added by default. But if provided through this argument, any
          default parameters will be overwritten. For e.g. a default parameter
          oauth_version 1.0 can be overwritten if
          extra_parameters = {'oauth_version': '2.0'}

    Returns:
      The fetched request token as a gdata.auth.OAuthToken object.

    Raises:
      FetchingOAuthRequestTokenFailed if the server responded to the request
      with an error.
    """
    if scopes is None:
      scopes = lookup_scopes(self.service)
    # Normalize a single scope string into a list.
    if not isinstance(scopes, (list, tuple)):
      scopes = [scopes,]
    request_token_url = gdata.auth.GenerateOAuthRequestTokenUrl(
        self._oauth_input_params, scopes,
        request_token_url='%s/accounts/OAuthGetRequestToken' % AUTH_SERVER_HOST,
        extra_parameters=extra_parameters)
    response = self.http_client.request('GET', str(request_token_url))
    if response.status == 200:
      token = gdata.auth.OAuthToken()
      token.set_token_string(response.read())
      token.scopes = scopes
      token.oauth_input_params = self._oauth_input_params
      return token
    # NOTE(review): the reason string says 'upgrade' but this is the
    # request-token fetch path — message appears copied from the upgrade
    # handler; verify before relying on it for diagnostics.
    error = {
        'status': response.status,
        'reason': 'Non 200 response on upgrade',
        'body': response.read()
        }
    raise FetchingOAuthRequestTokenFailed(error)

  def SetOAuthToken(self, oauth_token):
    """Attempts to set the current token and add it to the token store.

    The oauth_token can be any OAuth token i.e. unauthorized request token,
    authorized request token or access token.
    This method also attempts to add the token to the token store.
    Use this method any time you want the current token to point to the
    oauth_token passed. For e.g. call this method with the request token
    you receive from FetchOAuthRequestToken.

    Args:
      oauth_token: gdata.auth.OAuthToken OAuth token to install.
    """
    # Both flags default to True on atom.service.AtomService; either side
    # of the bookkeeping can be disabled independently by the caller.
    if self.auto_set_current_token:
      self.current_token = oauth_token
    if self.auto_store_tokens:
      self.token_store.add_token(oauth_token)
+ """ + if request_token and not isinstance(request_token, gdata.auth.OAuthToken): + raise NonOAuthToken + if not request_token: + if isinstance(self.current_token, gdata.auth.OAuthToken): + request_token = self.current_token + else: + current_scopes = lookup_scopes(self.service) + if current_scopes: + token = self.token_store.find_token(current_scopes[0]) + if isinstance(token, gdata.auth.OAuthToken): + request_token = token + if not request_token: + raise NonOAuthToken + return str(gdata.auth.GenerateOAuthAuthorizationUrl( + request_token, + authorization_url='%s/accounts/OAuthAuthorizeToken' % AUTH_SERVER_HOST, + callback_url=callback_url, extra_params=extra_params, + include_scopes_in_callback=include_scopes_in_callback, + scopes_param_prefix=scopes_param_prefix)) + + def UpgradeToOAuthAccessToken(self, authorized_request_token=None, + oauth_version='1.0'): + """Upgrades the authorized request token to an access token. + + Args: + authorized_request_token: gdata.auth.OAuthToken (optional) OAuth request + token. If not specified, then the current token will be used if it is + of type <gdata.auth.OAuthToken>, else it is found by looking in the + token_store by looking for a token for the current scope. + oauth_version: str (default='1.0') oauth_version parameter. All other + 'oauth_' parameters are added by default. This parameter too, is + added by default but here you can override it's value. + + Raises: + NonOAuthToken if the user's authorized request token is not an OAuth + token or if an authorized request token was not available. + TokenUpgradeFailed if the server responded to the request with an + error. 
+ """ + if (authorized_request_token and + not isinstance(authorized_request_token, gdata.auth.OAuthToken)): + raise NonOAuthToken + if not authorized_request_token: + if isinstance(self.current_token, gdata.auth.OAuthToken): + authorized_request_token = self.current_token + else: + current_scopes = lookup_scopes(self.service) + if current_scopes: + token = self.token_store.find_token(current_scopes[0]) + if isinstance(token, gdata.auth.OAuthToken): + authorized_request_token = token + if not authorized_request_token: + raise NonOAuthToken + access_token_url = gdata.auth.GenerateOAuthAccessTokenUrl( + authorized_request_token, + self._oauth_input_params, + access_token_url='%s/accounts/OAuthGetAccessToken' % AUTH_SERVER_HOST, + oauth_version=oauth_version) + response = self.http_client.request('GET', str(access_token_url)) + if response.status == 200: + token = gdata.auth.OAuthTokenFromHttpBody(response.read()) + token.scopes = authorized_request_token.scopes + token.oauth_input_params = authorized_request_token.oauth_input_params + self.SetOAuthToken(token) + else: + raise TokenUpgradeFailed({'status': response.status, + 'reason': 'Non 200 response on upgrade', + 'body': response.read()}) + + def RevokeOAuthToken(self): + """Revokes an existing OAuth token. + + Raises: + NonOAuthToken if the user's auth token is not an OAuth token. + RevokingOAuthTokenFailed if request for revoking an OAuth token failed. + """ + scopes = lookup_scopes(self.service) + token = self.token_store.find_token(scopes[0]) + if not isinstance(token, gdata.auth.OAuthToken): + raise NonOAuthToken + + response = token.perform_request(self.http_client, 'GET', + AUTH_SERVER_HOST + '/accounts/AuthSubRevokeToken', + headers={'Content-Type':'application/x-www-form-urlencoded'}) + if response.status == 200: + self.token_store.remove_token(token) + else: + raise RevokingOAuthTokenFailed + + def GetAuthSubToken(self): + """Returns the AuthSub token as a string. 
  def SetAuthSubToken(self, token, scopes=None, rsa_key=None):
    """Sets the token sent in requests to an AuthSub token.

    Sets the current_token and attempts to add the token to the token_store.

    Only use this method if you have received a token from the AuthSub
    service. The auth token is set automatically when UpgradeToSessionToken()
    is used. See documentation for Google AuthSub here:
    http://code.google.com/apis/accounts/AuthForWebApps.html

    Args:
      token: gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken or string
          The token returned by the AuthSub service. If the token is an
          AuthSubToken or SecureAuthSubToken, the scope information stored in
          the token is used. If the token is a string, the scopes parameter is
          used to determine the valid scopes.
      scopes: list of URLs for which the token is valid. This is only used
          if the token parameter is a string.
      rsa_key: string (optional) Private key required for RSA_SHA1 signature
          method. This parameter is necessary if the token is a string
          representing a secure token.
    """
    # Promote a bare token string to the appropriate token object; a
    # supplied rsa_key signals a secure (signed-request) token.
    if not isinstance(token, gdata.auth.AuthSubToken):
      token_string = token
      if rsa_key:
        token = gdata.auth.SecureAuthSubToken(rsa_key)
      else:
        token = gdata.auth.AuthSubToken()

      token.set_token_string(token_string)

    # If no scopes were set for the token, use the scopes passed in, or
    # try to determine the scopes based on the current service name. If
    # all else fails, set the token to match all requests.
    if not token.scopes:
      if scopes is None:
        scopes = lookup_scopes(self.service)
        if scopes is None:
          scopes = [atom.token_store.SCOPE_ALL]
      token.scopes = scopes
    if self.auto_set_current_token:
      self.current_token = token
    if self.auto_store_tokens:
      self.token_store.add_token(token)
+ """ + if isinstance(self.current_token, gdata.auth.ClientLoginToken): + return self.current_token.get_token_string() + current_scopes = lookup_scopes(self.service) + if current_scopes: + token = self.token_store.find_token(current_scopes[0]) + if isinstance(token, gdata.auth.ClientLoginToken): + return token.get_token_string() + else: + token = self.token_store.find_token(atom.token_store.SCOPE_ALL) + if isinstance(token, gdata.auth.ClientLoginToken): + return token.get_token_string() + return None + + def SetClientLoginToken(self, token, scopes=None): + """Sets the token sent in requests to a ClientLogin token. + + This method sets the current_token to a new ClientLoginToken and it + also attempts to add the ClientLoginToken to the token_store. + + Only use this method if you have received a token from the ClientLogin + service. The auth_token is set automatically when ProgrammaticLogin() + is used. See documentation for Google ClientLogin here: + http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html + + Args: + token: string or instance of a ClientLoginToken. + """ + if not isinstance(token, gdata.auth.ClientLoginToken): + token_string = token + token = gdata.auth.ClientLoginToken() + token.set_token_string(token_string) + + if not token.scopes: + if scopes is None: + scopes = lookup_scopes(self.service) + if scopes is None: + scopes = [atom.token_store.SCOPE_ALL] + token.scopes = scopes + if self.auto_set_current_token: + self.current_token = token + if self.auto_store_tokens: + self.token_store.add_token(token) + + # Private methods to create the source property. + def __GetSource(self): + return self.__source + + def __SetSource(self, new_source): + self.__source = new_source + # Update the UserAgent header to include the new application name. 
  source = property(__GetSource, __SetSource,
      doc="""The source is the name of the application making the request.
      It should be in the form company_id-app_name-app_version""")

  # Authentication operations

  def ProgrammaticLogin(self, captcha_token=None, captcha_response=None):
    """Authenticates the user and sets the GData Auth token.

    Login retrieves a temporary auth token which must be used with all
    requests to GData services. The auth token is stored in the GData client
    object.

    Login is also used to respond to a captcha challenge. If the user's login
    attempt failed with a CaptchaRequired error, the user can respond by
    calling Login with the captcha token and the answer to the challenge.

    Args:
      captcha_token: string (optional) The identifier for the captcha challenge
          which was presented to the user.
      captcha_response: string (optional) The user's answer to the captcha
          challenge.

    Raises:
      CaptchaRequired if the login service will require a captcha response
      BadAuthentication if the login service rejected the username or password
      Error if the login service responded with a 403 different from the above
      BadAuthenticationServiceURL if the login service responded with a 302
    """
    request_body = gdata.auth.generate_client_login_request_body(self.email,
        self.password, self.service, self.source, self.account_type,
        captcha_token, captcha_response)

    # If the user has defined their own authentication service URL,
    # send the ClientLogin requests to this URL:
    if not self.auth_service_url:
      auth_request_url = AUTH_SERVER_HOST + '/accounts/ClientLogin'
    else:
      auth_request_url = self.auth_service_url

    auth_response = self.http_client.request('POST', auth_request_url,
        data=request_body,
        headers={'Content-Type':'application/x-www-form-urlencoded'})
    response_body = auth_response.read()

    if auth_response.status == 200:
      # TODO: insert the token into the token_store directly.
      self.SetClientLoginToken(
          gdata.auth.get_client_login_token(response_body))
      self.__captcha_token = None
      self.__captcha_url = None

    elif auth_response.status == 403:
      # Examine each line to find the error type and the captcha token and
      # captcha URL if they are present.
      captcha_parameters = gdata.auth.get_captcha_challenge(response_body,
          captcha_base_url='%s/accounts/' % AUTH_SERVER_HOST)
      if captcha_parameters:
        self.__captcha_token = captcha_parameters['token']
        self.__captcha_url = captcha_parameters['url']
        raise CaptchaRequired, 'Captcha Required'
      # NOTE(review): splitlines()[0] raises IndexError on an empty 403
      # body — confirm the service always returns a body here.
      elif response_body.splitlines()[0] == 'Error=BadAuthentication':
        self.__captcha_token = None
        self.__captcha_url = None
        raise BadAuthentication, 'Incorrect username or password'
      else:
        self.__captcha_token = None
        self.__captcha_url = None
        raise Error, 'Server responded with a 403 code'
    elif auth_response.status == 302:
      self.__captcha_token = None
      self.__captcha_url = None
      # Google tries to redirect all bad URLs back to
      # http://www.google.<locale>. If a redirect
      # attempt is made, assume the user has supplied an incorrect
      # authentication URL
      raise BadAuthenticationServiceURL, 'Server responded with a 302 code.'
  def GenerateAuthSubURL(self, next, scope, secure=False, session=True,
      domain='default'):
    """Generate a URL at which the user will login and be redirected back.

    Users enter their credentials on a Google login page and a token is sent
    to the URL specified in next. See documentation for AuthSub login at:
    http://code.google.com/apis/accounts/docs/AuthSub.html

    Args:
      next: string The URL user will be sent to after logging in.
      scope: string or list of strings. The URLs of the services to be
          accessed.
      secure: boolean (optional) Determines whether or not the issued token
          is a secure token.
      session: boolean (optional) Determines whether or not the issued token
          can be upgraded to a session token.
      domain: string (optional) The Google Apps domain, or 'default' for a
          regular Google account.
    """
    # Normalize a single scope string into a tuple.
    if not isinstance(scope, (list, tuple)):
      scope = (scope,)
    return gdata.auth.generate_auth_sub_url(next, scope, secure=secure,
        session=session,
        request_url='%s/accounts/AuthSubRequest' % AUTH_SERVER_HOST,
        domain=domain)

  def UpgradeToSessionToken(self, token=None):
    """Upgrades a single use AuthSub token to a session token.

    Args:
      token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
          (optional) which is good for a single use but can be upgraded
          to a session token. If no token is passed in, the token
          is found by looking in the token_store by looking for a token
          for the current scope.

    Raises:
      NonAuthSubToken if the user's auth token is not an AuthSub token
      TokenUpgradeFailed if the server responded to the request with an
      error.
    """
    if token is None:
      # Locate a stored token: prefer one for the service's canonical
      # scope, otherwise fall back to the match-everything scope.
      scopes = lookup_scopes(self.service)
      if scopes:
        token = self.token_store.find_token(scopes[0])
      else:
        token = self.token_store.find_token(atom.token_store.SCOPE_ALL)
    if not isinstance(token, gdata.auth.AuthSubToken):
      raise NonAuthSubToken

    self.SetAuthSubToken(self.upgrade_to_session_token(token))

  def upgrade_to_session_token(self, token):
    """Upgrades a single use AuthSub token to a session token.

    Args:
      token: A gdata.auth.AuthSubToken or gdata.auth.SecureAuthSubToken
          which is good for a single use but can be upgraded to a
          session token.

    Returns:
      The upgraded token as a gdata.auth.AuthSubToken object.

    Raises:
      TokenUpgradeFailed if the server responded to the request with an
      error.
    """
    response = token.perform_request(self.http_client, 'GET',
        AUTH_SERVER_HOST + '/accounts/AuthSubSessionToken',
        headers={'Content-Type':'application/x-www-form-urlencoded'})
    response_body = response.read()
    if response.status == 200:
      # The same token object is mutated in place with the session token
      # string and returned.
      token.set_token_string(
          gdata.auth.token_from_http_body(response_body))
      return token
    else:
      raise TokenUpgradeFailed({'status': response.status,
                                'reason': 'Non 200 response on upgrade',
                                'body': response_body})
  def AuthSubTokenInfo(self):
    """Fetches the AuthSub token's metadata from the server.

    Returns:
      string The raw token-info response body on a 200 response.

    Raises:
      NonAuthSubToken if the user's auth token is not an AuthSub token
      RequestError if the server responds with a non-200 status
    """
    # NOTE(review): like RevokeOAuthToken, scopes[0] will raise TypeError
    # when lookup_scopes returns None for an unknown service — confirm.
    scopes = lookup_scopes(self.service)
    token = self.token_store.find_token(scopes[0])
    if not isinstance(token, gdata.auth.AuthSubToken):
      raise NonAuthSubToken

    response = token.perform_request(self.http_client, 'GET',
        AUTH_SERVER_HOST + '/accounts/AuthSubTokenInfo',
        headers={'Content-Type':'application/x-www-form-urlencoded'})
    result_body = response.read()
    if response.status == 200:
      return result_body
    else:
      raise RequestError, {'status': response.status,
          'body': result_body}

  # CRUD operations
  def Get(self, uri, extra_headers=None, redirects_remaining=4,
      encoding='UTF-8', converter=None):
    """Query the GData API with the given URI

    The uri is the portion of the URI after the server value
    (ex: www.google.com).

    To perform a query against Google Base, set the server to
    'base.google.com' and set the uri to '/base/feeds/...', where ... is
    your query. For example, to find snippets for all digital cameras uri
    should be set to: '/base/feeds/snippets?bq=digital+camera'

    Args:
      uri: string The query in the form of a URI. Example:
          '/base/feeds/snippets?bq=digital+camera'.
      extra_headers: dictionary (optional) Extra HTTP headers to be included
          in the GET request. These headers are in addition to
          those stored in the client's additional_headers property.
          The client automatically sets the Content-Type and
          Authorization headers.
      redirects_remaining: int (optional) Tracks the number of additional
          redirects this method will allow. If the service object receives
          a redirect and remaining is 0, it will not follow the redirect.
          This was added to avoid infinite redirect loops.
      encoding: string (optional) The character encoding for the server's
          response. Default is UTF-8
      converter: func (optional) A function which will transform
          the server's results before it is returned. Example: use
          GDataFeedFromString to parse the server response as if it
          were a GDataFeed.

    Returns:
      If there is no ResultsTransformer specified in the call, a GDataFeed
      or GDataEntry depending on which is sent from the server. If the
      response is neither a feed or entry and there is no ResultsTransformer,
      return a string. If there is a ResultsTransformer, the returned value
      will be that of the ResultsTransformer function.

    Raises:
      RequestError on a non-200, non-302 response, on a 302 without a
      Location header, or when the redirect budget is exhausted.
    """

    if extra_headers is None:
      extra_headers = {}

    # Carry a previously captured gsessionid into the URI so the server
    # does not redirect this request again.
    if self.__gsessionid is not None:
      if uri.find('gsessionid=') < 0:
        if uri.find('?') > -1:
          uri += '&gsessionid=%s' % (self.__gsessionid,)
        else:
          uri += '?gsessionid=%s' % (self.__gsessionid,)

    server_response = self.request('GET', uri,
        headers=extra_headers)
    result_body = server_response.read()

    if server_response.status == 200:
      if converter:
        return converter(result_body)
      # There was no ResultsTransformer specified, so try to convert the
      # server's response into a GDataFeed.
      feed = gdata.GDataFeedFromString(result_body)
      if not feed:
        # If conversion to a GDataFeed failed, try to convert the server's
        # response to a GDataEntry.
        entry = gdata.GDataEntryFromString(result_body)
        if not entry:
          # The server's response wasn't a feed, or an entry, so return the
          # response body as a string.
          return result_body
        return entry
      return feed
    elif server_response.status == 302:
      if redirects_remaining > 0:
        location = server_response.getheader('Location')
        if location is not None:
          # Remember the session id embedded in the redirect target, then
          # recurse with a decremented redirect budget.
          m = re.compile('[\?\&]gsessionid=(\w*)').search(location)
          if m is not None:
            self.__gsessionid = m.group(1)
          return GDataService.Get(self, location, extra_headers, redirects_remaining - 1,
              encoding=encoding, converter=converter)
        else:
          raise RequestError, {'status': server_response.status,
              'reason': '302 received without Location header',
              'body': result_body}
      else:
        raise RequestError, {'status': server_response.status,
            'reason': 'Redirect received, but redirects_remaining <= 0',
            'body': result_body}
    else:
      raise RequestError, {'status': server_response.status,
          'reason': server_response.reason, 'body': result_body}

  def GetMedia(self, uri, extra_headers=None):
    """Returns a MediaSource containing media and its metadata from the given
    URI string.
    """
    # The response handle is passed through unread so callers can stream
    # the media body themselves.
    response_handle = self.request('GET', uri,
        headers=extra_headers)
    return gdata.MediaSource(response_handle, response_handle.getheader(
            'Content-Type'),
        response_handle.getheader('Content-Length'))
+ """ + + result = GDataService.Get(self, uri, extra_headers, + converter=atom.EntryFromString) + if isinstance(result, atom.Entry): + return result + else: + raise UnexpectedReturnType, 'Server did not send an entry' + + def GetFeed(self, uri, extra_headers=None, + converter=gdata.GDataFeedFromString): + """Query the GData API with the given URI and receive a Feed. + + See also documentation for gdata.service.Get + + Args: + uri: string The query in the form of a URI. Example: + '/base/feeds/snippets?bq=digital+camera'. + extra_headers: dictionary (optional) Extra HTTP headers to be included + in the GET request. These headers are in addition to + those stored in the client's additional_headers property. + The client automatically sets the Content-Type and + Authorization headers. + + Returns: + A GDataFeed built from the XML in the server's response. + """ + + result = GDataService.Get(self, uri, extra_headers, converter=converter) + if isinstance(result, atom.Feed): + return result + else: + raise UnexpectedReturnType, 'Server did not send a feed' + + def GetNext(self, feed): + """Requests the next 'page' of results in the feed. + + This method uses the feed's next link to request an additional feed + and uses the class of the feed to convert the results of the GET request. + + Args: + feed: atom.Feed or a subclass. The feed should contain a next link and + the type of the feed will be applied to the results from the + server. The new feed which is returned will be of the same class + as this feed which was passed in. + + Returns: + A new feed representing the next set of results in the server's feed. + The type of this feed will match that of the feed argument. + """ + next_link = feed.GetNextLink() + # Create a closure which will convert an XML string to the class of + # the feed object passed in. 
+ def ConvertToFeedClass(xml_string): + return atom.CreateClassFromXMLString(feed.__class__, xml_string) + # Make a GET request on the next link and use the above closure for the + # converted which processes the XML string from the server. + if next_link and next_link.href: + return GDataService.Get(self, next_link.href, + converter=ConvertToFeedClass) + else: + return None + + def Post(self, data, uri, extra_headers=None, url_params=None, + escape_params=True, redirects_remaining=4, media_source=None, + converter=None): + """Insert or update data into a GData service at the given URI. + + Args: + data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The + XML to be sent to the uri. + uri: string The location (feed) to which the data should be inserted. + Example: '/base/feeds/items'. + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type, + Authorization, and Content-Length headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + media_source: MediaSource (optional) Container for the media to be sent + along with the entry, if provided. + converter: func (optional) A function which will be executed on the + server's response. Often this is a function like + GDataEntryFromString which will parse the body of the server's + response and return a GDataEntry. + + Returns: + If the post succeeded, this method will return a GDataFeed, GDataEntry, + or the results of running converter on the server's result body (if + converter was specified). 
+ """ + return GDataService.PostOrPut(self, 'POST', data, uri, + extra_headers=extra_headers, url_params=url_params, + escape_params=escape_params, redirects_remaining=redirects_remaining, + media_source=media_source, converter=converter) + + def PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None, + escape_params=True, redirects_remaining=4, media_source=None, + converter=None): + """Insert data into a GData service at the given URI. + + Args: + verb: string, either 'POST' or 'PUT' + data: string, ElementTree._Element, atom.Entry, or gdata.GDataEntry The + XML to be sent to the uri. + uri: string The location (feed) to which the data should be inserted. + Example: '/base/feeds/items'. + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type, + Authorization, and Content-Length headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + media_source: MediaSource (optional) Container for the media to be sent + along with the entry, if provided. + converter: func (optional) A function which will be executed on the + server's response. Often this is a function like + GDataEntryFromString which will parse the body of the server's + response and return a GDataEntry. + + Returns: + If the post succeeded, this method will return a GDataFeed, GDataEntry, + or the results of running converter on the server's result body (if + converter was specified). 
+ """ + if extra_headers is None: + extra_headers = {} + + if self.__gsessionid is not None: + if uri.find('gsessionid=') < 0: + if uri.find('?') > -1: + uri += '&gsessionid=%s' % (self.__gsessionid,) + else: + uri += '?gsessionid=%s' % (self.__gsessionid,) + + if data and media_source: + if ElementTree.iselement(data): + data_str = ElementTree.tostring(data) + else: + data_str = str(data) + + multipart = [] + multipart.append('Media multipart posting\r\n--END_OF_PART\r\n' + \ + 'Content-Type: application/atom+xml\r\n\r\n') + multipart.append('\r\n--END_OF_PART\r\nContent-Type: ' + \ + media_source.content_type+'\r\n\r\n') + multipart.append('\r\n--END_OF_PART--\r\n') + + extra_headers['MIME-version'] = '1.0' + extra_headers['Content-Length'] = str(len(multipart[0]) + + len(multipart[1]) + len(multipart[2]) + + len(data_str) + media_source.content_length) + + extra_headers['Content-Type'] = 'multipart/related; boundary=END_OF_PART' + server_response = self.request(verb, uri, + data=[multipart[0], data_str, multipart[1], media_source.file_handle, + multipart[2]], headers=extra_headers) + result_body = server_response.read() + + elif media_source or isinstance(data, gdata.MediaSource): + if isinstance(data, gdata.MediaSource): + media_source = data + extra_headers['Content-Length'] = str(media_source.content_length) + extra_headers['Content-Type'] = media_source.content_type + server_response = self.request(verb, uri, + data=media_source.file_handle, headers=extra_headers) + result_body = server_response.read() + + else: + http_data = data + content_type = 'application/atom+xml' + extra_headers['Content-Type'] = content_type + server_response = self.request(verb, uri, data=http_data, + headers=extra_headers) + result_body = server_response.read() + + # Server returns 201 for most post requests, but when performing a batch + # request the server responds with a 200 on success. 
+ if server_response.status == 201 or server_response.status == 200: + if converter: + return converter(result_body) + feed = gdata.GDataFeedFromString(result_body) + if not feed: + entry = gdata.GDataEntryFromString(result_body) + if not entry: + return result_body + return entry + return feed + elif server_response.status == 302: + if redirects_remaining > 0: + location = server_response.getheader('Location') + if location is not None: + m = re.compile('[\?\&]gsessionid=(\w*)').search(location) + if m is not None: + self.__gsessionid = m.group(1) + return GDataService.PostOrPut(self, verb, data, location, + extra_headers, url_params, escape_params, + redirects_remaining - 1, media_source, converter=converter) + else: + raise RequestError, {'status': server_response.status, + 'reason': '302 received without Location header', + 'body': result_body} + else: + raise RequestError, {'status': server_response.status, + 'reason': 'Redirect received, but redirects_remaining <= 0', + 'body': result_body} + else: + raise RequestError, {'status': server_response.status, + 'reason': server_response.reason, 'body': result_body} + + def Put(self, data, uri, extra_headers=None, url_params=None, + escape_params=True, redirects_remaining=3, media_source=None, + converter=None): + """Updates an entry at the given URI. + + Args: + data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The + XML containing the updated data. + uri: string A URI indicating entry to which the update will be applied. + Example: '/base/feeds/items/ITEM-ID' + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type, + Authorization, and Content-Length headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. 
+ Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + converter: func (optional) A function which will be executed on the + server's response. Often this is a function like + GDataEntryFromString which will parse the body of the server's + response and return a GDataEntry. + + Returns: + If the put succeeded, this method will return a GDataFeed, GDataEntry, + or the results of running converter on the server's result body (if + converter was specified). + """ + return GDataService.PostOrPut(self, 'PUT', data, uri, + extra_headers=extra_headers, url_params=url_params, + escape_params=escape_params, redirects_remaining=redirects_remaining, + media_source=media_source, converter=converter) + + def Delete(self, uri, extra_headers=None, url_params=None, + escape_params=True, redirects_remaining=4): + """Deletes the entry at the given URI. + + Args: + uri: string The URI of the entry to be deleted. Example: + '/base/feeds/items/ITEM-ID' + extra_headers: dict (optional) HTTP headers which are to be included. + The client automatically sets the Content-Type and + Authorization headers. + url_params: dict (optional) Additional URL parameters to be included + in the URI. These are translated into query arguments + in the form '&dict_key=value&...'. + Example: {'max-results': '250'} becomes &max-results=250 + escape_params: boolean (optional) If false, the calling code has already + ensured that the query will form a valid URL (all + reserved characters have been escaped). If true, this + method will escape the query and any URL parameters + provided. + + Returns: + True if the entry was deleted. 
+ """ + if extra_headers is None: + extra_headers = {} + + if self.__gsessionid is not None: + if uri.find('gsessionid=') < 0: + if uri.find('?') > -1: + uri += '&gsessionid=%s' % (self.__gsessionid,) + else: + uri += '?gsessionid=%s' % (self.__gsessionid,) + + server_response = self.request('DELETE', uri, + headers=extra_headers) + result_body = server_response.read() + + if server_response.status == 200: + return True + elif server_response.status == 302: + if redirects_remaining > 0: + location = server_response.getheader('Location') + if location is not None: + m = re.compile('[\?\&]gsessionid=(\w*)').search(location) + if m is not None: + self.__gsessionid = m.group(1) + return GDataService.Delete(self, location, extra_headers, + url_params, escape_params, redirects_remaining - 1) + else: + raise RequestError, {'status': server_response.status, + 'reason': '302 received without Location header', + 'body': result_body} + else: + raise RequestError, {'status': server_response.status, + 'reason': 'Redirect received, but redirects_remaining <= 0', + 'body': result_body} + else: + raise RequestError, {'status': server_response.status, + 'reason': server_response.reason, 'body': result_body} + + +def ExtractToken(url, scopes_included_in_next=True): + """Gets the AuthSub token from the current page's URL. + + Designed to be used on the URL that the browser is sent to after the user + authorizes this application at the page given by GenerateAuthSubRequestUrl. + + Args: + url: The current page's URL. It should contain the token as a URL + parameter. Example: 'http://example.com/?...&token=abcd435' + scopes_included_in_next: If True, this function looks for a scope value + associated with the token. The scope is a URL parameter with the + key set to SCOPE_URL_PARAM_NAME. This parameter should be present + if the AuthSub request URL was generated using + GenerateAuthSubRequestUrl with include_scope_in_next set to True. 
+ + Returns: + A tuple containing the token string and a list of scope strings for which + this token should be valid. If the scope was not included in the URL, the + tuple will contain (token, None). + """ + parsed = urlparse.urlparse(url) + token = gdata.auth.AuthSubTokenFromUrl(parsed[4]) + scopes = '' + if scopes_included_in_next: + for pair in parsed[4].split('&'): + if pair.startswith('%s=' % SCOPE_URL_PARAM_NAME): + scopes = urllib.unquote_plus(pair.split('=')[1]) + return (token, scopes.split(' ')) + + +def GenerateAuthSubRequestUrl(next, scopes, hd='default', secure=False, + session=True, request_url='http://www.google.com/accounts/AuthSubRequest', + include_scopes_in_next=True): + """Creates a URL to request an AuthSub token to access Google services. + + For more details on AuthSub, see the documentation here: + http://code.google.com/apis/accounts/docs/AuthSub.html + + Args: + next: The URL where the browser should be sent after the user authorizes + the application. This page is responsible for receiving the token + which is embeded in the URL as a parameter. + scopes: The base URL to which access will be granted. Example: + 'http://www.google.com/calendar/feeds' will grant access to all + URLs in the Google Calendar data API. If you would like a token for + multiple scopes, pass in a list of URL strings. + hd: The domain to which the user's account belongs. This is set to the + domain name if you are using Google Apps. Example: 'example.org' + Defaults to 'default' + secure: If set to True, all requests should be signed. The default is + False. + session: If set to True, the token received by the 'next' URL can be + upgraded to a multiuse session token. If session is set to False, the + token may only be used once and cannot be upgraded. Default is True. + request_url: The base of the URL to which the user will be sent to + authorize this application to access their data. The default is + 'http://www.google.com/accounts/AuthSubRequest'. 
+ include_scopes_in_next: Boolean if set to true, the 'next' parameter will + be modified to include the requested scope as a URL parameter. The + key for the next's scope parameter will be SCOPE_URL_PARAM_NAME. The + benefit of including the scope URL as a parameter to the next URL, is + that the page which receives the AuthSub token will be able to tell + which URLs the token grants access to. + + Returns: + A URL string to which the browser should be sent. + """ + if isinstance(scopes, list): + scope = ' '.join(scopes) + else: + scope = scopes + if include_scopes_in_next: + if next.find('?') > -1: + next += '&%s' % urllib.urlencode({SCOPE_URL_PARAM_NAME:scope}) + else: + next += '?%s' % urllib.urlencode({SCOPE_URL_PARAM_NAME:scope}) + return gdata.auth.GenerateAuthSubUrl(next=next, scope=scope, secure=secure, + session=session, request_url=request_url, domain=hd) + + +class Query(dict): + """Constructs a query URL to be used in GET requests + + Url parameters are created by adding key-value pairs to this object as a + dict. For example, to add &max-results=25 to the URL do + my_query['max-results'] = 25 + + Category queries are created by adding category strings to the categories + member. All items in the categories list will be concatenated with the / + symbol (symbolizing a category x AND y restriction). If you would like to OR + 2 categories, append them as one string with a | between the categories. + For example, do query.categories.append('Fritz|Laurie') to create a query + like this feed/-/Fritz%7CLaurie . This query will look for results in both + categories. + """ + + def __init__(self, feed=None, text_query=None, params=None, + categories=None): + """Constructor for Query + + Args: + feed: str (optional) The path for the feed (Examples: + '/base/feeds/snippets' or 'calendar/feeds/jo@gmail.com/private/full' + text_query: str (optional) The contents of the q query parameter. The + contents of the text_query are URL escaped upon conversion to a URI. 
+ params: dict (optional) Parameter value string pairs which become URL + params when translated to a URI. These parameters are added to the + query's items (key-value pairs). + categories: list (optional) List of category strings which should be + included as query categories. See + http://code.google.com/apis/gdata/reference.html#Queries for + details. If you want to get results from category A or B (both + categories), specify a single list item 'A|B'. + """ + + self.feed = feed + self.categories = [] + if text_query: + self.text_query = text_query + if isinstance(params, dict): + for param in params: + self[param] = params[param] + if isinstance(categories, list): + for category in categories: + self.categories.append(category) + + def _GetTextQuery(self): + if 'q' in self.keys(): + return self['q'] + else: + return None + + def _SetTextQuery(self, query): + self['q'] = query + + text_query = property(_GetTextQuery, _SetTextQuery, + doc="""The feed query's q parameter""") + + def _GetAuthor(self): + if 'author' in self.keys(): + return self['author'] + else: + return None + + def _SetAuthor(self, query): + self['author'] = query + + author = property(_GetAuthor, _SetAuthor, + doc="""The feed query's author parameter""") + + def _GetAlt(self): + if 'alt' in self.keys(): + return self['alt'] + else: + return None + + def _SetAlt(self, query): + self['alt'] = query + + alt = property(_GetAlt, _SetAlt, + doc="""The feed query's alt parameter""") + + def _GetUpdatedMin(self): + if 'updated-min' in self.keys(): + return self['updated-min'] + else: + return None + + def _SetUpdatedMin(self, query): + self['updated-min'] = query + + updated_min = property(_GetUpdatedMin, _SetUpdatedMin, + doc="""The feed query's updated-min parameter""") + + def _GetUpdatedMax(self): + if 'updated-max' in self.keys(): + return self['updated-max'] + else: + return None + + def _SetUpdatedMax(self, query): + self['updated-max'] = query + + updated_max = property(_GetUpdatedMax, 
_SetUpdatedMax, + doc="""The feed query's updated-max parameter""") + + def _GetPublishedMin(self): + if 'published-min' in self.keys(): + return self['published-min'] + else: + return None + + def _SetPublishedMin(self, query): + self['published-min'] = query + + published_min = property(_GetPublishedMin, _SetPublishedMin, + doc="""The feed query's published-min parameter""") + + def _GetPublishedMax(self): + if 'published-max' in self.keys(): + return self['published-max'] + else: + return None + + def _SetPublishedMax(self, query): + self['published-max'] = query + + published_max = property(_GetPublishedMax, _SetPublishedMax, + doc="""The feed query's published-max parameter""") + + def _GetStartIndex(self): + if 'start-index' in self.keys(): + return self['start-index'] + else: + return None + + def _SetStartIndex(self, query): + if not isinstance(query, str): + query = str(query) + self['start-index'] = query + + start_index = property(_GetStartIndex, _SetStartIndex, + doc="""The feed query's start-index parameter""") + + def _GetMaxResults(self): + if 'max-results' in self.keys(): + return self['max-results'] + else: + return None + + def _SetMaxResults(self, query): + if not isinstance(query, str): + query = str(query) + self['max-results'] = query + + max_results = property(_GetMaxResults, _SetMaxResults, + doc="""The feed query's max-results parameter""") + + def _GetOrderBy(self): + if 'orderby' in self.keys(): + return self['orderby'] + else: + return None + + def _SetOrderBy(self, query): + self['orderby'] = query + + orderby = property(_GetOrderBy, _SetOrderBy, + doc="""The feed query's orderby parameter""") + + def ToUri(self): + q_feed = self.feed or '' + category_string = '/'.join( + [urllib.quote_plus(c) for c in self.categories]) + # Add categories to the feed if there are any. 
+ if len(self.categories) > 0: + q_feed = q_feed + '/-/' + category_string + return atom.service.BuildUri(q_feed, self) + + def __str__(self): + return self.ToUri() diff --git a/gdata.py-1.2.3/src/gdata/spreadsheet/__init__.py b/gdata.py-1.2.3/src/gdata/spreadsheet/__init__.py new file mode 100755 index 0000000..25ec13a --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/spreadsheet/__init__.py @@ -0,0 +1,474 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains extensions to Atom objects used with Google Spreadsheets. +""" + +__author__ = 'api.laurabeth@gmail.com (Laura Beth Lincoln)' + + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import atom +import gdata +import re +import string + + +# XML namespaces which are often used in Google Spreadsheets entities. 
+GSPREADSHEETS_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006' +GSPREADSHEETS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s' + +GSPREADSHEETS_EXTENDED_NAMESPACE = ('http://schemas.google.com/spreadsheets' + '/2006/extended') +GSPREADSHEETS_EXTENDED_TEMPLATE = ('{http://schemas.google.com/spreadsheets' + '/2006/extended}%s') + + +class ColCount(atom.AtomBase): + """The Google Spreadsheets colCount element """ + + _tag = 'colCount' + _namespace = GSPREADSHEETS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def ColCountFromString(xml_string): + return atom.CreateClassFromXMLString(ColCount, xml_string) + + +class RowCount(atom.AtomBase): + """The Google Spreadsheets rowCount element """ + + _tag = 'rowCount' + _namespace = GSPREADSHEETS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, text=None, extension_elements=None, + extension_attributes=None): + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + +def RowCountFromString(xml_string): + return atom.CreateClassFromXMLString(RowCount, xml_string) + + +class Cell(atom.AtomBase): + """The Google Spreadsheets cell element """ + + _tag = 'cell' + _namespace = GSPREADSHEETS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['row'] = 'row' + _attributes['col'] = 'col' + _attributes['inputValue'] = 'inputValue' + _attributes['numericValue'] = 'numericValue' + + def __init__(self, text=None, row=None, col=None, inputValue=None, + numericValue=None, extension_elements=None, extension_attributes=None): + 
self.text = text + self.row = row + self.col = col + self.inputValue = inputValue + self.numericValue = numericValue + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def CellFromString(xml_string): + return atom.CreateClassFromXMLString(Cell, xml_string) + + +class Custom(atom.AtomBase): + """The Google Spreadsheets custom element""" + + _namespace = GSPREADSHEETS_EXTENDED_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + + def __init__(self, column=None, text=None, extension_elements=None, + extension_attributes=None): + self.column = column # The name of the column + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + def _BecomeChildElement(self, tree): + new_child = ElementTree.Element('') + tree.append(new_child) + new_child.tag = '{%s}%s' % (self.__class__._namespace, + self.column) + self._AddMembersToElementTree(new_child) + + def _ToElementTree(self): + new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace, + self.column)) + self._AddMembersToElementTree(new_tree) + return new_tree + + def _HarvestElementTree(self, tree): + namespace_uri, local_tag = string.split(tree.tag[1:], "}", 1) + self.column = local_tag + # Fill in the instance members from the contents of the XML tree. 
+ for child in tree: + self._ConvertElementTreeToMember(child) + for attribute, value in tree.attrib.iteritems(): + self._ConvertElementAttributeToMember(attribute, value) + self.text = tree.text + + +def CustomFromString(xml_string): + element_tree = ElementTree.fromstring(xml_string) + return _CustomFromElementTree(element_tree) + + +def _CustomFromElementTree(element_tree): + namespace_uri, local_tag = string.split(element_tree.tag[1:], "}", 1) + if namespace_uri == GSPREADSHEETS_EXTENDED_NAMESPACE: + new_custom = Custom() + new_custom._HarvestElementTree(element_tree) + new_custom.column = local_tag + return new_custom + return None + + + + + +class SpreadsheetsSpreadsheet(gdata.GDataEntry): + """A Google Spreadsheets flavor of a Spreadsheet Atom Entry """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, control=None, updated=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.control = control + self.title = title + self.updated = updated + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SpreadsheetsSpreadsheetFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsSpreadsheet, + xml_string) + + +class SpreadsheetsWorksheet(gdata.GDataEntry): + """A Google Spreadsheets flavor of a Worksheet Atom Entry """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = 
gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}rowCount' % GSPREADSHEETS_NAMESPACE] = ('row_count', + RowCount) + _children['{%s}colCount' % GSPREADSHEETS_NAMESPACE] = ('col_count', + ColCount) + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, control=None, updated=None, + row_count=None, col_count=None, text=None, extension_elements=None, + extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.control = control + self.title = title + self.updated = updated + self.row_count = row_count + self.col_count = col_count + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SpreadsheetsWorksheetFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsWorksheet, + xml_string) + + +class SpreadsheetsCell(gdata.BatchEntry): + """A Google Spreadsheets flavor of a Cell Atom Entry """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.BatchEntry._children.copy() + _attributes = gdata.BatchEntry._attributes.copy() + _children['{%s}cell' % GSPREADSHEETS_NAMESPACE] = ('cell', Cell) + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, control=None, updated=None, + cell=None, batch_operation=None, batch_id=None, batch_status=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = 
contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.control = control + self.title = title + self.batch_operation = batch_operation + self.batch_id = batch_id + self.batch_status = batch_status + self.updated = updated + self.cell = cell + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SpreadsheetsCellFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsCell, + xml_string) + + +class SpreadsheetsList(gdata.GDataEntry): + """A Google Spreadsheets flavor of a List Atom Entry """ + + _tag = 'entry' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + def __init__(self, author=None, category=None, content=None, + contributor=None, atom_id=None, link=None, published=None, rights=None, + source=None, summary=None, title=None, control=None, updated=None, + custom=None, + text=None, extension_elements=None, extension_attributes=None): + self.author = author or [] + self.category = category or [] + self.content = content + self.contributor = contributor or [] + self.id = atom_id + self.link = link or [] + self.published = published + self.rights = rights + self.source = source + self.summary = summary + self.control = control + self.title = title + self.updated = updated + self.custom = custom or {} + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + # We need to overwrite _ConvertElementTreeToMember to add special logic to + # convert custom attributes to members + def _ConvertElementTreeToMember(self, child_tree): + # Find the element's tag in this class's list of child members + if self.__class__._children.has_key(child_tree.tag): + member_name = 
self.__class__._children[child_tree.tag][0] + member_class = self.__class__._children[child_tree.tag][1] + # If the class member is supposed to contain a list, make sure the + # matching member is set to a list, then append the new member + # instance to the list. + if isinstance(member_class, list): + if getattr(self, member_name) is None: + setattr(self, member_name, []) + getattr(self, member_name).append(atom._CreateClassFromElementTree( + member_class[0], child_tree)) + else: + setattr(self, member_name, + atom._CreateClassFromElementTree(member_class, child_tree)) + elif child_tree.tag.find('{%s}' % GSPREADSHEETS_EXTENDED_NAMESPACE) == 0: + # If this is in the custom namespace, make add it to the custom dict. + name = child_tree.tag[child_tree.tag.index('}')+1:] + custom = _CustomFromElementTree(child_tree) + if custom: + self.custom[name] = custom + else: + ExtensionContainer._ConvertElementTreeToMember(self, child_tree) + + # We need to overwtite _AddMembersToElementTree to add special logic to + # convert custom members to XML nodes. + def _AddMembersToElementTree(self, tree): + # Convert the members of this class which are XML child nodes. + # This uses the class's _children dictionary to find the members which + # should become XML child nodes. + member_node_names = [values[0] for tag, values in + self.__class__._children.iteritems()] + for member_name in member_node_names: + member = getattr(self, member_name) + if member is None: + pass + elif isinstance(member, list): + for instance in member: + instance._BecomeChildElement(tree) + else: + member._BecomeChildElement(tree) + # Convert the members of this class which are XML attributes. 
+ for xml_attribute, member_name in self.__class__._attributes.iteritems(): + member = getattr(self, member_name) + if member is not None: + tree.attrib[xml_attribute] = member + # Convert all special custom item attributes to nodes + for name, custom in self.custom.iteritems(): + custom._BecomeChildElement(tree) + # Lastly, call the ExtensionContainers's _AddMembersToElementTree to + # convert any extension attributes. + atom.ExtensionContainer._AddMembersToElementTree(self, tree) + + +def SpreadsheetsListFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsList, + xml_string) + element_tree = ElementTree.fromstring(xml_string) + return _SpreadsheetsListFromElementTree(element_tree) + + +class SpreadsheetsSpreadsheetsFeed(gdata.GDataFeed): + """A feed containing Google Spreadsheets Spreadsheets""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [SpreadsheetsSpreadsheet]) + + +def SpreadsheetsSpreadsheetsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsSpreadsheetsFeed, + xml_string) + + +class SpreadsheetsWorksheetsFeed(gdata.GDataFeed): + """A feed containing Google Spreadsheets Spreadsheets""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [SpreadsheetsWorksheet]) + + +def SpreadsheetsWorksheetsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsWorksheetsFeed, + xml_string) + + +class SpreadsheetsCellsFeed(gdata.BatchFeed): + """A feed containing Google Spreadsheets Cells""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.BatchFeed._children.copy() + _attributes = gdata.BatchFeed._attributes.copy() + _children['{%s}entry' % 
atom.ATOM_NAMESPACE] = ('entry', + [SpreadsheetsCell]) + _children['{%s}rowCount' % GSPREADSHEETS_NAMESPACE] = ('row_count', + RowCount) + _children['{%s}colCount' % GSPREADSHEETS_NAMESPACE] = ('col_count', + ColCount) + + def __init__(self, author=None, category=None, contributor=None, + generator=None, icon=None, atom_id=None, link=None, logo=None, + rights=None, subtitle=None, title=None, updated=None, + entry=None, total_results=None, start_index=None, + items_per_page=None, extension_elements=None, + extension_attributes=None, text=None, row_count=None, + col_count=None, interrupted=None): + gdata.BatchFeed.__init__(self, author=author, category=category, + contributor=contributor, generator=generator, + icon=icon, atom_id=atom_id, link=link, + logo=logo, rights=rights, subtitle=subtitle, + title=title, updated=updated, entry=entry, + total_results=total_results, + start_index=start_index, + items_per_page=items_per_page, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text, interrupted=interrupted) + self.row_count = row_count + self.col_count = col_count + + def GetBatchLink(self): + for link in self.link: + if link.rel == 'http://schemas.google.com/g/2005#batch': + return link + return None + + +def SpreadsheetsCellsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsCellsFeed, + xml_string) + + +class SpreadsheetsListFeed(gdata.GDataFeed): + """A feed containing Google Spreadsheets Spreadsheets""" + + _tag = 'feed' + _namespace = atom.ATOM_NAMESPACE + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [SpreadsheetsList]) + + +def SpreadsheetsListFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SpreadsheetsListFeed, + xml_string) diff --git a/gdata.py-1.2.3/src/gdata/spreadsheet/service.py b/gdata.py-1.2.3/src/gdata/spreadsheet/service.py new file mode 100755 index 
0000000..3109a1b --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/spreadsheet/service.py @@ -0,0 +1,467 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""SpreadsheetsService extends the GDataService to streamline Google +Spreadsheets operations. + + GBaseService: Provides methods to query feeds and manipulate items. Extends + GDataService. + + DictionaryToParamList: Function which converts a dictionary into a list of + URL arguments (represented as strings). This is a + utility function used in CRUD operations. 
+""" + +__author__ = 'api.laurabeth@gmail.com (Laura Beth Lincoln)' + + +import gdata +import atom.service +import gdata.service +import gdata.spreadsheet +import atom + + +class Error(Exception): + """Base class for exceptions in this module.""" + pass + + +class RequestError(Error): + pass + + +class SpreadsheetsService(gdata.service.GDataService): + """Client for the Google Spreadsheets service.""" + + def __init__(self, email=None, password=None, source=None, + server='spreadsheets.google.com', + additional_headers=None): + gdata.service.GDataService.__init__(self, email=email, password=password, + service='wise', source=source, + server=server, + additional_headers=additional_headers) + + def GetSpreadsheetsFeed(self, key=None, query=None, visibility='private', + projection='full'): + """Gets a spreadsheets feed or a specific entry if a key is defined + Args: + key: string (optional) The spreadsheet key defined in /ccc?key= + query: DocumentQuery (optional) Query parameters + + Returns: + If there is no key, then a SpreadsheetsSpreadsheetsFeed. + If there is a key, then a SpreadsheetsSpreadsheet. + """ + + uri = ('http://%s/feeds/spreadsheets/%s/%s' + % (self.server, visibility, projection)) + + if key is not None: + uri = '%s/%s' % (uri, key) + + if query != None: + query.feed = uri + uri = query.ToUri() + + if key: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsSpreadsheetFromString) + else: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsSpreadsheetsFeedFromString) + + def GetWorksheetsFeed(self, key, wksht_id=None, query=None, + visibility='private', projection='full'): + """Gets a worksheets feed or a specific entry if a wksht is defined + Args: + key: string The spreadsheet key defined in /ccc?key= + wksht_id: string (optional) The id for a specific worksheet entry + query: DocumentQuery (optional) Query parameters + + Returns: + If there is no wksht_id, then a SpreadsheetsWorksheetsFeed. 
+ If there is a wksht_id, then a SpreadsheetsWorksheet. + """ + + uri = ('http://%s/feeds/worksheets/%s/%s/%s' + % (self.server, key, visibility, projection)) + + if wksht_id != None: + uri = '%s/%s' % (uri, wksht_id) + + if query != None: + query.feed = uri + uri = query.ToUri() + + if wksht_id: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString) + else: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsWorksheetsFeedFromString) + + def AddWorksheet(self, title, row_count, col_count, key): + """Creates a new worksheet in the desired spreadsheet. + + The new worksheet is appended to the end of the list of worksheets. The + new worksheet will only have the available number of columns and cells + specified. + + Args: + title: str The title which will be displayed in the list of worksheets. + row_count: int or str The number of rows in the new worksheet. + col_count: int or str The number of columns in the new worksheet. + key: str The spreadsheet key to the spreadsheet to which the new + worksheet should be added. + + Returns: + A SpreadsheetsWorksheet if the new worksheet was created succesfully. + """ + new_worksheet = gdata.spreadsheet.SpreadsheetsWorksheet( + title=atom.Title(text=title), + row_count=gdata.spreadsheet.RowCount(text=str(row_count)), + col_count=gdata.spreadsheet.ColCount(text=str(col_count))) + return self.Post(new_worksheet, + 'http://%s/feeds/worksheets/%s/private/full' % (self.server, key), + converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString) + + def UpdateWorksheet(self, worksheet_entry, url=None): + """Changes the size and/or title of the desired worksheet. + + Args: + worksheet_entry: SpreadsheetWorksheet The new contents of the + worksheet. + url: str (optional) The URL to which the edited worksheet entry should + be sent. If the url is None, the edit URL from the worksheet will + be used. + + Returns: + A SpreadsheetsWorksheet with the new information about the worksheet. 
+ """ + target_url = url or worksheet_entry.GetEditLink().href + return self.Put(worksheet_entry, target_url, + converter=gdata.spreadsheet.SpreadsheetsWorksheetFromString) + + def DeleteWorksheet(self, worksheet_entry=None, url=None): + """Removes the desired worksheet from the spreadsheet + + Args: + worksheet_entry: SpreadsheetWorksheet (optional) The worksheet to + be deleted. If this is none, then the DELETE reqest is sent to + the url specified in the url parameter. + url: str (optaional) The URL to which the DELETE request should be + sent. If left as None, the worksheet's edit URL is used. + + Returns: + True if the worksheet was deleted successfully. + """ + if url: + target_url = url + else: + target_url = worksheet_entry.GetEditLink().href + return self.Delete(target_url) + + def GetCellsFeed(self, key, wksht_id='default', cell=None, query=None, + visibility='private', projection='full'): + """Gets a cells feed or a specific entry if a cell is defined + Args: + key: string The spreadsheet key defined in /ccc?key= + wksht_id: string The id for a specific worksheet entry + cell: string (optional) The R1C1 address of the cell + query: DocumentQuery (optional) Query parameters + + Returns: + If there is no cell, then a SpreadsheetsCellsFeed. + If there is a cell, then a SpreadsheetsCell. 
+ """ + + uri = ('http://%s/feeds/cells/%s/%s/%s/%s' + % (self.server, key, wksht_id, visibility, projection)) + + if cell != None: + uri = '%s/%s' % (uri, cell) + + if query != None: + query.feed = uri + uri = query.ToUri() + + if cell: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsCellFromString) + else: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString) + + def GetListFeed(self, key, wksht_id='default', row_id=None, query=None, + visibility='private', projection='full'): + """Gets a list feed or a specific entry if a row_id is defined + Args: + key: string The spreadsheet key defined in /ccc?key= + wksht_id: string The id for a specific worksheet entry + row_id: string (optional) The row_id of a row in the list + query: DocumentQuery (optional) Query parameters + + Returns: + If there is no row_id, then a SpreadsheetsListFeed. + If there is a row_id, then a SpreadsheetsList. + """ + + uri = ('http://%s/feeds/list/%s/%s/%s/%s' + % (self.server, key, wksht_id, visibility, projection)) + + if row_id is not None: + uri = '%s/%s' % (uri, row_id) + + if query is not None: + query.feed = uri + uri = query.ToUri() + + if row_id: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsListFromString) + else: + return self.Get(uri, + converter=gdata.spreadsheet.SpreadsheetsListFeedFromString) + + def UpdateCell(self, row, col, inputValue, key, wksht_id='default'): + """Updates an existing cell. + + Args: + row: int The row the cell to be editted is in + col: int The column the cell to be editted is in + inputValue: str the new value of the cell + key: str The key of the spreadsheet in which this cell resides. + wksht_id: str The ID of the worksheet which holds this cell. 
+ + Returns: + The updated cell entry + """ + row = str(row) + col = str(col) + # make the new cell + new_cell = gdata.spreadsheet.Cell(row=row, col=col, inputValue=inputValue) + # get the edit uri and PUT + cell = 'R%sC%s' % (row, col) + entry = self.GetCellsFeed(key, wksht_id, cell) + for a_link in entry.link: + if a_link.rel == 'edit': + entry.cell = new_cell + return self.Put(entry, a_link.href, + converter=gdata.spreadsheet.SpreadsheetsCellFromString) + + def _GenerateCellsBatchUrl(self, spreadsheet_key, worksheet_id): + return ('http://spreadsheets.google.com/feeds/cells/%s/%s/' + 'private/full/batch' % (spreadsheet_key, worksheet_id)) + + def ExecuteBatch(self, batch_feed, url=None, spreadsheet_key=None, + worksheet_id=None, + converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString): + """Sends a batch request feed to the server. + + The batch request needs to be sent to the batch URL for a particular + worksheet. You can specify the worksheet by providing the spreadsheet_key + and worksheet_id, or by sending the URL from the cells feed's batch link. + + Args: + batch_feed: gdata.spreadsheet.SpreadsheetsCellFeed A feed containing + BatchEntry elements which contain the desired CRUD operation and + any necessary data to modify a cell. + url: str (optional) The batch URL for the cells feed to which these + changes should be applied. This can be found by calling + cells_feed.GetBatchLink().href. + spreadsheet_key: str (optional) Used to generate the batch request URL + if the url argument is None. If using the spreadsheet key to + generate the URL, the worksheet id is also required. + worksheet_id: str (optional) Used if the url is not provided, it is + oart of the batch feed target URL. This is used with the spreadsheet + key. + converter: Function (optional) Function to be executed on the server's + response. This function should take one string as a parameter. 
The + default value is SpreadsheetsCellsFeedFromString which will turn the result + into a gdata.base.GBaseItem object. + + Returns: + A gdata.BatchFeed containing the results. + """ + + if url is None: + url = self._GenerateCellsBatchUrl(spreadsheet_key, worksheet_id) + return self.Post(batch_feed, url, converter=converter) + + def InsertRow(self, row_data, key, wksht_id='default'): + """Inserts a new row with the provided data + + Args: + uri: string The post uri of the list feed + row_data: dict A dictionary of column header to row data + + Returns: + The inserted row + """ + new_entry = gdata.spreadsheet.SpreadsheetsList() + for k, v in row_data.iteritems(): + new_custom = gdata.spreadsheet.Custom() + new_custom.column = k + new_custom.text = v + new_entry.custom[new_custom.column] = new_custom + # Generate the post URL for the worksheet which will receive the new entry. + post_url = 'http://spreadsheets.google.com/feeds/list/%s/%s/private/full'%( + key, wksht_id) + return self.Post(new_entry, post_url, + converter=gdata.spreadsheet.SpreadsheetsListFromString) + + def UpdateRow(self, entry, new_row_data): + """Updates a row with the provided data + + Args: + entry: gdata.spreadsheet.SpreadsheetsList The entry to be updated + new_row_data: dict A dictionary of column header to row data + + Returns: + The updated row + """ + entry.custom = {} + for k, v in new_row_data.iteritems(): + new_custom = gdata.spreadsheet.Custom() + new_custom.column = k + new_custom.text = v + entry.custom[k] = new_custom + for a_link in entry.link: + if a_link.rel == 'edit': + return self.Put(entry, a_link.href, + converter=gdata.spreadsheet.SpreadsheetsListFromString) + + def DeleteRow(self, entry): + """Deletes a row, the provided entry + + Args: + entry: gdata.spreadsheet.SpreadsheetsList The row to be deleted + + Returns: + The delete response + """ + for a_link in entry.link: + if a_link.rel == 'edit': + return self.Delete(a_link.href) + + +class 
DocumentQuery(gdata.service.Query): + + def _GetTitleQuery(self): + return self['title'] + + def _SetTitleQuery(self, document_query): + self['title'] = document_query + + title = property(_GetTitleQuery, _SetTitleQuery, + doc="""The title query parameter""") + + def _GetTitleExactQuery(self): + return self['title-exact'] + + def _SetTitleExactQuery(self, document_query): + self['title-exact'] = document_query + + title_exact = property(_GetTitleExactQuery, _SetTitleExactQuery, + doc="""The title-exact query parameter""") + + +class CellQuery(gdata.service.Query): + + def _GetMinRowQuery(self): + return self['min-row'] + + def _SetMinRowQuery(self, cell_query): + self['min-row'] = cell_query + + min_row = property(_GetMinRowQuery, _SetMinRowQuery, + doc="""The min-row query parameter""") + + def _GetMaxRowQuery(self): + return self['max-row'] + + def _SetMaxRowQuery(self, cell_query): + self['max-row'] = cell_query + + max_row = property(_GetMaxRowQuery, _SetMaxRowQuery, + doc="""The max-row query parameter""") + + def _GetMinColQuery(self): + return self['min-col'] + + def _SetMinColQuery(self, cell_query): + self['min-col'] = cell_query + + min_col = property(_GetMinColQuery, _SetMinColQuery, + doc="""The min-col query parameter""") + + def _GetMaxColQuery(self): + return self['max-col'] + + def _SetMaxColQuery(self, cell_query): + self['max-col'] = cell_query + + max_col = property(_GetMaxColQuery, _SetMaxColQuery, + doc="""The max-col query parameter""") + + def _GetRangeQuery(self): + return self['range'] + + def _SetRangeQuery(self, cell_query): + self['range'] = cell_query + + range = property(_GetRangeQuery, _SetRangeQuery, + doc="""The range query parameter""") + + def _GetReturnEmptyQuery(self): + return self['return-empty'] + + def _SetReturnEmptyQuery(self, cell_query): + self['return-empty'] = cell_query + + return_empty = property(_GetReturnEmptyQuery, _SetReturnEmptyQuery, + doc="""The return-empty query parameter""") + + +class 
ListQuery(gdata.service.Query): + + def _GetSpreadsheetQuery(self): + return self['sq'] + + def _SetSpreadsheetQuery(self, list_query): + self['sq'] = list_query + + sq = property(_GetSpreadsheetQuery, _SetSpreadsheetQuery, + doc="""The sq query parameter""") + + def _GetOrderByQuery(self): + return self['orderby'] + + def _SetOrderByQuery(self, list_query): + self['orderby'] = list_query + + orderby = property(_GetOrderByQuery, _SetOrderByQuery, + doc="""The orderby query parameter""") + + def _GetReverseQuery(self): + return self['reverse'] + + def _SetReverseQuery(self, list_query): + self['reverse'] = list_query + + reverse = property(_GetReverseQuery, _SetReverseQuery, + doc="""The reverse query parameter""") diff --git a/gdata.py-1.2.3/src/gdata/spreadsheet/text_db.py b/gdata.py-1.2.3/src/gdata/spreadsheet/text_db.py new file mode 100644 index 0000000..a8de546 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/spreadsheet/text_db.py @@ -0,0 +1,559 @@ +#!/usr/bin/python +# +# Copyright Google 2007-2008, all rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import StringIO +import gdata +import gdata.service +import gdata.spreadsheet +import gdata.spreadsheet.service +import gdata.docs +import gdata.docs.service + + +"""Make the Google Documents API feel more like using a database. 
+ +This module contains a client and other classes which make working with the +Google Documents List Data API and the Google Spreadsheets Data API look a +bit more like working with a heirarchical database. Using the DatabaseClient, +you can create or find spreadsheets and use them like a database, with +worksheets representing tables and rows representing records. + +Example Usage: +# Create a new database, a new table, and add records. +client = gdata.spreadsheet.text_db.DatabaseClient(username='jo@example.com', + password='12345') +database = client.CreateDatabase('My Text Database') +table = database.CreateTable('addresses', ['name','email', + 'phonenumber', 'mailingaddress']) +record = table.AddRecord({'name':'Bob', 'email':'bob@example.com', + 'phonenumber':'555-555-1234', 'mailingaddress':'900 Imaginary St.'}) + +# Edit a record +record.content['email'] = 'bob2@example.com' +record.Push() + +# Delete a table +table.Delete + +Warnings: +Care should be exercised when using this module on spreadsheets +which contain formulas. This module treats all rows as containing text and +updating a row will overwrite any formula with the output of the formula. +The intended use case is to allow easy storage of text data in a spreadsheet. + + Error: Domain specific extension of Exception. + BadCredentials: Error raised is username or password was incorrect. + CaptchaRequired: Raised if a login attempt failed and a CAPTCHA challenge + was issued. + DatabaseClient: Communicates with Google Docs APIs servers. + Database: Represents a spreadsheet and interacts with tables. + Table: Represents a worksheet and interacts with records. + RecordResultSet: A list of records in a table. + Record: Represents a row in a worksheet allows manipulation of text data. 
+""" + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + + +class Error(Exception): + pass + + +class BadCredentials(Error): + pass + + +class CaptchaRequired(Error): + pass + + +class DatabaseClient(object): + """Allows creation and finding of Google Spreadsheets databases. + + The DatabaseClient simplifies the process of creating and finding Google + Spreadsheets and will talk to both the Google Spreadsheets API and the + Google Documents List API. + """ + + def __init__(self, username=None, password=None): + """Constructor for a Database Client. + + If the username and password are present, the constructor will contact + the Google servers to authenticate. + + Args: + username: str (optional) Example: jo@example.com + password: str (optional) + """ + self.__docs_client = gdata.docs.service.DocsService() + self.__spreadsheets_client = ( + gdata.spreadsheet.service.SpreadsheetsService()) + self.SetCredentials(username, password) + + def SetCredentials(self, username, password): + """Attempts to log in to Google APIs using the provided credentials. + + If the username or password are None, the client will not request auth + tokens. + + Args: + username: str (optional) Example: jo@example.com + password: str (optional) + """ + self.__docs_client.email = username + self.__docs_client.password = password + self.__spreadsheets_client.email = username + self.__spreadsheets_client.password = password + if username and password: + try: + self.__docs_client.ProgrammaticLogin() + self.__spreadsheets_client.ProgrammaticLogin() + except gdata.service.CaptchaRequired: + raise CaptchaRequired('Please visit https://www.google.com/accounts/' + 'DisplayUnlockCaptcha to unlock your account.') + except gdata.service.BadAuthentication: + raise BadCredentials('Username or password incorrect.') + + def CreateDatabase(self, name): + """Creates a new Google Spreadsheet with the desired name. + + Args: + name: str The title for the spreadsheet. 
+ + Returns: + A Database instance representing the new spreadsheet. + """ + # Create a Google Spreadsheet to form the foundation of this database. + # Spreadsheet is created by uploading a file to the Google Documents + # List API. + virtual_csv_file = StringIO.StringIO(',,,') + virtual_media_source = gdata.MediaSource(file_handle=virtual_csv_file, content_type='text/csv', content_length=3) + db_entry = self.__docs_client.UploadSpreadsheet(virtual_media_source, name) + return Database(spreadsheet_entry=db_entry, database_client=self) + + def GetDatabases(self, spreadsheet_key=None, name=None): + """Finds spreadsheets which have the unique key or title. + + If querying on the spreadsheet_key there will be at most one result, but + searching by name could yield multiple results. + + Args: + spreadsheet_key: str The unique key for the spreadsheet, this + usually in the the form 'pk23...We' or 'o23...423.12,,,3'. + name: str The title of the spreadsheets. + + Returns: + A list of Database objects representing the desired spreadsheets. + """ + if spreadsheet_key: + db_entry = self.__docs_client.GetDocumentListEntry( + r'/feeds/documents/private/full/spreadsheet%3A' + spreadsheet_key) + return [Database(spreadsheet_entry=db_entry, database_client=self)] + else: + title_query = gdata.docs.service.DocumentQuery() + title_query['title'] = name + db_feed = self.__docs_client.QueryDocumentListFeed(title_query.ToUri()) + matching_databases = [] + for entry in db_feed.entry: + matching_databases.append(Database(spreadsheet_entry=entry, + database_client=self)) + return matching_databases + + def _GetDocsClient(self): + return self.__docs_client + + def _GetSpreadsheetsClient(self): + return self.__spreadsheets_client + + +class Database(object): + """Provides interface to find and create tables. + + The database represents a Google Spreadsheet. + """ + + def __init__(self, spreadsheet_entry=None, database_client=None): + """Constructor for a database object. 
+ + Args: + spreadsheet_entry: gdata.docs.DocumentListEntry The + Atom entry which represents the Google Spreadsheet. The + spreadsheet's key is extracted from the entry and stored as a + member. + database_client: DatabaseClient A client which can talk to the + Google Spreadsheets servers to perform operations on worksheets + within this spreadsheet. + """ + self.entry = spreadsheet_entry + if self.entry: + id_parts = spreadsheet_entry.id.text.split('/') + self.spreadsheet_key = id_parts[-1].replace('spreadsheet%3A', '') + self.client = database_client + + def CreateTable(self, name, fields=None): + """Add a new worksheet to this spreadsheet and fill in column names. + + Args: + name: str The title of the new worksheet. + fields: list of strings The column names which are placed in the + first row of this worksheet. These names are converted into XML + tags by the server. To avoid changes during the translation + process I recommend using all lowercase alphabetic names. For + example ['somelongname', 'theothername'] + + Returns: + Table representing the newly created worksheet. + """ + worksheet = self.client._GetSpreadsheetsClient().AddWorksheet(title=name, + row_count=1, col_count=len(fields), key=self.spreadsheet_key) + return Table(name=name, worksheet_entry=worksheet, + database_client=self.client, + spreadsheet_key=self.spreadsheet_key, fields=fields) + + def GetTables(self, worksheet_id=None, name=None): + """Searches for a worksheet with the specified ID or name. + + The list of results should have one table at most, or no results + if the id or name were not found. + + Args: + worksheet_id: str The ID of the worksheet, example: 'od6' + name: str The title of the worksheet. + + Returns: + A list of length 0 or 1 containing the desired Table. A list is returned + to make this method feel like GetDatabases and GetRecords. 
+ """ + if worksheet_id: + worksheet_entry = self.client._GetSpreadsheetsClient().GetWorksheetsFeed( + self.spreadsheet_key, wksht_id=worksheet_id) + return [Table(name=worksheet_entry.title.text, + worksheet_entry=worksheet_entry, database_client=self.client, + spreadsheet_key=self.spreadsheet_key)] + else: + matching_tables = [] + query = None + if name: + query = gdata.spreadsheet.service.DocumentQuery() + query.title = name + + worksheet_feed = self.client._GetSpreadsheetsClient().GetWorksheetsFeed( + self.spreadsheet_key, query=query) + for entry in worksheet_feed.entry: + matching_tables.append(Table(name=entry.title.text, + worksheet_entry=entry, database_client=self.client, + spreadsheet_key=self.spreadsheet_key)) + return matching_tables + + def Delete(self): + """Deletes the entire database spreadsheet from Google Spreadsheets.""" + entry = self.client._GetDocsClient().Get( + r'http://docs.google.com/feeds/documents/private/full/spreadsheet%3A' + + self.spreadsheet_key) + self.client._GetDocsClient().Delete(entry.GetEditLink().href) + + +class Table(object): + + def __init__(self, name=None, worksheet_entry=None, database_client=None, + spreadsheet_key=None, fields=None): + self.name = name + self.entry = worksheet_entry + id_parts = worksheet_entry.id.text.split('/') + self.worksheet_id = id_parts[-1] + self.spreadsheet_key = spreadsheet_key + self.client = database_client + self.fields = fields or [] + if fields: + self.SetFields(fields) + + def LookupFields(self): + """Queries to find the column names in the first row of the worksheet. + + Useful when you have retrieved the table from the server and you don't + know the column names. 
+ """ + if self.entry: + first_row_contents = [] + query = gdata.spreadsheet.service.CellQuery() + query.max_row = '1' + query.min_row = '1' + feed = self.client._GetSpreadsheetsClient().GetCellsFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id, query=query) + for entry in feed.entry: + first_row_contents.append(entry.content.text) + # Get the next set of cells if needed. + next_link = feed.GetNextLink() + while next_link: + feed = self.client._GetSpreadsheetsClient().Get(next_link.href, + converter=gdata.spreadsheet.SpreadsheetsCellsFeedFromString) + for entry in feed.entry: + first_row_contents.append(entry.content.text) + next_link = feed.GetNextLink() + # Convert the contents of the cells to valid headers. + self.fields = ConvertStringsToColumnHeaders(first_row_contents) + + def SetFields(self, fields): + """Changes the contents of the cells in the first row of this worksheet. + + Args: + fields: list of strings The names in the list comprise the + first row of the worksheet. These names are converted into XML + tags by the server. To avoid changes during the translation + process I recommend using all lowercase alphabetic names. For + example ['somelongname', 'theothername'] + """ + # TODO: If the table already had fields, we might want to clear out the, + # current column headers. + self.fields = fields + i = 0 + for column_name in fields: + i = i + 1 + # TODO: speed this up by using a batch request to update cells. + self.client._GetSpreadsheetsClient().UpdateCell(1, i, column_name, + self.spreadsheet_key, self.worksheet_id) + + def Delete(self): + """Deletes this worksheet from the spreadsheet.""" + worksheet = self.client._GetSpreadsheetsClient().GetWorksheetsFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id) + self.client._GetSpreadsheetsClient().DeleteWorksheet( + worksheet_entry=worksheet) + + def AddRecord(self, data): + """Adds a new row to this worksheet. + + Args: + data: dict of strings Mapping of string values to column names. 
+ + Returns: + Record which represents this row of the spreadsheet. + """ + new_row = self.client._GetSpreadsheetsClient().InsertRow(data, + self.spreadsheet_key, wksht_id=self.worksheet_id) + return Record(content=data, row_entry=new_row, + spreadsheet_key=self.spreadsheet_key, worksheet_id=self.worksheet_id, + database_client=self.client) + + def GetRecord(self, row_id=None, row_number=None): + """Gets a single record from the worksheet based on row ID or number. + + Args: + row_id: The ID for the individual row. + row_number: str or int The position of the desired row. Numbering + begins at 1, which refers to the second row in the worksheet since + the first row is used for column names. + + Returns: + Record for the desired row. + """ + if row_id: + row_entry = self.client._GetSpreadsheetsClient().GetListFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id, row_id=row_id) + return Record(content=None, row_entry=row_entry, + spreadsheet_key=self.spreadsheet_key, + worksheet_id=self.worksheet_id, database_client=self.client) + else: + row_query = gdata.spreadsheet.service.ListQuery() + row_query.start_index = str(row_number) + row_query.max_results = '1' + row_feed = self.client._GetSpreadsheetsClient().GetListFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id, query=row_query) + if len(row_feed.entry) >= 1: + return Record(content=None, row_entry=row_feed.entry[0], + spreadsheet_key=self.spreadsheet_key, + worksheet_id=self.worksheet_id, database_client=self.client) + else: + return None + + def GetRecords(self, start_row, end_row): + """Gets all rows between the start and end row numbers inclusive. + + Args: + start_row: str or int + end_row: str or int + + Returns: + RecordResultSet for the desired rows. 
+ """ + start_row = int(start_row) + end_row = int(end_row) + max_rows = end_row - start_row + 1 + row_query = gdata.spreadsheet.service.ListQuery() + row_query.start_index = str(start_row) + row_query.max_results = str(max_rows) + rows_feed = self.client._GetSpreadsheetsClient().GetListFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id, query=row_query) + return RecordResultSet(rows_feed, self.client, self.spreadsheet_key, + self.worksheet_id) + + def FindRecords(self, query_string): + """Performs a query against the worksheet to find rows which match. + + For details on query string syntax see the section on sq under + http://code.google.com/apis/spreadsheets/reference.html#list_Parameters + + Args: + query_string: str Examples: 'name == john' to find all rows with john + in the name column, '(cost < 19.50 and name != toy) or cost > 500' + + Returns: + RecordResultSet with the first group of matches. + """ + row_query = gdata.spreadsheet.service.ListQuery() + row_query.sq = query_string + matching_feed = self.client._GetSpreadsheetsClient().GetListFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id, query=row_query) + return RecordResultSet(matching_feed, self.client, + self.spreadsheet_key, self.worksheet_id) + + +class RecordResultSet(list): + """A collection of rows which allows fetching of the next set of results. + + The server may not send all rows in the requested range because there are + too many. Using this result set you can access the first set of results + as if it is a list, then get the next batch (if there are more results) by + calling GetNext(). 
+ """ + + def __init__(self, feed, client, spreadsheet_key, worksheet_id): + self.client = client + self.spreadsheet_key = spreadsheet_key + self.worksheet_id = worksheet_id + self.feed = feed + list(self) + for entry in self.feed.entry: + self.append(Record(content=None, row_entry=entry, + spreadsheet_key=spreadsheet_key, worksheet_id=worksheet_id, + database_client=client)) + + def GetNext(self): + """Fetches the next batch of rows in the result set. + + Returns: + A new RecordResultSet. + """ + next_link = self.feed.GetNextLink() + if next_link and next_link.href: + new_feed = self.client._GetSpreadsheetsClient().Get(next_link.href, + converter=gdata.spreadsheet.SpreadsheetsListFeedFromString) + return RecordResultSet(new_feed, self.client, self.spreadsheet_key, + self.worksheet_id) + + +class Record(object): + """Represents one row in a worksheet and provides a dictionary of values. + + Attributes: + custom: dict Represents the contents of the row with cell values mapped + to column headers. + """ + + def __init__(self, content=None, row_entry=None, spreadsheet_key=None, + worksheet_id=None, database_client=None): + """Constructor for a record. + + Args: + content: dict of strings Mapping of string values to column names. + row_entry: gdata.spreadsheet.SpreadsheetsList The Atom entry + representing this row in the worksheet. + spreadsheet_key: str The ID of the spreadsheet in which this row + belongs. + worksheet_id: str The ID of the worksheet in which this row belongs. + database_client: DatabaseClient The client which can be used to talk + the Google Spreadsheets server to edit this row. 
+ """ + self.entry = row_entry + self.spreadsheet_key = spreadsheet_key + self.worksheet_id = worksheet_id + if row_entry: + self.row_id = row_entry.id.text.split('/')[-1] + else: + self.row_id = None + self.client = database_client + self.content = content or {} + if not content: + self.ExtractContentFromEntry(row_entry) + + def ExtractContentFromEntry(self, entry): + """Populates the content and row_id based on content of the entry. + + This method is used in the Record's contructor. + + Args: + entry: gdata.spreadsheet.SpreadsheetsList The Atom entry + representing this row in the worksheet. + """ + self.content = {} + if entry: + self.row_id = entry.id.text.split('/')[-1] + for label, custom in entry.custom.iteritems(): + self.content[label] = custom.text + + def Push(self): + """Send the content of the record to spreadsheets to edit the row. + + All items in the content dictionary will be sent. Items which have been + removed from the content may remain in the row. The content member + of the record will not be modified so additional fields in the row + might be absent from this local copy. + """ + self.entry = self.client._GetSpreadsheetsClient().UpdateRow(self.entry, self.content) + + def Pull(self): + """Query Google Spreadsheets to get the latest data from the server. + + Fetches the entry for this row and repopulates the content dictionary + with the data found in the row. + """ + if self.row_id: + self.entry = self.client._GetSpreadsheetsClient().GetListFeed( + self.spreadsheet_key, wksht_id=self.worksheet_id, row_id=self.row_id) + self.ExtractContentFromEntry(self.entry) + + def Delete(self): + self.client._GetSpreadsheetsClient().DeleteRow(self.entry) + + +def ConvertStringsToColumnHeaders(proposed_headers): + """Converts a list of strings to column names which spreadsheets accepts. + + When setting values in a record, the keys which represent column names must + fit certain rules. They are all lower case, contain no spaces or special + characters. 
If two columns have the same name after being sanitized, the + columns further to the right have _2, _3 _4, etc. appended to them. + + If there are column names which consist of all special characters, or if + the column header is blank, an obfuscated value will be used for a column + name. This method does not handle blank column names or column names with + only special characters. + """ + headers = [] + for input_string in proposed_headers: + # TODO: probably a more efficient way to do this. Perhaps regex. + sanitized = input_string.lower().replace('_', '').replace( + ':', '').replace(' ', '') + # When the same sanitized header appears multiple times in the first row + # of a spreadsheet, _n is appended to the name to make it unique. + header_count = headers.count(sanitized) + if header_count > 0: + headers.append('%s_%i' % (sanitized, header_count+1)) + else: + headers.append(sanitized) + return headers diff --git a/gdata.py-1.2.3/src/gdata/test_data.py b/gdata.py-1.2.3/src/gdata/test_data.py new file mode 100755 index 0000000..31e0a02 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/test_data.py @@ -0,0 +1,2838 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + + +XML_ENTRY_1 = """<?xml version='1.0'?> +<entry xmlns='http://www.w3.org/2005/Atom' + xmlns:g='http://base.google.com/ns/1.0'> + <category scheme="http://base.google.com/categories/itemtypes" + term="products"/> + <id> http://www.google.com/test/id/url </id> + <title type='text'>Testing 2000 series laptop + +

    A Testing Laptop
    + + + + Computer + Laptop + testing laptop + products +""" + + +TEST_BASE_ENTRY = """ + + + Testing 2000 series laptop + +
    A Testing Laptop
    +
    + + yes + + + + Computer + Laptop + testing laptop + products +
    """ + + +BIG_FEED = """ + + dive into mark + + A <em>lot</em> of effort + went into making this effortless + + 2005-07-31T12:29:29Z + tag:example.org,2003:3 + + + Copyright (c) 2003, Mark Pilgrim + + Example Toolkit + + + Atom draft-07 snapshot + + + tag:example.org,2003:3.2397 + 2005-07-31T12:29:29Z + 2003-12-13T08:29:29-04:00 + + Mark Pilgrim + http://example.org/ + f8dy@example.com + + + Sam Ruby + + + Joe Gregorio + + +
    +

    [Update: The Atom draft is finished.]

    +
    +
    +
    +
    +""" + +SMALL_FEED = """ + + Example Feed + + 2003-12-13T18:30:02Z + + John Doe + + urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6 + + Atom-Powered Robots Run Amok + + urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a + 2003-12-13T18:30:02Z + Some text. + + +""" + +GBASE_FEED = """ + +http://www.google.com/base/feeds/snippets +2007-02-08T23:18:21.935Z +Items matching query: digital camera + + + + + + + + +GoogleBase +2171885 +1 +25 + +http://www.google.com/base/feeds/snippets/13246453826751927533 +2007-02-08T13:23:27.000Z +2007-02-08T16:40:57.000Z + + +Digital Camera Battery Notebook Computer 12v DC Power Cable - 5.5mm x 2.5mm (Center +) Camera Connecting Cables +Notebook Computer 12v DC Power Cable - 5.5mm x 2.1mm (Center +) This connection cable will allow any Digital Pursuits battery pack to power portable computers that operate with 12v power and have a 2.1mm power connector (center +) Digital ... + + + + + +B&H Photo-Video +anon-szot0wdsq0at@base.google.com + +PayPal & Bill Me Later credit available online only. +new +420 9th Ave. 10001 +305668-REG +Products +Digital Camera Battery +2007-03-10T13:23:27.000Z +1172711 +34.95 usd +Digital Photography>Camera Connecting Cables +EN +DCB5092 +US +1.0 +http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305668.jpg&dhm=ffffffff84c9a95e&size=6 + + +http://www.google.com/base/feeds/snippets/10145771037331858608 +2007-02-08T13:23:27.000Z +2007-02-08T16:40:57.000Z + + +Digital Camera Battery Electronic Device 5v DC Power Cable - 5.5mm x 2.5mm (Center +) Camera Connecting Cables +Electronic Device 5v DC Power Cable - 5.5mm x 2.5mm (Center +) This connection cable will allow any Digital Pursuits battery pack to power any electronic device that operates with 5v power and has a 2.5mm power connector (center +) Digital ... + + + + + +B&H Photo-Video +anon-szot0wdsq0at@base.google.com + +420 9th Ave. 
10001 +new +0.18 +US +Digital Photography>Camera Connecting Cables +PayPal & Bill Me Later credit available online only. +305656-REG +http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305656.jpg&dhm=7315bdc8&size=6 +DCB5108 +838098005108 +34.95 usd +EN +Digital Camera Battery +1172711 +Products +2007-03-10T13:23:27.000Z + + +http://www.google.com/base/feeds/snippets/3128608193804768644 +2007-02-08T02:21:27.000Z +2007-02-08T15:40:13.000Z + + +Digital Camera Battery Power Cable for Kodak 645 Pro-Back ProBack & DCS-300 Series Camera Connecting Cables +Camera Connection Cable - to Power Kodak 645 Pro-Back DCS-300 Series Digital Cameras This connection cable will allow any Digital Pursuits battery pack to power the following digital cameras: Kodak DCS Pro Back 645 DCS-300 series Digital Photography ... + + + + + +B&H Photo-Video +anon-szot0wdsq0at@base.google.com + +0.3 +DCB6006 +http://base.google.com/base_image?q=http%3A%2F%2Fwww.bhphotovideo.com%2Fimages%2Fitems%2F305685.jpg&dhm=72f0ca0a&size=6 +420 9th Ave. 10001 +PayPal & Bill Me Later credit available online only. 
+Products +US +digital kodak camera +Digital Camera Battery +2007-03-10T02:21:27.000Z +EN +new +34.95 usd +1172711 +Digital Photography>Camera Connecting Cables +305685-REG + +""" + +EXTENSION_TREE = """ + + + John Doe + Bar + + + +""" + +TEST_AUTHOR = """ + + John Doe + johndoes@someemailadress.com + http://www.google.com + +""" + +TEST_LINK = """ + +""" + +TEST_GBASE_ATTRIBUTE = """ + Digital Camera Battery +""" + + +CALENDAR_FEED = """ + + http://www.google.com/calendar/feeds/default + 2007-03-20T22:48:57.833Z + GData Ops Demo's Calendar List + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + Google Calendar + 1 + + + http://www.google.com/calendar/feeds/default/gdata.ops.demo%40gmail.com + 2007-03-20T22:48:57.837Z + 2007-03-20T22:48:52.000Z + GData Ops Demo + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + http://www.google.com/calendar/feeds/default/jnh21ovnjgfph21h32gvms2758%40group.calendar.google.com + 2007-03-20T22:48:57.837Z + 2007-03-20T22:48:53.000Z + GData Ops Demo Secondary Calendar + + + + + + + GData Ops Demo Secondary Calendar + + + + + + + + +""" + +CALENDAR_FULL_EVENT_FEED = """ + + + http://www.google.com/calendar/feeds/default/private/full + 2007-03-20T21:29:57.000Z + + GData Ops Demo + GData Ops Demo + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + Google Calendar + 10 + 1 + 25 + + + + http://www.google.com/calendar/feeds/default/private/full/o99flmgmkfkfrr8u745ghr3100 + 2007-03-20T21:29:52.000Z + 2007-03-20T21:29:57.000Z + + test deleted + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/2qt3ao5hbaq7m9igr5ak9esjo0 + 2007-03-20T21:26:04.000Z + 2007-03-20T21:28:46.000Z + + Afternoon at Dolores Park with Kim + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
http://www.google.com/calendar/feeds/default/private/full/uvsqhg7klnae40v50vihr1pvos + 2007-03-20T21:28:37.000Z + 2007-03-20T21:28:37.000Z + + Team meeting + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + DTSTART;TZID=America/Los_Angeles:20070323T090000 + DTEND;TZID=America/Los_Angeles:20070323T100000 + RRULE:FREQ=WEEKLY;BYDAY=FR;UNTIL=20070817T160000Z;WKST=SU + BEGIN:VTIMEZONE TZID:America/Los_Angeles + X-LIC-LOCATION:America/Los_Angeles BEGIN:STANDARD + TZOFFSETFROM:-0700 TZOFFSETTO:-0800 TZNAME:PST + DTSTART:19701025T020000 RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU + END:STANDARD BEGIN:DAYLIGHT TZOFFSETFROM:-0800 TZOFFSETTO:-0700 + TZNAME:PDT DTSTART:19700405T020000 + RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=1SU END:DAYLIGHT + END:VTIMEZONE + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/st4vk9kiffs6rasrl32e4a7alo + 2007-03-20T21:25:46.000Z + 2007-03-20T21:25:46.000Z + + Movie with Kim and danah + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/ofl1e45ubtsoh6gtu127cls2oo + 2007-03-20T21:24:43.000Z + 2007-03-20T21:25:08.000Z + + Dinner with Kim and Sarah + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/b69s2avfi2joigsclecvjlc91g + 2007-03-20T21:24:19.000Z + 2007-03-20T21:25:05.000Z + + Dinner with Jane and John + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/u9p66kkiotn8bqh9k7j4rcnjjc + 2007-03-20T21:24:33.000Z + 2007-03-20T21:24:33.000Z + + Tennis with Elizabeth + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/76oj2kceidob3s708tvfnuaq3c + 2007-03-20T21:24:00.000Z + 2007-03-20T21:24:00.000Z + + 
Lunch with Jenn + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/5np9ec8m7uoauk1vedh5mhodco + 2007-03-20T07:50:02.000Z + 2007-03-20T20:39:26.000Z + + test entry + test desc + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/fu6sl0rqakf3o0a13oo1i1a1mg + 2007-02-14T23:23:37.000Z + 2007-02-14T23:25:30.000Z + + test + + + + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + + + + + + + + + http://www.google.com/calendar/feeds/default/private/full/h7a0haa4da8sil3rr19ia6luvc + 2007-07-16T22:13:28.000Z + 2007-07-16T22:13:29.000Z + + + + + + + + + + + + + GData Ops Demo + gdata.ops.demo@gmail.com + + + + + + + + + + + + + +""" + +CALENDAR_BATCH_REQUEST = """ + + + + 1 + + + Event inserted via batch + + + 2 + + http://www.google.com/calendar/feeds/default/private/full/glcs0kv2qqa0gf52qi1jo018gc + + Event queried via batch + + + 3 + + http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs + + Event updated via batch + + + + + + 4 + + http://www.google.com/calendar/feeds/default/private/full/d8qbg9egk1n6lhsgq1sjbqffqc + + Event deleted via batch + + + + + +""" + +CALENDAR_BATCH_RESPONSE = """ + + http://www.google.com/calendar/feeds/default/private/full + 2007-09-21T23:01:00.380Z + + Batch Feed + + + + + 1 + + + http://www.google.com/calendar/feeds/default/private/full/n9ug78gd9tv53ppn4hdjvk68ek + + Event inserted via batch + + + + + + 2 + + + http://www.google.com/calendar/feeds/default/private/full/glsc0kv2aqa0ff52qi1jo018gc + + Event queried via batch + + + + + + 3 + + + http://www.google.com/calendar/feeds/default/private/full/ujm0go5dtngdkr6u91dcqvj0qs + + Event updated via batch + + + + 3 + + + + + 4 + + + 
http://www.google.com/calendar/feeds/default/private/full/d8qbg9egk1n6lhsgq1sjbqffqc + + Event deleted via batch + Deleted + + +""" + +GBASE_ATTRIBUTE_FEED = """ + + http://www.google.com/base/feeds/attributes + 2006-11-01T20:35:59.578Z + + + Attribute histogram for query: [item type:jobs] + + + + GoogleBase + 16 + 1 + 16 + + http://www.google.com/base/feeds/attributes/job+industry%28text%29N%5Bitem+type%3Ajobs%5D + 2006-11-01T20:36:00.100Z + job industry(text) + Attribute"job industry" of type text. + + + + it internet + healthcare + information technology + accounting + clerical and administrative + other + sales and sales management + information systems + engineering and architecture + sales + + + +""" + + +GBASE_ATTRIBUTE_ENTRY = """ + + http://www.google.com/base/feeds/attributes/job+industry%28text%29N%5Bitem+type%3Ajobs%5D + 2006-11-01T20:36:00.100Z + job industry(text) + Attribute"job industry" of type text. + + + + it internet + healthcare + information technology + accounting + clerical and administrative + other + sales and sales management + information systems + engineering and architecture + sales + + +""" + +GBASE_LOCALES_FEED = """ + + http://www.google.com/base/feeds/locales/ + 2006-06-13T18:11:40.120Z + Locales + + + + + Google Inc. 
+ base@google.com + + GoogleBase + 3 + 25 + + + http://www.google.com/base/feeds/locales/en_US + 2006-03-27T22:27:36.658Z + + + en_US + en_US + + + + + + http://www.google.com/base/feeds/locales/en_GB + 2006-06-13T18:14:18.601Z + + en_GB + en_GB + + + + + http://www.google.com/base/feeds/locales/de_DE + 2006-06-13T18:14:18.601Z + + de_DE + de_DE + + + +""" + +GBASE_STRING_ENCODING_ENTRY = """ + + http://www.google.com/base/feeds/snippets/17495780256183230088 + 2007-12-09T03:13:07.000Z + 2008-01-07T03:26:46.000Z + + Digital Camera Cord Fits SONY Cybershot DSC-R1 S40 + SONY \xC2\xB7 Cybershot Digital Camera Usb Cable DESCRIPTION + This is a 2.5 USB 2.0 A to Mini B (5 Pin) high quality digital camera + cable used for connecting your Sony Digital Cameras and Camcoders. Backward + Compatible with USB 2.0, 1.0 and 1.1. Fully ... + + + + eBay + + Products + EN + US + 0.99 usd + http://thumbs.ebaystatic.com/pict/270195049057_1.jpg + Cameras & Photo>Digital Camera Accessories>Cables + Cords & Connectors>USB Cables>For Other Brands + 11729 + 270195049057 + 2008-02-06T03:26:46Z +""" + + +RECURRENCE_EXCEPTION_ENTRY = """ + + http://www.google.com/calendar/feeds/default/private/composite/i7lgfj69mjqjgnodklif3vbm7g + 2007-04-05T21:51:49.000Z + 2007-04-05T21:51:49.000Z + + testDavid + + + + + + gdata ops + gdata.ops.test@gmail.com + + + + + + + + + + DTSTART;TZID=America/Anchorage:20070403T100000 + DTEND;TZID=America/Anchorage:20070403T110000 + RRULE:FREQ=DAILY;UNTIL=20070408T180000Z;WKST=SU + EXDATE;TZID=America/Anchorage:20070407T100000 + EXDATE;TZID=America/Anchorage:20070405T100000 + EXDATE;TZID=America/Anchorage:20070404T100000 BEGIN:VTIMEZONE + TZID:America/Anchorage X-LIC-LOCATION:America/Anchorage + BEGIN:STANDARD TZOFFSETFROM:-0800 TZOFFSETTO:-0900 TZNAME:AKST + DTSTART:19701025T020000 RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU + END:STANDARD BEGIN:DAYLIGHT TZOFFSETFROM:-0900 TZOFFSETTO:-0800 + TZNAME:AKDT DTSTART:19700405T020000 + RRULE:FREQ=YEARLY;BYMONTH=4;BYDAY=1SU 
END:DAYLIGHT + END:VTIMEZONE + + + + + + i7lgfj69mjqjgnodklif3vbm7g_20070407T180000Z + 2007-04-05T21:51:49.000Z + 2007-04-05T21:52:58.000Z + + testDavid + + + + gdata ops + gdata.ops.test@gmail.com + + + + + + + + + + + + + + + + + + + 2007-04-05T21:54:09.285Z + + + Comments for: testDavid + + + + + + + + + + + + """ + +NICK_ENTRY = """ + + https://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo + 1970-01-01T00:00:00.000Z + + Foo + + + + +""" + +NICK_FEED = """ + + + http://apps-apis.google.com/a/feeds/example.com/nickname/2.0 + + 1970-01-01T00:00:00.000Z + + Nicknames for user SusanJones + + + + 1 + 2 + + + http://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo + + + Foo + + + + + + + + http://apps-apis.google.com/a/feeds/example.com/nickname/2.0/suse + + + suse + + + + + +""" + +USER_ENTRY = """ + + https://apps-apis.google.com/a/feeds/example.com/user/2.0/TestUser + 1970-01-01T00:00:00.000Z + + TestUser + + + + + + + +""" + +USER_FEED = """ + + + http://apps-apis.google.com/a/feeds/example.com/user/2.0 + + 1970-01-01T00:00:00.000Z + + Users + """ + +EMAIL_LIST_ENTRY = """ + + + https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/testlist + + 1970-01-01T00:00:00.000Z + + testlist + + + + +""" + +EMAIL_LIST_FEED = """ + + + http://apps-apis.google.com/a/feeds/example.com/emailList/2.0 + + 1970-01-01T00:00:00.000Z + + EmailLists + """ + +EMAIL_LIST_RECIPIENT_ENTRY = """ + + https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient/TestUser%40example.com + 1970-01-01T00:00:00.000Z + + TestUser + + + +""" + +EMAIL_LIST_RECIPIENT_FEED = """ + + + http://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/recipient + + 1970-01-01T00:00:00.000Z + + Recipients for email list us-sales + """ + +ACL_FEED = """ + + http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full + 2007-04-21T00:52:04.000Z + Elizabeth Bennet's access control list + + + + + + + + + Google Calendar + 2 + 1 + + 
http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com + 2007-04-21T00:52:04.000Z + + + owner + + + + + + + Elizabeth Bennet + liz@gmail.com + + + + + + + http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/default + 2007-04-21T00:52:04.000Z + + + read + + + + + + + Elizabeth Bennet + liz@gmail.com + + + + + + """ + +ACL_ENTRY = """ + + http://www.google.com/calendar/feeds/liz%40gmail.com/acl/full/user%3Aliz%40gmail.com + 2007-04-21T00:52:04.000Z + + + owner + + + + + + + Elizabeth Bennet + liz@gmail.com + + + + + """ + +DOCUMENT_LIST_FEED = """ +21test.usertest.user@gmail.comhttp://docs.google.com/feeds/documents/private/full/spreadsheet%3AsupercalifragilisticexpeadociousTest Spreadsheet2007-07-03T18:03:32.045Ztest.usertest.user@gmail.comhttp://docs.google.com/feeds/documents/private/full/document%3Agr00vyTest Document2007-07-03T18:02:50.338Zhttp://docs.google.com/feeds/documents/private/fullAvailable +Documents - +test.user@gmail.com2007-07-09T23:07:21.898Z +""" + +DOCUMENT_LIST_ENTRY = """ +test.usertest.user@gmail.comhttp://docs.google.com/feeds/documents/private/full/spreadsheet%3AsupercalifragilisticexpealidociousTest Spreadsheet2007-07-03T18:03:32.045Z +""" + +BATCH_ENTRY = """ + + http://www.google.com/base/feeds/items/2173859253842813008 + 2006-07-11T14:51:43.560Z + 2006-07-11T14:51: 43.560Z + title + content + + + recipes + + itemB + +""" + +BATCH_FEED_REQUEST = """ + + My Batch Feed + + http://www.google.com/base/feeds/items/13308004346459454600 + + + + http://www.google.com/base/feeds/items/17437536661927313949 + + + + ... + ... + itemA + + recipes + + + ... + ... + itemB + + recipes + +""" + +BATCH_FEED_RESULT = """ + + http://www.google.com/base/feeds/items + 2006-07-11T14:51:42.894Z + My Batch + + + + + http://www.google.com/base/feeds/items/2173859253842813008 + 2006-07-11T14:51:43.560Z + 2006-07-11T14:51: 43.560Z + ... + ... 
+ + + recipes + + itemB + + + + http://www.google.com/base/feeds/items/11974645606383737963 + 2006-07-11T14:51:43.247Z + 2006-07-11T14:51: 43.247Z + ... + ... + + + recipes + + itemA + + + + http://www.google.com/base/feeds/items/13308004346459454600 + 2006-07-11T14:51:42.894Z + Error + Bad request + + + + + + + + http://www.google.com/base/feeds/items/17437536661927313949 + 2006-07-11T14:51:43.246Z + Deleted + + + +""" + +ALBUM_FEED = """ + + http://picasaweb.google.com/data/feed/api/user/sample.user/albumid/1 + 2007-09-21T18:23:05.000Z + + Test + + public + http://lh6.google.com/sample.user/Rt8WNoDZEJE/AAAAAAAAABk/HQGlDhpIgWo/s160-c/Test.jpg + + + + + + sample + http://picasaweb.google.com/sample.user + + Picasaweb 4 + 1 + 500 + 1 + Test + + public 1188975600000 + 2 + sample.user + sample + true + 0 + http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/photoid/2 + 2007-09-05T20:49:23.000Z + 2007-09-21T18:23:05.000Z + + Aqua Blue.jpg + Blue + + + + 2 + 1190398985145172 + 0.0 + 1 2560 + 1600 + 883405 + + + 1189025362000 + true + c041ce17aaa637eb656c81d9cf526c24 + + true + 1 + + Aqua Blue.jpg Blue + tag, test + + + + + sample + + + + http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/photoid/3 + 2007-09-05T20:49:24.000Z + 2007-09-21T18:19:38.000Z + + Aqua Graphite.jpg + Gray + + + + + 3 + 1190398778006402 + 1.0 + 1 + 2560 + 1600 + 798334 + + + 1189025363000 + + true + a5ce2e36b9df7d3cb081511c72e73926 + + true + 0 + + Aqua Graphite.jpg + Gray + + + + + + sample + + + + http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/tag/tag + 2007-09-05T20:49:24.000Z + + tag + tag + + + + sample + http://picasaweb.google.com/sample.user + + + + http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/tag/test + 2007-09-05T20:49:24.000Z + + test + test + + + + sample + http://picasaweb.google.com/sample.user + + +""" + +CODE_SEARCH_FEED = """ + +http://www.google.com/codesearch/feeds/search?q=malloc 
+2007-12-19T16:08:04Z +Google Code Search +Google Code Search +2530000 +1 + +Google Code Search + +http://www.google.com/codesearch + + + + + +http://www.google.com/codesearch?hl=en&q=+malloc+show:LDjwp-Iqc7U:84hEYaYsZk8:xDGReDhvNi0&sa=N&ct=rx&cd=1&cs_p=http://www.gnu.org&cs_f=software/autoconf/manual/autoconf-2.60/autoconf.html-002&cs_p=http://www.gnu.org&cs_f=software/autoconf/manual/autoconf-2.60/autoconf.html-002#first2007-12-19T16:08:04ZCode owned by external author.software/autoconf/manual/autoconf-2.60/autoconf.html<pre> 8: void *<b>malloc</b> (); + + +</pre><pre> #undef <b>malloc</b> +</pre><pre> void *<b>malloc</b> (); + +</pre><pre> rpl_<b>malloc</b> (size_t n) +</pre><pre> return <b>malloc</b> (n); + +</pre> +http://www.google.com/codesearch?hl=en&q=+malloc+show:h4hfh-fV-jI:niBq_bwWZNs:H0OhClf0HWQ&sa=N&ct=rx&cd=2&cs_p=ftp://ftp.gnu.org/gnu/guile/guile-1.6.8.tar.gz&cs_f=guile-1.6.8/libguile/mallocs.c&cs_p=ftp://ftp.gnu.org/gnu/guile/guile-1.6.8.tar.gz&cs_f=guile-1.6.8/libguile/mallocs.c#first2007-12-19T16:08:04ZCode owned by external author.guile-1.6.8/libguile/mallocs.c<pre> 86: { + scm_t_bits mem = n ? (scm_t_bits) <b>malloc</b> (n) : 0; + if (n &amp;&amp; !mem) + +</pre><pre>#include &lt;<b>malloc</b>.h&gt; +</pre><pre>scm_t_bits scm_tc16_<b>malloc</b>; + +</pre><pre><b>malloc</b>_free (SCM ptr) +</pre><pre><b>malloc</b>_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED) + +</pre><pre> scm_puts(&quot;#&lt;<b>malloc</b> &quot;, port); +</pre><pre> scm_t_bits mem = n ? 
(scm_t_bits) <b>malloc</b> (n) : 0; + +</pre><pre> SCM_RETURN_NEWSMOB (scm_tc16_<b>malloc</b>, mem); +</pre><pre> scm_tc16_<b>malloc</b> = scm_make_smob_type (&quot;<b>malloc</b>&quot;, 0); + +</pre><pre> scm_set_smob_free (scm_tc16_<b>malloc</b>, <b>malloc</b>_free); +</pre>GPL + +http://www.google.com/codesearch?hl=en&q=+malloc+show:9wyZUG-N_30:7_dFxoC1ZrY:C0_iYbFj90M&sa=N&ct=rx&cd=3&cs_p=http://ftp.gnu.org/gnu/bash/bash-3.0.tar.gz&cs_f=bash-3.0/lib/malloc/alloca.c&cs_p=http://ftp.gnu.org/gnu/bash/bash-3.0.tar.gz&cs_f=bash-3.0/lib/malloc/alloca.c#first2007-12-19T16:08:04ZCode owned by external author.bash-3.0/lib/malloc/alloca.c<pre> 78: #ifndef emacs + #define <b>malloc</b> x<b>malloc</b> + extern pointer x<b>malloc</b> (); + +</pre><pre> <b>malloc</b>. The Emacs executable needs alloca to call x<b>malloc</b>, because +</pre><pre> ordinary <b>malloc</b> isn&#39;t protected from input signals. On the other + +</pre><pre> hand, the utilities in lib-src need alloca to call <b>malloc</b>; some of +</pre><pre> them are very simple, and don&#39;t have an x<b>malloc</b> routine. + +</pre><pre> Callers below should use <b>malloc</b>. 
*/ +</pre><pre>#define <b>malloc</b> x<b>malloc</b> + +</pre><pre>extern pointer x<b>malloc</b> (); +</pre><pre> It is very important that sizeof(header) agree with <b>malloc</b> + +</pre><pre> register pointer new = <b>malloc</b> (sizeof (header) + size); +</pre>GPL +http://www.google.com/codesearch?hl=en&q=+malloc+show:uhVCKyPcT6k:8juMxxzmUJw:H7_IDsTB2L4&sa=N&ct=rx&cd=4&cs_p=http://ftp.mozilla.org/pub/mozilla.org/mozilla/releases/mozilla1.7b/src/mozilla-source-1.7b-source.tar.bz2&cs_f=mozilla/xpcom/build/malloc.c&cs_p=http://ftp.mozilla.org/pub/mozilla.org/mozilla/releases/mozilla1.7b/src/mozilla-source-1.7b-source.tar.bz2&cs_f=mozilla/xpcom/build/malloc.c#first2007-12-19T16:08:04ZCode owned by external author.mozilla/xpcom/build/malloc.c<pre> 54: http://gee.cs.oswego.edu/dl/html/<b>malloc</b>.html + + You may already by default be using a c library containing a <b>malloc</b> + +</pre><pre>/* ---------- To make a <b>malloc</b>.h, start cutting here ------------ */ +</pre><pre> Note: There may be an updated version of this <b>malloc</b> obtainable at + +</pre><pre> ftp://gee.cs.oswego.edu/pub/misc/<b>malloc</b>.c +</pre><pre>* Why use this <b>malloc</b>? + +</pre><pre> most tunable <b>malloc</b> ever written. However it is among the fastest +</pre><pre> allocator for <b>malloc</b>-intensive programs. 
+ +</pre><pre> http://gee.cs.oswego.edu/dl/html/<b>malloc</b>.html +</pre><pre> You may already by default be using a c library containing a <b>malloc</b> + +</pre><pre> that is somehow based on some version of this <b>malloc</b> (for example in +</pre>Mozilla +http://www.google.com/codesearch?hl=en&q=+malloc+show:4n1P2HVOISs:Ybbpph0wR2M:OhIN_sDrG0U&sa=N&ct=rx&cd=5&cs_p=http://regexps.srparish.net/src/hackerlab/hackerlab-1.0pre2.tar.gz&cs_f=hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh&cs_p=http://regexps.srparish.net/src/hackerlab/hackerlab-1.0pre2.tar.gz&cs_f=hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh#first2007-12-19T16:08:04ZCode owned by external author.hackerlab-1.0pre2/src/hackerlab/tests/mem-tests/unit-must-malloc.sh<pre> 11: echo ================ unit-must-<b>malloc</b> tests ================ + ./unit-must-<b>malloc</b> + echo ...passed + +</pre><pre># tag: Tom Lord Tue Dec 4 14:54:29 2001 (mem-tests/unit-must-<b>malloc</b>.sh) +</pre><pre>echo ================ unit-must-<b>malloc</b> tests ================ + +</pre><pre>./unit-must-<b>malloc</b> +</pre>GPL +http://www.google.com/codesearch?hl=en&q=+malloc+show:GzkwiWG266M:ykuz3bG00ws:2sTvVSif08g&sa=N&ct=rx&cd=6&cs_p=http://ftp.gnu.org/gnu/tar/tar-1.14.tar.bz2&cs_f=tar-1.14/lib/malloc.c&cs_p=http://ftp.gnu.org/gnu/tar/tar-1.14.tar.bz2&cs_f=tar-1.14/lib/malloc.c#first2007-12-19T16:08:04ZCode owned by external author.tar-1.14/lib/malloc.c<pre> 22: #endif + #undef <b>malloc</b> + + +</pre><pre>/* Work around bug on some systems where <b>malloc</b> (0) fails. 
+</pre><pre>#undef <b>malloc</b> + +</pre><pre>rpl_<b>malloc</b> (size_t n) +</pre><pre> return <b>malloc</b> (n); + +</pre>GPL +http://www.google.com/codesearch?hl=en&q=+malloc+show:o_TFIeBY6dY:ktI_dt8wPao:AI03BD1Dz0Y&sa=N&ct=rx&cd=7&cs_p=http://ftp.gnu.org/gnu/tar/tar-1.16.1.tar.gz&cs_f=tar-1.16.1/lib/malloc.c&cs_p=http://ftp.gnu.org/gnu/tar/tar-1.16.1.tar.gz&cs_f=tar-1.16.1/lib/malloc.c#first2007-12-19T16:08:04ZCode owned by external author.tar-1.16.1/lib/malloc.c<pre> 21: #include &lt;config.h&gt; + #undef <b>malloc</b> + + +</pre><pre>/* <b>malloc</b>() function that is glibc compatible. +</pre><pre>#undef <b>malloc</b> + +</pre><pre>rpl_<b>malloc</b> (size_t n) +</pre><pre> return <b>malloc</b> (n); + +</pre>GPL +http://www.google.com/codesearch?hl=en&q=+malloc+show:_ibw-VLkMoI:jBOtIJSmFd4:-0NUEVeCwfY&sa=N&ct=rx&cd=8&cs_p=http://freshmeat.net/redir/uclibc/20616/url_bz2/uClibc-0.9.28.1.tar.bz2&cs_f=uClibc-0.9.29/include/malloc.h&cs_p=http://freshmeat.net/redir/uclibc/20616/url_bz2/uClibc-0.9.28.1.tar.bz2&cs_f=uClibc-0.9.29/include/malloc.h#first2007-12-19T16:08:04ZCode owned by external author.uClibc-0.9.29/include/malloc.h<pre> 1: /* Prototypes and definition for <b>malloc</b> implementation. + Copyright (C) 1996, 1997, 1999, 2000 Free Software Foundation, Inc. + +</pre><pre>/* Prototypes and definition for <b>malloc</b> implementation. +</pre><pre> `pt<b>malloc</b>&#39;, a <b>malloc</b> implementation for multiple threads without + +</pre><pre> See the files `pt<b>malloc</b>.c&#39; or `COPYRIGHT&#39; for copying conditions. +</pre><pre> This work is mainly derived from <b>malloc</b>-2.6.4 by Doug Lea + +</pre><pre> ftp://g.oswego.edu/pub/misc/<b>malloc</b>.c +</pre><pre> `pt<b>malloc</b>.c&#39;. 
+ +</pre><pre># define __<b>malloc</b>_ptr_t void * +</pre><pre># define __<b>malloc</b>_ptr_t char * + +</pre><pre># define __<b>malloc</b>_size_t size_t +</pre>LGPL +http://www.google.com/codesearch?hl=en&q=+malloc+show:F6qHcZ9vefo:bTX7o9gKfks:hECF4r_eKC0&sa=N&ct=rx&cd=9&cs_p=http://ftp.gnu.org/gnu/glibc/glibc-2.0.1.tar.gz&cs_f=glibc-2.0.1/hurd/hurdmalloc.h&cs_p=http://ftp.gnu.org/gnu/glibc/glibc-2.0.1.tar.gz&cs_f=glibc-2.0.1/hurd/hurdmalloc.h#first2007-12-19T16:08:04ZCode owned by external author.glibc-2.0.1/hurd/hurdmalloc.h<pre> 15: #define <b>malloc</b> _hurd_<b>malloc</b> + #define realloc _hurd_realloc + +</pre><pre> All hurd-internal code which uses <b>malloc</b> et al includes this file so it +</pre><pre> will use the internal <b>malloc</b> routines _hurd_{<b>malloc</b>,realloc,free} + +</pre><pre> of <b>malloc</b> et al is the unixoid one using sbrk. +</pre><pre>extern void *_hurd_<b>malloc</b> (size_t); + +</pre><pre>#define <b>malloc</b> _hurd_<b>malloc</b> +</pre>GPL + +http://www.google.com/codesearch?hl=en&q=+malloc+show:CHUvHYzyLc8:pdcAfzDA6lY:wjofHuNLTHg&sa=N&ct=rx&cd=10&cs_p=ftp://apache.mirrors.pair.com/httpd/httpd-2.2.4.tar.bz2&cs_f=httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h&cs_p=ftp://apache.mirrors.pair.com/httpd/httpd-2.2.4.tar.bz2&cs_f=httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h#first2007-12-19T16:08:04ZCode owned by external author.httpd-2.2.4/srclib/apr/include/arch/netware/apr_private.h<pre> 173: #undef <b>malloc</b> + #define <b>malloc</b>(x) library_<b>malloc</b>(gLibHandle,x) + +</pre><pre>/* Redefine <b>malloc</b> to use the library <b>malloc</b> call so +</pre><pre>#undef <b>malloc</b> + +</pre><pre>#define <b>malloc</b>(x) library_<b>malloc</b>(gLibHandle,x) +</pre>Apache + +""" + +YOUTUBE_VIDEO_FEED = """http://gdata.youtube.com/feeds/api/standardfeeds/top_rated2008-05-14T02:24:07.000-07:00Top Ratedhttp://www.youtube.com/img/pic_youtubelogo_123x63.gifYouTubehttp://www.youtube.com/YouTube data API100125 
+http://gdata.youtube.com/feeds/api/videos/C71ypXYGho82008-03-20T10:17:27.000-07:002008-05-14T04:26:37.000-07:00Me odeio por te amar - KARYN GARCIAhttp://www.karyngarcia.com.brTvKarynGarciahttp://gdata.youtube.com/feeds/api/users/tvkaryngarciaMe odeio por te amar - KARYN GARCIAhttp://www.karyngarcia.com.bramar, boyfriend, garcia, karyn, me, odeio, por, teMusictest111test222 +http://gdata.youtube.com/feeds/api/videos/gsVaTyb1tBw2008-02-15T04:31:45.000-08:002008-05-14T05:09:42.000-07:00extreme helmet cam Kani, Keil and Patotrimmedperaltamagichttp://gdata.youtube.com/feeds/api/users/peraltamagicextreme helmet cam Kani, Keil and Patotrimmedalcala, cam, campillo, dirt, extreme, helmet, kani, patoSports +""" + +YOUTUBE_ENTRY_PRIVATE = """ + + http://gdata.youtube.com/feeds/videos/UMFI1hdm96E + 2007-01-07T01:50:15.000Z + 2007-01-07T01:50:15.000Z + + + + + + + + + "Crazy (Gnarles Barkley)" - Acoustic Cover + <div style="color: #000000;font-family: + Arial, Helvetica, sans-serif; font-size:12px; font-size: 12px; + width: 555px;"><table cellspacing="0" cellpadding="0" + border="0"><tbody><tr><td width="140" + valign="top" rowspan="2"><div style="border: 1px solid + #999999; margin: 0px 10px 5px 0px;"><a + href="http://www.youtube.com/watch?v=UMFI1hdm96E"><img + alt="" + src="http://img.youtube.com/vi/UMFI1hdm96E/2.jpg"></a></div></td> + <td width="256" valign="top"><div style="font-size: + 12px; font-weight: bold;"><a style="font-size: 15px; + font-weight: bold; font-decoration: none;" + href="http://www.youtube.com/watch?v=UMFI1hdm96E">&quot;Crazy + (Gnarles Barkley)&quot; - Acoustic Cover</a> + <br></div> <div style="font-size: 12px; margin: + 3px 0px;"><span>Gnarles Barkley acoustic cover + http://www.myspace.com/davidchoimusic</span></div></td> + <td style="font-size: 11px; line-height: 1.4em; padding-left: + 20px; padding-top: 1px;" width="146" + valign="top"><div><span style="color: #666666; + font-size: 11px;">From:</span> <a + 
href="http://www.youtube.com/profile?user=davidchoimusic">davidchoimusic</a></div> + <div><span style="color: #666666; font-size: + 11px;">Views:</span> 113321</div> <div + style="white-space: nowrap;text-align: left"><img + style="border: 0px none; margin: 0px; padding: 0px; + vertical-align: middle; font-size: 11px;" align="top" alt="" + src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"> + <img style="border: 0px none; margin: 0px; padding: 0px; + vertical-align: middle; font-size: 11px;" align="top" alt="" + src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"> + <img style="border: 0px none; margin: 0px; padding: 0px; + vertical-align: middle; font-size: 11px;" align="top" alt="" + src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"> + <img style="border: 0px none; margin: 0px; padding: 0px; + vertical-align: middle; font-size: 11px;" align="top" alt="" + src="http://gdata.youtube.com/static/images/icn_star_full_11x11.gif"> + <img style="border: 0px none; margin: 0px; padding: 0px; + vertical-align: middle; font-size: 11px;" align="top" alt="" + src="http://gdata.youtube.com/static/images/icn_star_half_11x11.gif"></div> + <div style="font-size: 11px;">1005 <span style="color: + #666666; font-size: + 11px;">ratings</span></div></td></tr> + <tr><td><span style="color: #666666; font-size: + 11px;">Time:</span> <span style="color: #000000; + font-size: 11px; font-weight: + bold;">04:15</span></td> <td style="font-size: + 11px; padding-left: 20px;"><span style="color: #666666; + font-size: 11px;">More in</span> <a + href="http://www.youtube.com/categories_portal?c=10">Music</a></td></tr></tbody></table></div> + + + + + + davidchoimusic + http://gdata.youtube.com/feeds/users/davidchoimusic + + + "Crazy (Gnarles Barkley)" - Acoustic Cover + Gnarles Barkley acoustic cover http://www.myspace.com/davidchoimusic + music, singing, gnarls, barkley, acoustic, cover + + + Music + + DeveloperTag1 + + + + + + + + + + + + + 
37.398529052734375 -122.0635986328125 + + + + + + + + yes + + The content of this video may violate the terms of use. + +""" + +YOUTUBE_COMMENT_FEED = """ +http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments2008-05-19T21:45:45.261ZCommentshttp://www.youtube.com/img/pic_youtubelogo_123x63.gifYouTubehttp://www.youtube.com/YouTube data API0125 + + http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/91F809A3DE2EB81B + 2008-02-22T15:27:15.000-08:002008-02-22T15:27:15.000-08:00 + + test66 + test66 + + + + apitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmann + + + http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/A261AEEFD23674AA + 2008-02-22T15:27:01.000-08:002008-02-22T15:27:01.000-08:00 + + test333 + test333 + + + + apitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmann + + + http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments/0DCF1E3531B3FF85 + 2008-02-22T15:11:06.000-08:002008-02-22T15:11:06.000-08:00 + + test2 + test2 + + + + apitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmann + +""" + +YOUTUBE_PLAYLIST_FEED = """ + + http://gdata.youtube.com/feeds/users/andyland74/playlists?start-index=1&max-results=25 + 2008-02-26T00:26:15.635Z + + andyland74's Playlists + http://www.youtube.com/img/pic_youtubelogo_123x63.gif + + + + + + andyland74 + http://gdata.youtube.com/feeds/users/andyland74 + + YouTube data API + 1 + 1 + 25 + + My new playlist Description + + http://gdata.youtube.com/feeds/users/andyland74/playlists/8BCDD04DE8F771B2 + 2007-11-04T17:30:27.000-08:00 + 2008-02-22T09:55:14.000-08:00 + + My New Playlist Title + My new playlist Description + + + + + andyland74 + http://gdata.youtube.com/feeds/users/andyland74 + + +""" + +YOUTUBE_PLAYLIST_VIDEO_FEED = """http://gdata.youtube.com/feeds/api/playlists/BCB3BB96DF51B5052008-05-16T12:03:17.000-07:00Test PlaylistTest playlist 1http://www.youtube.com/img/pic_youtubelogo_123x63.gifgdpythonhttp://gdata.youtube.com/feeds/api/users/gdpythonYouTube 
data API1125Test PlaylistTest playlist 1http://gdata.youtube.com/feeds/api/playlists/BCB3BB96DF51B505/B0F29389E537F8882008-05-16T20:54:08.520ZUploading YouTube Videos with the PHP Client LibraryJochen Hartmann demonstrates the basics of how to use the PHP Client Library with the YouTube Data API. + +PHP Developer's Guide: +http://code.google.com/apis/youtube/developers_guide_php.html + +Other documentation: +http://code.google.com/apis/youtube/GoogleDevelopershttp://gdata.youtube.com/feeds/api/users/googledevelopersUploading YouTube Videos with the PHP Client LibraryJochen Hartmann demonstrates the basics of how to use the PHP Client Library with the YouTube Data API. + +PHP Developer's Guide: +http://code.google.com/apis/youtube/developers_guide_php.html + +Other documentation: +http://code.google.com/apis/youtube/api, data, demo, php, screencast, tutorial, uploading, walkthrough, youtubeEducationundefined1""" + +YOUTUBE_SUBSCRIPTION_FEED = """ + + http://gdata.youtube.com/feeds/users/andyland74/subscriptions?start-index=1&max-results=25 + 2008-02-26T00:26:15.635Z + + andyland74's Subscriptions + http://www.youtube.com/img/pic_youtubelogo_123x63.gif + + + + + + andyland74 + http://gdata.youtube.com/feeds/users/andyland74 + + YouTube data API + 1 + 1 + 25 + + http://gdata.youtube.com/feeds/users/andyland74/subscriptions/d411759045e2ad8c + 2007-11-04T17:30:27.000-08:00 + 2008-02-22T09:55:14.000-08:00 + + + Videos published by : NBC + + + + + andyland74 + http://gdata.youtube.com/feeds/users/andyland74 + + NBC + + +""" + +YOUTUBE_VIDEO_RESPONSE_FEED = """ + + http://gdata.youtube.com/feeds/videos/2c3q9K4cHzY/responses2008-05-19T22:37:34.076ZVideos responses to 'Giant NES controller coffee table'http://www.youtube.com/img/pic_youtubelogo_123x63.gifYouTubehttp://www.youtube.com/YouTube data API8125 + + http://gdata.youtube.com/feeds/videos/7b9EnRI9VbY2008-03-11T19:08:53.000-07:002008-05-18T21:33:10.000-07:00 + + + + + + + + + + + + Catnip Partysnipped + + + + + 
PismoBeachhttp://gdata.youtube.com/feeds/users/pismobeach + + Catnip Party + Uncle, Hillary, Hankette, and B4 all but overdose on the patioBrattman, cat, catmint, catnip, cats, chat, drug, gato, gatto, kat, kato, katt, Katze, kedi, kissa, OD, overdose, party, sex, Uncle + + Animals + + + + + + + + + + + + + + + + +""" + + +YOUTUBE_PROFILE = """ + + http://gdata.youtube.com/feeds/users/andyland74 + 2006-10-16T00:09:45.000-07:00 + 2008-02-26T11:48:21.000-08:00 + + + andyland74 Channel + + + + andyland74 + http://gdata.youtube.com/feeds/users/andyland74 + + 33 + andyland74 + andy + example + Catch-22 + m + Google + Testing YouTube APIs + Somewhere + US + Aqua Teen Hungerforce + Elliott Smith + Technical Writer + University of North Carolina + + + + + + + + +""" + +YOUTUBE_CONTACTS_FEED = """ + http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts2008-05-16T19:24:34.916Zapitestjhartmann's Contactshttp://www.youtube.com/img/pic_youtubelogo_123x63.gifapitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmannYouTube data API2125 + + http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts/test898990902008-02-04T11:27:54.000-08:002008-05-16T19:24:34.916Ztest89899090apitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmanntest89899090requested + + http://gdata.youtube.com/feeds/users/apitestjhartmann/contacts/testjfisher2008-02-26T14:13:03.000-08:002008-05-16T19:24:34.916Ztestjfisherapitestjhartmannhttp://gdata.youtube.com/feeds/users/apitestjhartmanntestjfisherpending +""" + +NEW_CONTACT = """ + + http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base/8411573 + 2008-02-28T18:47:02.303Z + + Fitzgerald + Notes + + + + + (206)555-1212 + 456-123-2133 + (206)555-1213 + + + + + + + 1600 Amphitheatre Pkwy Mountain View +""" + +CONTACTS_FEED = """ + + http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base + 2008-03-05T12:36:38.836Z + + Contacts + + + + + + Elizabeth Bennet + liz@gmail.com + + + Contacts + + 1 + 1 + 25 + + + 
http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base/c9012de + + 2008-03-05T12:36:38.835Z + + Fitzgerald + + + + + + 456 + + + + +""" + + +CONTACT_GROUPS_FEED = """ + + jo@gmail.com + 2008-05-21T21:11:25.237Z + + Jo's Contact Groups + + + + + + + Jo Brown + jo@gmail.com + + Contacts + 3 + 1 + 25 + + http://google.com/m8/feeds/groups/jo%40gmail.com/base/270f + 2008-05-14T13:10:19.070Z + + joggers + joggers + + + +""" + +CONTACT_GROUP_ENTRY = """ + + + http://www.google.com/feeds/groups/jo%40gmail.com/base/1234 + 2005-01-18T21:00:00Z + 2006-01-01T00:00:00Z + Salsa group + Salsa group + + + + Very nice people. + +""" + +BLOG_ENTRY = """ + tag:blogger.com,1999:blog-blogID.post-postID + 2006-08-02T18:44:43.089-07:00 + 2006-11-08T18:10:23.020-08:00 + Lizzy's Diary + Being the journal of Elizabeth Bennet + + + + + + + + + + + + Elizabeth Bennet + liz@gmail.com + +""" + +BLOG_POST = """ + Marriage! + +
    +

    Mr. Darcy has proposed marriage to me!

    +

    He is the last man on earth I would ever desire to marry.

    +

    Whatever shall I do?

    +
    +
    + + Elizabeth Bennet + liz@gmail.com + +
    """ + +BLOG_POSTS_FEED = """ + tag:blogger.com,1999:blog-blogID + 2006-11-08T18:10:23.020-08:00 + Lizzy's Diary + + + + + + + + Elizabeth Bennet + liz@gmail.com + + Blogger + + tag:blogger.com,1999:blog-blogID.post-postID + 2006-11-08T18:10:00.000-08:00 + 2006-11-08T18:10:14.954-08:00 + Quite disagreeable + <p>I met Mr. Bingley's friend Mr. Darcy + this evening. I found him quite disagreeable.</p> + + + + + + + + Elizabeth Bennet + liz@gmail.com + + +""" + +BLOG_COMMENTS_FEED = """ + tag:blogger.com,1999:blog-blogID.postpostID..comments + 2007-04-04T21:56:29.803-07:00 + My Blog : Time to relax + + + + + Blog Author name + + Blogger + 1 + 1 + + tag:blogger.com,1999:blog-blogID.post-commentID + 2007-04-04T21:56:00.000-07:00 + 2007-04-04T21:56:29.803-07:00 + This is my first comment + This is my first comment + + + + + Blog Author name + + + +""" + + +SITES_FEED = """ + https://www.google.com/webmasters/tools/feeds/sites + Sites + 1 + + + + + 2008-10-02T07:26:51.833Z + + http://www.example.com + http://www.example.com + + + + 2007-11-17T18:27:32.543Z + + + + true + 2008-09-14T08:59:28.000 + US + none + normal + true + false + + + 456456-google.html + +""" + + +SITEMAPS_FEED = """ + http://www.example.com + http://www.example.com/ + 2006-11-17T18:27:32.543Z + + + + HTML + WAP + + + Value1 + Value2 + Value3 + + + http://www.example.com/sitemap-index.xml + http://www.example.com/sitemap-index.xml + + 2006-11-17T18:27:32.543Z + WEB + StatusValue + 2006-11-18T19:27:32.543Z + 102 + + + http://www.example.com/mobile/sitemap-index.xml + http://www.example.com/mobile/sitemap-index.xml + + 2006-11-17T18:27:32.543Z + StatusValue + 2006-11-18T19:27:32.543Z + 102 + HTML + + + http://www.example.com/news/sitemap-index.xml + http://www.example.com/news/sitemap-index.xml + + 2006-11-17T18:27:32.543Z + StatusValue + 2006-11-18T19:27:32.543Z + 102 + LabelValue + +""" diff --git a/gdata.py-1.2.3/src/gdata/tlslite/BaseDB.py b/gdata.py-1.2.3/src/gdata/tlslite/BaseDB.py new file mode 
100755 index 0000000..ca8dff6 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/BaseDB.py @@ -0,0 +1,120 @@ +"""Base class for SharedKeyDB and VerifierDB.""" + +import anydbm +import thread + +class BaseDB: + def __init__(self, filename, type): + self.type = type + self.filename = filename + if self.filename: + self.db = None + else: + self.db = {} + self.lock = thread.allocate_lock() + + def create(self): + """Create a new on-disk database. + + @raise anydbm.error: If there's a problem creating the database. + """ + if self.filename: + self.db = anydbm.open(self.filename, "n") #raises anydbm.error + self.db["--Reserved--type"] = self.type + self.db.sync() + else: + self.db = {} + + def open(self): + """Open a pre-existing on-disk database. + + @raise anydbm.error: If there's a problem opening the database. + @raise ValueError: If the database is not of the right type. + """ + if not self.filename: + raise ValueError("Can only open on-disk databases") + self.db = anydbm.open(self.filename, "w") #raises anydbm.error + try: + if self.db["--Reserved--type"] != self.type: + raise ValueError("Not a %s database" % self.type) + except KeyError: + raise ValueError("Not a recognized database") + + def __getitem__(self, username): + if self.db == None: + raise AssertionError("DB not open") + + self.lock.acquire() + try: + valueStr = self.db[username] + finally: + self.lock.release() + + return self._getItem(username, valueStr) + + def __setitem__(self, username, value): + if self.db == None: + raise AssertionError("DB not open") + + valueStr = self._setItem(username, value) + + self.lock.acquire() + try: + self.db[username] = valueStr + if self.filename: + self.db.sync() + finally: + self.lock.release() + + def __delitem__(self, username): + if self.db == None: + raise AssertionError("DB not open") + + self.lock.acquire() + try: + del(self.db[username]) + if self.filename: + self.db.sync() + finally: + self.lock.release() + + def __contains__(self, username): + """Check 
if the database contains the specified username. + + @type username: str + @param username: The username to check for. + + @rtype: bool + @return: True if the database contains the username, False + otherwise. + + """ + if self.db == None: + raise AssertionError("DB not open") + + self.lock.acquire() + try: + return self.db.has_key(username) + finally: + self.lock.release() + + def check(self, username, param): + value = self.__getitem__(username) + return self._checkItem(value, username, param) + + def keys(self): + """Return a list of usernames in the database. + + @rtype: list + @return: The usernames in the database. + """ + if self.db == None: + raise AssertionError("DB not open") + + self.lock.acquire() + try: + usernames = self.db.keys() + finally: + self.lock.release() + usernames = [u for u in usernames if not u.startswith("--Reserved--")] + return usernames \ No newline at end of file diff --git a/gdata.py-1.2.3/src/gdata/tlslite/Checker.py b/gdata.py-1.2.3/src/gdata/tlslite/Checker.py new file mode 100755 index 0000000..f978697 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/Checker.py @@ -0,0 +1,146 @@ +"""Class for post-handshake certificate checking.""" + +from utils.cryptomath import hashAndBase64 +from X509 import X509 +from X509CertChain import X509CertChain +from errors import * + + +class Checker: + """This class is passed to a handshake function to check the other + party's certificate chain. + + If a handshake function completes successfully, but the Checker + judges the other party's certificate chain to be missing or + inadequate, a subclass of + L{tlslite.errors.TLSAuthenticationError} will be raised. + + Currently, the Checker can check either an X.509 or a cryptoID + chain (for the latter, cryptoIDlib must be installed). + """ + + def __init__(self, cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + checkResumedSession=False): + """Create a new Checker instance. 
+ + You must pass in one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + @type cryptoID: str + @param cryptoID: A cryptoID which the other party's certificate + chain must match. The cryptoIDlib module must be installed. + Mutually exclusive with all of the 'x509...' arguments. + + @type protocol: str + @param protocol: A cryptoID protocol URI which the other + party's certificate chain must match. Requires the 'cryptoID' + argument. + + @type x509Fingerprint: str + @param x509Fingerprint: A hex-encoded X.509 end-entity + fingerprint which the other party's end-entity certificate must + match. Mutually exclusive with the 'cryptoID' and + 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed. Mutually exclusive with the 'cryptoID' and + 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type checkResumedSession: bool + @param checkResumedSession: If resumed sessions should be + checked. This defaults to False, on the theory that if the + session was checked once, we don't need to bother + re-checking it. 
+ """ + + if cryptoID and (x509Fingerprint or x509TrustList): + raise ValueError() + if x509Fingerprint and x509TrustList: + raise ValueError() + if x509CommonName and not x509TrustList: + raise ValueError() + if protocol and not cryptoID: + raise ValueError() + if cryptoID: + import cryptoIDlib #So we raise an error here + if x509TrustList: + import cryptlib_py #So we raise an error here + self.cryptoID = cryptoID + self.protocol = protocol + self.x509Fingerprint = x509Fingerprint + self.x509TrustList = x509TrustList + self.x509CommonName = x509CommonName + self.checkResumedSession = checkResumedSession + + def __call__(self, connection): + """Check a TLSConnection. + + When a Checker is passed to a handshake function, this will + be called at the end of the function. + + @type connection: L{tlslite.TLSConnection.TLSConnection} + @param connection: The TLSConnection to examine. + + @raise tlslite.errors.TLSAuthenticationError: If the other + party's certificate chain is missing or bad. 
+ """ + if not self.checkResumedSession and connection.resumed: + return + + if self.cryptoID or self.x509Fingerprint or self.x509TrustList: + if connection._client: + chain = connection.session.serverCertChain + else: + chain = connection.session.clientCertChain + + if self.x509Fingerprint or self.x509TrustList: + if isinstance(chain, X509CertChain): + if self.x509Fingerprint: + if chain.getFingerprint() != self.x509Fingerprint: + raise TLSFingerprintError(\ + "X.509 fingerprint mismatch: %s, %s" % \ + (chain.getFingerprint(), self.x509Fingerprint)) + else: #self.x509TrustList + if not chain.validate(self.x509TrustList): + raise TLSValidationError("X.509 validation failure") + if self.x509CommonName and \ + (chain.getCommonName() != self.x509CommonName): + raise TLSAuthorizationError(\ + "X.509 Common Name mismatch: %s, %s" % \ + (chain.getCommonName(), self.x509CommonName)) + elif chain: + raise TLSAuthenticationTypeError() + else: + raise TLSNoAuthenticationError() + elif self.cryptoID: + import cryptoIDlib.CertChain + if isinstance(chain, cryptoIDlib.CertChain.CertChain): + if chain.cryptoID != self.cryptoID: + raise TLSFingerprintError(\ + "cryptoID mismatch: %s, %s" % \ + (chain.cryptoID, self.cryptoID)) + if self.protocol: + if not chain.checkProtocol(self.protocol): + raise TLSAuthorizationError(\ + "cryptoID protocol mismatch") + if not chain.validate(): + raise TLSValidationError("cryptoID validation failure") + elif chain: + raise TLSAuthenticationTypeError() + else: + raise TLSNoAuthenticationError() + diff --git a/gdata.py-1.2.3/src/gdata/tlslite/FileObject.py b/gdata.py-1.2.3/src/gdata/tlslite/FileObject.py new file mode 100755 index 0000000..6ee02b2 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/FileObject.py @@ -0,0 +1,220 @@ +"""Class returned by TLSConnection.makefile().""" + +class FileObject: + """This class provides a file object interface to a + L{tlslite.TLSConnection.TLSConnection}. 
+ + Call makefile() on a TLSConnection to create a FileObject instance. + + This class was copied, with minor modifications, from the + _fileobject class in socket.py. Note that fileno() is not + implemented.""" + + default_bufsize = 16384 #TREV: changed from 8192 + + def __init__(self, sock, mode='rb', bufsize=-1): + self._sock = sock + self.mode = mode # Not actually used in this version + if bufsize < 0: + bufsize = self.default_bufsize + self.bufsize = bufsize + self.softspace = False + if bufsize == 0: + self._rbufsize = 1 + elif bufsize == 1: + self._rbufsize = self.default_bufsize + else: + self._rbufsize = bufsize + self._wbufsize = bufsize + self._rbuf = "" # A string + self._wbuf = [] # A list of strings + + def _getclosed(self): + return self._sock is not None + closed = property(_getclosed, doc="True if the file is closed") + + def close(self): + try: + if self._sock: + for result in self._sock._decrefAsync(): #TREV + pass + finally: + self._sock = None + + def __del__(self): + try: + self.close() + except: + # close() may fail if __init__ didn't complete + pass + + def flush(self): + if self._wbuf: + buffer = "".join(self._wbuf) + self._wbuf = [] + self._sock.sendall(buffer) + + #def fileno(self): + # raise NotImplementedError() #TREV + + def write(self, data): + data = str(data) # XXX Should really reject non-string non-buffers + if not data: + return + self._wbuf.append(data) + if (self._wbufsize == 0 or + self._wbufsize == 1 and '\n' in data or + self._get_wbuf_len() >= self._wbufsize): + self.flush() + + def writelines(self, list): + # XXX We could do better here for very long lists + # XXX Should really reject non-string non-buffers + self._wbuf.extend(filter(None, map(str, list))) + if (self._wbufsize <= 1 or + self._get_wbuf_len() >= self._wbufsize): + self.flush() + + def _get_wbuf_len(self): + buf_len = 0 + for x in self._wbuf: + buf_len += len(x) + return buf_len + + def read(self, size=-1): + data = self._rbuf + if size < 0: + # Read until 
EOF + buffers = [] + if data: + buffers.append(data) + self._rbuf = "" + if self._rbufsize <= 1: + recv_size = self.default_bufsize + else: + recv_size = self._rbufsize + while True: + data = self._sock.recv(recv_size) + if not data: + break + buffers.append(data) + return "".join(buffers) + else: + # Read until size bytes or EOF seen, whichever comes first + buf_len = len(data) + if buf_len >= size: + self._rbuf = data[size:] + return data[:size] + buffers = [] + if data: + buffers.append(data) + self._rbuf = "" + while True: + left = size - buf_len + recv_size = max(self._rbufsize, left) + data = self._sock.recv(recv_size) + if not data: + break + buffers.append(data) + n = len(data) + if n >= left: + self._rbuf = data[left:] + buffers[-1] = data[:left] + break + buf_len += n + return "".join(buffers) + + def readline(self, size=-1): + data = self._rbuf + if size < 0: + # Read until \n or EOF, whichever comes first + if self._rbufsize <= 1: + # Speed up unbuffered case + assert data == "" + buffers = [] + recv = self._sock.recv + while data != "\n": + data = recv(1) + if not data: + break + buffers.append(data) + return "".join(buffers) + nl = data.find('\n') + if nl >= 0: + nl += 1 + self._rbuf = data[nl:] + return data[:nl] + buffers = [] + if data: + buffers.append(data) + self._rbuf = "" + while True: + data = self._sock.recv(self._rbufsize) + if not data: + break + buffers.append(data) + nl = data.find('\n') + if nl >= 0: + nl += 1 + self._rbuf = data[nl:] + buffers[-1] = data[:nl] + break + return "".join(buffers) + else: + # Read until size bytes or \n or EOF seen, whichever comes first + nl = data.find('\n', 0, size) + if nl >= 0: + nl += 1 + self._rbuf = data[nl:] + return data[:nl] + buf_len = len(data) + if buf_len >= size: + self._rbuf = data[size:] + return data[:size] + buffers = [] + if data: + buffers.append(data) + self._rbuf = "" + while True: + data = self._sock.recv(self._rbufsize) + if not data: + break + buffers.append(data) + left = size - 
buf_len + nl = data.find('\n', 0, left) + if nl >= 0: + nl += 1 + self._rbuf = data[nl:] + buffers[-1] = data[:nl] + break + n = len(data) + if n >= left: + self._rbuf = data[left:] + buffers[-1] = data[:left] + break + buf_len += n + return "".join(buffers) + + def readlines(self, sizehint=0): + total = 0 + list = [] + while True: + line = self.readline() + if not line: + break + list.append(line) + total += len(line) + if sizehint and total >= sizehint: + break + return list + + # Iterator protocols + + def __iter__(self): + return self + + def next(self): + line = self.readline() + if not line: + raise StopIteration + return line diff --git a/gdata.py-1.2.3/src/gdata/tlslite/HandshakeSettings.py b/gdata.py-1.2.3/src/gdata/tlslite/HandshakeSettings.py new file mode 100755 index 0000000..c7c3223 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/HandshakeSettings.py @@ -0,0 +1,159 @@ +"""Class for setting handshake parameters.""" + +from constants import CertificateType +from utils import cryptomath +from utils import cipherfactory + +class HandshakeSettings: + """This class encapsulates various parameters that can be used with + a TLS handshake. + @sort: minKeySize, maxKeySize, cipherNames, certificateTypes, + minVersion, maxVersion + + @type minKeySize: int + @ivar minKeySize: The minimum bit length for asymmetric keys. + + If the other party tries to use SRP, RSA, or Diffie-Hellman + parameters smaller than this length, an alert will be + signalled. The default is 1023. + + @type maxKeySize: int + @ivar maxKeySize: The maximum bit length for asymmetric keys. + + If the other party tries to use SRP, RSA, or Diffie-Hellman + parameters larger than this length, an alert will be signalled. + The default is 8193. + + @type cipherNames: list + @ivar cipherNames: The allowed ciphers, in order of preference. + + The allowed values in this list are 'aes256', 'aes128', '3des', and + 'rc4'. 
If these settings are used with a client handshake, they + determine the order of the ciphersuites offered in the ClientHello + message. + + If these settings are used with a server handshake, the server will + choose whichever ciphersuite matches the earliest entry in this + list. + + NOTE: If '3des' is used in this list, but TLS Lite can't find an + add-on library that supports 3DES, then '3des' will be silently + removed. + + The default value is ['aes256', 'aes128', '3des', 'rc4']. + + @type certificateTypes: list + @ivar certificateTypes: The allowed certificate types, in order of + preference. + + The allowed values in this list are 'x509' and 'cryptoID'. This + list is only used with a client handshake. The client will + advertise to the server which certificate types are supported, and + will check that the server uses one of the appropriate types. + + NOTE: If 'cryptoID' is used in this list, but cryptoIDlib is not + installed, then 'cryptoID' will be silently removed. + + @type minVersion: tuple + @ivar minVersion: The minimum allowed SSL/TLS version. + + This variable can be set to (3,0) for SSL 3.0, (3,1) for + TLS 1.0, or (3,2) for TLS 1.1. If the other party wishes to + use a lower version, a protocol_version alert will be signalled. + The default is (3,0). + + @type maxVersion: tuple + @ivar maxVersion: The maximum allowed SSL/TLS version. + + This variable can be set to (3,0) for SSL 3.0, (3,1) for + TLS 1.0, or (3,2) for TLS 1.1. If the other party wishes to + use a higher version, a protocol_version alert will be signalled. + The default is (3,2). (WARNING: Some servers may (improperly) + reject clients which offer support for TLS 1.1. In this case, + try lowering maxVersion to (3,1)). 
+ """ + def __init__(self): + self.minKeySize = 1023 + self.maxKeySize = 8193 + self.cipherNames = ["aes256", "aes128", "3des", "rc4"] + self.cipherImplementations = ["cryptlib", "openssl", "pycrypto", + "python"] + self.certificateTypes = ["x509", "cryptoID"] + self.minVersion = (3,0) + self.maxVersion = (3,2) + + #Filters out options that are not supported + def _filter(self): + other = HandshakeSettings() + other.minKeySize = self.minKeySize + other.maxKeySize = self.maxKeySize + other.cipherNames = self.cipherNames + other.cipherImplementations = self.cipherImplementations + other.certificateTypes = self.certificateTypes + other.minVersion = self.minVersion + other.maxVersion = self.maxVersion + + if not cipherfactory.tripleDESPresent: + other.cipherNames = [e for e in self.cipherNames if e != "3des"] + if len(other.cipherNames)==0: + raise ValueError("No supported ciphers") + + try: + import cryptoIDlib + except ImportError: + other.certificateTypes = [e for e in self.certificateTypes \ + if e != "cryptoID"] + if len(other.certificateTypes)==0: + raise ValueError("No supported certificate types") + + if not cryptomath.cryptlibpyLoaded: + other.cipherImplementations = [e for e in \ + self.cipherImplementations if e != "cryptlib"] + if not cryptomath.m2cryptoLoaded: + other.cipherImplementations = [e for e in \ + other.cipherImplementations if e != "openssl"] + if not cryptomath.pycryptoLoaded: + other.cipherImplementations = [e for e in \ + other.cipherImplementations if e != "pycrypto"] + if len(other.cipherImplementations)==0: + raise ValueError("No supported cipher implementations") + + if other.minKeySize<512: + raise ValueError("minKeySize too small") + if other.minKeySize>16384: + raise ValueError("minKeySize too large") + if other.maxKeySize<512: + raise ValueError("maxKeySize too small") + if other.maxKeySize>16384: + raise ValueError("maxKeySize too large") + for s in other.cipherNames: + if s not in ("aes256", "aes128", "rc4", "3des"): + raise 
ValueError("Unknown cipher name: '%s'" % s) + for s in other.cipherImplementations: + if s not in ("cryptlib", "openssl", "python", "pycrypto"): + raise ValueError("Unknown cipher implementation: '%s'" % s) + for s in other.certificateTypes: + if s not in ("x509", "cryptoID"): + raise ValueError("Unknown certificate type: '%s'" % s) + + if other.minVersion > other.maxVersion: + raise ValueError("Versions set incorrectly") + + if not other.minVersion in ((3,0), (3,1), (3,2)): + raise ValueError("minVersion set incorrectly") + + if not other.maxVersion in ((3,0), (3,1), (3,2)): + raise ValueError("maxVersion set incorrectly") + + return other + + def _getCertificateTypes(self): + l = [] + for ct in self.certificateTypes: + if ct == "x509": + l.append(CertificateType.x509) + elif ct == "cryptoID": + l.append(CertificateType.cryptoID) + else: + raise AssertionError() + return l diff --git a/gdata.py-1.2.3/src/gdata/tlslite/Session.py b/gdata.py-1.2.3/src/gdata/tlslite/Session.py new file mode 100755 index 0000000..a951f45 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/Session.py @@ -0,0 +1,131 @@ +"""Class representing a TLS session.""" + +from utils.compat import * +from mathtls import * +from constants import * + +class Session: + """ + This class represents a TLS session. + + TLS distinguishes between connections and sessions. A new + handshake creates both a connection and a session. Data is + transmitted over the connection. + + The session contains a more permanent record of the handshake. The + session can be inspected to determine handshake results. The + session can also be used to create a new connection through + "session resumption". If the client and server both support this, + they can create a new connection based on an old session without + the overhead of a full handshake. + + The session for a L{tlslite.TLSConnection.TLSConnection} can be + retrieved from the connection's 'session' attribute. 
+ + @type srpUsername: str + @ivar srpUsername: The client's SRP username (or None). + + @type sharedKeyUsername: str + @ivar sharedKeyUsername: The client's shared-key username (or + None). + + @type clientCertChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @ivar clientCertChain: The client's certificate chain (or None). + + @type serverCertChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @ivar serverCertChain: The server's certificate chain (or None). + """ + + def __init__(self): + self.masterSecret = createByteArraySequence([]) + self.sessionID = createByteArraySequence([]) + self.cipherSuite = 0 + self.srpUsername = None + self.sharedKeyUsername = None + self.clientCertChain = None + self.serverCertChain = None + self.resumable = False + self.sharedKey = False + + def _clone(self): + other = Session() + other.masterSecret = self.masterSecret + other.sessionID = self.sessionID + other.cipherSuite = self.cipherSuite + other.srpUsername = self.srpUsername + other.sharedKeyUsername = self.sharedKeyUsername + other.clientCertChain = self.clientCertChain + other.serverCertChain = self.serverCertChain + other.resumable = self.resumable + other.sharedKey = self.sharedKey + return other + + def _calcMasterSecret(self, version, premasterSecret, clientRandom, + serverRandom): + if version == (3,0): + self.masterSecret = PRF_SSL(premasterSecret, + concatArrays(clientRandom, serverRandom), 48) + elif version in ((3,1), (3,2)): + self.masterSecret = PRF(premasterSecret, "master secret", + concatArrays(clientRandom, serverRandom), 48) + else: + raise AssertionError() + + def valid(self): + """If this session can be used for session resumption. + + @rtype: bool + @return: If this session can be used for session resumption. 
+ """ + return self.resumable or self.sharedKey + + def _setResumable(self, boolean): + #Only let it be set if this isn't a shared key + if not self.sharedKey: + #Only let it be set to True if the sessionID is non-null + if (not boolean) or (boolean and self.sessionID): + self.resumable = boolean + + def getCipherName(self): + """Get the name of the cipher used with this connection. + + @rtype: str + @return: The name of the cipher used with this connection. + Either 'aes128', 'aes256', 'rc4', or '3des'. + """ + if self.cipherSuite in CipherSuite.aes128Suites: + return "aes128" + elif self.cipherSuite in CipherSuite.aes256Suites: + return "aes256" + elif self.cipherSuite in CipherSuite.rc4Suites: + return "rc4" + elif self.cipherSuite in CipherSuite.tripleDESSuites: + return "3des" + else: + return None + + def _createSharedKey(self, sharedKeyUsername, sharedKey): + if len(sharedKeyUsername)>16: + raise ValueError() + if len(sharedKey)>47: + raise ValueError() + + self.sharedKeyUsername = sharedKeyUsername + + self.sessionID = createByteArrayZeros(16) + for x in range(len(sharedKeyUsername)): + self.sessionID[x] = ord(sharedKeyUsername[x]) + + premasterSecret = createByteArrayZeros(48) + sharedKey = chr(len(sharedKey)) + sharedKey + for x in range(48): + premasterSecret[x] = ord(sharedKey[x % len(sharedKey)]) + + self.masterSecret = PRF(premasterSecret, "shared secret", + createByteArraySequence([]), 48) + self.sharedKey = True + return self + + diff --git a/gdata.py-1.2.3/src/gdata/tlslite/SessionCache.py b/gdata.py-1.2.3/src/gdata/tlslite/SessionCache.py new file mode 100755 index 0000000..34cf0b0 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/SessionCache.py @@ -0,0 +1,103 @@ +"""Class for caching TLS sessions.""" + +import thread +import time + +class SessionCache: + """This class is used by the server to cache TLS sessions. + + Caching sessions allows the client to use TLS session resumption + and avoid the expense of a full handshake. 
To use this class, + simply pass a SessionCache instance into the server handshake + function. + + This class is thread-safe. + """ + + #References to these instances + #are also held by the caller, who may change the 'resumable' + #flag, so the SessionCache must return the same instances + #it was passed in. + + def __init__(self, maxEntries=10000, maxAge=14400): + """Create a new SessionCache. + + @type maxEntries: int + @param maxEntries: The maximum size of the cache. When this + limit is reached, the oldest sessions will be deleted as + necessary to make room for new ones. The default is 10000. + + @type maxAge: int + @param maxAge: The number of seconds before a session expires + from the cache. The default is 14400 (i.e. 4 hours).""" + + self.lock = thread.allocate_lock() + + # Maps sessionIDs to sessions + self.entriesDict = {} + + #Circular list of (sessionID, timestamp) pairs + self.entriesList = [(None,None)] * maxEntries + + self.firstIndex = 0 + self.lastIndex = 0 + self.maxAge = maxAge + + def __getitem__(self, sessionID): + self.lock.acquire() + try: + self._purge() #Delete old items, so we're assured of a new one + session = self.entriesDict[sessionID] + + #When we add sessions they're resumable, but it's possible + #for the session to be invalidated later on (if a fatal alert + #is returned), so we have to check for resumability before + #returning the session. 
+ + if session.valid(): + return session + else: + raise KeyError() + finally: + self.lock.release() + + + def __setitem__(self, sessionID, session): + self.lock.acquire() + try: + #Add the new element + self.entriesDict[sessionID] = session + self.entriesList[self.lastIndex] = (sessionID, time.time()) + self.lastIndex = (self.lastIndex+1) % len(self.entriesList) + + #If the cache is full, we delete the oldest element to make an + #empty space + if self.lastIndex == self.firstIndex: + del(self.entriesDict[self.entriesList[self.firstIndex][0]]) + self.firstIndex = (self.firstIndex+1) % len(self.entriesList) + finally: + self.lock.release() + + #Delete expired items + def _purge(self): + currentTime = time.time() + + #Search through the circular list, deleting expired elements until + #we reach a non-expired element. Since elements in list are + #ordered in time, we can break once we reach the first non-expired + #element + index = self.firstIndex + while index != self.lastIndex: + if currentTime - self.entriesList[index][1] > self.maxAge: + del(self.entriesDict[self.entriesList[index][0]]) + index = (index+1) % len(self.entriesList) + else: + break + self.firstIndex = index + +def _test(): + import doctest, SessionCache + return doctest.testmod(SessionCache) + +if __name__ == "__main__": + _test() diff --git a/gdata.py-1.2.3/src/gdata/tlslite/SharedKeyDB.py b/gdata.py-1.2.3/src/gdata/tlslite/SharedKeyDB.py new file mode 100755 index 0000000..3246ec7 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/SharedKeyDB.py @@ -0,0 +1,58 @@ +"""Class for storing shared keys.""" + +from utils.cryptomath import * +from utils.compat import * +from mathtls import * +from Session import Session +from BaseDB import BaseDB + +class SharedKeyDB(BaseDB): + """This class represent an in-memory or on-disk database of shared + keys. + + A SharedKeyDB can be passed to a server handshake function to + authenticate a client based on one of the shared keys. + + This class is thread-safe. 
+ """ + + def __init__(self, filename=None): + """Create a new SharedKeyDB. + + @type filename: str + @param filename: Filename for an on-disk database, or None for + an in-memory database. If the filename already exists, follow + this with a call to open(). To create a new on-disk database, + follow this with a call to create(). + """ + BaseDB.__init__(self, filename, "shared key") + + def _getItem(self, username, valueStr): + session = Session() + session._createSharedKey(username, valueStr) + return session + + def __setitem__(self, username, sharedKey): + """Add a shared key to the database. + + @type username: str + @param username: The username to associate the shared key with. + Must be less than or equal to 16 characters in length, and must + not already be in the database. + + @type sharedKey: str + @param sharedKey: The shared key to add. Must be less than 48 + characters in length. + """ + BaseDB.__setitem__(self, username, sharedKey) + + def _setItem(self, username, value): + if len(username)>16: + raise ValueError("username too long") + if len(value)>=48: + raise ValueError("shared key too long") + return value + + def _checkItem(self, value, username, param): + newSession = self._getItem(username, param) + return value.masterSecret == newSession.masterSecret \ No newline at end of file diff --git a/gdata.py-1.2.3/src/gdata/tlslite/TLSConnection.py b/gdata.py-1.2.3/src/gdata/tlslite/TLSConnection.py new file mode 100755 index 0000000..d125f8f --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/TLSConnection.py @@ -0,0 +1,1600 @@ +""" +MAIN CLASS FOR TLS LITE (START HERE!). 
+""" +from __future__ import generators + +import socket +from utils.compat import formatExceptionTrace +from TLSRecordLayer import TLSRecordLayer +from Session import Session +from constants import * +from utils.cryptomath import getRandomBytes +from errors import * +from messages import * +from mathtls import * +from HandshakeSettings import HandshakeSettings + + +class TLSConnection(TLSRecordLayer): + """ + This class wraps a socket and provides TLS handshaking and data + transfer. + + To use this class, create a new instance, passing a connected + socket into the constructor. Then call some handshake function. + If the handshake completes without raising an exception, then a TLS + connection has been negotiated. You can transfer data over this + connection as if it were a socket. + + This class provides both synchronous and asynchronous versions of + its key functions. The synchronous versions should be used when + writing single-or multi-threaded code using blocking sockets. The + asynchronous versions should be used when performing asynchronous, + event-based I/O with non-blocking sockets. + + Asynchronous I/O is a complicated subject; typically, you should + not use the asynchronous functions directly, but should use some + framework like asyncore or Twisted which TLS Lite integrates with + (see + L{tlslite.integration.TLSAsyncDispatcherMixIn.TLSAsyncDispatcherMixIn} or + L{tlslite.integration.TLSTwistedProtocolWrapper.TLSTwistedProtocolWrapper}). + """ + + + def __init__(self, sock): + """Create a new TLSConnection instance. + + @param sock: The socket data will be transmitted on. The + socket should already be connected. It may be in blocking or + non-blocking mode. + + @type sock: L{socket.socket} + """ + TLSRecordLayer.__init__(self, sock) + + def handshakeClientSRP(self, username, password, session=None, + settings=None, checker=None, async=False): + """Perform an SRP handshake in the role of client. + + This function performs a TLS/SRP handshake. 
SRP mutually + authenticates both parties to each other using only a + username and password. This function may also perform a + combined SRP and server-certificate handshake, if the server + chooses to authenticate itself with a certificate chain in + addition to doing SRP. + + TLS/SRP is non-standard. Most TLS implementations don't + support it. See + U{http://www.ietf.org/html.charters/tls-charter.html} or + U{http://trevp.net/tlssrp/} for the latest information on + TLS/SRP. + + Like any handshake function, this can be called on a closed + TLS connection, or on a TLS connection that is already open. + If called on an open connection it performs a re-handshake. + + If the function completes without raising an exception, the + TLS connection will be open and available for data transfer. + + If an exception is raised, the connection will have been + automatically closed (if it was ever open). + + @type username: str + @param username: The SRP username. + + @type password: str + @param password: The SRP password. + + @type session: L{tlslite.Session.Session} + @param session: A TLS session to attempt to resume. This + session must be an SRP session performed with the same username + and password as were passed in. If the resumption does not + succeed, a full SRP handshake will be performed. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + + @type checker: L{tlslite.Checker.Checker} + @param checker: A Checker instance. This instance will be + invoked to examine the other party's authentication + credentials, if the handshake completes succesfully. + + @type async: bool + @param async: If False, this function will block until the + handshake is completed. If True, this function will return a + generator. 
Successive invocations of the generator will + return 0 if it is waiting to read from the socket, 1 if it is + waiting to write to the socket, or will raise StopIteration if + the handshake operation is completed. + + @rtype: None or an iterable + @return: If 'async' is True, a generator object will be + returned. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + @raise tlslite.errors.TLSAuthenticationError: If the checker + doesn't like the other party's authentication credentials. + """ + handshaker = self._handshakeClientAsync(srpParams=(username, password), + session=session, settings=settings, checker=checker) + if async: + return handshaker + for result in handshaker: + pass + + def handshakeClientCert(self, certChain=None, privateKey=None, + session=None, settings=None, checker=None, + async=False): + """Perform a certificate-based handshake in the role of client. + + This function performs an SSL or TLS handshake. The server + will authenticate itself using an X.509 or cryptoID certificate + chain. If the handshake succeeds, the server's certificate + chain will be stored in the session's serverCertChain attribute. + Unless a checker object is passed in, this function does no + validation or checking of the server's certificate chain. + + If the server requests client authentication, the + client will send the passed-in certificate chain, and use the + passed-in private key to authenticate itself. If no + certificate chain and private key were passed in, the client + will attempt to proceed without client authentication. The + server may or may not allow this. + + Like any handshake function, this can be called on a closed + TLS connection, or on a TLS connection that is already open. + If called on an open connection it performs a re-handshake. 
+ + If the function completes without raising an exception, the + TLS connection will be open and available for data transfer. + + If an exception is raised, the connection will have been + automatically closed (if it was ever open). + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: The certificate chain to be used if the + server requests client authentication. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: The private key to be used if the server + requests client authentication. + + @type session: L{tlslite.Session.Session} + @param session: A TLS session to attempt to resume. If the + resumption does not succeed, a full handshake will be + performed. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + + @type checker: L{tlslite.Checker.Checker} + @param checker: A Checker instance. This instance will be + invoked to examine the other party's authentication + credentials, if the handshake completes succesfully. + + @type async: bool + @param async: If False, this function will block until the + handshake is completed. If True, this function will return a + generator. Successive invocations of the generator will + return 0 if it is waiting to read from the socket, 1 if it is + waiting to write to the socket, or will raise StopIteration if + the handshake operation is completed. + + @rtype: None or an iterable + @return: If 'async' is True, a generator object will be + returned. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. 
+ @raise tlslite.errors.TLSAuthenticationError: If the checker + doesn't like the other party's authentication credentials. + """ + handshaker = self._handshakeClientAsync(certParams=(certChain, + privateKey), session=session, settings=settings, + checker=checker) + if async: + return handshaker + for result in handshaker: + pass + + def handshakeClientUnknown(self, srpCallback=None, certCallback=None, + session=None, settings=None, checker=None, + async=False): + """Perform a to-be-determined type of handshake in the role of client. + + This function performs an SSL or TLS handshake. If the server + requests client certificate authentication, the + certCallback will be invoked and should return a (certChain, + privateKey) pair. If the callback returns None, the library + will attempt to proceed without client authentication. The + server may or may not allow this. + + If the server requests SRP authentication, the srpCallback + will be invoked and should return a (username, password) pair. + If the callback returns None, the local implementation will + signal a user_canceled error alert. + + After the handshake completes, the client can inspect the + connection's session attribute to determine what type of + authentication was performed. + + Like any handshake function, this can be called on a closed + TLS connection, or on a TLS connection that is already open. + If called on an open connection it performs a re-handshake. + + If the function completes without raising an exception, the + TLS connection will be open and available for data transfer. + + If an exception is raised, the connection will have been + automatically closed (if it was ever open). + + @type srpCallback: callable + @param srpCallback: The callback to be used if the server + requests SRP authentication. If None, the client will not + offer support for SRP ciphersuites. 
+ + @type certCallback: callable + @param certCallback: The callback to be used if the server + requests client certificate authentication. + + @type session: L{tlslite.Session.Session} + @param session: A TLS session to attempt to resume. If the + resumption does not succeed, a full handshake will be + performed. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + + @type checker: L{tlslite.Checker.Checker} + @param checker: A Checker instance. This instance will be + invoked to examine the other party's authentication + credentials, if the handshake completes succesfully. + + @type async: bool + @param async: If False, this function will block until the + handshake is completed. If True, this function will return a + generator. Successive invocations of the generator will + return 0 if it is waiting to read from the socket, 1 if it is + waiting to write to the socket, or will raise StopIteration if + the handshake operation is completed. + + @rtype: None or an iterable + @return: If 'async' is True, a generator object will be + returned. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + @raise tlslite.errors.TLSAuthenticationError: If the checker + doesn't like the other party's authentication credentials. + """ + handshaker = self._handshakeClientAsync(unknownParams=(srpCallback, + certCallback), session=session, settings=settings, + checker=checker) + if async: + return handshaker + for result in handshaker: + pass + + def handshakeClientSharedKey(self, username, sharedKey, settings=None, + checker=None, async=False): + """Perform a shared-key handshake in the role of client. + + This function performs a shared-key handshake. 
Using shared + symmetric keys of high entropy (128 bits or greater) mutually + authenticates both parties to each other. + + TLS with shared-keys is non-standard. Most TLS + implementations don't support it. See + U{http://www.ietf.org/html.charters/tls-charter.html} for the + latest information on TLS with shared-keys. If the shared-keys + Internet-Draft changes or is superceded, TLS Lite will track + those changes, so the shared-key support in later versions of + TLS Lite may become incompatible with this version. + + Like any handshake function, this can be called on a closed + TLS connection, or on a TLS connection that is already open. + If called on an open connection it performs a re-handshake. + + If the function completes without raising an exception, the + TLS connection will be open and available for data transfer. + + If an exception is raised, the connection will have been + automatically closed (if it was ever open). + + @type username: str + @param username: The shared-key username. + + @type sharedKey: str + @param sharedKey: The shared key. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + + @type checker: L{tlslite.Checker.Checker} + @param checker: A Checker instance. This instance will be + invoked to examine the other party's authentication + credentials, if the handshake completes succesfully. + + @type async: bool + @param async: If False, this function will block until the + handshake is completed. If True, this function will return a + generator. Successive invocations of the generator will + return 0 if it is waiting to read from the socket, 1 if it is + waiting to write to the socket, or will raise StopIteration if + the handshake operation is completed. + + @rtype: None or an iterable + @return: If 'async' is True, a generator object will be + returned. 
+ + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + @raise tlslite.errors.TLSAuthenticationError: If the checker + doesn't like the other party's authentication credentials. + """ + handshaker = self._handshakeClientAsync(sharedKeyParams=(username, + sharedKey), settings=settings, checker=checker) + if async: + return handshaker + for result in handshaker: + pass + + def _handshakeClientAsync(self, srpParams=(), certParams=(), + unknownParams=(), sharedKeyParams=(), + session=None, settings=None, checker=None, + recursive=False): + + handshaker = self._handshakeClientAsyncHelper(srpParams=srpParams, + certParams=certParams, unknownParams=unknownParams, + sharedKeyParams=sharedKeyParams, session=session, + settings=settings, recursive=recursive) + for result in self._handshakeWrapperAsync(handshaker, checker): + yield result + + + def _handshakeClientAsyncHelper(self, srpParams, certParams, unknownParams, + sharedKeyParams, session, settings, recursive): + if not recursive: + self._handshakeStart(client=True) + + #Unpack parameters + srpUsername = None # srpParams + password = None # srpParams + clientCertChain = None # certParams + privateKey = None # certParams + srpCallback = None # unknownParams + certCallback = None # unknownParams + #session # sharedKeyParams (or session) + #settings # settings + + if srpParams: + srpUsername, password = srpParams + elif certParams: + clientCertChain, privateKey = certParams + elif unknownParams: + srpCallback, certCallback = unknownParams + elif sharedKeyParams: + session = Session()._createSharedKey(*sharedKeyParams) + + if not settings: + settings = HandshakeSettings() + settings = settings._filter() + + #Validate parameters + if srpUsername and not password: + raise ValueError("Caller passed a username but no password") + if password and not srpUsername: + raise 
ValueError("Caller passed a password but no username") + + if clientCertChain and not privateKey: + raise ValueError("Caller passed a certChain but no privateKey") + if privateKey and not clientCertChain: + raise ValueError("Caller passed a privateKey but no certChain") + + if clientCertChain: + foundType = False + try: + import cryptoIDlib.CertChain + if isinstance(clientCertChain, cryptoIDlib.CertChain.CertChain): + if "cryptoID" not in settings.certificateTypes: + raise ValueError("Client certificate doesn't "\ + "match Handshake Settings") + settings.certificateTypes = ["cryptoID"] + foundType = True + except ImportError: + pass + if not foundType and isinstance(clientCertChain, + X509CertChain): + if "x509" not in settings.certificateTypes: + raise ValueError("Client certificate doesn't match "\ + "Handshake Settings") + settings.certificateTypes = ["x509"] + foundType = True + if not foundType: + raise ValueError("Unrecognized certificate type") + + + if session: + if not session.valid(): + session = None #ignore non-resumable sessions... 
+ elif session.resumable and \ + (session.srpUsername != srpUsername): + raise ValueError("Session username doesn't match") + + #Add Faults to parameters + if srpUsername and self.fault == Fault.badUsername: + srpUsername += "GARBAGE" + if password and self.fault == Fault.badPassword: + password += "GARBAGE" + if sharedKeyParams: + identifier = sharedKeyParams[0] + sharedKey = sharedKeyParams[1] + if self.fault == Fault.badIdentifier: + identifier += "GARBAGE" + session = Session()._createSharedKey(identifier, sharedKey) + elif self.fault == Fault.badSharedKey: + sharedKey += "GARBAGE" + session = Session()._createSharedKey(identifier, sharedKey) + + + #Initialize locals + serverCertChain = None + cipherSuite = 0 + certificateType = CertificateType.x509 + premasterSecret = None + + #Get client nonce + clientRandom = getRandomBytes(32) + + #Initialize acceptable ciphersuites + cipherSuites = [] + if srpParams: + cipherSuites += CipherSuite.getSrpRsaSuites(settings.cipherNames) + cipherSuites += CipherSuite.getSrpSuites(settings.cipherNames) + elif certParams: + cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) + elif unknownParams: + if srpCallback: + cipherSuites += \ + CipherSuite.getSrpRsaSuites(settings.cipherNames) + cipherSuites += \ + CipherSuite.getSrpSuites(settings.cipherNames) + cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) + elif sharedKeyParams: + cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) + else: + cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) + + #Initialize acceptable certificate types + certificateTypes = settings._getCertificateTypes() + + #Tentatively set the version to the client's minimum version. + #We'll use this for the ClientHello, and if an error occurs + #parsing the Server Hello, we'll use this version for the response + self.version = settings.maxVersion + + #Either send ClientHello (with a resumable session)... + if session: + #If it's a resumable (i.e. 
not a shared-key session), then its + #ciphersuite must be one of the acceptable ciphersuites + if (not sharedKeyParams) and \ + session.cipherSuite not in cipherSuites: + raise ValueError("Session's cipher suite not consistent "\ + "with parameters") + else: + clientHello = ClientHello() + clientHello.create(settings.maxVersion, clientRandom, + session.sessionID, cipherSuites, + certificateTypes, session.srpUsername) + + #Or send ClientHello (without) + else: + clientHello = ClientHello() + clientHello.create(settings.maxVersion, clientRandom, + createByteArraySequence([]), cipherSuites, + certificateTypes, srpUsername) + for result in self._sendMsg(clientHello): + yield result + + #Get ServerHello (or missing_srp_username) + for result in self._getMsg((ContentType.handshake, + ContentType.alert), + HandshakeType.server_hello): + if result in (0,1): + yield result + else: + break + msg = result + + if isinstance(msg, ServerHello): + serverHello = msg + elif isinstance(msg, Alert): + alert = msg + + #If it's not a missing_srp_username, re-raise + if alert.description != AlertDescription.missing_srp_username: + self._shutdown(False) + raise TLSRemoteAlert(alert) + + #If we're not in SRP callback mode, we won't have offered SRP + #without a username, so we shouldn't get this alert + if not srpCallback: + for result in self._sendError(\ + AlertDescription.unexpected_message): + yield result + srpParams = srpCallback() + #If the callback returns None, cancel the handshake + if srpParams == None: + for result in self._sendError(AlertDescription.user_canceled): + yield result + + #Recursively perform handshake + for result in self._handshakeClientAsyncHelper(srpParams, + None, None, None, None, settings, True): + yield result + return + + #Get the server version. 
Do this before anything else, so any + #error alerts will use the server's version + self.version = serverHello.server_version + + #Future responses from server must use this version + self._versionCheck = True + + #Check ServerHello + if serverHello.server_version < settings.minVersion: + for result in self._sendError(\ + AlertDescription.protocol_version, + "Too old version: %s" % str(serverHello.server_version)): + yield result + if serverHello.server_version > settings.maxVersion: + for result in self._sendError(\ + AlertDescription.protocol_version, + "Too new version: %s" % str(serverHello.server_version)): + yield result + if serverHello.cipher_suite not in cipherSuites: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Server responded with incorrect ciphersuite"): + yield result + if serverHello.certificate_type not in certificateTypes: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Server responded with incorrect certificate type"): + yield result + if serverHello.compression_method != 0: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Server responded with incorrect compression method"): + yield result + + #Get the server nonce + serverRandom = serverHello.random + + #If the server agrees to resume + if session and session.sessionID and \ + serverHello.session_id == session.sessionID: + + #If a shared-key, we're flexible about suites; otherwise the + #server-chosen suite has to match the session's suite + if sharedKeyParams: + session.cipherSuite = serverHello.cipher_suite + elif serverHello.cipher_suite != session.cipherSuite: + for result in self._sendError(\ + AlertDescription.illegal_parameter,\ + "Server's ciphersuite doesn't match session"): + yield result + + #Set the session for this connection + self.session = session + + #Calculate pending connection states + self._calcPendingStates(clientRandom, serverRandom, + settings.cipherImplementations) + + #Exchange 
ChangeCipherSpec and Finished messages + for result in self._getFinished(): + yield result + for result in self._sendFinished(): + yield result + + #Mark the connection as open + self._handshakeDone(resumed=True) + + #If server DOES NOT agree to resume + else: + + if sharedKeyParams: + for result in self._sendError(\ + AlertDescription.user_canceled, + "Was expecting a shared-key resumption"): + yield result + + #We've already validated these + cipherSuite = serverHello.cipher_suite + certificateType = serverHello.certificate_type + + #If the server chose an SRP suite... + if cipherSuite in CipherSuite.srpSuites: + #Get ServerKeyExchange, ServerHelloDone + for result in self._getMsg(ContentType.handshake, + HandshakeType.server_key_exchange, cipherSuite): + if result in (0,1): + yield result + else: + break + serverKeyExchange = result + + for result in self._getMsg(ContentType.handshake, + HandshakeType.server_hello_done): + if result in (0,1): + yield result + else: + break + serverHelloDone = result + + #If the server chose an SRP+RSA suite... + elif cipherSuite in CipherSuite.srpRsaSuites: + #Get Certificate, ServerKeyExchange, ServerHelloDone + for result in self._getMsg(ContentType.handshake, + HandshakeType.certificate, certificateType): + if result in (0,1): + yield result + else: + break + serverCertificate = result + + for result in self._getMsg(ContentType.handshake, + HandshakeType.server_key_exchange, cipherSuite): + if result in (0,1): + yield result + else: + break + serverKeyExchange = result + + for result in self._getMsg(ContentType.handshake, + HandshakeType.server_hello_done): + if result in (0,1): + yield result + else: + break + serverHelloDone = result + + #If the server chose an RSA suite... 
+ elif cipherSuite in CipherSuite.rsaSuites: + #Get Certificate[, CertificateRequest], ServerHelloDone + for result in self._getMsg(ContentType.handshake, + HandshakeType.certificate, certificateType): + if result in (0,1): + yield result + else: + break + serverCertificate = result + + for result in self._getMsg(ContentType.handshake, + (HandshakeType.server_hello_done, + HandshakeType.certificate_request)): + if result in (0,1): + yield result + else: + break + msg = result + + certificateRequest = None + if isinstance(msg, CertificateRequest): + certificateRequest = msg + for result in self._getMsg(ContentType.handshake, + HandshakeType.server_hello_done): + if result in (0,1): + yield result + else: + break + serverHelloDone = result + elif isinstance(msg, ServerHelloDone): + serverHelloDone = msg + else: + raise AssertionError() + + + #Calculate SRP premaster secret, if server chose an SRP or + #SRP+RSA suite + if cipherSuite in CipherSuite.srpSuites + \ + CipherSuite.srpRsaSuites: + #Get and check the server's group parameters and B value + N = serverKeyExchange.srp_N + g = serverKeyExchange.srp_g + s = serverKeyExchange.srp_s + B = serverKeyExchange.srp_B + + if (g,N) not in goodGroupParameters: + for result in self._sendError(\ + AlertDescription.untrusted_srp_parameters, + "Unknown group parameters"): + yield result + if numBits(N) < settings.minKeySize: + for result in self._sendError(\ + AlertDescription.untrusted_srp_parameters, + "N value is too small: %d" % numBits(N)): + yield result + if numBits(N) > settings.maxKeySize: + for result in self._sendError(\ + AlertDescription.untrusted_srp_parameters, + "N value is too large: %d" % numBits(N)): + yield result + if B % N == 0: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Suspicious B value"): + yield result + + #Check the server's signature, if server chose an + #SRP+RSA suite + if cipherSuite in CipherSuite.srpRsaSuites: + #Hash ServerKeyExchange/ServerSRPParams + 
hashBytes = serverKeyExchange.hash(clientRandom, + serverRandom) + + #Extract signature bytes from ServerKeyExchange + sigBytes = serverKeyExchange.signature + if len(sigBytes) == 0: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Server sent an SRP ServerKeyExchange "\ + "message without a signature"): + yield result + + #Get server's public key from the Certificate message + for result in self._getKeyFromChain(serverCertificate, + settings): + if result in (0,1): + yield result + else: + break + publicKey, serverCertChain = result + + #Verify signature + if not publicKey.verify(sigBytes, hashBytes): + for result in self._sendError(\ + AlertDescription.decrypt_error, + "Signature failed to verify"): + yield result + + + #Calculate client's ephemeral DH values (a, A) + a = bytesToNumber(getRandomBytes(32)) + A = powMod(g, a, N) + + #Calculate client's static DH values (x, v) + x = makeX(bytesToString(s), srpUsername, password) + v = powMod(g, x, N) + + #Calculate u + u = makeU(N, A, B) + + #Calculate premaster secret + k = makeK(N, g) + S = powMod((B - (k*v)) % N, a+(u*x), N) + + if self.fault == Fault.badA: + A = N + S = 0 + premasterSecret = numberToBytes(S) + + #Send ClientKeyExchange + for result in self._sendMsg(\ + ClientKeyExchange(cipherSuite).createSRP(A)): + yield result + + + #Calculate RSA premaster secret, if server chose an RSA suite + elif cipherSuite in CipherSuite.rsaSuites: + + #Handle the presence of a CertificateRequest + if certificateRequest: + if unknownParams and certCallback: + certParamsNew = certCallback() + if certParamsNew: + clientCertChain, privateKey = certParamsNew + + #Get server's public key from the Certificate message + for result in self._getKeyFromChain(serverCertificate, + settings): + if result in (0,1): + yield result + else: + break + publicKey, serverCertChain = result + + + #Calculate premaster secret + premasterSecret = getRandomBytes(48) + premasterSecret[0] = settings.maxVersion[0] + 
premasterSecret[1] = settings.maxVersion[1] + + if self.fault == Fault.badPremasterPadding: + premasterSecret[0] = 5 + if self.fault == Fault.shortPremasterSecret: + premasterSecret = premasterSecret[:-1] + + #Encrypt premaster secret to server's public key + encryptedPreMasterSecret = publicKey.encrypt(premasterSecret) + + #If client authentication was requested, send Certificate + #message, either with certificates or empty + if certificateRequest: + clientCertificate = Certificate(certificateType) + + if clientCertChain: + #Check to make sure we have the same type of + #certificates the server requested + wrongType = False + if certificateType == CertificateType.x509: + if not isinstance(clientCertChain, X509CertChain): + wrongType = True + elif certificateType == CertificateType.cryptoID: + if not isinstance(clientCertChain, + cryptoIDlib.CertChain.CertChain): + wrongType = True + if wrongType: + for result in self._sendError(\ + AlertDescription.handshake_failure, + "Client certificate is of wrong type"): + yield result + + clientCertificate.create(clientCertChain) + + for result in self._sendMsg(clientCertificate): + yield result + else: + #The server didn't request client auth, so we + #zeroize these so the clientCertChain won't be + #stored in the session. 
+ privateKey = None + clientCertChain = None + + #Send ClientKeyExchange + clientKeyExchange = ClientKeyExchange(cipherSuite, + self.version) + clientKeyExchange.createRSA(encryptedPreMasterSecret) + for result in self._sendMsg(clientKeyExchange): + yield result + + #If client authentication was requested and we have a + #private key, send CertificateVerify + if certificateRequest and privateKey: + if self.version == (3,0): + #Create a temporary session object, just for the + #purpose of creating the CertificateVerify + session = Session() + session._calcMasterSecret(self.version, + premasterSecret, + clientRandom, + serverRandom) + verifyBytes = self._calcSSLHandshakeHash(\ + session.masterSecret, "") + elif self.version in ((3,1), (3,2)): + verifyBytes = stringToBytes(\ + self._handshake_md5.digest() + \ + self._handshake_sha.digest()) + if self.fault == Fault.badVerifyMessage: + verifyBytes[0] = ((verifyBytes[0]+1) % 256) + signedBytes = privateKey.sign(verifyBytes) + certificateVerify = CertificateVerify() + certificateVerify.create(signedBytes) + for result in self._sendMsg(certificateVerify): + yield result + + + #Create the session object + self.session = Session() + self.session._calcMasterSecret(self.version, premasterSecret, + clientRandom, serverRandom) + self.session.sessionID = serverHello.session_id + self.session.cipherSuite = cipherSuite + self.session.srpUsername = srpUsername + self.session.clientCertChain = clientCertChain + self.session.serverCertChain = serverCertChain + + #Calculate pending connection states + self._calcPendingStates(clientRandom, serverRandom, + settings.cipherImplementations) + + #Exchange ChangeCipherSpec and Finished messages + for result in self._sendFinished(): + yield result + for result in self._getFinished(): + yield result + + #Mark the connection as open + self.session._setResumable(True) + self._handshakeDone(resumed=False) + + + + def handshakeServer(self, sharedKeyDB=None, verifierDB=None, + certChain=None, 
privateKey=None, reqCert=False, + sessionCache=None, settings=None, checker=None): + """Perform a handshake in the role of server. + + This function performs an SSL or TLS handshake. Depending on + the arguments and the behavior of the client, this function can + perform a shared-key, SRP, or certificate-based handshake. It + can also perform a combined SRP and server-certificate + handshake. + + Like any handshake function, this can be called on a closed + TLS connection, or on a TLS connection that is already open. + If called on an open connection it performs a re-handshake. + This function does not send a Hello Request message before + performing the handshake, so if re-handshaking is required, + the server must signal the client to begin the re-handshake + through some other means. + + If the function completes without raising an exception, the + TLS connection will be open and available for data transfer. + + If an exception is raised, the connection will have been + automatically closed (if it was ever open). + + @type sharedKeyDB: L{tlslite.SharedKeyDB.SharedKeyDB} + @param sharedKeyDB: A database of shared symmetric keys + associated with usernames. If the client performs a + shared-key handshake, the session's sharedKeyUsername + attribute will be set. + + @type verifierDB: L{tlslite.VerifierDB.VerifierDB} + @param verifierDB: A database of SRP password verifiers + associated with usernames. If the client performs an SRP + handshake, the session's srpUsername attribute will be set. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: The certificate chain to be used if the + client requests server certificate authentication. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: The private key to be used if the client + requests server certificate authentication. + + @type reqCert: bool + @param reqCert: Whether to request client certificate + authentication. 
This only applies if the client chooses server + certificate authentication; if the client chooses SRP or + shared-key authentication, this will be ignored. If the client + performs a client certificate authentication, the session's + clientCertChain attribute will be set. + + @type sessionCache: L{tlslite.SessionCache.SessionCache} + @param sessionCache: An in-memory cache of resumable sessions. + The client can resume sessions from this cache. Alternatively, + if the client performs a full handshake, a new session will be + added to the cache. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites and SSL/TLS version chosen by the server. + + @type checker: L{tlslite.Checker.Checker} + @param checker: A Checker instance. This instance will be + invoked to examine the other party's authentication + credentials, if the handshake completes successfully. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + @raise tlslite.errors.TLSAuthenticationError: If the checker + doesn't like the other party's authentication credentials. + """ + for result in self.handshakeServerAsync(sharedKeyDB, verifierDB, + certChain, privateKey, reqCert, sessionCache, settings, + checker): + pass + + + def handshakeServerAsync(self, sharedKeyDB=None, verifierDB=None, + certChain=None, privateKey=None, reqCert=False, + sessionCache=None, settings=None, checker=None): + """Start a server handshake operation on the TLS connection. + + This function returns a generator which behaves similarly to + handshakeServer(). Successive invocations of the generator + will return 0 if it is waiting to read from the socket, 1 if it is + waiting to write to the socket, or it will raise StopIteration + if the handshake operation is complete. 
+ + @rtype: iterable + @return: A generator; see above for details. + """ + handshaker = self._handshakeServerAsyncHelper(\ + sharedKeyDB=sharedKeyDB, + verifierDB=verifierDB, certChain=certChain, + privateKey=privateKey, reqCert=reqCert, + sessionCache=sessionCache, settings=settings) + for result in self._handshakeWrapperAsync(handshaker, checker): + yield result + + + def _handshakeServerAsyncHelper(self, sharedKeyDB, verifierDB, + certChain, privateKey, reqCert, sessionCache, + settings): + + self._handshakeStart(client=False) + + if (not sharedKeyDB) and (not verifierDB) and (not certChain): + raise ValueError("Caller passed no authentication credentials") + if certChain and not privateKey: + raise ValueError("Caller passed a certChain but no privateKey") + if privateKey and not certChain: + raise ValueError("Caller passed a privateKey but no certChain") + + if not settings: + settings = HandshakeSettings() + settings = settings._filter() + + #Initialize acceptable cipher suites + cipherSuites = [] + if verifierDB: + if certChain: + cipherSuites += \ + CipherSuite.getSrpRsaSuites(settings.cipherNames) + cipherSuites += CipherSuite.getSrpSuites(settings.cipherNames) + if sharedKeyDB or certChain: + cipherSuites += CipherSuite.getRsaSuites(settings.cipherNames) + + #Initialize acceptable certificate type + certificateType = None + if certChain: + try: + import cryptoIDlib.CertChain + if isinstance(certChain, cryptoIDlib.CertChain.CertChain): + certificateType = CertificateType.cryptoID + except ImportError: + pass + if isinstance(certChain, X509CertChain): + certificateType = CertificateType.x509 + if certificateType == None: + raise ValueError("Unrecognized certificate type") + + #Initialize locals + clientCertChain = None + serverCertChain = None #We may set certChain to this later + postFinishedError = None + + #Tentatively set version to most-desirable version, so if an error + #occurs parsing the ClientHello, this is what we'll use for the + #error alert + 
self.version = settings.maxVersion + + #Get ClientHello + for result in self._getMsg(ContentType.handshake, + HandshakeType.client_hello): + if result in (0,1): + yield result + else: + break + clientHello = result + + #If client's version is too low, reject it + if clientHello.client_version < settings.minVersion: + self.version = settings.minVersion + for result in self._sendError(\ + AlertDescription.protocol_version, + "Too old version: %s" % str(clientHello.client_version)): + yield result + + #If client's version is too high, propose my highest version + elif clientHello.client_version > settings.maxVersion: + self.version = settings.maxVersion + + else: + #Set the version to the client's version + self.version = clientHello.client_version + + #Get the client nonce; create server nonce + clientRandom = clientHello.random + serverRandom = getRandomBytes(32) + + #Calculate the first cipher suite intersection. + #This is the 'privileged' ciphersuite. We'll use it if we're + #doing a shared-key resumption or a new negotiation. In fact, + #the only time we won't use it is if we're resuming a non-sharedkey + #session, in which case we use the ciphersuite from the session. + # + #Given the current ciphersuite ordering, this means we prefer SRP + #over non-SRP. + for cipherSuite in cipherSuites: + if cipherSuite in clientHello.cipher_suites: + break + else: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + + #If resumption was requested... 
+ if clientHello.session_id and (sharedKeyDB or sessionCache): + session = None + + #Check in the sharedKeys container + if sharedKeyDB and len(clientHello.session_id)==16: + try: + #Trim off zero padding, if any + for x in range(16): + if clientHello.session_id[x]==0: + break + self.allegedSharedKeyUsername = bytesToString(\ + clientHello.session_id[:x]) + session = sharedKeyDB[self.allegedSharedKeyUsername] + if not session.sharedKey: + raise AssertionError() + #use privileged ciphersuite + session.cipherSuite = cipherSuite + except KeyError: + pass + + #Then check in the session cache + if sessionCache and not session: + try: + session = sessionCache[bytesToString(\ + clientHello.session_id)] + if session.sharedKey: + raise AssertionError() + if not session.resumable: + raise AssertionError() + #Check for consistency with ClientHello + if session.cipherSuite not in cipherSuites: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + if session.cipherSuite not in clientHello.cipher_suites: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + if clientHello.srp_username: + if clientHello.srp_username != session.srpUsername: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + except KeyError: + pass + + #If a session is found.. 
+ if session: + #Set the session + self.session = session + + #Send ServerHello + serverHello = ServerHello() + serverHello.create(self.version, serverRandom, + session.sessionID, session.cipherSuite, + certificateType) + for result in self._sendMsg(serverHello): + yield result + + #From here on, the client's messages must have the right version + self._versionCheck = True + + #Calculate pending connection states + self._calcPendingStates(clientRandom, serverRandom, + settings.cipherImplementations) + + #Exchange ChangeCipherSpec and Finished messages + for result in self._sendFinished(): + yield result + for result in self._getFinished(): + yield result + + #Mark the connection as open + self._handshakeDone(resumed=True) + return + + + #If not a resumption... + + #TRICKY: we might have chosen an RSA suite that was only deemed + #acceptable because of the shared-key resumption. If the shared- + #key resumption failed, because the identifier wasn't recognized, + #we might fall through to here, where we have an RSA suite + #chosen, but no certificate. 
+ if cipherSuite in CipherSuite.rsaSuites and not certChain: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + + #If an RSA suite is chosen, check for certificate type intersection + #(We do this check down here because if the mismatch occurs but the + # client is using a shared-key session, it's okay) + if cipherSuite in CipherSuite.rsaSuites + \ + CipherSuite.srpRsaSuites: + if certificateType not in clientHello.certificate_types: + for result in self._sendError(\ + AlertDescription.handshake_failure, + "the client doesn't support my certificate type"): + yield result + + #Move certChain -> serverCertChain, now that we're using it + serverCertChain = certChain + + + #Create sessionID + if sessionCache: + sessionID = getRandomBytes(32) + else: + sessionID = createByteArraySequence([]) + + #If we've selected an SRP suite, exchange keys and calculate + #premaster secret: + if cipherSuite in CipherSuite.srpSuites + CipherSuite.srpRsaSuites: + + #If there's no SRP username... + if not clientHello.srp_username: + + #Ask the client to re-send ClientHello with one + for result in self._sendMsg(Alert().create(\ + AlertDescription.missing_srp_username, + AlertLevel.warning)): + yield result + + #Get ClientHello + for result in self._getMsg(ContentType.handshake, + HandshakeType.client_hello): + if result in (0,1): + yield result + else: + break + clientHello = result + + #Check ClientHello + #If client's version is too low, reject it (COPIED CODE; BAD!) 
+ if clientHello.client_version < settings.minVersion: + self.version = settings.minVersion + for result in self._sendError(\ + AlertDescription.protocol_version, + "Too old version: %s" % str(clientHello.client_version)): + yield result + + #If client's version is too high, propose my highest version + elif clientHello.client_version > settings.maxVersion: + self.version = settings.maxVersion + + else: + #Set the version to the client's version + self.version = clientHello.client_version + + #Recalculate the privileged cipher suite, making sure to + #pick an SRP suite + cipherSuites = [c for c in cipherSuites if c in \ + CipherSuite.srpSuites + \ + CipherSuite.srpRsaSuites] + for cipherSuite in cipherSuites: + if cipherSuite in clientHello.cipher_suites: + break + else: + for result in self._sendError(\ + AlertDescription.handshake_failure): + yield result + + #Get the client nonce; create server nonce + clientRandom = clientHello.random + serverRandom = getRandomBytes(32) + + #The username better be there, this time + if not clientHello.srp_username: + for result in self._sendError(\ + AlertDescription.illegal_parameter, + "Client resent a hello, but without the SRP"\ + " username"): + yield result + + + #Get username + self.allegedSrpUsername = clientHello.srp_username + + #Get parameters from username + try: + entry = verifierDB[self.allegedSrpUsername] + except KeyError: + for result in self._sendError(\ + AlertDescription.unknown_srp_username): + yield result + (N, g, s, v) = entry + + #Calculate server's ephemeral DH values (b, B) + b = bytesToNumber(getRandomBytes(32)) + k = makeK(N, g) + B = (powMod(g, b, N) + (k*v)) % N + + #Create ServerKeyExchange, signing it if necessary + serverKeyExchange = ServerKeyExchange(cipherSuite) + serverKeyExchange.createSRP(N, g, stringToBytes(s), B) + if cipherSuite in CipherSuite.srpRsaSuites: + hashBytes = serverKeyExchange.hash(clientRandom, + serverRandom) + serverKeyExchange.signature = privateKey.sign(hashBytes) + + 
#Send ServerHello[, Certificate], ServerKeyExchange, + #ServerHelloDone + msgs = [] + serverHello = ServerHello() + serverHello.create(self.version, serverRandom, sessionID, + cipherSuite, certificateType) + msgs.append(serverHello) + if cipherSuite in CipherSuite.srpRsaSuites: + certificateMsg = Certificate(certificateType) + certificateMsg.create(serverCertChain) + msgs.append(certificateMsg) + msgs.append(serverKeyExchange) + msgs.append(ServerHelloDone()) + for result in self._sendMsgs(msgs): + yield result + + #From here on, the client's messages must have the right version + self._versionCheck = True + + #Get and check ClientKeyExchange + for result in self._getMsg(ContentType.handshake, + HandshakeType.client_key_exchange, + cipherSuite): + if result in (0,1): + yield result + else: + break + clientKeyExchange = result + A = clientKeyExchange.srp_A + if A % N == 0: + postFinishedError = (AlertDescription.illegal_parameter, + "Suspicious A value") + #Calculate u + u = makeU(N, A, B) + + #Calculate premaster secret + S = powMod((A * powMod(v,u,N)) % N, b, N) + premasterSecret = numberToBytes(S) + + + #If we've selected an RSA suite, exchange keys and calculate + #premaster secret: + elif cipherSuite in CipherSuite.rsaSuites: + + #Send ServerHello, Certificate[, CertificateRequest], + #ServerHelloDone + msgs = [] + msgs.append(ServerHello().create(self.version, serverRandom, + sessionID, cipherSuite, certificateType)) + msgs.append(Certificate(certificateType).create(serverCertChain)) + if reqCert: + msgs.append(CertificateRequest()) + msgs.append(ServerHelloDone()) + for result in self._sendMsgs(msgs): + yield result + + #From here on, the client's messages must have the right version + self._versionCheck = True + + #Get [Certificate,] (if was requested) + if reqCert: + if self.version == (3,0): + for result in self._getMsg((ContentType.handshake, + ContentType.alert), + HandshakeType.certificate, + certificateType): + if result in (0,1): + yield result + 
else: + break + msg = result + + if isinstance(msg, Alert): + #If it's not a no_certificate alert, re-raise + alert = msg + if alert.description != \ + AlertDescription.no_certificate: + self._shutdown(False) + raise TLSRemoteAlert(alert) + elif isinstance(msg, Certificate): + clientCertificate = msg + if clientCertificate.certChain and \ + clientCertificate.certChain.getNumCerts()!=0: + clientCertChain = clientCertificate.certChain + else: + raise AssertionError() + elif self.version in ((3,1), (3,2)): + for result in self._getMsg(ContentType.handshake, + HandshakeType.certificate, + certificateType): + if result in (0,1): + yield result + else: + break + clientCertificate = result + if clientCertificate.certChain and \ + clientCertificate.certChain.getNumCerts()!=0: + clientCertChain = clientCertificate.certChain + else: + raise AssertionError() + + #Get ClientKeyExchange + for result in self._getMsg(ContentType.handshake, + HandshakeType.client_key_exchange, + cipherSuite): + if result in (0,1): + yield result + else: + break + clientKeyExchange = result + + #Decrypt ClientKeyExchange + premasterSecret = privateKey.decrypt(\ + clientKeyExchange.encryptedPreMasterSecret) + + randomPreMasterSecret = getRandomBytes(48) + versionCheck = (premasterSecret[0], premasterSecret[1]) + if not premasterSecret: + premasterSecret = randomPreMasterSecret + elif len(premasterSecret)!=48: + premasterSecret = randomPreMasterSecret + elif versionCheck != clientHello.client_version: + if versionCheck != self.version: #Tolerate buggy IE clients + premasterSecret = randomPreMasterSecret + + #Get and check CertificateVerify, if relevant + if clientCertChain: + if self.version == (3,0): + #Create a temporary session object, just for the purpose + #of checking the CertificateVerify + session = Session() + session._calcMasterSecret(self.version, premasterSecret, + clientRandom, serverRandom) + verifyBytes = self._calcSSLHandshakeHash(\ + session.masterSecret, "") + elif self.version in 
((3,1), (3,2)): + verifyBytes = stringToBytes(self._handshake_md5.digest() +\ + self._handshake_sha.digest()) + for result in self._getMsg(ContentType.handshake, + HandshakeType.certificate_verify): + if result in (0,1): + yield result + else: + break + certificateVerify = result + publicKey = clientCertChain.getEndEntityPublicKey() + if len(publicKey) < settings.minKeySize: + postFinishedError = (AlertDescription.handshake_failure, + "Client's public key too small: %d" % len(publicKey)) + if len(publicKey) > settings.maxKeySize: + postFinishedError = (AlertDescription.handshake_failure, + "Client's public key too large: %d" % len(publicKey)) + + if not publicKey.verify(certificateVerify.signature, + verifyBytes): + postFinishedError = (AlertDescription.decrypt_error, + "Signature failed to verify") + + + #Create the session object + self.session = Session() + self.session._calcMasterSecret(self.version, premasterSecret, + clientRandom, serverRandom) + self.session.sessionID = sessionID + self.session.cipherSuite = cipherSuite + self.session.srpUsername = self.allegedSrpUsername + self.session.clientCertChain = clientCertChain + self.session.serverCertChain = serverCertChain + + #Calculate pending connection states + self._calcPendingStates(clientRandom, serverRandom, + settings.cipherImplementations) + + #Exchange ChangeCipherSpec and Finished messages + for result in self._getFinished(): + yield result + + #If we were holding a post-finished error until receiving the client + #finished message, send it now. We delay the call until this point + #because calling sendError() throws an exception, and our caller might + #shut down the socket upon receiving the exception. If he did, and the + #client was still sending its ChangeCipherSpec or Finished messages, it + #would cause a socket error on the client side. This is a lot of + #consideration to show to misbehaving clients, but this would also + #cause problems with fault-testing. 
+ if postFinishedError: + for result in self._sendError(*postFinishedError): + yield result + + for result in self._sendFinished(): + yield result + + #Add the session object to the session cache + if sessionCache and sessionID: + sessionCache[bytesToString(sessionID)] = self.session + + #Mark the connection as open + self.session._setResumable(True) + self._handshakeDone(resumed=False) + + + def _handshakeWrapperAsync(self, handshaker, checker): + if not self.fault: + try: + for result in handshaker: + yield result + if checker: + try: + checker(self) + except TLSAuthenticationError: + alert = Alert().create(AlertDescription.close_notify, + AlertLevel.fatal) + for result in self._sendMsg(alert): + yield result + raise + except: + self._shutdown(False) + raise + else: + try: + for result in handshaker: + yield result + if checker: + try: + checker(self) + except TLSAuthenticationError: + alert = Alert().create(AlertDescription.close_notify, + AlertLevel.fatal) + for result in self._sendMsg(alert): + yield result + raise + except socket.error, e: + raise TLSFaultError("socket error!") + except TLSAbruptCloseError, e: + raise TLSFaultError("abrupt close error!") + except TLSAlert, alert: + if alert.description not in Fault.faultAlerts[self.fault]: + raise TLSFaultError(str(alert)) + else: + pass + except: + self._shutdown(False) + raise + else: + raise TLSFaultError("No error!") + + + def _getKeyFromChain(self, certificate, settings): + #Get and check cert chain from the Certificate message + certChain = certificate.certChain + if not certChain or certChain.getNumCerts() == 0: + for result in self._sendError(AlertDescription.illegal_parameter, + "Other party sent a Certificate message without "\ + "certificates"): + yield result + + #Get and check public key from the cert chain + publicKey = certChain.getEndEntityPublicKey() + if len(publicKey) < settings.minKeySize: + for result in self._sendError(AlertDescription.handshake_failure, + "Other party's public key too 
small: %d" % len(publicKey)): + yield result + if len(publicKey) > settings.maxKeySize: + for result in self._sendError(AlertDescription.handshake_failure, + "Other party's public key too large: %d" % len(publicKey)): + yield result + + yield publicKey, certChain diff --git a/gdata.py-1.2.3/src/gdata/tlslite/TLSRecordLayer.py b/gdata.py-1.2.3/src/gdata/tlslite/TLSRecordLayer.py new file mode 100755 index 0000000..875ce80 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/TLSRecordLayer.py @@ -0,0 +1,1123 @@ +"""Helper class for TLSConnection.""" +from __future__ import generators + +from utils.compat import * +from utils.cryptomath import * +from utils.cipherfactory import createAES, createRC4, createTripleDES +from utils.codec import * +from errors import * +from messages import * +from mathtls import * +from constants import * +from utils.cryptomath import getRandomBytes +from utils import hmac +from FileObject import FileObject +import sha +import md5 +import socket +import errno +import traceback + +class _ConnectionState: + def __init__(self): + self.macContext = None + self.encContext = None + self.seqnum = 0 + + def getSeqNumStr(self): + w = Writer(8) + w.add(self.seqnum, 8) + seqnumStr = bytesToString(w.bytes) + self.seqnum += 1 + return seqnumStr + + +class TLSRecordLayer: + """ + This class handles data transmission for a TLS connection. + + Its only subclass is L{tlslite.TLSConnection.TLSConnection}. We've + separated the code in this class from TLSConnection to make things + more readable. + + + @type sock: socket.socket + @ivar sock: The underlying socket object. + + @type session: L{tlslite.Session.Session} + @ivar session: The session corresponding to this connection. + + Due to TLS session resumption, multiple connections can correspond + to the same underlying session. + + @type version: tuple + @ivar version: The TLS version being used for this connection. + + (3,0) means SSL 3.0, and (3,1) means TLS 1.0. 
+ + @type closed: bool + @ivar closed: If this connection is closed. + + @type resumed: bool + @ivar resumed: If this connection is based on a resumed session. + + @type allegedSharedKeyUsername: str or None + @ivar allegedSharedKeyUsername: This is set to the shared-key + username asserted by the client, whether the handshake succeeded or + not. If the handshake fails, this can be inspected to + determine if a guessing attack is in progress against a particular + user account. + + @type allegedSrpUsername: str or None + @ivar allegedSrpUsername: This is set to the SRP username + asserted by the client, whether the handshake succeeded or not. + If the handshake fails, this can be inspected to determine + if a guessing attack is in progress against a particular user + account. + + @type closeSocket: bool + @ivar closeSocket: If the socket should be closed when the + connection is closed (writable). + + If you set this to True, TLS Lite will assume the responsibility of + closing the socket when the TLS Connection is shutdown (either + through an error or through the user calling close()). The default + is False. + + @type ignoreAbruptClose: bool + @ivar ignoreAbruptClose: If an abrupt close of the socket should + raise an error (writable). + + If you set this to True, TLS Lite will not raise a + L{tlslite.errors.TLSAbruptCloseError} exception if the underlying + socket is unexpectedly closed. Such an unexpected closure could be + caused by an attacker. However, it also occurs with some incorrect + TLS implementations. + + You should set this to True only if you're not worried about an + attacker truncating the connection, and only if necessary to avoid + spurious errors. The default is False. + + @sort: __init__, read, readAsync, write, writeAsync, close, closeAsync, + getCipherImplementation, getCipherName + """ + + def __init__(self, sock): + self.sock = sock + + #My session object (Session instance; read-only) + self.session = None + + #Am I a client or server? 
+ self._client = None + + #Buffers for processing messages + self._handshakeBuffer = [] + self._readBuffer = "" + + #Handshake digests + self._handshake_md5 = md5.md5() + self._handshake_sha = sha.sha() + + #TLS Protocol Version + self.version = (0,0) #read-only + self._versionCheck = False #Once we choose a version, this is True + + #Current and Pending connection states + self._writeState = _ConnectionState() + self._readState = _ConnectionState() + self._pendingWriteState = _ConnectionState() + self._pendingReadState = _ConnectionState() + + #Is the connection open? + self.closed = True #read-only + self._refCount = 0 #Used to trigger closure + + #Is this a resumed (or shared-key) session? + self.resumed = False #read-only + + #What username did the client claim in his handshake? + self.allegedSharedKeyUsername = None + self.allegedSrpUsername = None + + #On a call to close(), do we close the socket? (writeable) + self.closeSocket = False + + #If the socket is abruptly closed, do we ignore it + #and pretend the connection was shut down properly? (writeable) + self.ignoreAbruptClose = False + + #Fault we will induce, for testing purposes + self.fault = None + + #********************************************************* + # Public Functions START + #********************************************************* + + def read(self, max=None, min=1): + """Read some data from the TLS connection. + + This function will block until at least 'min' bytes are + available (or the connection is closed). + + If an exception is raised, the connection will have been + automatically closed. + + @type max: int + @param max: The maximum number of bytes to return. + + @type min: int + @param min: The minimum number of bytes to return + + @rtype: str + @return: A string of no more than 'max' bytes, and no fewer + than 'min' (unless the connection has been closed, in which + case fewer than 'min' bytes may be returned). + + @raise socket.error: If a socket error occurs. 
+ @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + """ + for result in self.readAsync(max, min): + pass + return result + + def readAsync(self, max=None, min=1): + """Start a read operation on the TLS connection. + + This function returns a generator which behaves similarly to + read(). Successive invocations of the generator will return 0 + if it is waiting to read from the socket, 1 if it is waiting + to write to the socket, or a string if the read operation has + completed. + + @rtype: iterable + @return: A generator; see above for details. + """ + try: + while len(self._readBuffer)= len(s): + break + if endIndex > len(s): + endIndex = len(s) + block = stringToBytes(s[startIndex : endIndex]) + applicationData = ApplicationData().create(block) + for result in self._sendMsg(applicationData, skipEmptyFrag): + yield result + skipEmptyFrag = True #only send an empy fragment on 1st message + index += 1 + except: + self._shutdown(False) + raise + + def close(self): + """Close the TLS connection. + + This function will block until it has exchanged close_notify + alerts with the other party. After doing so, it will shut down the + TLS connection. Further attempts to read through this connection + will return "". Further attempts to write through this connection + will raise ValueError. + + If makefile() has been called on this connection, the connection + will be not be closed until the connection object and all file + objects have been closed. + + Even if an exception is raised, the connection will have been + closed. + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. 
+ """ + if not self.closed: + for result in self._decrefAsync(): + pass + + def closeAsync(self): + """Start a close operation on the TLS connection. + + This function returns a generator which behaves similarly to + close(). Successive invocations of the generator will return 0 + if it is waiting to read from the socket, 1 if it is waiting + to write to the socket, or will raise StopIteration if the + close operation has completed. + + @rtype: iterable + @return: A generator; see above for details. + """ + if not self.closed: + for result in self._decrefAsync(): + yield result + + def _decrefAsync(self): + self._refCount -= 1 + if self._refCount == 0 and not self.closed: + try: + for result in self._sendMsg(Alert().create(\ + AlertDescription.close_notify, AlertLevel.warning)): + yield result + alert = None + while not alert: + for result in self._getMsg((ContentType.alert, \ + ContentType.application_data)): + if result in (0,1): + yield result + if result.contentType == ContentType.alert: + alert = result + if alert.description == AlertDescription.close_notify: + self._shutdown(True) + else: + raise TLSRemoteAlert(alert) + except (socket.error, TLSAbruptCloseError): + #If the other side closes the socket, that's okay + self._shutdown(True) + except: + self._shutdown(False) + raise + + def getCipherName(self): + """Get the name of the cipher used with this connection. + + @rtype: str + @return: The name of the cipher used with this connection. + Either 'aes128', 'aes256', 'rc4', or '3des'. + """ + if not self._writeState.encContext: + return None + return self._writeState.encContext.name + + def getCipherImplementation(self): + """Get the name of the cipher implementation used with + this connection. + + @rtype: str + @return: The name of the cipher implementation used with + this connection. Either 'python', 'cryptlib', 'openssl', + or 'pycrypto'. 
+ """ + if not self._writeState.encContext: + return None + return self._writeState.encContext.implementation + + + + #Emulate a socket, somewhat - + def send(self, s): + """Send data to the TLS connection (socket emulation). + + @raise socket.error: If a socket error occurs. + """ + self.write(s) + return len(s) + + def sendall(self, s): + """Send data to the TLS connection (socket emulation). + + @raise socket.error: If a socket error occurs. + """ + self.write(s) + + def recv(self, bufsize): + """Get some data from the TLS connection (socket emulation). + + @raise socket.error: If a socket error occurs. + @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed + without a preceding alert. + @raise tlslite.errors.TLSAlert: If a TLS alert is signalled. + """ + return self.read(bufsize) + + def makefile(self, mode='r', bufsize=-1): + """Create a file object for the TLS connection (socket emulation). + + @rtype: L{tlslite.FileObject.FileObject} + """ + self._refCount += 1 + return FileObject(self, mode, bufsize) + + def getsockname(self): + """Return the socket's own address (socket emulation).""" + return self.sock.getsockname() + + def getpeername(self): + """Return the remote address to which the socket is connected + (socket emulation).""" + return self.sock.getpeername() + + def settimeout(self, value): + """Set a timeout on blocking socket operations (socket emulation).""" + return self.sock.settimeout(value) + + def gettimeout(self): + """Return the timeout associated with socket operations (socket + emulation).""" + return self.sock.gettimeout() + + def setsockopt(self, level, optname, value): + """Set the value of the given socket option (socket emulation).""" + return self.sock.setsockopt(level, optname, value) + + + #********************************************************* + # Public Functions END + #********************************************************* + + def _shutdown(self, resumable): + self._writeState = _ConnectionState() + 
self._readState = _ConnectionState() + #Don't do this: self._readBuffer = "" + self.version = (0,0) + self._versionCheck = False + self.closed = True + if self.closeSocket: + self.sock.close() + + #Even if resumable is False, we'll never toggle this on + if not resumable and self.session: + self.session.resumable = False + + + def _sendError(self, alertDescription, errorStr=None): + alert = Alert().create(alertDescription, AlertLevel.fatal) + for result in self._sendMsg(alert): + yield result + self._shutdown(False) + raise TLSLocalAlert(alert, errorStr) + + def _sendMsgs(self, msgs): + skipEmptyFrag = False + for msg in msgs: + for result in self._sendMsg(msg, skipEmptyFrag): + yield result + skipEmptyFrag = True + + def _sendMsg(self, msg, skipEmptyFrag=False): + bytes = msg.write() + contentType = msg.contentType + + #Whenever we're connected and asked to send a message, + #we first send an empty Application Data message. This prevents + #an attacker from launching a chosen-plaintext attack based on + #knowing the next IV. 
+ if not self.closed and not skipEmptyFrag and self.version == (3,1): + if self._writeState.encContext: + if self._writeState.encContext.isBlockCipher: + for result in self._sendMsg(ApplicationData(), + skipEmptyFrag=True): + yield result + + #Update handshake hashes + if contentType == ContentType.handshake: + bytesStr = bytesToString(bytes) + self._handshake_md5.update(bytesStr) + self._handshake_sha.update(bytesStr) + + #Calculate MAC + if self._writeState.macContext: + seqnumStr = self._writeState.getSeqNumStr() + bytesStr = bytesToString(bytes) + mac = self._writeState.macContext.copy() + mac.update(seqnumStr) + mac.update(chr(contentType)) + if self.version == (3,0): + mac.update( chr( int(len(bytes)/256) ) ) + mac.update( chr( int(len(bytes)%256) ) ) + elif self.version in ((3,1), (3,2)): + mac.update(chr(self.version[0])) + mac.update(chr(self.version[1])) + mac.update( chr( int(len(bytes)/256) ) ) + mac.update( chr( int(len(bytes)%256) ) ) + else: + raise AssertionError() + mac.update(bytesStr) + macString = mac.digest() + macBytes = stringToBytes(macString) + if self.fault == Fault.badMAC: + macBytes[0] = (macBytes[0]+1) % 256 + + #Encrypt for Block or Stream Cipher + if self._writeState.encContext: + #Add padding and encrypt (for Block Cipher): + if self._writeState.encContext.isBlockCipher: + + #Add TLS 1.1 fixed block + if self.version == (3,2): + bytes = self.fixedIVBlock + bytes + + #Add padding: bytes = bytes + (macBytes + paddingBytes) + currentLength = len(bytes) + len(macBytes) + 1 + blockLength = self._writeState.encContext.block_size + paddingLength = blockLength-(currentLength % blockLength) + + paddingBytes = createByteArraySequence([paddingLength] * \ + (paddingLength+1)) + if self.fault == Fault.badPadding: + paddingBytes[0] = (paddingBytes[0]+1) % 256 + endBytes = concatArrays(macBytes, paddingBytes) + bytes = concatArrays(bytes, endBytes) + #Encrypt + plaintext = stringToBytes(bytes) + ciphertext = 
self._writeState.encContext.encrypt(plaintext) + bytes = stringToBytes(ciphertext) + + #Encrypt (for Stream Cipher) + else: + bytes = concatArrays(bytes, macBytes) + plaintext = bytesToString(bytes) + ciphertext = self._writeState.encContext.encrypt(plaintext) + bytes = stringToBytes(ciphertext) + + #Add record header and send + r = RecordHeader3().create(self.version, contentType, len(bytes)) + s = bytesToString(concatArrays(r.write(), bytes)) + while 1: + try: + bytesSent = self.sock.send(s) #Might raise socket.error + except socket.error, why: + if why[0] == errno.EWOULDBLOCK: + yield 1 + continue + else: + raise + if bytesSent == len(s): + return + s = s[bytesSent:] + yield 1 + + + def _getMsg(self, expectedType, secondaryType=None, constructorType=None): + try: + if not isinstance(expectedType, tuple): + expectedType = (expectedType,) + + #Spin in a loop, until we've got a non-empty record of a type we + #expect. The loop will be repeated if: + # - we receive a renegotiation attempt; we send no_renegotiation, + # then try again + # - we receive an empty application-data fragment; we try again + while 1: + for result in self._getNextRecord(): + if result in (0,1): + yield result + recordHeader, p = result + + #If this is an empty application-data fragment, try again + if recordHeader.type == ContentType.application_data: + if p.index == len(p.bytes): + continue + + #If we received an unexpected record type... + if recordHeader.type not in expectedType: + + #If we received an alert... + if recordHeader.type == ContentType.alert: + alert = Alert().parse(p) + + #We either received a fatal error, a warning, or a + #close_notify. In any case, we're going to close the + #connection. In the latter two cases we respond with + #a close_notify, but ignore any socket errors, since + #the other side might have already closed the socket. 
+ if alert.level == AlertLevel.warning or \ + alert.description == AlertDescription.close_notify: + + #If the sendMsg() call fails because the socket has + #already been closed, we will be forgiving and not + #report the error nor invalidate the "resumability" + #of the session. + try: + alertMsg = Alert() + alertMsg.create(AlertDescription.close_notify, + AlertLevel.warning) + for result in self._sendMsg(alertMsg): + yield result + except socket.error: + pass + + if alert.description == \ + AlertDescription.close_notify: + self._shutdown(True) + elif alert.level == AlertLevel.warning: + self._shutdown(False) + + else: #Fatal alert: + self._shutdown(False) + + #Raise the alert as an exception + raise TLSRemoteAlert(alert) + + #If we received a renegotiation attempt... + if recordHeader.type == ContentType.handshake: + subType = p.get(1) + reneg = False + if self._client: + if subType == HandshakeType.hello_request: + reneg = True + else: + if subType == HandshakeType.client_hello: + reneg = True + #Send no_renegotiation, then try again + if reneg: + alertMsg = Alert() + alertMsg.create(AlertDescription.no_renegotiation, + AlertLevel.warning) + for result in self._sendMsg(alertMsg): + yield result + continue + + #Otherwise: this is an unexpected record, but neither an + #alert nor renegotiation + for result in self._sendError(\ + AlertDescription.unexpected_message, + "received type=%d" % recordHeader.type): + yield result + + break + + #Parse based on content_type + if recordHeader.type == ContentType.change_cipher_spec: + yield ChangeCipherSpec().parse(p) + elif recordHeader.type == ContentType.alert: + yield Alert().parse(p) + elif recordHeader.type == ContentType.application_data: + yield ApplicationData().parse(p) + elif recordHeader.type == ContentType.handshake: + #Convert secondaryType to tuple, if it isn't already + if not isinstance(secondaryType, tuple): + secondaryType = (secondaryType,) + + #If it's a handshake message, check handshake header + if 
recordHeader.ssl2: + subType = p.get(1) + if subType != HandshakeType.client_hello: + for result in self._sendError(\ + AlertDescription.unexpected_message, + "Can only handle SSLv2 ClientHello messages"): + yield result + if HandshakeType.client_hello not in secondaryType: + for result in self._sendError(\ + AlertDescription.unexpected_message): + yield result + subType = HandshakeType.client_hello + else: + subType = p.get(1) + if subType not in secondaryType: + for result in self._sendError(\ + AlertDescription.unexpected_message, + "Expecting %s, got %s" % (str(secondaryType), subType)): + yield result + + #Update handshake hashes + sToHash = bytesToString(p.bytes) + self._handshake_md5.update(sToHash) + self._handshake_sha.update(sToHash) + + #Parse based on handshake type + if subType == HandshakeType.client_hello: + yield ClientHello(recordHeader.ssl2).parse(p) + elif subType == HandshakeType.server_hello: + yield ServerHello().parse(p) + elif subType == HandshakeType.certificate: + yield Certificate(constructorType).parse(p) + elif subType == HandshakeType.certificate_request: + yield CertificateRequest().parse(p) + elif subType == HandshakeType.certificate_verify: + yield CertificateVerify().parse(p) + elif subType == HandshakeType.server_key_exchange: + yield ServerKeyExchange(constructorType).parse(p) + elif subType == HandshakeType.server_hello_done: + yield ServerHelloDone().parse(p) + elif subType == HandshakeType.client_key_exchange: + yield ClientKeyExchange(constructorType, \ + self.version).parse(p) + elif subType == HandshakeType.finished: + yield Finished(self.version).parse(p) + else: + raise AssertionError() + + #If an exception was raised by a Parser or Message instance: + except SyntaxError, e: + for result in self._sendError(AlertDescription.decode_error, + formatExceptionTrace(e)): + yield result + + + #Returns next record or next handshake message + def _getNextRecord(self): + + #If there's a handshake message waiting, return it + if 
self._handshakeBuffer: + recordHeader, bytes = self._handshakeBuffer[0] + self._handshakeBuffer = self._handshakeBuffer[1:] + yield (recordHeader, Parser(bytes)) + return + + #Otherwise... + #Read the next record header + bytes = createByteArraySequence([]) + recordHeaderLength = 1 + ssl2 = False + while 1: + try: + s = self.sock.recv(recordHeaderLength-len(bytes)) + except socket.error, why: + if why[0] == errno.EWOULDBLOCK: + yield 0 + continue + else: + raise + + #If the connection was abruptly closed, raise an error + if len(s)==0: + raise TLSAbruptCloseError() + + bytes += stringToBytes(s) + if len(bytes)==1: + if bytes[0] in ContentType.all: + ssl2 = False + recordHeaderLength = 5 + elif bytes[0] == 128: + ssl2 = True + recordHeaderLength = 2 + else: + raise SyntaxError() + if len(bytes) == recordHeaderLength: + break + + #Parse the record header + if ssl2: + r = RecordHeader2().parse(Parser(bytes)) + else: + r = RecordHeader3().parse(Parser(bytes)) + + #Check the record header fields + if r.length > 18432: + for result in self._sendError(AlertDescription.record_overflow): + yield result + + #Read the record contents + bytes = createByteArraySequence([]) + while 1: + try: + s = self.sock.recv(r.length - len(bytes)) + except socket.error, why: + if why[0] == errno.EWOULDBLOCK: + yield 0 + continue + else: + raise + + #If the connection is closed, raise a socket error + if len(s)==0: + raise TLSAbruptCloseError() + + bytes += stringToBytes(s) + if len(bytes) == r.length: + break + + #Check the record header fields (2) + #We do this after reading the contents from the socket, so that + #if there's an error, we at least don't leave extra bytes in the + #socket.. + # + # THIS CHECK HAS NO SECURITY RELEVANCE (?), BUT COULD HURT INTEROP. + # SO WE LEAVE IT OUT FOR NOW. 
+ # + #if self._versionCheck and r.version != self.version: + # for result in self._sendError(AlertDescription.protocol_version, + # "Version in header field: %s, should be %s" % (str(r.version), + # str(self.version))): + # yield result + + #Decrypt the record + for result in self._decryptRecord(r.type, bytes): + if result in (0,1): + yield result + else: + break + bytes = result + p = Parser(bytes) + + #If it doesn't contain handshake messages, we can just return it + if r.type != ContentType.handshake: + yield (r, p) + #If it's an SSLv2 ClientHello, we can return it as well + elif r.ssl2: + yield (r, p) + else: + #Otherwise, we loop through and add the handshake messages to the + #handshake buffer + while 1: + if p.index == len(bytes): #If we're at the end + if not self._handshakeBuffer: + for result in self._sendError(\ + AlertDescription.decode_error, \ + "Received empty handshake record"): + yield result + break + #There needs to be at least 4 bytes to get a header + if p.index+4 > len(bytes): + for result in self._sendError(\ + AlertDescription.decode_error, + "A record has a partial handshake message (1)"): + yield result + p.get(1) # skip handshake type + msgLength = p.get(3) + if p.index+msgLength > len(bytes): + for result in self._sendError(\ + AlertDescription.decode_error, + "A record has a partial handshake message (2)"): + yield result + + handshakePair = (r, bytes[p.index-4 : p.index+msgLength]) + self._handshakeBuffer.append(handshakePair) + p.index += msgLength + + #We've moved at least one handshake message into the + #handshakeBuffer, return the first one + recordHeader, bytes = self._handshakeBuffer[0] + self._handshakeBuffer = self._handshakeBuffer[1:] + yield (recordHeader, Parser(bytes)) + + + def _decryptRecord(self, recordType, bytes): + if self._readState.encContext: + + #Decrypt if it's a block cipher + if self._readState.encContext.isBlockCipher: + blockLength = self._readState.encContext.block_size + if len(bytes) % blockLength != 0: 
+ for result in self._sendError(\ + AlertDescription.decryption_failed, + "Encrypted data not a multiple of blocksize"): + yield result + ciphertext = bytesToString(bytes) + plaintext = self._readState.encContext.decrypt(ciphertext) + if self.version == (3,2): #For TLS 1.1, remove explicit IV + plaintext = plaintext[self._readState.encContext.block_size : ] + bytes = stringToBytes(plaintext) + + #Check padding + paddingGood = True + paddingLength = bytes[-1] + if (paddingLength+1) > len(bytes): + paddingGood=False + totalPaddingLength = 0 + else: + if self.version == (3,0): + totalPaddingLength = paddingLength+1 + elif self.version in ((3,1), (3,2)): + totalPaddingLength = paddingLength+1 + paddingBytes = bytes[-totalPaddingLength:-1] + for byte in paddingBytes: + if byte != paddingLength: + paddingGood = False + totalPaddingLength = 0 + else: + raise AssertionError() + + #Decrypt if it's a stream cipher + else: + paddingGood = True + ciphertext = bytesToString(bytes) + plaintext = self._readState.encContext.decrypt(ciphertext) + bytes = stringToBytes(plaintext) + totalPaddingLength = 0 + + #Check MAC + macGood = True + macLength = self._readState.macContext.digest_size + endLength = macLength + totalPaddingLength + if endLength > len(bytes): + macGood = False + else: + #Read MAC + startIndex = len(bytes) - endLength + endIndex = startIndex + macLength + checkBytes = bytes[startIndex : endIndex] + + #Calculate MAC + seqnumStr = self._readState.getSeqNumStr() + bytes = bytes[:-endLength] + bytesStr = bytesToString(bytes) + mac = self._readState.macContext.copy() + mac.update(seqnumStr) + mac.update(chr(recordType)) + if self.version == (3,0): + mac.update( chr( int(len(bytes)/256) ) ) + mac.update( chr( int(len(bytes)%256) ) ) + elif self.version in ((3,1), (3,2)): + mac.update(chr(self.version[0])) + mac.update(chr(self.version[1])) + mac.update( chr( int(len(bytes)/256) ) ) + mac.update( chr( int(len(bytes)%256) ) ) + else: + raise AssertionError() + 
mac.update(bytesStr) + macString = mac.digest() + macBytes = stringToBytes(macString) + + #Compare MACs + if macBytes != checkBytes: + macGood = False + + if not (paddingGood and macGood): + for result in self._sendError(AlertDescription.bad_record_mac, + "MAC failure (or padding failure)"): + yield result + + yield bytes + + def _handshakeStart(self, client): + self._client = client + self._handshake_md5 = md5.md5() + self._handshake_sha = sha.sha() + self._handshakeBuffer = [] + self.allegedSharedKeyUsername = None + self.allegedSrpUsername = None + self._refCount = 1 + + def _handshakeDone(self, resumed): + self.resumed = resumed + self.closed = False + + def _calcPendingStates(self, clientRandom, serverRandom, implementations): + if self.session.cipherSuite in CipherSuite.aes128Suites: + macLength = 20 + keyLength = 16 + ivLength = 16 + createCipherFunc = createAES + elif self.session.cipherSuite in CipherSuite.aes256Suites: + macLength = 20 + keyLength = 32 + ivLength = 16 + createCipherFunc = createAES + elif self.session.cipherSuite in CipherSuite.rc4Suites: + macLength = 20 + keyLength = 16 + ivLength = 0 + createCipherFunc = createRC4 + elif self.session.cipherSuite in CipherSuite.tripleDESSuites: + macLength = 20 + keyLength = 24 + ivLength = 8 + createCipherFunc = createTripleDES + else: + raise AssertionError() + + if self.version == (3,0): + createMACFunc = MAC_SSL + elif self.version in ((3,1), (3,2)): + createMACFunc = hmac.HMAC + + outputLength = (macLength*2) + (keyLength*2) + (ivLength*2) + + #Calculate Keying Material from Master Secret + if self.version == (3,0): + keyBlock = PRF_SSL(self.session.masterSecret, + concatArrays(serverRandom, clientRandom), + outputLength) + elif self.version in ((3,1), (3,2)): + keyBlock = PRF(self.session.masterSecret, + "key expansion", + concatArrays(serverRandom,clientRandom), + outputLength) + else: + raise AssertionError() + + #Slice up Keying Material + clientPendingState = _ConnectionState() + 
serverPendingState = _ConnectionState() + p = Parser(keyBlock) + clientMACBlock = bytesToString(p.getFixBytes(macLength)) + serverMACBlock = bytesToString(p.getFixBytes(macLength)) + clientKeyBlock = bytesToString(p.getFixBytes(keyLength)) + serverKeyBlock = bytesToString(p.getFixBytes(keyLength)) + clientIVBlock = bytesToString(p.getFixBytes(ivLength)) + serverIVBlock = bytesToString(p.getFixBytes(ivLength)) + clientPendingState.macContext = createMACFunc(clientMACBlock, + digestmod=sha) + serverPendingState.macContext = createMACFunc(serverMACBlock, + digestmod=sha) + clientPendingState.encContext = createCipherFunc(clientKeyBlock, + clientIVBlock, + implementations) + serverPendingState.encContext = createCipherFunc(serverKeyBlock, + serverIVBlock, + implementations) + + #Assign new connection states to pending states + if self._client: + self._pendingWriteState = clientPendingState + self._pendingReadState = serverPendingState + else: + self._pendingWriteState = serverPendingState + self._pendingReadState = clientPendingState + + if self.version == (3,2) and ivLength: + #Choose fixedIVBlock for TLS 1.1 (this is encrypted with the CBC + #residue to create the IV for each sent block) + self.fixedIVBlock = getRandomBytes(ivLength) + + def _changeWriteState(self): + self._writeState = self._pendingWriteState + self._pendingWriteState = _ConnectionState() + + def _changeReadState(self): + self._readState = self._pendingReadState + self._pendingReadState = _ConnectionState() + + def _sendFinished(self): + #Send ChangeCipherSpec + for result in self._sendMsg(ChangeCipherSpec()): + yield result + + #Switch to pending write state + self._changeWriteState() + + #Calculate verification data + verifyData = self._calcFinished(True) + if self.fault == Fault.badFinished: + verifyData[0] = (verifyData[0]+1)%256 + + #Send Finished message under new state + finished = Finished(self.version).create(verifyData) + for result in self._sendMsg(finished): + yield result + + def 
_getFinished(self): + #Get and check ChangeCipherSpec + for result in self._getMsg(ContentType.change_cipher_spec): + if result in (0,1): + yield result + changeCipherSpec = result + + if changeCipherSpec.type != 1: + for result in self._sendError(AlertDescription.illegal_parameter, + "ChangeCipherSpec type incorrect"): + yield result + + #Switch to pending read state + self._changeReadState() + + #Calculate verification data + verifyData = self._calcFinished(False) + + #Get and check Finished message under new state + for result in self._getMsg(ContentType.handshake, + HandshakeType.finished): + if result in (0,1): + yield result + finished = result + if finished.verify_data != verifyData: + for result in self._sendError(AlertDescription.decrypt_error, + "Finished message is incorrect"): + yield result + + def _calcFinished(self, send=True): + if self.version == (3,0): + if (self._client and send) or (not self._client and not send): + senderStr = "\x43\x4C\x4E\x54" + else: + senderStr = "\x53\x52\x56\x52" + + verifyData = self._calcSSLHandshakeHash(self.session.masterSecret, + senderStr) + return verifyData + + elif self.version in ((3,1), (3,2)): + if (self._client and send) or (not self._client and not send): + label = "client finished" + else: + label = "server finished" + + handshakeHashes = stringToBytes(self._handshake_md5.digest() + \ + self._handshake_sha.digest()) + verifyData = PRF(self.session.masterSecret, label, handshakeHashes, + 12) + return verifyData + else: + raise AssertionError() + + #Used for Finished messages and CertificateVerify messages in SSL v3 + def _calcSSLHandshakeHash(self, masterSecret, label): + masterSecretStr = bytesToString(masterSecret) + + imac_md5 = self._handshake_md5.copy() + imac_sha = self._handshake_sha.copy() + + imac_md5.update(label + masterSecretStr + '\x36'*48) + imac_sha.update(label + masterSecretStr + '\x36'*40) + + md5Str = md5.md5(masterSecretStr + ('\x5c'*48) + \ + imac_md5.digest()).digest() + shaStr = 
sha.sha(masterSecretStr + ('\x5c'*40) + \ + imac_sha.digest()).digest() + + return stringToBytes(md5Str + shaStr) + diff --git a/gdata.py-1.2.3/src/gdata/tlslite/VerifierDB.py b/gdata.py-1.2.3/src/gdata/tlslite/VerifierDB.py new file mode 100755 index 0000000..f706b17 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/VerifierDB.py @@ -0,0 +1,90 @@ +"""Class for storing SRP password verifiers.""" + +from utils.cryptomath import * +from utils.compat import * +import mathtls +from BaseDB import BaseDB + +class VerifierDB(BaseDB): + """This class represent an in-memory or on-disk database of SRP + password verifiers. + + A VerifierDB can be passed to a server handshake to authenticate + a client based on one of the verifiers. + + This class is thread-safe. + """ + def __init__(self, filename=None): + """Create a new VerifierDB instance. + + @type filename: str + @param filename: Filename for an on-disk database, or None for + an in-memory database. If the filename already exists, follow + this with a call to open(). To create a new on-disk database, + follow this with a call to create(). + """ + BaseDB.__init__(self, filename, "verifier") + + def _getItem(self, username, valueStr): + (N, g, salt, verifier) = valueStr.split(" ") + N = base64ToNumber(N) + g = base64ToNumber(g) + salt = base64ToString(salt) + verifier = base64ToNumber(verifier) + return (N, g, salt, verifier) + + def __setitem__(self, username, verifierEntry): + """Add a verifier entry to the database. + + @type username: str + @param username: The username to associate the verifier with. + Must be less than 256 characters in length. Must not already + be in the database. + + @type verifierEntry: tuple + @param verifierEntry: The verifier entry to add. Use + L{tlslite.VerifierDB.VerifierDB.makeVerifier} to create a + verifier entry. 
+ """ + BaseDB.__setitem__(self, username, verifierEntry) + + + def _setItem(self, username, value): + if len(username)>=256: + raise ValueError("username too long") + N, g, salt, verifier = value + N = numberToBase64(N) + g = numberToBase64(g) + salt = stringToBase64(salt) + verifier = numberToBase64(verifier) + valueStr = " ".join( (N, g, salt, verifier) ) + return valueStr + + def _checkItem(self, value, username, param): + (N, g, salt, verifier) = value + x = mathtls.makeX(salt, username, param) + v = powMod(g, x, N) + return (verifier == v) + + + def makeVerifier(username, password, bits): + """Create a verifier entry which can be stored in a VerifierDB. + + @type username: str + @param username: The username for this verifier. Must be less + than 256 characters in length. + + @type password: str + @param password: The password for this verifier. + + @type bits: int + @param bits: This values specifies which SRP group parameters + to use. It must be one of (1024, 1536, 2048, 3072, 4096, 6144, + 8192). Larger values are more secure but slower. 2048 is a + good compromise between safety and speed. + + @rtype: tuple + @return: A tuple which may be stored in a VerifierDB. + """ + return mathtls.makeVerifier(username, password, bits) + makeVerifier = staticmethod(makeVerifier) \ No newline at end of file diff --git a/gdata.py-1.2.3/src/gdata/tlslite/X509.py b/gdata.py-1.2.3/src/gdata/tlslite/X509.py new file mode 100755 index 0000000..a47ddcf --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/X509.py @@ -0,0 +1,133 @@ +"""Class representing an X.509 certificate.""" + +from utils.ASN1Parser import ASN1Parser +from utils.cryptomath import * +from utils.keyfactory import _createPublicRSAKey + + +class X509: + """This class represents an X.509 certificate. + + @type bytes: L{array.array} of unsigned bytes + @ivar bytes: The DER-encoded ASN.1 certificate + + @type publicKey: L{tlslite.utils.RSAKey.RSAKey} + @ivar publicKey: The subject public key from the certificate. 
+ """ + + def __init__(self): + self.bytes = createByteArraySequence([]) + self.publicKey = None + + def parse(self, s): + """Parse a PEM-encoded X.509 certificate. + + @type s: str + @param s: A PEM-encoded X.509 certificate (i.e. a base64-encoded + certificate wrapped with "-----BEGIN CERTIFICATE-----" and + "-----END CERTIFICATE-----" tags). + """ + + start = s.find("-----BEGIN CERTIFICATE-----") + end = s.find("-----END CERTIFICATE-----") + if start == -1: + raise SyntaxError("Missing PEM prefix") + if end == -1: + raise SyntaxError("Missing PEM postfix") + s = s[start+len("-----BEGIN CERTIFICATE-----") : end] + + bytes = base64ToBytes(s) + self.parseBinary(bytes) + return self + + def parseBinary(self, bytes): + """Parse a DER-encoded X.509 certificate. + + @type bytes: str or L{array.array} of unsigned bytes + @param bytes: A DER-encoded X.509 certificate. + """ + + if isinstance(bytes, type("")): + bytes = stringToBytes(bytes) + + self.bytes = bytes + p = ASN1Parser(bytes) + + #Get the tbsCertificate + tbsCertificateP = p.getChild(0) + + #Is the optional version field present? + #This determines which index the key is at. 
+ if tbsCertificateP.value[0]==0xA0: + subjectPublicKeyInfoIndex = 6 + else: + subjectPublicKeyInfoIndex = 5 + + #Get the subjectPublicKeyInfo + subjectPublicKeyInfoP = tbsCertificateP.getChild(\ + subjectPublicKeyInfoIndex) + + #Get the algorithm + algorithmP = subjectPublicKeyInfoP.getChild(0) + rsaOID = algorithmP.value + if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]: + raise SyntaxError("Unrecognized AlgorithmIdentifier") + + #Get the subjectPublicKey + subjectPublicKeyP = subjectPublicKeyInfoP.getChild(1) + + #Adjust for BIT STRING encapsulation + if (subjectPublicKeyP.value[0] !=0): + raise SyntaxError() + subjectPublicKeyP = ASN1Parser(subjectPublicKeyP.value[1:]) + + #Get the modulus and exponent + modulusP = subjectPublicKeyP.getChild(0) + publicExponentP = subjectPublicKeyP.getChild(1) + + #Decode them into numbers + n = bytesToNumber(modulusP.value) + e = bytesToNumber(publicExponentP.value) + + #Create a public key instance + self.publicKey = _createPublicRSAKey(n, e) + + def getFingerprint(self): + """Get the hex-encoded fingerprint of this certificate. + + @rtype: str + @return: A hex-encoded fingerprint. + """ + return sha.sha(self.bytes).hexdigest() + + def getCommonName(self): + """Get the Subject's Common Name from the certificate. + + The cryptlib_py module must be installed in order to use this + function. + + @rtype: str or None + @return: The CN component of the certificate's subject DN, if + present. 
+ """ + import cryptlib_py + import array + c = cryptlib_py.cryptImportCert(self.bytes, cryptlib_py.CRYPT_UNUSED) + name = cryptlib_py.CRYPT_CERTINFO_COMMONNAME + try: + try: + length = cryptlib_py.cryptGetAttributeString(c, name, None) + returnVal = array.array('B', [0] * length) + cryptlib_py.cryptGetAttributeString(c, name, returnVal) + returnVal = returnVal.tostring() + except cryptlib_py.CryptException, e: + if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND: + returnVal = None + return returnVal + finally: + cryptlib_py.cryptDestroyCert(c) + + def writeBytes(self): + return self.bytes + + diff --git a/gdata.py-1.2.3/src/gdata/tlslite/X509CertChain.py b/gdata.py-1.2.3/src/gdata/tlslite/X509CertChain.py new file mode 100755 index 0000000..d5f0b4d --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/X509CertChain.py @@ -0,0 +1,181 @@ +"""Class representing an X.509 certificate chain.""" + +from utils import cryptomath + +class X509CertChain: + """This class represents a chain of X.509 certificates. + + @type x509List: list + @ivar x509List: A list of L{tlslite.X509.X509} instances, + starting with the end-entity certificate and with every + subsequent certificate certifying the previous. + """ + + def __init__(self, x509List=None): + """Create a new X509CertChain. + + @type x509List: list + @param x509List: A list of L{tlslite.X509.X509} instances, + starting with the end-entity certificate and with every + subsequent certificate certifying the previous. + """ + if x509List: + self.x509List = x509List + else: + self.x509List = [] + + def getNumCerts(self): + """Get the number of certificates in this chain. + + @rtype: int + """ + return len(self.x509List) + + def getEndEntityPublicKey(self): + """Get the public key from the end-entity certificate. 
+ + @rtype: L{tlslite.utils.RSAKey.RSAKey} + """ + if self.getNumCerts() == 0: + raise AssertionError() + return self.x509List[0].publicKey + + def getFingerprint(self): + """Get the hex-encoded fingerprint of the end-entity certificate. + + @rtype: str + @return: A hex-encoded fingerprint. + """ + if self.getNumCerts() == 0: + raise AssertionError() + return self.x509List[0].getFingerprint() + + def getCommonName(self): + """Get the Subject's Common Name from the end-entity certificate. + + The cryptlib_py module must be installed in order to use this + function. + + @rtype: str or None + @return: The CN component of the certificate's subject DN, if + present. + """ + if self.getNumCerts() == 0: + raise AssertionError() + return self.x509List[0].getCommonName() + + def validate(self, x509TrustList): + """Check the validity of the certificate chain. + + This checks that every certificate in the chain validates with + the subsequent one, until some certificate validates with (or + is identical to) one of the passed-in root certificates. + + The cryptlib_py module must be installed in order to use this + function. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + certificate chain must extend to one of these certificates to + be considered valid. + """ + + import cryptlib_py + c1 = None + c2 = None + lastC = None + rootC = None + + try: + rootFingerprints = [c.getFingerprint() for c in x509TrustList] + + #Check that every certificate in the chain validates with the + #next one + for cert1, cert2 in zip(self.x509List, self.x509List[1:]): + + #If we come upon a root certificate, we're done. 
+ if cert1.getFingerprint() in rootFingerprints: + return True + + c1 = cryptlib_py.cryptImportCert(cert1.writeBytes(), + cryptlib_py.CRYPT_UNUSED) + c2 = cryptlib_py.cryptImportCert(cert2.writeBytes(), + cryptlib_py.CRYPT_UNUSED) + try: + cryptlib_py.cryptCheckCert(c1, c2) + except: + return False + cryptlib_py.cryptDestroyCert(c1) + c1 = None + cryptlib_py.cryptDestroyCert(c2) + c2 = None + + #If the last certificate is one of the root certificates, we're + #done. + if self.x509List[-1].getFingerprint() in rootFingerprints: + return True + + #Otherwise, find a root certificate that the last certificate + #chains to, and validate them. + lastC = cryptlib_py.cryptImportCert(self.x509List[-1].writeBytes(), + cryptlib_py.CRYPT_UNUSED) + for rootCert in x509TrustList: + rootC = cryptlib_py.cryptImportCert(rootCert.writeBytes(), + cryptlib_py.CRYPT_UNUSED) + if self._checkChaining(lastC, rootC): + try: + cryptlib_py.cryptCheckCert(lastC, rootC) + return True + except: + return False + return False + finally: + if not (c1 is None): + cryptlib_py.cryptDestroyCert(c1) + if not (c2 is None): + cryptlib_py.cryptDestroyCert(c2) + if not (lastC is None): + cryptlib_py.cryptDestroyCert(lastC) + if not (rootC is None): + cryptlib_py.cryptDestroyCert(rootC) + + + + def _checkChaining(self, lastC, rootC): + import cryptlib_py + import array + def compareNames(name): + try: + length = cryptlib_py.cryptGetAttributeString(lastC, name, None) + lastName = array.array('B', [0] * length) + cryptlib_py.cryptGetAttributeString(lastC, name, lastName) + lastName = lastName.tostring() + except cryptlib_py.CryptException, e: + if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND: + lastName = None + try: + length = cryptlib_py.cryptGetAttributeString(rootC, name, None) + rootName = array.array('B', [0] * length) + cryptlib_py.cryptGetAttributeString(rootC, name, rootName) + rootName = rootName.tostring() + except cryptlib_py.CryptException, e: + if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND: + rootName 
= None + + return lastName == rootName + + cryptlib_py.cryptSetAttribute(lastC, + cryptlib_py.CRYPT_CERTINFO_ISSUERNAME, + cryptlib_py.CRYPT_UNUSED) + + if not compareNames(cryptlib_py.CRYPT_CERTINFO_COUNTRYNAME): + return False + if not compareNames(cryptlib_py.CRYPT_CERTINFO_LOCALITYNAME): + return False + if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONNAME): + return False + if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONALUNITNAME): + return False + if not compareNames(cryptlib_py.CRYPT_CERTINFO_COMMONNAME): + return False + return True \ No newline at end of file diff --git a/gdata.py-1.2.3/src/gdata/tlslite/__init__.py b/gdata.py-1.2.3/src/gdata/tlslite/__init__.py new file mode 100755 index 0000000..47cfd1c --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/__init__.py @@ -0,0 +1,39 @@ +""" +TLS Lite is a free python library that implements SSL v3, TLS v1, and +TLS v1.1. TLS Lite supports non-traditional authentication methods +such as SRP, shared keys, and cryptoIDs, in addition to X.509 +certificates. TLS Lite is pure python, however it can access OpenSSL, +cryptlib, pycrypto, and GMPY for faster crypto operations. TLS Lite +integrates with httplib, xmlrpclib, poplib, imaplib, smtplib, +SocketServer, asyncore, and Twisted. + +To use, do:: + + from tlslite.api import * + +Then use the L{tlslite.TLSConnection.TLSConnection} class with a socket, +or use one of the integration classes in L{tlslite.integration}. 
+ +@version: 0.3.8 +""" +__version__ = "0.3.8" + +__all__ = ["api", + "BaseDB", + "Checker", + "constants", + "errors", + "FileObject", + "HandshakeSettings", + "mathtls", + "messages", + "Session", + "SessionCache", + "SharedKeyDB", + "TLSConnection", + "TLSRecordLayer", + "VerifierDB", + "X509", + "X509CertChain", + "integration", + "utils"] diff --git a/gdata.py-1.2.3/src/gdata/tlslite/api.py b/gdata.py-1.2.3/src/gdata/tlslite/api.py new file mode 100755 index 0000000..eebfbc6 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/api.py @@ -0,0 +1,75 @@ +"""Import this module for easy access to TLS Lite objects. + +The TLS Lite API consists of classes, functions, and variables spread +throughout this package. Instead of importing them individually with:: + + from tlslite.TLSConnection import TLSConnection + from tlslite.HandshakeSettings import HandshakeSettings + from tlslite.errors import * + . + . + +It's easier to do:: + + from tlslite.api import * + +This imports all the important objects (TLSConnection, Checker, +HandshakeSettings, etc.) into the global namespace. 
In particular, it +imports:: + + from constants import AlertLevel, AlertDescription, Fault + from errors import * + from Checker import Checker + from HandshakeSettings import HandshakeSettings + from Session import Session + from SessionCache import SessionCache + from SharedKeyDB import SharedKeyDB + from TLSConnection import TLSConnection + from VerifierDB import VerifierDB + from X509 import X509 + from X509CertChain import X509CertChain + + from integration.HTTPTLSConnection import HTTPTLSConnection + from integration.POP3_TLS import POP3_TLS + from integration.IMAP4_TLS import IMAP4_TLS + from integration.SMTP_TLS import SMTP_TLS + from integration.XMLRPCTransport import XMLRPCTransport + from integration.TLSSocketServerMixIn import TLSSocketServerMixIn + from integration.TLSAsyncDispatcherMixIn import TLSAsyncDispatcherMixIn + from integration.TLSTwistedProtocolWrapper import TLSTwistedProtocolWrapper + from utils.cryptomath import cryptlibpyLoaded, m2cryptoLoaded, + gmpyLoaded, pycryptoLoaded, prngName + from utils.keyfactory import generateRSAKey, parsePEMKey, parseXMLKey, + parseAsPublicKey, parsePrivateKey +""" + +from constants import AlertLevel, AlertDescription, Fault +from errors import * +from Checker import Checker +from HandshakeSettings import HandshakeSettings +from Session import Session +from SessionCache import SessionCache +from SharedKeyDB import SharedKeyDB +from TLSConnection import TLSConnection +from VerifierDB import VerifierDB +from X509 import X509 +from X509CertChain import X509CertChain + +from integration.HTTPTLSConnection import HTTPTLSConnection +from integration.TLSSocketServerMixIn import TLSSocketServerMixIn +from integration.TLSAsyncDispatcherMixIn import TLSAsyncDispatcherMixIn +from integration.POP3_TLS import POP3_TLS +from integration.IMAP4_TLS import IMAP4_TLS +from integration.SMTP_TLS import SMTP_TLS +from integration.XMLRPCTransport import XMLRPCTransport +try: + import twisted + del(twisted) + from 
integration.TLSTwistedProtocolWrapper import TLSTwistedProtocolWrapper +except ImportError: + pass + +from utils.cryptomath import cryptlibpyLoaded, m2cryptoLoaded, gmpyLoaded, \ + pycryptoLoaded, prngName +from utils.keyfactory import generateRSAKey, parsePEMKey, parseXMLKey, \ + parseAsPublicKey, parsePrivateKey diff --git a/gdata.py-1.2.3/src/gdata/tlslite/constants.py b/gdata.py-1.2.3/src/gdata/tlslite/constants.py new file mode 100755 index 0000000..8f2d559 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/constants.py @@ -0,0 +1,225 @@ +"""Constants used in various places.""" + +class CertificateType: + x509 = 0 + openpgp = 1 + cryptoID = 2 + +class HandshakeType: + hello_request = 0 + client_hello = 1 + server_hello = 2 + certificate = 11 + server_key_exchange = 12 + certificate_request = 13 + server_hello_done = 14 + certificate_verify = 15 + client_key_exchange = 16 + finished = 20 + +class ContentType: + change_cipher_spec = 20 + alert = 21 + handshake = 22 + application_data = 23 + all = (20,21,22,23) + +class AlertLevel: + warning = 1 + fatal = 2 + +class AlertDescription: + """ + @cvar bad_record_mac: A TLS record failed to decrypt properly. + + If this occurs during a shared-key or SRP handshake it most likely + indicates a bad password. It may also indicate an implementation + error, or some tampering with the data in transit. + + This alert will be signalled by the server if the SRP password is bad. It + may also be signalled by the server if the SRP username is unknown to the + server, but it doesn't wish to reveal that fact. + + This alert will be signalled by the client if the shared-key username is + bad. + + @cvar handshake_failure: A problem occurred while handshaking. + + This typically indicates a lack of common ciphersuites between client and + server, or some other disagreement (about SRP parameters or key sizes, + for example). + + @cvar protocol_version: The other party's SSL/TLS version was unacceptable. 
+ + This indicates that the client and server couldn't agree on which version + of SSL or TLS to use. + + @cvar user_canceled: The handshake is being cancelled for some reason. + + """ + + close_notify = 0 + unexpected_message = 10 + bad_record_mac = 20 + decryption_failed = 21 + record_overflow = 22 + decompression_failure = 30 + handshake_failure = 40 + no_certificate = 41 #SSLv3 + bad_certificate = 42 + unsupported_certificate = 43 + certificate_revoked = 44 + certificate_expired = 45 + certificate_unknown = 46 + illegal_parameter = 47 + unknown_ca = 48 + access_denied = 49 + decode_error = 50 + decrypt_error = 51 + export_restriction = 60 + protocol_version = 70 + insufficient_security = 71 + internal_error = 80 + user_canceled = 90 + no_renegotiation = 100 + unknown_srp_username = 120 + missing_srp_username = 121 + untrusted_srp_parameters = 122 + +class CipherSuite: + TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA = 0x0050 + TLS_SRP_SHA_WITH_AES_128_CBC_SHA = 0x0053 + TLS_SRP_SHA_WITH_AES_256_CBC_SHA = 0x0056 + + TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA = 0x0051 + TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA = 0x0054 + TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA = 0x0057 + + TLS_RSA_WITH_3DES_EDE_CBC_SHA = 0x000A + TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F + TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035 + TLS_RSA_WITH_RC4_128_SHA = 0x0005 + + srpSuites = [] + srpSuites.append(TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA) + srpSuites.append(TLS_SRP_SHA_WITH_AES_128_CBC_SHA) + srpSuites.append(TLS_SRP_SHA_WITH_AES_256_CBC_SHA) + def getSrpSuites(ciphers): + suites = [] + for cipher in ciphers: + if cipher == "aes128": + suites.append(CipherSuite.TLS_SRP_SHA_WITH_AES_128_CBC_SHA) + elif cipher == "aes256": + suites.append(CipherSuite.TLS_SRP_SHA_WITH_AES_256_CBC_SHA) + elif cipher == "3des": + suites.append(CipherSuite.TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA) + return suites + getSrpSuites = staticmethod(getSrpSuites) + + srpRsaSuites = [] + srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA) + 
srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA) + srpRsaSuites.append(TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA) + def getSrpRsaSuites(ciphers): + suites = [] + for cipher in ciphers: + if cipher == "aes128": + suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA) + elif cipher == "aes256": + suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA) + elif cipher == "3des": + suites.append(CipherSuite.TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA) + return suites + getSrpRsaSuites = staticmethod(getSrpRsaSuites) + + rsaSuites = [] + rsaSuites.append(TLS_RSA_WITH_3DES_EDE_CBC_SHA) + rsaSuites.append(TLS_RSA_WITH_AES_128_CBC_SHA) + rsaSuites.append(TLS_RSA_WITH_AES_256_CBC_SHA) + rsaSuites.append(TLS_RSA_WITH_RC4_128_SHA) + def getRsaSuites(ciphers): + suites = [] + for cipher in ciphers: + if cipher == "aes128": + suites.append(CipherSuite.TLS_RSA_WITH_AES_128_CBC_SHA) + elif cipher == "aes256": + suites.append(CipherSuite.TLS_RSA_WITH_AES_256_CBC_SHA) + elif cipher == "rc4": + suites.append(CipherSuite.TLS_RSA_WITH_RC4_128_SHA) + elif cipher == "3des": + suites.append(CipherSuite.TLS_RSA_WITH_3DES_EDE_CBC_SHA) + return suites + getRsaSuites = staticmethod(getRsaSuites) + + tripleDESSuites = [] + tripleDESSuites.append(TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA) + tripleDESSuites.append(TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA) + tripleDESSuites.append(TLS_RSA_WITH_3DES_EDE_CBC_SHA) + + aes128Suites = [] + aes128Suites.append(TLS_SRP_SHA_WITH_AES_128_CBC_SHA) + aes128Suites.append(TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA) + aes128Suites.append(TLS_RSA_WITH_AES_128_CBC_SHA) + + aes256Suites = [] + aes256Suites.append(TLS_SRP_SHA_WITH_AES_256_CBC_SHA) + aes256Suites.append(TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA) + aes256Suites.append(TLS_RSA_WITH_AES_256_CBC_SHA) + + rc4Suites = [] + rc4Suites.append(TLS_RSA_WITH_RC4_128_SHA) + + +class Fault: + badUsername = 101 + badPassword = 102 + badA = 103 + clientSrpFaults = range(101,104) + + badVerifyMessage = 601 + 
clientCertFaults = range(601,602) + + badPremasterPadding = 501 + shortPremasterSecret = 502 + clientNoAuthFaults = range(501,503) + + badIdentifier = 401 + badSharedKey = 402 + clientSharedKeyFaults = range(401,403) + + badB = 201 + serverFaults = range(201,202) + + badFinished = 300 + badMAC = 301 + badPadding = 302 + genericFaults = range(300,303) + + faultAlerts = {\ + badUsername: (AlertDescription.unknown_srp_username, \ + AlertDescription.bad_record_mac),\ + badPassword: (AlertDescription.bad_record_mac,),\ + badA: (AlertDescription.illegal_parameter,),\ + badIdentifier: (AlertDescription.handshake_failure,),\ + badSharedKey: (AlertDescription.bad_record_mac,),\ + badPremasterPadding: (AlertDescription.bad_record_mac,),\ + shortPremasterSecret: (AlertDescription.bad_record_mac,),\ + badVerifyMessage: (AlertDescription.decrypt_error,),\ + badFinished: (AlertDescription.decrypt_error,),\ + badMAC: (AlertDescription.bad_record_mac,),\ + badPadding: (AlertDescription.bad_record_mac,) + } + + faultNames = {\ + badUsername: "bad username",\ + badPassword: "bad password",\ + badA: "bad A",\ + badIdentifier: "bad identifier",\ + badSharedKey: "bad sharedkey",\ + badPremasterPadding: "bad premaster padding",\ + shortPremasterSecret: "short premaster secret",\ + badVerifyMessage: "bad verify message",\ + badFinished: "bad finished message",\ + badMAC: "bad MAC",\ + badPadding: "bad padding" + } diff --git a/gdata.py-1.2.3/src/gdata/tlslite/errors.py b/gdata.py-1.2.3/src/gdata/tlslite/errors.py new file mode 100755 index 0000000..c7f7ba8 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/errors.py @@ -0,0 +1,149 @@ +"""Exception classes. 
+@sort: TLSError, TLSAbruptCloseError, TLSAlert, TLSLocalAlert, TLSRemoteAlert, +TLSAuthenticationError, TLSNoAuthenticationError, TLSAuthenticationTypeError, +TLSFingerprintError, TLSAuthorizationError, TLSValidationError, TLSFaultError +""" + +from constants import AlertDescription, AlertLevel + +class TLSError(Exception): + """Base class for all TLS Lite exceptions.""" + pass + +class TLSAbruptCloseError(TLSError): + """The socket was closed without a proper TLS shutdown. + + The TLS specification mandates that an alert of some sort + must be sent before the underlying socket is closed. If the socket + is closed without this, it could signify that an attacker is trying + to truncate the connection. It could also signify a misbehaving + TLS implementation, or a random network failure. + """ + pass + +class TLSAlert(TLSError): + """A TLS alert has been signalled.""" + pass + + _descriptionStr = {\ + AlertDescription.close_notify: "close_notify",\ + AlertDescription.unexpected_message: "unexpected_message",\ + AlertDescription.bad_record_mac: "bad_record_mac",\ + AlertDescription.decryption_failed: "decryption_failed",\ + AlertDescription.record_overflow: "record_overflow",\ + AlertDescription.decompression_failure: "decompression_failure",\ + AlertDescription.handshake_failure: "handshake_failure",\ + AlertDescription.no_certificate: "no certificate",\ + AlertDescription.bad_certificate: "bad_certificate",\ + AlertDescription.unsupported_certificate: "unsupported_certificate",\ + AlertDescription.certificate_revoked: "certificate_revoked",\ + AlertDescription.certificate_expired: "certificate_expired",\ + AlertDescription.certificate_unknown: "certificate_unknown",\ + AlertDescription.illegal_parameter: "illegal_parameter",\ + AlertDescription.unknown_ca: "unknown_ca",\ + AlertDescription.access_denied: "access_denied",\ + AlertDescription.decode_error: "decode_error",\ + AlertDescription.decrypt_error: "decrypt_error",\ + AlertDescription.export_restriction: 
"export_restriction",\ + AlertDescription.protocol_version: "protocol_version",\ + AlertDescription.insufficient_security: "insufficient_security",\ + AlertDescription.internal_error: "internal_error",\ + AlertDescription.user_canceled: "user_canceled",\ + AlertDescription.no_renegotiation: "no_renegotiation",\ + AlertDescription.unknown_srp_username: "unknown_srp_username",\ + AlertDescription.missing_srp_username: "missing_srp_username"} + +class TLSLocalAlert(TLSAlert): + """A TLS alert has been signalled by the local implementation. + + @type description: int + @ivar description: Set to one of the constants in + L{tlslite.constants.AlertDescription} + + @type level: int + @ivar level: Set to one of the constants in + L{tlslite.constants.AlertLevel} + + @type message: str + @ivar message: Description of what went wrong. + """ + def __init__(self, alert, message=None): + self.description = alert.description + self.level = alert.level + self.message = message + + def __str__(self): + alertStr = TLSAlert._descriptionStr.get(self.description) + if alertStr == None: + alertStr = str(self.description) + if self.message: + return alertStr + ": " + self.message + else: + return alertStr + +class TLSRemoteAlert(TLSAlert): + """A TLS alert has been signalled by the remote implementation. + + @type description: int + @ivar description: Set to one of the constants in + L{tlslite.constants.AlertDescription} + + @type level: int + @ivar level: Set to one of the constants in + L{tlslite.constants.AlertLevel} + """ + def __init__(self, alert): + self.description = alert.description + self.level = alert.level + + def __str__(self): + alertStr = TLSAlert._descriptionStr.get(self.description) + if alertStr == None: + alertStr = str(self.description) + return alertStr + +class TLSAuthenticationError(TLSError): + """The handshake succeeded, but the other party's authentication + was inadequate. 
+ + This exception will only be raised when a + L{tlslite.Checker.Checker} has been passed to a handshake function. + The Checker will be invoked once the handshake completes, and if + the Checker objects to how the other party authenticated, a + subclass of this exception will be raised. + """ + pass + +class TLSNoAuthenticationError(TLSAuthenticationError): + """The Checker was expecting the other party to authenticate with a + certificate chain, but this did not occur.""" + pass + +class TLSAuthenticationTypeError(TLSAuthenticationError): + """The Checker was expecting the other party to authenticate with a + different type of certificate chain.""" + pass + +class TLSFingerprintError(TLSAuthenticationError): + """The Checker was expecting the other party to authenticate with a + certificate chain that matches a different fingerprint.""" + pass + +class TLSAuthorizationError(TLSAuthenticationError): + """The Checker was expecting the other party to authenticate with a + certificate chain that has a different authorization.""" + pass + +class TLSValidationError(TLSAuthenticationError): + """The Checker has determined that the other party's certificate + chain is invalid.""" + pass + +class TLSFaultError(TLSError): + """The other party responded incorrectly to an induced fault. + + This exception will only occur during fault testing, when a + TLSConnection's fault variable is set to induce some sort of + faulty behavior, and the other party doesn't respond appropriately. + """ + pass diff --git a/gdata.py-1.2.3/src/gdata/tlslite/integration/AsyncStateMachine.py b/gdata.py-1.2.3/src/gdata/tlslite/integration/AsyncStateMachine.py new file mode 100755 index 0000000..abed604 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/integration/AsyncStateMachine.py @@ -0,0 +1,235 @@ +""" +A state machine for using TLS Lite with asynchronous I/O. +""" + +class AsyncStateMachine: + """ + This is an abstract class that's used to integrate TLS Lite with + asyncore and Twisted. 
+ + This class signals wantsReadsEvent() and wantsWriteEvent(). When + the underlying socket has become readable or writeable, the event + should be passed to this class by calling inReadEvent() or + inWriteEvent(). This class will then try to read or write through + the socket, and will update its state appropriately. + + This class will forward higher-level events to its subclass. For + example, when a complete TLS record has been received, + outReadEvent() will be called with the decrypted data. + """ + + def __init__(self): + self._clear() + + def _clear(self): + #These store the various asynchronous operations (i.e. + #generators). Only one of them, at most, is ever active at a + #time. + self.handshaker = None + self.closer = None + self.reader = None + self.writer = None + + #This stores the result from the last call to the + #currently active operation. If 0 it indicates that the + #operation wants to read, if 1 it indicates that the + #operation wants to write. If None, there is no active + #operation. + self.result = None + + def _checkAssert(self, maxActive=1): + #This checks that only one operation, at most, is + #active, and that self.result is set appropriately. + activeOps = 0 + if self.handshaker: + activeOps += 1 + if self.closer: + activeOps += 1 + if self.reader: + activeOps += 1 + if self.writer: + activeOps += 1 + + if self.result == None: + if activeOps != 0: + raise AssertionError() + elif self.result in (0,1): + if activeOps != 1: + raise AssertionError() + else: + raise AssertionError() + if activeOps > maxActive: + raise AssertionError() + + def wantsReadEvent(self): + """If the state machine wants to read. + + If an operation is active, this returns whether or not the + operation wants to read from the socket. If an operation is + not active, this returns None. + + @rtype: bool or None + @return: If the state machine wants to read. 
+ """ + if self.result != None: + return self.result == 0 + return None + + def wantsWriteEvent(self): + """If the state machine wants to write. + + If an operation is active, this returns whether or not the + operation wants to write to the socket. If an operation is + not active, this returns None. + + @rtype: bool or None + @return: If the state machine wants to write. + """ + if self.result != None: + return self.result == 1 + return None + + def outConnectEvent(self): + """Called when a handshake operation completes. + + May be overridden in subclass. + """ + pass + + def outCloseEvent(self): + """Called when a close operation completes. + + May be overridden in subclass. + """ + pass + + def outReadEvent(self, readBuffer): + """Called when a read operation completes. + + May be overridden in subclass.""" + pass + + def outWriteEvent(self): + """Called when a write operation completes. + + May be overridden in subclass.""" + pass + + def inReadEvent(self): + """Tell the state machine it can read from the socket.""" + try: + self._checkAssert() + if self.handshaker: + self._doHandshakeOp() + elif self.closer: + self._doCloseOp() + elif self.reader: + self._doReadOp() + elif self.writer: + self._doWriteOp() + else: + self.reader = self.tlsConnection.readAsync(16384) + self._doReadOp() + except: + self._clear() + raise + + def inWriteEvent(self): + """Tell the state machine it can write to the socket.""" + try: + self._checkAssert() + if self.handshaker: + self._doHandshakeOp() + elif self.closer: + self._doCloseOp() + elif self.reader: + self._doReadOp() + elif self.writer: + self._doWriteOp() + else: + self.outWriteEvent() + except: + self._clear() + raise + + def _doHandshakeOp(self): + try: + self.result = self.handshaker.next() + except StopIteration: + self.handshaker = None + self.result = None + self.outConnectEvent() + + def _doCloseOp(self): + try: + self.result = self.closer.next() + except StopIteration: + self.closer = None + self.result = None + 
self.outCloseEvent() + + def _doReadOp(self): + self.result = self.reader.next() + if not self.result in (0,1): + readBuffer = self.result + self.reader = None + self.result = None + self.outReadEvent(readBuffer) + + def _doWriteOp(self): + try: + self.result = self.writer.next() + except StopIteration: + self.writer = None + self.result = None + + def setHandshakeOp(self, handshaker): + """Start a handshake operation. + + @type handshaker: generator + @param handshaker: A generator created by using one of the + asynchronous handshake functions (i.e. handshakeServerAsync, or + handshakeClientxxx(..., async=True). + """ + try: + self._checkAssert(0) + self.handshaker = handshaker + self._doHandshakeOp() + except: + self._clear() + raise + + def setServerHandshakeOp(self, **args): + """Start a handshake operation. + + The arguments passed to this function will be forwarded to + L{tlslite.TLSConnection.TLSConnection.handshakeServerAsync}. + """ + handshaker = self.tlsConnection.handshakeServerAsync(**args) + self.setHandshakeOp(handshaker) + + def setCloseOp(self): + """Start a close operation. + """ + try: + self._checkAssert(0) + self.closer = self.tlsConnection.closeAsync() + self._doCloseOp() + except: + self._clear() + raise + + def setWriteOp(self, writeBuffer): + """Start a write operation. + + @type writeBuffer: str + @param writeBuffer: The string to transmit. + """ + try: + self._checkAssert(0) + self.writer = self.tlsConnection.writeAsync(writeBuffer) + self._doWriteOp() + except: + self._clear() + raise + diff --git a/gdata.py-1.2.3/src/gdata/tlslite/integration/ClientHelper.py b/gdata.py-1.2.3/src/gdata/tlslite/integration/ClientHelper.py new file mode 100755 index 0000000..58e0152 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/integration/ClientHelper.py @@ -0,0 +1,163 @@ +""" +A helper class for using TLS Lite with stdlib clients +(httplib, xmlrpclib, imaplib, poplib). 
+""" + +from gdata.tlslite.Checker import Checker + +class ClientHelper: + """This is a helper class used to integrate TLS Lite with various + TLS clients (e.g. poplib, smtplib, httplib, etc.)""" + + def __init__(self, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings = None): + """ + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. + + The constructor does not perform the TLS handshake itself, but + simply stores these arguments for later. The handshake is + performed only when this class needs to connect with the + server. Then you should be prepared to handle TLS-specific + exceptions. See the client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. + + @type password: str + @param password: SRP password for mutual authentication. + Requires the 'username' argument. + + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. 
+ + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. + + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. Mutually + exclusive with the 'x509...' arguments. + + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. Requires the 'cryptoID' argument. + + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed to use this parameter. Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. 
+ """ + + self.username = None + self.password = None + self.sharedKey = None + self.certChain = None + self.privateKey = None + self.checker = None + + #SRP Authentication + if username and password and not \ + (sharedKey or certChain or privateKey): + self.username = username + self.password = password + + #Shared Key Authentication + elif username and sharedKey and not \ + (password or certChain or privateKey): + self.username = username + self.sharedKey = sharedKey + + #Certificate Chain Authentication + elif certChain and privateKey and not \ + (username or password or sharedKey): + self.certChain = certChain + self.privateKey = privateKey + + #No Authentication + elif not password and not username and not \ + sharedKey and not certChain and not privateKey: + pass + + else: + raise ValueError("Bad parameters") + + #Authenticate the server based on its cryptoID or fingerprint + if sharedKey and (cryptoID or protocol or x509Fingerprint): + raise ValueError("Can't use shared keys with other forms of"\ + "authentication") + + self.checker = Checker(cryptoID, protocol, x509Fingerprint, + x509TrustList, x509CommonName) + self.settings = settings + + self.tlsSession = None + + def _handshake(self, tlsConnection): + if self.username and self.password: + tlsConnection.handshakeClientSRP(username=self.username, + password=self.password, + checker=self.checker, + settings=self.settings, + session=self.tlsSession) + elif self.username and self.sharedKey: + tlsConnection.handshakeClientSharedKey(username=self.username, + sharedKey=self.sharedKey, + settings=self.settings) + else: + tlsConnection.handshakeClientCert(certChain=self.certChain, + privateKey=self.privateKey, + checker=self.checker, + settings=self.settings, + session=self.tlsSession) + self.tlsSession = tlsConnection.session diff --git a/gdata.py-1.2.3/src/gdata/tlslite/integration/HTTPTLSConnection.py b/gdata.py-1.2.3/src/gdata/tlslite/integration/HTTPTLSConnection.py new file mode 100755 index 
0000000..58e31a1 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/integration/HTTPTLSConnection.py @@ -0,0 +1,169 @@ +"""TLS Lite + httplib.""" + +import socket +import httplib +from gdata.tlslite.TLSConnection import TLSConnection +from gdata.tlslite.integration.ClientHelper import ClientHelper + + +class HTTPBaseTLSConnection(httplib.HTTPConnection): + """This abstract class provides a framework for adding TLS support + to httplib.""" + + default_port = 443 + + def __init__(self, host, port=None, strict=None): + if strict == None: + #Python 2.2 doesn't support strict + httplib.HTTPConnection.__init__(self, host, port) + else: + httplib.HTTPConnection.__init__(self, host, port, strict) + + def connect(self): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if hasattr(sock, 'settimeout'): + sock.settimeout(10) + sock.connect((self.host, self.port)) + + #Use a TLSConnection to emulate a socket + self.sock = TLSConnection(sock) + + #When httplib closes this, close the socket + self.sock.closeSocket = True + self._handshake(self.sock) + + def _handshake(self, tlsConnection): + """Called to perform some sort of handshake. + + This method must be overridden in a subclass to do some type of + handshake. This method will be called after the socket has + been connected but before any data has been sent. If this + method does not raise an exception, the TLS connection will be + considered valid. + + This method may (or may not) be called every time an HTTP + request is performed, depending on whether the underlying HTTP + connection is persistent. + + @type tlsConnection: L{tlslite.TLSConnection.TLSConnection} + @param tlsConnection: The connection to perform the handshake + on. 
+ """ + raise NotImplementedError() + + +class HTTPTLSConnection(HTTPBaseTLSConnection, ClientHelper): + """This class extends L{HTTPBaseTLSConnection} to support the + common types of handshaking.""" + + def __init__(self, host, port=None, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings = None): + """Create a new HTTPTLSConnection. + + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. + + The constructor does not perform the TLS handshake itself, but + simply stores these arguments for later. The handshake is + performed only when this class needs to connect with the + server. Thus you should be prepared to handle TLS-specific + exceptions when calling methods inherited from + L{httplib.HTTPConnection} such as request(), connect(), and + send(). See the client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type host: str + @param host: Server to connect to. + + @type port: int + @param port: Port to connect to. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. + + @type password: str + @param password: SRP password for mutual authentication. 
+ Requires the 'username' argument. + + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. + + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. Mutually + exclusive with the 'x509...' arguments. + + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. Requires the 'cryptoID' argument. + + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed to use this parameter. Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. 
+ """ + + HTTPBaseTLSConnection.__init__(self, host, port) + + ClientHelper.__init__(self, + username, password, sharedKey, + certChain, privateKey, + cryptoID, protocol, + x509Fingerprint, + x509TrustList, x509CommonName, + settings) + + def _handshake(self, tlsConnection): + ClientHelper._handshake(self, tlsConnection) diff --git a/gdata.py-1.2.3/src/gdata/tlslite/integration/IMAP4_TLS.py b/gdata.py-1.2.3/src/gdata/tlslite/integration/IMAP4_TLS.py new file mode 100755 index 0000000..e47076c --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/integration/IMAP4_TLS.py @@ -0,0 +1,132 @@ +"""TLS Lite + imaplib.""" + +import socket +from imaplib import IMAP4 +from gdata.tlslite.TLSConnection import TLSConnection +from gdata.tlslite.integration.ClientHelper import ClientHelper + +# IMAP TLS PORT +IMAP4_TLS_PORT = 993 + +class IMAP4_TLS(IMAP4, ClientHelper): + """This class extends L{imaplib.IMAP4} with TLS support.""" + + def __init__(self, host = '', port = IMAP4_TLS_PORT, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings=None): + """Create a new IMAP4_TLS. + + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. + + The caller should be prepared to handle TLS-specific + exceptions. 
See the client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type host: str + @param host: Server to connect to. + + @type port: int + @param port: Port to connect to. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. + + @type password: str + @param password: SRP password for mutual authentication. + Requires the 'username' argument. + + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. + + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. Mutually + exclusive with the 'x509...' arguments. + + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. Requires the 'cryptoID' argument. + + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed to use this parameter. Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. 
For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. + """ + + ClientHelper.__init__(self, + username, password, sharedKey, + certChain, privateKey, + cryptoID, protocol, + x509Fingerprint, + x509TrustList, x509CommonName, + settings) + + IMAP4.__init__(self, host, port) + + + def open(self, host = '', port = IMAP4_TLS_PORT): + """Setup connection to remote server on "host:port". + + This connection will be used by the routines: + read, readline, send, shutdown. + """ + self.host = host + self.port = port + self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.sock.connect((host, port)) + self.sock = TLSConnection(self.sock) + self.sock.closeSocket = True + ClientHelper._handshake(self, self.sock) + self.file = self.sock.makefile('rb') diff --git a/gdata.py-1.2.3/src/gdata/tlslite/integration/IntegrationHelper.py b/gdata.py-1.2.3/src/gdata/tlslite/integration/IntegrationHelper.py new file mode 100755 index 0000000..af5193b --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/integration/IntegrationHelper.py @@ -0,0 +1,52 @@ + +class IntegrationHelper: + + def __init__(self, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings = None): + + self.username = None + self.password = None + self.sharedKey = None + self.certChain = None + self.privateKey = None + self.checker = None + + #SRP Authentication + if username and password and not \ + (sharedKey or certChain or privateKey): + self.username = username + self.password = password + + #Shared Key Authentication + 
elif username and sharedKey and not \ + (password or certChain or privateKey): + self.username = username + self.sharedKey = sharedKey + + #Certificate Chain Authentication + elif certChain and privateKey and not \ + (username or password or sharedKey): + self.certChain = certChain + self.privateKey = privateKey + + #No Authentication + elif not password and not username and not \ + sharedKey and not certChain and not privateKey: + pass + + else: + raise ValueError("Bad parameters") + + #Authenticate the server based on its cryptoID or fingerprint + if sharedKey and (cryptoID or protocol or x509Fingerprint): + raise ValueError("Can't use shared keys with other forms of"\ + "authentication") + + self.checker = Checker(cryptoID, protocol, x509Fingerprint, + x509TrustList, x509CommonName) + self.settings = settings \ No newline at end of file diff --git a/gdata.py-1.2.3/src/gdata/tlslite/integration/POP3_TLS.py b/gdata.py-1.2.3/src/gdata/tlslite/integration/POP3_TLS.py new file mode 100755 index 0000000..26b37fd --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/integration/POP3_TLS.py @@ -0,0 +1,142 @@ +"""TLS Lite + poplib.""" + +import socket +from poplib import POP3 +from gdata.tlslite.TLSConnection import TLSConnection +from gdata.tlslite.integration.ClientHelper import ClientHelper + +# POP TLS PORT +POP3_TLS_PORT = 995 + +class POP3_TLS(POP3, ClientHelper): + """This class extends L{poplib.POP3} with TLS support.""" + + def __init__(self, host, port = POP3_TLS_PORT, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings=None): + """Create a new POP3_TLS. 
+ + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. + + The caller should be prepared to handle TLS-specific + exceptions. See the client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type host: str + @param host: Server to connect to. + + @type port: int + @param port: Port to connect to. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. + + @type password: str + @param password: SRP password for mutual authentication. + Requires the 'username' argument. + + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. + + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. Mutually + exclusive with the 'x509...' arguments. 
+ + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. Requires the 'cryptoID' argument. + + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed to use this parameter. Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. 
+ """ + + self.host = host + self.port = port + msg = "getaddrinfo returns an empty list" + self.sock = None + for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + try: + self.sock = socket.socket(af, socktype, proto) + self.sock.connect(sa) + except socket.error, msg: + if self.sock: + self.sock.close() + self.sock = None + continue + break + if not self.sock: + raise socket.error, msg + + ### New code below (all else copied from poplib) + ClientHelper.__init__(self, + username, password, sharedKey, + certChain, privateKey, + cryptoID, protocol, + x509Fingerprint, + x509TrustList, x509CommonName, + settings) + + self.sock = TLSConnection(self.sock) + self.sock.closeSocket = True + ClientHelper._handshake(self, self.sock) + ### + + self.file = self.sock.makefile('rb') + self._debugging = 0 + self.welcome = self._getresp() diff --git a/gdata.py-1.2.3/src/gdata/tlslite/integration/SMTP_TLS.py b/gdata.py-1.2.3/src/gdata/tlslite/integration/SMTP_TLS.py new file mode 100755 index 0000000..67e0feb --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/integration/SMTP_TLS.py @@ -0,0 +1,114 @@ +"""TLS Lite + smtplib.""" + +from smtplib import SMTP +from gdata.tlslite.TLSConnection import TLSConnection +from gdata.tlslite.integration.ClientHelper import ClientHelper + +class SMTP_TLS(SMTP): + """This class extends L{smtplib.SMTP} with TLS support.""" + + def starttls(self, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings=None): + """Puts the connection to the SMTP server into TLS mode. + + If the server supports TLS, this will encrypt the rest of the SMTP + session. 
+ + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. + + The caller should be prepared to handle TLS-specific + exceptions. See the client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. + + @type password: str + @param password: SRP password for mutual authentication. + Requires the 'username' argument. + + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. + + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. Mutually + exclusive with the 'x509...' arguments. + + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. Requires the 'cryptoID' argument. 
+ + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed to use this parameter. Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. 
+ """ + (resp, reply) = self.docmd("STARTTLS") + if resp == 220: + helper = ClientHelper( + username, password, sharedKey, + certChain, privateKey, + cryptoID, protocol, + x509Fingerprint, + x509TrustList, x509CommonName, + settings) + conn = TLSConnection(self.sock) + conn.closeSocket = True + helper._handshake(conn) + self.sock = conn + self.file = conn.makefile('rb') + return (resp, reply) diff --git a/gdata.py-1.2.3/src/gdata/tlslite/integration/TLSAsyncDispatcherMixIn.py b/gdata.py-1.2.3/src/gdata/tlslite/integration/TLSAsyncDispatcherMixIn.py new file mode 100755 index 0000000..f732f62 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/integration/TLSAsyncDispatcherMixIn.py @@ -0,0 +1,139 @@ +"""TLS Lite + asyncore.""" + + +import asyncore +from gdata.tlslite.TLSConnection import TLSConnection +from AsyncStateMachine import AsyncStateMachine + + +class TLSAsyncDispatcherMixIn(AsyncStateMachine): + """This class can be "mixed in" with an + L{asyncore.dispatcher} to add TLS support. + + This class essentially sits between the dispatcher and the select + loop, intercepting events and only calling the dispatcher when + applicable. + + In the case of handle_read(), a read operation will be activated, + and when it completes, the bytes will be placed in a buffer where + the dispatcher can retrieve them by calling recv(), and the + dispatcher's handle_read() will be called. + + In the case of handle_write(), the dispatcher's handle_write() will + be called, and when it calls send(), a write operation will be + activated. + + To use this class, you must combine it with an asyncore.dispatcher, + and pass in a handshake operation with setServerHandshakeOp(). + + Below is an example of using this class with medusa. This class is + mixed in with http_channel to create http_tls_channel. Note: + 1. the mix-in is listed first in the inheritance list + + 2. 
the input buffer size must be at least 16K, otherwise the + dispatcher might not read all the bytes from the TLS layer, + leaving some bytes in limbo. + + 3. IE seems to have a problem receiving a whole HTTP response in a + single TLS record, so HTML pages containing '\\r\\n\\r\\n' won't + be displayed on IE. + + Add the following text into 'start_medusa.py', in the 'HTTP Server' + section:: + + from tlslite.api import * + s = open("./serverX509Cert.pem").read() + x509 = X509() + x509.parse(s) + certChain = X509CertChain([x509]) + + s = open("./serverX509Key.pem").read() + privateKey = parsePEMKey(s, private=True) + + class http_tls_channel(TLSAsyncDispatcherMixIn, + http_server.http_channel): + ac_in_buffer_size = 16384 + + def __init__ (self, server, conn, addr): + http_server.http_channel.__init__(self, server, conn, addr) + TLSAsyncDispatcherMixIn.__init__(self, conn) + self.tlsConnection.ignoreAbruptClose = True + self.setServerHandshakeOp(certChain=certChain, + privateKey=privateKey) + + hs.channel_class = http_tls_channel + + If the TLS layer raises an exception, the exception will be caught + in asyncore.dispatcher, which will call close() on this class. The + TLS layer always closes the TLS connection before raising an + exception, so the close operation will complete right away, causing + asyncore.dispatcher.close() to be called, which closes the socket + and removes this instance from the asyncore loop. + + """ + + + def __init__(self, sock=None): + AsyncStateMachine.__init__(self) + + if sock: + self.tlsConnection = TLSConnection(sock) + + #Calculate the sibling I'm being mixed in with. + #This is necessary since we override functions + #like readable(), handle_read(), etc., but we + #also want to call the sibling's versions. 
+ for cl in self.__class__.__bases__: + if cl != TLSAsyncDispatcherMixIn and cl != AsyncStateMachine: + self.siblingClass = cl + break + else: + raise AssertionError() + + def readable(self): + result = self.wantsReadEvent() + if result != None: + return result + return self.siblingClass.readable(self) + + def writable(self): + result = self.wantsWriteEvent() + if result != None: + return result + return self.siblingClass.writable(self) + + def handle_read(self): + self.inReadEvent() + + def handle_write(self): + self.inWriteEvent() + + def outConnectEvent(self): + self.siblingClass.handle_connect(self) + + def outCloseEvent(self): + asyncore.dispatcher.close(self) + + def outReadEvent(self, readBuffer): + self.readBuffer = readBuffer + self.siblingClass.handle_read(self) + + def outWriteEvent(self): + self.siblingClass.handle_write(self) + + def recv(self, bufferSize=16384): + if bufferSize < 16384 or self.readBuffer == None: + raise AssertionError() + returnValue = self.readBuffer + self.readBuffer = None + return returnValue + + def send(self, writeBuffer): + self.setWriteOp(writeBuffer) + return len(writeBuffer) + + def close(self): + if hasattr(self, "tlsConnection"): + self.setCloseOp() + else: + asyncore.dispatcher.close(self) diff --git a/gdata.py-1.2.3/src/gdata/tlslite/integration/TLSSocketServerMixIn.py b/gdata.py-1.2.3/src/gdata/tlslite/integration/TLSSocketServerMixIn.py new file mode 100755 index 0000000..10224b6 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/integration/TLSSocketServerMixIn.py @@ -0,0 +1,59 @@ +"""TLS Lite + SocketServer.""" + +from gdata.tlslite.TLSConnection import TLSConnection + +class TLSSocketServerMixIn: + """ + This class can be mixed in with any L{SocketServer.TCPServer} to + add TLS support. + + To use this class, define a new class that inherits from it and + some L{SocketServer.TCPServer} (with the mix-in first). 
Then + implement the handshake() method, doing some sort of server + handshake on the connection argument. If the handshake method + returns True, the RequestHandler will be triggered. Below is a + complete example of a threaded HTTPS server:: + + from SocketServer import * + from BaseHTTPServer import * + from SimpleHTTPServer import * + from tlslite.api import * + + s = open("./serverX509Cert.pem").read() + x509 = X509() + x509.parse(s) + certChain = X509CertChain([x509]) + + s = open("./serverX509Key.pem").read() + privateKey = parsePEMKey(s, private=True) + + sessionCache = SessionCache() + + class MyHTTPServer(ThreadingMixIn, TLSSocketServerMixIn, + HTTPServer): + def handshake(self, tlsConnection): + try: + tlsConnection.handshakeServer(certChain=certChain, + privateKey=privateKey, + sessionCache=sessionCache) + tlsConnection.ignoreAbruptClose = True + return True + except TLSError, error: + print "Handshake failure:", str(error) + return False + + httpd = MyHTTPServer(('localhost', 443), SimpleHTTPRequestHandler) + httpd.serve_forever() + """ + + + def finish_request(self, sock, client_address): + tlsConnection = TLSConnection(sock) + if self.handshake(tlsConnection) == True: + self.RequestHandlerClass(tlsConnection, client_address, self) + tlsConnection.close() + + #Implement this method to do some form of handshaking. Return True + #if the handshake finishes properly and the request is authorized. 
+ def handshake(self, tlsConnection): + raise NotImplementedError() diff --git a/gdata.py-1.2.3/src/gdata/tlslite/integration/TLSTwistedProtocolWrapper.py b/gdata.py-1.2.3/src/gdata/tlslite/integration/TLSTwistedProtocolWrapper.py new file mode 100755 index 0000000..c88703c --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/integration/TLSTwistedProtocolWrapper.py @@ -0,0 +1,196 @@ +"""TLS Lite + Twisted.""" + +from twisted.protocols.policies import ProtocolWrapper, WrappingFactory +from twisted.python.failure import Failure + +from AsyncStateMachine import AsyncStateMachine +from gdata.tlslite.TLSConnection import TLSConnection +from gdata.tlslite.errors import * + +import socket +import errno + + +#The TLSConnection is created around a "fake socket" that +#plugs it into the underlying Twisted transport +class _FakeSocket: + def __init__(self, wrapper): + self.wrapper = wrapper + self.data = "" + + def send(self, data): + ProtocolWrapper.write(self.wrapper, data) + return len(data) + + def recv(self, numBytes): + if self.data == "": + raise socket.error, (errno.EWOULDBLOCK, "") + returnData = self.data[:numBytes] + self.data = self.data[numBytes:] + return returnData + +class TLSTwistedProtocolWrapper(ProtocolWrapper, AsyncStateMachine): + """This class can wrap Twisted protocols to add TLS support. + + Below is a complete example of using TLS Lite with a Twisted echo + server. + + There are two server implementations below. Echo is the original + protocol, which is oblivious to TLS. Echo1 subclasses Echo and + negotiates TLS when the client connects. 
Echo2 subclasses Echo and + negotiates TLS when the client sends "STARTTLS":: + + from twisted.internet.protocol import Protocol, Factory + from twisted.internet import reactor + from twisted.protocols.policies import WrappingFactory + from twisted.protocols.basic import LineReceiver + from twisted.python import log + from twisted.python.failure import Failure + import sys + from tlslite.api import * + + s = open("./serverX509Cert.pem").read() + x509 = X509() + x509.parse(s) + certChain = X509CertChain([x509]) + + s = open("./serverX509Key.pem").read() + privateKey = parsePEMKey(s, private=True) + + verifierDB = VerifierDB("verifierDB") + verifierDB.open() + + class Echo(LineReceiver): + def connectionMade(self): + self.transport.write("Welcome to the echo server!\\r\\n") + + def lineReceived(self, line): + self.transport.write(line + "\\r\\n") + + class Echo1(Echo): + def connectionMade(self): + if not self.transport.tlsStarted: + self.transport.setServerHandshakeOp(certChain=certChain, + privateKey=privateKey, + verifierDB=verifierDB) + else: + Echo.connectionMade(self) + + def connectionLost(self, reason): + pass #Handle any TLS exceptions here + + class Echo2(Echo): + def lineReceived(self, data): + if data == "STARTTLS": + self.transport.setServerHandshakeOp(certChain=certChain, + privateKey=privateKey, + verifierDB=verifierDB) + else: + Echo.lineReceived(self, data) + + def connectionLost(self, reason): + pass #Handle any TLS exceptions here + + factory = Factory() + factory.protocol = Echo1 + #factory.protocol = Echo2 + + wrappingFactory = WrappingFactory(factory) + wrappingFactory.protocol = TLSTwistedProtocolWrapper + + log.startLogging(sys.stdout) + reactor.listenTCP(1079, wrappingFactory) + reactor.run() + + This class works as follows: + + Data comes in and is given to the AsyncStateMachine for handling. 
+ AsyncStateMachine will forward events to this class, and we'll + pass them on to the ProtocolHandler, which will proxy them to the + wrapped protocol. The wrapped protocol may then call back into + this class, and these calls will be proxied into the + AsyncStateMachine. + + The call graph looks like this: + - self.dataReceived + - AsyncStateMachine.inReadEvent + - self.out(Connect|Close|Read)Event + - ProtocolWrapper.(connectionMade|loseConnection|dataReceived) + - self.(loseConnection|write|writeSequence) + - AsyncStateMachine.(setCloseOp|setWriteOp) + """ + + #WARNING: IF YOU COPY-AND-PASTE THE ABOVE CODE, BE SURE TO REMOVE + #THE EXTRA ESCAPING AROUND "\\r\\n" + + def __init__(self, factory, wrappedProtocol): + ProtocolWrapper.__init__(self, factory, wrappedProtocol) + AsyncStateMachine.__init__(self) + self.fakeSocket = _FakeSocket(self) + self.tlsConnection = TLSConnection(self.fakeSocket) + self.tlsStarted = False + self.connectionLostCalled = False + + def connectionMade(self): + try: + ProtocolWrapper.connectionMade(self) + except TLSError, e: + self.connectionLost(Failure(e)) + ProtocolWrapper.loseConnection(self) + + def dataReceived(self, data): + try: + if not self.tlsStarted: + ProtocolWrapper.dataReceived(self, data) + else: + self.fakeSocket.data += data + while self.fakeSocket.data: + AsyncStateMachine.inReadEvent(self) + except TLSError, e: + self.connectionLost(Failure(e)) + ProtocolWrapper.loseConnection(self) + + def connectionLost(self, reason): + if not self.connectionLostCalled: + ProtocolWrapper.connectionLost(self, reason) + self.connectionLostCalled = True + + + def outConnectEvent(self): + ProtocolWrapper.connectionMade(self) + + def outCloseEvent(self): + ProtocolWrapper.loseConnection(self) + + def outReadEvent(self, data): + if data == "": + ProtocolWrapper.loseConnection(self) + else: + ProtocolWrapper.dataReceived(self, data) + + + def setServerHandshakeOp(self, **args): + self.tlsStarted = True + 
AsyncStateMachine.setServerHandshakeOp(self, **args) + + def loseConnection(self): + if not self.tlsStarted: + ProtocolWrapper.loseConnection(self) + else: + AsyncStateMachine.setCloseOp(self) + + def write(self, data): + if not self.tlsStarted: + ProtocolWrapper.write(self, data) + else: + #Because of the FakeSocket, write operations are guaranteed to + #terminate immediately. + AsyncStateMachine.setWriteOp(self, data) + + def writeSequence(self, seq): + if not self.tlsStarted: + ProtocolWrapper.writeSequence(self, seq) + else: + #Because of the FakeSocket, write operations are guaranteed to + #terminate immediately. + AsyncStateMachine.setWriteOp(self, "".join(seq)) diff --git a/gdata.py-1.2.3/src/gdata/tlslite/integration/XMLRPCTransport.py b/gdata.py-1.2.3/src/gdata/tlslite/integration/XMLRPCTransport.py new file mode 100755 index 0000000..3f025e4 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/integration/XMLRPCTransport.py @@ -0,0 +1,137 @@ +"""TLS Lite + xmlrpclib.""" + +import xmlrpclib +import httplib +from gdata.tlslite.integration.HTTPTLSConnection import HTTPTLSConnection +from gdata.tlslite.integration.ClientHelper import ClientHelper + + +class XMLRPCTransport(xmlrpclib.Transport, ClientHelper): + """Handles an HTTPS transaction to an XML-RPC server.""" + + def __init__(self, + username=None, password=None, sharedKey=None, + certChain=None, privateKey=None, + cryptoID=None, protocol=None, + x509Fingerprint=None, + x509TrustList=None, x509CommonName=None, + settings=None): + """Create a new XMLRPCTransport. 
+ + An instance of this class can be passed to L{xmlrpclib.ServerProxy} + to use TLS with XML-RPC calls:: + + from tlslite.api import XMLRPCTransport + from xmlrpclib import ServerProxy + + transport = XMLRPCTransport(user="alice", password="abra123") + server = ServerProxy("https://localhost", transport) + + For client authentication, use one of these argument + combinations: + - username, password (SRP) + - username, sharedKey (shared-key) + - certChain, privateKey (certificate) + + For server authentication, you can either rely on the + implicit mutual authentication performed by SRP or + shared-keys, or you can do certificate-based server + authentication with one of these argument combinations: + - cryptoID[, protocol] (requires cryptoIDlib) + - x509Fingerprint + - x509TrustList[, x509CommonName] (requires cryptlib_py) + + Certificate-based server authentication is compatible with + SRP or certificate-based client authentication. It is + not compatible with shared-keys. + + The constructor does not perform the TLS handshake itself, but + simply stores these arguments for later. The handshake is + performed only when this class needs to connect with the + server. Thus you should be prepared to handle TLS-specific + exceptions when calling methods of L{xmlrpclib.ServerProxy}. See the + client handshake functions in + L{tlslite.TLSConnection.TLSConnection} for details on which + exceptions might be raised. + + @type username: str + @param username: SRP or shared-key username. Requires the + 'password' or 'sharedKey' argument. + + @type password: str + @param password: SRP password for mutual authentication. + Requires the 'username' argument. + + @type sharedKey: str + @param sharedKey: Shared key for mutual authentication. + Requires the 'username' argument. + + @type certChain: L{tlslite.X509CertChain.X509CertChain} or + L{cryptoIDlib.CertChain.CertChain} + @param certChain: Certificate chain for client authentication. + Requires the 'privateKey' argument. 
Excludes the SRP or + shared-key related arguments. + + @type privateKey: L{tlslite.utils.RSAKey.RSAKey} + @param privateKey: Private key for client authentication. + Requires the 'certChain' argument. Excludes the SRP or + shared-key related arguments. + + @type cryptoID: str + @param cryptoID: cryptoID for server authentication. Mutually + exclusive with the 'x509...' arguments. + + @type protocol: str + @param protocol: cryptoID protocol URI for server + authentication. Requires the 'cryptoID' argument. + + @type x509Fingerprint: str + @param x509Fingerprint: Hex-encoded X.509 fingerprint for + server authentication. Mutually exclusive with the 'cryptoID' + and 'x509TrustList' arguments. + + @type x509TrustList: list of L{tlslite.X509.X509} + @param x509TrustList: A list of trusted root certificates. The + other party must present a certificate chain which extends to + one of these root certificates. The cryptlib_py module must be + installed to use this parameter. Mutually exclusive with the + 'cryptoID' and 'x509Fingerprint' arguments. + + @type x509CommonName: str + @param x509CommonName: The end-entity certificate's 'CN' field + must match this value. For a web server, this is typically a + server name such as 'www.amazon.com'. Mutually exclusive with + the 'cryptoID' and 'x509Fingerprint' arguments. Requires the + 'x509TrustList' argument. + + @type settings: L{tlslite.HandshakeSettings.HandshakeSettings} + @param settings: Various settings which can be used to control + the ciphersuites, certificate types, and SSL/TLS versions + offered by the client. 
+ """ + + ClientHelper.__init__(self, + username, password, sharedKey, + certChain, privateKey, + cryptoID, protocol, + x509Fingerprint, + x509TrustList, x509CommonName, + settings) + + + def make_connection(self, host): + # create a HTTPS connection object from a host descriptor + host, extra_headers, x509 = self.get_host_info(host) + http = HTTPTLSConnection(host, None, + self.username, self.password, + self.sharedKey, + self.certChain, self.privateKey, + self.checker.cryptoID, + self.checker.protocol, + self.checker.x509Fingerprint, + self.checker.x509TrustList, + self.checker.x509CommonName, + self.settings) + http2 = httplib.HTTP() + http2._setup(http) + return http2 diff --git a/gdata.py-1.2.3/src/gdata/tlslite/integration/__init__.py b/gdata.py-1.2.3/src/gdata/tlslite/integration/__init__.py new file mode 100755 index 0000000..960f406 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/integration/__init__.py @@ -0,0 +1,17 @@ +"""Classes for integrating TLS Lite with other packages.""" + +__all__ = ["AsyncStateMachine", + "HTTPTLSConnection", + "POP3_TLS", + "IMAP4_TLS", + "SMTP_TLS", + "XMLRPCTransport", + "TLSSocketServerMixIn", + "TLSAsyncDispatcherMixIn", + "TLSTwistedProtocolWrapper"] + +try: + import twisted + del twisted +except ImportError: + del __all__[__all__.index("TLSTwistedProtocolWrapper")] diff --git a/gdata.py-1.2.3/src/gdata/tlslite/mathtls.py b/gdata.py-1.2.3/src/gdata/tlslite/mathtls.py new file mode 100755 index 0000000..3b8ede6 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/mathtls.py @@ -0,0 +1,170 @@ +"""Miscellaneous helper functions.""" + +from utils.compat import * +from utils.cryptomath import * + +import hmac +import md5 +import sha + +#1024, 1536, 2048, 3072, 4096, 6144, and 8192 bit groups] +goodGroupParameters = 
[(2,0xEEAF0AB9ADB38DD69C33F80AFA8FC5E86072618775FF3C0B9EA2314C9C256576D674DF7496EA81D3383B4813D692C6E0E0D5D8E250B98BE48E495C1D6089DAD15DC7D7B46154D6B6CE8EF4AD69B15D4982559B297BCF1885C529F566660E57EC68EDBC3C05726CC02FD4CBF4976EAA9AFD5138FE8376435B9FC61D2FC0EB06E3),\ + (2,0x9DEF3CAFB939277AB1F12A8617A47BBBDBA51DF499AC4C80BEEEA9614B19CC4D5F4F5F556E27CBDE51C6A94BE4607A291558903BA0D0F84380B655BB9A22E8DCDF028A7CEC67F0D08134B1C8B97989149B609E0BE3BAB63D47548381DBC5B1FC764E3F4B53DD9DA1158BFD3E2B9C8CF56EDF019539349627DB2FD53D24B7C48665772E437D6C7F8CE442734AF7CCB7AE837C264AE3A9BEB87F8A2FE9B8B5292E5A021FFF5E91479E8CE7A28C2442C6F315180F93499A234DCF76E3FED135F9BB),\ + (2,0xAC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF73),\ + (2,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF),\ + 
(5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF),\ + 
(5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF),\ + 
(5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E438777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F5683423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD922222E04A4037C0713EB57A81A23F0C73473FC646CEA306B4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A364597E899A0255DC164F31CC50846851DF9AB48195DED7EA1B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F924009438B481C6CD7889A002ED5EE382BC9190DA6FC026E479558E4475677E9AA9E3050E2765
694DFC81F56E880B96E7160C980DD98EDD3DFFFFFFFFFFFFFFFFF)] + +def P_hash(hashModule, secret, seed, length): + bytes = createByteArrayZeros(length) + secret = bytesToString(secret) + seed = bytesToString(seed) + A = seed + index = 0 + while 1: + A = hmac.HMAC(secret, A, hashModule).digest() + output = hmac.HMAC(secret, A+seed, hashModule).digest() + for c in output: + if index >= length: + return bytes + bytes[index] = ord(c) + index += 1 + return bytes + +def PRF(secret, label, seed, length): + #Split the secret into left and right halves + S1 = secret[ : int(math.ceil(len(secret)/2.0))] + S2 = secret[ int(math.floor(len(secret)/2.0)) : ] + + #Run the left half through P_MD5 and the right half through P_SHA1 + p_md5 = P_hash(md5, S1, concatArrays(stringToBytes(label), seed), length) + p_sha1 = P_hash(sha, S2, concatArrays(stringToBytes(label), seed), length) + + #XOR the output values and return the result + for x in range(length): + p_md5[x] ^= p_sha1[x] + return p_md5 + + +def PRF_SSL(secret, seed, length): + secretStr = bytesToString(secret) + seedStr = bytesToString(seed) + bytes = createByteArrayZeros(length) + index = 0 + for x in range(26): + A = chr(ord('A')+x) * (x+1) # 'A', 'BB', 'CCC', etc.. 
+ input = secretStr + sha.sha(A + secretStr + seedStr).digest() + output = md5.md5(input).digest() + for c in output: + if index >= length: + return bytes + bytes[index] = ord(c) + index += 1 + return bytes + +def makeX(salt, username, password): + if len(username)>=256: + raise ValueError("username too long") + if len(salt)>=256: + raise ValueError("salt too long") + return stringToNumber(sha.sha(salt + sha.sha(username + ":" + password)\ + .digest()).digest()) + +#This function is used by VerifierDB.makeVerifier +def makeVerifier(username, password, bits): + bitsIndex = {1024:0, 1536:1, 2048:2, 3072:3, 4096:4, 6144:5, 8192:6}[bits] + g,N = goodGroupParameters[bitsIndex] + salt = bytesToString(getRandomBytes(16)) + x = makeX(salt, username, password) + verifier = powMod(g, x, N) + return N, g, salt, verifier + +def PAD(n, x): + nLength = len(numberToString(n)) + s = numberToString(x) + if len(s) < nLength: + s = ("\0" * (nLength-len(s))) + s + return s + +def makeU(N, A, B): + return stringToNumber(sha.sha(PAD(N, A) + PAD(N, B)).digest()) + +def makeK(N, g): + return stringToNumber(sha.sha(numberToString(N) + PAD(N, g)).digest()) + + +""" +MAC_SSL +Modified from Python HMAC by Trevor +""" + +class MAC_SSL: + """MAC_SSL class. + + This supports the API for Cryptographic Hash Functions (PEP 247). + """ + + def __init__(self, key, msg = None, digestmod = None): + """Create a new MAC_SSL object. + + key: key for the keyed hash object. + msg: Initial input for the hash, if provided. + digestmod: A module supporting PEP 247. Defaults to the md5 module. 
+ """ + if digestmod is None: + import md5 + digestmod = md5 + + if key == None: #TREVNEW - for faster copying + return #TREVNEW + + self.digestmod = digestmod + self.outer = digestmod.new() + self.inner = digestmod.new() + self.digest_size = digestmod.digest_size + + ipad = "\x36" * 40 + opad = "\x5C" * 40 + + self.inner.update(key) + self.inner.update(ipad) + self.outer.update(key) + self.outer.update(opad) + if msg is not None: + self.update(msg) + + + def update(self, msg): + """Update this hashing object with the string msg. + """ + self.inner.update(msg) + + def copy(self): + """Return a separate copy of this hashing object. + + An update to this copy won't affect the original object. + """ + other = MAC_SSL(None) #TREVNEW - for faster copying + other.digest_size = self.digest_size #TREVNEW + other.digestmod = self.digestmod + other.inner = self.inner.copy() + other.outer = self.outer.copy() + return other + + def digest(self): + """Return the hash value of this hashing object. + + This returns a string containing 8-bit data. The object is + not altered in any way by this function; you can continue + updating the object after calling this function. + """ + h = self.outer.copy() + h.update(self.inner.digest()) + return h.digest() + + def hexdigest(self): + """Like digest(), but returns a string of hexadecimal digits instead. 
+ """ + return "".join([hex(ord(x))[2:].zfill(2) + for x in tuple(self.digest())]) diff --git a/gdata.py-1.2.3/src/gdata/tlslite/messages.py b/gdata.py-1.2.3/src/gdata/tlslite/messages.py new file mode 100755 index 0000000..afccc79 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/messages.py @@ -0,0 +1,561 @@ +"""Classes representing TLS messages.""" + +from utils.compat import * +from utils.cryptomath import * +from errors import * +from utils.codec import * +from constants import * +from X509 import X509 +from X509CertChain import X509CertChain + +import sha +import md5 + +class RecordHeader3: + def __init__(self): + self.type = 0 + self.version = (0,0) + self.length = 0 + self.ssl2 = False + + def create(self, version, type, length): + self.type = type + self.version = version + self.length = length + return self + + def write(self): + w = Writer(5) + w.add(self.type, 1) + w.add(self.version[0], 1) + w.add(self.version[1], 1) + w.add(self.length, 2) + return w.bytes + + def parse(self, p): + self.type = p.get(1) + self.version = (p.get(1), p.get(1)) + self.length = p.get(2) + self.ssl2 = False + return self + +class RecordHeader2: + def __init__(self): + self.type = 0 + self.version = (0,0) + self.length = 0 + self.ssl2 = True + + def parse(self, p): + if p.get(1)!=128: + raise SyntaxError() + self.type = ContentType.handshake + self.version = (2,0) + #We don't support 2-byte-length-headers; could be a problem + self.length = p.get(1) + return self + + +class Msg: + def preWrite(self, trial): + if trial: + w = Writer() + else: + length = self.write(True) + w = Writer(length) + return w + + def postWrite(self, w, trial): + if trial: + return w.index + else: + return w.bytes + +class Alert(Msg): + def __init__(self): + self.contentType = ContentType.alert + self.level = 0 + self.description = 0 + + def create(self, description, level=AlertLevel.fatal): + self.level = level + self.description = description + return self + + def parse(self, p): + 
p.setLengthCheck(2) + self.level = p.get(1) + self.description = p.get(1) + p.stopLengthCheck() + return self + + def write(self): + w = Writer(2) + w.add(self.level, 1) + w.add(self.description, 1) + return w.bytes + + +class HandshakeMsg(Msg): + def preWrite(self, handshakeType, trial): + if trial: + w = Writer() + w.add(handshakeType, 1) + w.add(0, 3) + else: + length = self.write(True) + w = Writer(length) + w.add(handshakeType, 1) + w.add(length-4, 3) + return w + + +class ClientHello(HandshakeMsg): + def __init__(self, ssl2=False): + self.contentType = ContentType.handshake + self.ssl2 = ssl2 + self.client_version = (0,0) + self.random = createByteArrayZeros(32) + self.session_id = createByteArraySequence([]) + self.cipher_suites = [] # a list of 16-bit values + self.certificate_types = [CertificateType.x509] + self.compression_methods = [] # a list of 8-bit values + self.srp_username = None # a string + + def create(self, version, random, session_id, cipher_suites, + certificate_types=None, srp_username=None): + self.client_version = version + self.random = random + self.session_id = session_id + self.cipher_suites = cipher_suites + self.certificate_types = certificate_types + self.compression_methods = [0] + self.srp_username = srp_username + return self + + def parse(self, p): + if self.ssl2: + self.client_version = (p.get(1), p.get(1)) + cipherSpecsLength = p.get(2) + sessionIDLength = p.get(2) + randomLength = p.get(2) + self.cipher_suites = p.getFixList(3, int(cipherSpecsLength/3)) + self.session_id = p.getFixBytes(sessionIDLength) + self.random = p.getFixBytes(randomLength) + if len(self.random) < 32: + zeroBytes = 32-len(self.random) + self.random = createByteArrayZeros(zeroBytes) + self.random + self.compression_methods = [0]#Fake this value + + #We're not doing a stopLengthCheck() for SSLv2, oh well.. 
+ else: + p.startLengthCheck(3) + self.client_version = (p.get(1), p.get(1)) + self.random = p.getFixBytes(32) + self.session_id = p.getVarBytes(1) + self.cipher_suites = p.getVarList(2, 2) + self.compression_methods = p.getVarList(1, 1) + if not p.atLengthCheck(): + totalExtLength = p.get(2) + soFar = 0 + while soFar != totalExtLength: + extType = p.get(2) + extLength = p.get(2) + if extType == 6: + self.srp_username = bytesToString(p.getVarBytes(1)) + elif extType == 7: + self.certificate_types = p.getVarList(1, 1) + else: + p.getFixBytes(extLength) + soFar += 4 + extLength + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.client_hello, trial) + w.add(self.client_version[0], 1) + w.add(self.client_version[1], 1) + w.addFixSeq(self.random, 1) + w.addVarSeq(self.session_id, 1, 1) + w.addVarSeq(self.cipher_suites, 2, 2) + w.addVarSeq(self.compression_methods, 1, 1) + + extLength = 0 + if self.certificate_types and self.certificate_types != \ + [CertificateType.x509]: + extLength += 5 + len(self.certificate_types) + if self.srp_username: + extLength += 5 + len(self.srp_username) + if extLength > 0: + w.add(extLength, 2) + + if self.certificate_types and self.certificate_types != \ + [CertificateType.x509]: + w.add(7, 2) + w.add(len(self.certificate_types)+1, 2) + w.addVarSeq(self.certificate_types, 1, 1) + if self.srp_username: + w.add(6, 2) + w.add(len(self.srp_username)+1, 2) + w.addVarSeq(stringToBytes(self.srp_username), 1, 1) + + return HandshakeMsg.postWrite(self, w, trial) + + +class ServerHello(HandshakeMsg): + def __init__(self): + self.contentType = ContentType.handshake + self.server_version = (0,0) + self.random = createByteArrayZeros(32) + self.session_id = createByteArraySequence([]) + self.cipher_suite = 0 + self.certificate_type = CertificateType.x509 + self.compression_method = 0 + + def create(self, version, random, session_id, cipher_suite, + certificate_type): + 
self.server_version = version + self.random = random + self.session_id = session_id + self.cipher_suite = cipher_suite + self.certificate_type = certificate_type + self.compression_method = 0 + return self + + def parse(self, p): + p.startLengthCheck(3) + self.server_version = (p.get(1), p.get(1)) + self.random = p.getFixBytes(32) + self.session_id = p.getVarBytes(1) + self.cipher_suite = p.get(2) + self.compression_method = p.get(1) + if not p.atLengthCheck(): + totalExtLength = p.get(2) + soFar = 0 + while soFar != totalExtLength: + extType = p.get(2) + extLength = p.get(2) + if extType == 7: + self.certificate_type = p.get(1) + else: + p.getFixBytes(extLength) + soFar += 4 + extLength + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.server_hello, trial) + w.add(self.server_version[0], 1) + w.add(self.server_version[1], 1) + w.addFixSeq(self.random, 1) + w.addVarSeq(self.session_id, 1, 1) + w.add(self.cipher_suite, 2) + w.add(self.compression_method, 1) + + extLength = 0 + if self.certificate_type and self.certificate_type != \ + CertificateType.x509: + extLength += 5 + + if extLength != 0: + w.add(extLength, 2) + + if self.certificate_type and self.certificate_type != \ + CertificateType.x509: + w.add(7, 2) + w.add(1, 2) + w.add(self.certificate_type, 1) + + return HandshakeMsg.postWrite(self, w, trial) + +class Certificate(HandshakeMsg): + def __init__(self, certificateType): + self.certificateType = certificateType + self.contentType = ContentType.handshake + self.certChain = None + + def create(self, certChain): + self.certChain = certChain + return self + + def parse(self, p): + p.startLengthCheck(3) + if self.certificateType == CertificateType.x509: + chainLength = p.get(3) + index = 0 + certificate_list = [] + while index != chainLength: + certBytes = p.getVarBytes(3) + x509 = X509() + x509.parseBinary(certBytes) + certificate_list.append(x509) + index += len(certBytes)+3 + if 
certificate_list: + self.certChain = X509CertChain(certificate_list) + elif self.certificateType == CertificateType.cryptoID: + s = bytesToString(p.getVarBytes(2)) + if s: + try: + import cryptoIDlib.CertChain + except ImportError: + raise SyntaxError(\ + "cryptoID cert chain received, cryptoIDlib not present") + self.certChain = cryptoIDlib.CertChain.CertChain().parse(s) + else: + raise AssertionError() + + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.certificate, trial) + if self.certificateType == CertificateType.x509: + chainLength = 0 + if self.certChain: + certificate_list = self.certChain.x509List + else: + certificate_list = [] + #determine length + for cert in certificate_list: + bytes = cert.writeBytes() + chainLength += len(bytes)+3 + #add bytes + w.add(chainLength, 3) + for cert in certificate_list: + bytes = cert.writeBytes() + w.addVarSeq(bytes, 1, 3) + elif self.certificateType == CertificateType.cryptoID: + if self.certChain: + bytes = stringToBytes(self.certChain.write()) + else: + bytes = createByteArraySequence([]) + w.addVarSeq(bytes, 1, 2) + else: + raise AssertionError() + return HandshakeMsg.postWrite(self, w, trial) + +class CertificateRequest(HandshakeMsg): + def __init__(self): + self.contentType = ContentType.handshake + self.certificate_types = [] + #treat as opaque bytes for now + self.certificate_authorities = createByteArraySequence([]) + + def create(self, certificate_types, certificate_authorities): + self.certificate_types = certificate_types + self.certificate_authorities = certificate_authorities + return self + + def parse(self, p): + p.startLengthCheck(3) + self.certificate_types = p.getVarList(1, 1) + self.certificate_authorities = p.getVarBytes(2) + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.certificate_request, + trial) + w.addVarSeq(self.certificate_types, 1, 1) + 
w.addVarSeq(self.certificate_authorities, 1, 2) + return HandshakeMsg.postWrite(self, w, trial) + +class ServerKeyExchange(HandshakeMsg): + def __init__(self, cipherSuite): + self.cipherSuite = cipherSuite + self.contentType = ContentType.handshake + self.srp_N = 0L + self.srp_g = 0L + self.srp_s = createByteArraySequence([]) + self.srp_B = 0L + self.signature = createByteArraySequence([]) + + def createSRP(self, srp_N, srp_g, srp_s, srp_B): + self.srp_N = srp_N + self.srp_g = srp_g + self.srp_s = srp_s + self.srp_B = srp_B + return self + + def parse(self, p): + p.startLengthCheck(3) + self.srp_N = bytesToNumber(p.getVarBytes(2)) + self.srp_g = bytesToNumber(p.getVarBytes(2)) + self.srp_s = p.getVarBytes(1) + self.srp_B = bytesToNumber(p.getVarBytes(2)) + if self.cipherSuite in CipherSuite.srpRsaSuites: + self.signature = p.getVarBytes(2) + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.server_key_exchange, + trial) + w.addVarSeq(numberToBytes(self.srp_N), 1, 2) + w.addVarSeq(numberToBytes(self.srp_g), 1, 2) + w.addVarSeq(self.srp_s, 1, 1) + w.addVarSeq(numberToBytes(self.srp_B), 1, 2) + if self.cipherSuite in CipherSuite.srpRsaSuites: + w.addVarSeq(self.signature, 1, 2) + return HandshakeMsg.postWrite(self, w, trial) + + def hash(self, clientRandom, serverRandom): + oldCipherSuite = self.cipherSuite + self.cipherSuite = None + try: + bytes = clientRandom + serverRandom + self.write()[4:] + s = bytesToString(bytes) + return stringToBytes(md5.md5(s).digest() + sha.sha(s).digest()) + finally: + self.cipherSuite = oldCipherSuite + +class ServerHelloDone(HandshakeMsg): + def __init__(self): + self.contentType = ContentType.handshake + + def create(self): + return self + + def parse(self, p): + p.startLengthCheck(3) + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.server_hello_done, trial) + return HandshakeMsg.postWrite(self, w, 
trial) + +class ClientKeyExchange(HandshakeMsg): + def __init__(self, cipherSuite, version=None): + self.cipherSuite = cipherSuite + self.version = version + self.contentType = ContentType.handshake + self.srp_A = 0 + self.encryptedPreMasterSecret = createByteArraySequence([]) + + def createSRP(self, srp_A): + self.srp_A = srp_A + return self + + def createRSA(self, encryptedPreMasterSecret): + self.encryptedPreMasterSecret = encryptedPreMasterSecret + return self + + def parse(self, p): + p.startLengthCheck(3) + if self.cipherSuite in CipherSuite.srpSuites + \ + CipherSuite.srpRsaSuites: + self.srp_A = bytesToNumber(p.getVarBytes(2)) + elif self.cipherSuite in CipherSuite.rsaSuites: + if self.version in ((3,1), (3,2)): + self.encryptedPreMasterSecret = p.getVarBytes(2) + elif self.version == (3,0): + self.encryptedPreMasterSecret = \ + p.getFixBytes(len(p.bytes)-p.index) + else: + raise AssertionError() + else: + raise AssertionError() + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.client_key_exchange, + trial) + if self.cipherSuite in CipherSuite.srpSuites + \ + CipherSuite.srpRsaSuites: + w.addVarSeq(numberToBytes(self.srp_A), 1, 2) + elif self.cipherSuite in CipherSuite.rsaSuites: + if self.version in ((3,1), (3,2)): + w.addVarSeq(self.encryptedPreMasterSecret, 1, 2) + elif self.version == (3,0): + w.addFixSeq(self.encryptedPreMasterSecret, 1) + else: + raise AssertionError() + else: + raise AssertionError() + return HandshakeMsg.postWrite(self, w, trial) + +class CertificateVerify(HandshakeMsg): + def __init__(self): + self.contentType = ContentType.handshake + self.signature = createByteArraySequence([]) + + def create(self, signature): + self.signature = signature + return self + + def parse(self, p): + p.startLengthCheck(3) + self.signature = p.getVarBytes(2) + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, 
HandshakeType.certificate_verify, + trial) + w.addVarSeq(self.signature, 1, 2) + return HandshakeMsg.postWrite(self, w, trial) + +class ChangeCipherSpec(Msg): + def __init__(self): + self.contentType = ContentType.change_cipher_spec + self.type = 1 + + def create(self): + self.type = 1 + return self + + def parse(self, p): + p.setLengthCheck(1) + self.type = p.get(1) + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = Msg.preWrite(self, trial) + w.add(self.type,1) + return Msg.postWrite(self, w, trial) + + +class Finished(HandshakeMsg): + def __init__(self, version): + self.contentType = ContentType.handshake + self.version = version + self.verify_data = createByteArraySequence([]) + + def create(self, verify_data): + self.verify_data = verify_data + return self + + def parse(self, p): + p.startLengthCheck(3) + if self.version == (3,0): + self.verify_data = p.getFixBytes(36) + elif self.version in ((3,1), (3,2)): + self.verify_data = p.getFixBytes(12) + else: + raise AssertionError() + p.stopLengthCheck() + return self + + def write(self, trial=False): + w = HandshakeMsg.preWrite(self, HandshakeType.finished, trial) + w.addFixSeq(self.verify_data, 1) + return HandshakeMsg.postWrite(self, w, trial) + +class ApplicationData(Msg): + def __init__(self): + self.contentType = ContentType.application_data + self.bytes = createByteArraySequence([]) + + def create(self, bytes): + self.bytes = bytes + return self + + def parse(self, p): + self.bytes = p.bytes + return self + + def write(self): + return self.bytes \ No newline at end of file diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/AES.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/AES.py new file mode 100755 index 0000000..8413f4c --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/AES.py @@ -0,0 +1,31 @@ +"""Abstract class for AES.""" + +class AES: + def __init__(self, key, mode, IV, implementation): + if len(key) not in (16, 24, 32): + raise AssertionError() + if mode != 2: + raise 
AssertionError() + if len(IV) != 16: + raise AssertionError() + self.isBlockCipher = True + self.block_size = 16 + self.implementation = implementation + if len(key)==16: + self.name = "aes128" + elif len(key)==24: + self.name = "aes192" + elif len(key)==32: + self.name = "aes256" + else: + raise AssertionError() + + #CBC-Mode encryption, returns ciphertext + #WARNING: *MAY* modify the input as well + def encrypt(self, plaintext): + assert(len(plaintext) % 16 == 0) + + #CBC-Mode decryption, returns plaintext + #WARNING: *MAY* modify the input as well + def decrypt(self, ciphertext): + assert(len(ciphertext) % 16 == 0) \ No newline at end of file diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/ASN1Parser.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/ASN1Parser.py new file mode 100755 index 0000000..16b50f2 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/ASN1Parser.py @@ -0,0 +1,34 @@ +"""Class for parsing ASN.1""" +from compat import * +from codec import * + +#Takes a byte array which has a DER TLV field at its head +class ASN1Parser: + def __init__(self, bytes): + p = Parser(bytes) + p.get(1) #skip Type + + #Get Length + self.length = self._getASN1Length(p) + + #Get Value + self.value = p.getFixBytes(self.length) + + #Assuming this is a sequence... 
+ def getChild(self, which): + p = Parser(self.value) + for x in range(which+1): + markIndex = p.index + p.get(1) #skip Type + length = self._getASN1Length(p) + p.getFixBytes(length) + return ASN1Parser(p.bytes[markIndex : p.index]) + + #Decode the ASN.1 DER length field + def _getASN1Length(self, p): + firstLength = p.get(1) + if firstLength<=127: + return firstLength + else: + lengthLength = firstLength & 0x7F + return p.get(lengthLength) diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/Cryptlib_AES.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/Cryptlib_AES.py new file mode 100755 index 0000000..9e101fc --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/Cryptlib_AES.py @@ -0,0 +1,34 @@ +"""Cryptlib AES implementation.""" + +from cryptomath import * +from AES import * + +if cryptlibpyLoaded: + + def new(key, mode, IV): + return Cryptlib_AES(key, mode, IV) + + class Cryptlib_AES(AES): + + def __init__(self, key, mode, IV): + AES.__init__(self, key, mode, IV, "cryptlib") + self.context = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED, cryptlib_py.CRYPT_ALGO_AES) + cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_MODE, cryptlib_py.CRYPT_MODE_CBC) + cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_KEYSIZE, len(key)) + cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_KEY, key) + cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_IV, IV) + + def __del__(self): + cryptlib_py.cryptDestroyContext(self.context) + + def encrypt(self, plaintext): + AES.encrypt(self, plaintext) + bytes = stringToBytes(plaintext) + cryptlib_py.cryptEncrypt(self.context, bytes) + return bytesToString(bytes) + + def decrypt(self, ciphertext): + AES.decrypt(self, ciphertext) + bytes = stringToBytes(ciphertext) + cryptlib_py.cryptDecrypt(self.context, bytes) + return bytesToString(bytes) diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/Cryptlib_RC4.py 
b/gdata.py-1.2.3/src/gdata/tlslite/utils/Cryptlib_RC4.py new file mode 100755 index 0000000..7c6d087 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/Cryptlib_RC4.py @@ -0,0 +1,28 @@ +"""Cryptlib RC4 implementation.""" + +from cryptomath import * +from RC4 import RC4 + +if cryptlibpyLoaded: + + def new(key): + return Cryptlib_RC4(key) + + class Cryptlib_RC4(RC4): + + def __init__(self, key): + RC4.__init__(self, key, "cryptlib") + self.context = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED, cryptlib_py.CRYPT_ALGO_RC4) + cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_KEYSIZE, len(key)) + cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_KEY, key) + + def __del__(self): + cryptlib_py.cryptDestroyContext(self.context) + + def encrypt(self, plaintext): + bytes = stringToBytes(plaintext) + cryptlib_py.cryptEncrypt(self.context, bytes) + return bytesToString(bytes) + + def decrypt(self, ciphertext): + return self.encrypt(ciphertext) \ No newline at end of file diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/Cryptlib_TripleDES.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/Cryptlib_TripleDES.py new file mode 100755 index 0000000..a4f8155 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/Cryptlib_TripleDES.py @@ -0,0 +1,35 @@ +"""Cryptlib 3DES implementation.""" + +from cryptomath import * + +from TripleDES import * + +if cryptlibpyLoaded: + + def new(key, mode, IV): + return Cryptlib_TripleDES(key, mode, IV) + + class Cryptlib_TripleDES(TripleDES): + + def __init__(self, key, mode, IV): + TripleDES.__init__(self, key, mode, IV, "cryptlib") + self.context = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED, cryptlib_py.CRYPT_ALGO_3DES) + cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_MODE, cryptlib_py.CRYPT_MODE_CBC) + cryptlib_py.cryptSetAttribute(self.context, cryptlib_py.CRYPT_CTXINFO_KEYSIZE, len(key)) + cryptlib_py.cryptSetAttributeString(self.context, 
cryptlib_py.CRYPT_CTXINFO_KEY, key) + cryptlib_py.cryptSetAttributeString(self.context, cryptlib_py.CRYPT_CTXINFO_IV, IV) + + def __del__(self): + cryptlib_py.cryptDestroyContext(self.context) + + def encrypt(self, plaintext): + TripleDES.encrypt(self, plaintext) + bytes = stringToBytes(plaintext) + cryptlib_py.cryptEncrypt(self.context, bytes) + return bytesToString(bytes) + + def decrypt(self, ciphertext): + TripleDES.decrypt(self, ciphertext) + bytes = stringToBytes(ciphertext) + cryptlib_py.cryptDecrypt(self.context, bytes) + return bytesToString(bytes) \ No newline at end of file diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/OpenSSL_AES.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/OpenSSL_AES.py new file mode 100755 index 0000000..e60679b --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/OpenSSL_AES.py @@ -0,0 +1,49 @@ +"""OpenSSL/M2Crypto AES implementation.""" + +from cryptomath import * +from AES import * + +if m2cryptoLoaded: + + def new(key, mode, IV): + return OpenSSL_AES(key, mode, IV) + + class OpenSSL_AES(AES): + + def __init__(self, key, mode, IV): + AES.__init__(self, key, mode, IV, "openssl") + self.key = key + self.IV = IV + + def _createContext(self, encrypt): + context = m2.cipher_ctx_new() + if len(self.key)==16: + cipherType = m2.aes_128_cbc() + if len(self.key)==24: + cipherType = m2.aes_192_cbc() + if len(self.key)==32: + cipherType = m2.aes_256_cbc() + m2.cipher_init(context, cipherType, self.key, self.IV, encrypt) + return context + + def encrypt(self, plaintext): + AES.encrypt(self, plaintext) + context = self._createContext(1) + ciphertext = m2.cipher_update(context, plaintext) + m2.cipher_ctx_free(context) + self.IV = ciphertext[-self.block_size:] + return ciphertext + + def decrypt(self, ciphertext): + AES.decrypt(self, ciphertext) + context = self._createContext(0) + #I think M2Crypto has a bug - it fails to decrypt and return the last block passed in. 
+ #To work around this, we append sixteen zeros to the string, below: + plaintext = m2.cipher_update(context, ciphertext+('\0'*16)) + + #If this bug is ever fixed, then plaintext will end up having a garbage + #plaintext block on the end. That's okay - the below code will discard it. + plaintext = plaintext[:len(ciphertext)] + m2.cipher_ctx_free(context) + self.IV = ciphertext[-self.block_size:] + return plaintext diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/OpenSSL_RC4.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/OpenSSL_RC4.py new file mode 100755 index 0000000..ac433aa --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/OpenSSL_RC4.py @@ -0,0 +1,25 @@ +"""OpenSSL/M2Crypto RC4 implementation.""" + +from cryptomath import * +from RC4 import RC4 + +if m2cryptoLoaded: + + def new(key): + return OpenSSL_RC4(key) + + class OpenSSL_RC4(RC4): + + def __init__(self, key): + RC4.__init__(self, key, "openssl") + self.rc4 = m2.rc4_new() + m2.rc4_set_key(self.rc4, key) + + def __del__(self): + m2.rc4_free(self.rc4) + + def encrypt(self, plaintext): + return m2.rc4_update(self.rc4, plaintext) + + def decrypt(self, ciphertext): + return self.encrypt(ciphertext) diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/OpenSSL_RSAKey.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/OpenSSL_RSAKey.py new file mode 100755 index 0000000..fe1a3cd --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/OpenSSL_RSAKey.py @@ -0,0 +1,148 @@ +"""OpenSSL/M2Crypto RSA implementation.""" + +from cryptomath import * + +from RSAKey import * +from Python_RSAKey import Python_RSAKey + +#copied from M2Crypto.util.py, so when we load the local copy of m2 +#we can still use it +def password_callback(v, prompt1='Enter private key passphrase:', + prompt2='Verify passphrase:'): + from getpass import getpass + while 1: + try: + p1=getpass(prompt1) + if v: + p2=getpass(prompt2) + if p1==p2: + break + else: + break + except KeyboardInterrupt: + return None + return p1 + + +if m2cryptoLoaded: + 
class OpenSSL_RSAKey(RSAKey): + def __init__(self, n=0, e=0): + self.rsa = None + self._hasPrivateKey = False + if (n and not e) or (e and not n): + raise AssertionError() + if n and e: + self.rsa = m2.rsa_new() + m2.rsa_set_n(self.rsa, numberToMPI(n)) + m2.rsa_set_e(self.rsa, numberToMPI(e)) + + def __del__(self): + if self.rsa: + m2.rsa_free(self.rsa) + + def __getattr__(self, name): + if name == 'e': + if not self.rsa: + return 0 + return mpiToNumber(m2.rsa_get_e(self.rsa)) + elif name == 'n': + if not self.rsa: + return 0 + return mpiToNumber(m2.rsa_get_n(self.rsa)) + else: + raise AttributeError + + def hasPrivateKey(self): + return self._hasPrivateKey + + def hash(self): + return Python_RSAKey(self.n, self.e).hash() + + def _rawPrivateKeyOp(self, m): + s = numberToString(m) + byteLength = numBytes(self.n) + if len(s)== byteLength: + pass + elif len(s) == byteLength-1: + s = '\0' + s + else: + raise AssertionError() + c = stringToNumber(m2.rsa_private_encrypt(self.rsa, s, + m2.no_padding)) + return c + + def _rawPublicKeyOp(self, c): + s = numberToString(c) + byteLength = numBytes(self.n) + if len(s)== byteLength: + pass + elif len(s) == byteLength-1: + s = '\0' + s + else: + raise AssertionError() + m = stringToNumber(m2.rsa_public_decrypt(self.rsa, s, + m2.no_padding)) + return m + + def acceptsPassword(self): return True + + def write(self, password=None): + bio = m2.bio_new(m2.bio_s_mem()) + if self._hasPrivateKey: + if password: + def f(v): return password + m2.rsa_write_key(self.rsa, bio, m2.des_ede_cbc(), f) + else: + def f(): pass + m2.rsa_write_key_no_cipher(self.rsa, bio, f) + else: + if password: + raise AssertionError() + m2.rsa_write_pub_key(self.rsa, bio) + s = m2.bio_read(bio, m2.bio_ctrl_pending(bio)) + m2.bio_free(bio) + return s + + def writeXMLPublicKey(self, indent=''): + return Python_RSAKey(self.n, self.e).write(indent) + + def generate(bits): + key = OpenSSL_RSAKey() + def f():pass + key.rsa = m2.rsa_generate_key(bits, 3, f) + 
key._hasPrivateKey = True + return key + generate = staticmethod(generate) + + def parse(s, passwordCallback=None): + if s.startswith("-----BEGIN "): + if passwordCallback==None: + callback = password_callback + else: + def f(v, prompt1=None, prompt2=None): + return passwordCallback() + callback = f + bio = m2.bio_new(m2.bio_s_mem()) + try: + m2.bio_write(bio, s) + key = OpenSSL_RSAKey() + if s.startswith("-----BEGIN RSA PRIVATE KEY-----"): + def f():pass + key.rsa = m2.rsa_read_key(bio, callback) + if key.rsa == None: + raise SyntaxError() + key._hasPrivateKey = True + elif s.startswith("-----BEGIN PUBLIC KEY-----"): + key.rsa = m2.rsa_read_pub_key(bio) + if key.rsa == None: + raise SyntaxError() + key._hasPrivateKey = False + else: + raise SyntaxError() + return key + finally: + m2.bio_free(bio) + else: + raise SyntaxError() + + parse = staticmethod(parse) diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/OpenSSL_TripleDES.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/OpenSSL_TripleDES.py new file mode 100755 index 0000000..f5ba165 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/OpenSSL_TripleDES.py @@ -0,0 +1,44 @@ +"""OpenSSL/M2Crypto 3DES implementation.""" + +from cryptomath import * +from TripleDES import * + +if m2cryptoLoaded: + + def new(key, mode, IV): + return OpenSSL_TripleDES(key, mode, IV) + + class OpenSSL_TripleDES(TripleDES): + + def __init__(self, key, mode, IV): + TripleDES.__init__(self, key, mode, IV, "openssl") + self.key = key + self.IV = IV + + def _createContext(self, encrypt): + context = m2.cipher_ctx_new() + cipherType = m2.des_ede3_cbc() + m2.cipher_init(context, cipherType, self.key, self.IV, encrypt) + return context + + def encrypt(self, plaintext): + TripleDES.encrypt(self, plaintext) + context = self._createContext(1) + ciphertext = m2.cipher_update(context, plaintext) + m2.cipher_ctx_free(context) + self.IV = ciphertext[-self.block_size:] + return ciphertext + + def decrypt(self, ciphertext): + TripleDES.decrypt(self, 
ciphertext) + context = self._createContext(0) + #I think M2Crypto has a bug - it fails to decrypt and return the last block passed in. + #To work around this, we append sixteen zeros to the string, below: + plaintext = m2.cipher_update(context, ciphertext+('\0'*16)) + + #If this bug is ever fixed, then plaintext will end up having a garbage + #plaintext block on the end. That's okay - the below code will ignore it. + plaintext = plaintext[:len(ciphertext)] + m2.cipher_ctx_free(context) + self.IV = ciphertext[-self.block_size:] + return plaintext \ No newline at end of file diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/PyCrypto_AES.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/PyCrypto_AES.py new file mode 100755 index 0000000..e38b19d --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/PyCrypto_AES.py @@ -0,0 +1,22 @@ +"""PyCrypto AES implementation.""" + +from cryptomath import * +from AES import * + +if pycryptoLoaded: + import Crypto.Cipher.AES + + def new(key, mode, IV): + return PyCrypto_AES(key, mode, IV) + + class PyCrypto_AES(AES): + + def __init__(self, key, mode, IV): + AES.__init__(self, key, mode, IV, "pycrypto") + self.context = Crypto.Cipher.AES.new(key, mode, IV) + + def encrypt(self, plaintext): + return self.context.encrypt(plaintext) + + def decrypt(self, ciphertext): + return self.context.decrypt(ciphertext) \ No newline at end of file diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/PyCrypto_RC4.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/PyCrypto_RC4.py new file mode 100755 index 0000000..6c6d86a --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/PyCrypto_RC4.py @@ -0,0 +1,22 @@ +"""PyCrypto RC4 implementation.""" + +from cryptomath import * +from RC4 import * + +if pycryptoLoaded: + import Crypto.Cipher.ARC4 + + def new(key): + return PyCrypto_RC4(key) + + class PyCrypto_RC4(RC4): + + def __init__(self, key): + RC4.__init__(self, key, "pycrypto") + self.context = Crypto.Cipher.ARC4.new(key) + + def encrypt(self, 
plaintext): + return self.context.encrypt(plaintext) + + def decrypt(self, ciphertext): + return self.context.decrypt(ciphertext) \ No newline at end of file diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/PyCrypto_RSAKey.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/PyCrypto_RSAKey.py new file mode 100755 index 0000000..48b5cef --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/PyCrypto_RSAKey.py @@ -0,0 +1,61 @@ +"""PyCrypto RSA implementation.""" + +from cryptomath import * + +from RSAKey import * +from Python_RSAKey import Python_RSAKey + +if pycryptoLoaded: + + from Crypto.PublicKey import RSA + + class PyCrypto_RSAKey(RSAKey): + def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0): + if not d: + self.rsa = RSA.construct( (n, e) ) + else: + self.rsa = RSA.construct( (n, e, d, p, q) ) + + def __getattr__(self, name): + return getattr(self.rsa, name) + + def hasPrivateKey(self): + return self.rsa.has_private() + + def hash(self): + return Python_RSAKey(self.n, self.e).hash() + + def _rawPrivateKeyOp(self, m): + s = numberToString(m) + byteLength = numBytes(self.n) + if len(s)== byteLength: + pass + elif len(s) == byteLength-1: + s = '\0' + s + else: + raise AssertionError() + c = stringToNumber(self.rsa.decrypt((s,))) + return c + + def _rawPublicKeyOp(self, c): + s = numberToString(c) + byteLength = numBytes(self.n) + if len(s)== byteLength: + pass + elif len(s) == byteLength-1: + s = '\0' + s + else: + raise AssertionError() + m = stringToNumber(self.rsa.encrypt(s, None)[0]) + return m + + def writeXMLPublicKey(self, indent=''): + return Python_RSAKey(self.n, self.e).write(indent) + + def generate(bits): + key = PyCrypto_RSAKey() + def f(numBytes): + return bytesToString(getRandomBytes(numBytes)) + key.rsa = RSA.generate(bits, f) + return key + generate = staticmethod(generate) diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/PyCrypto_TripleDES.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/PyCrypto_TripleDES.py new file mode 100755 index 
0000000..8c22bb8 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/PyCrypto_TripleDES.py @@ -0,0 +1,22 @@ +"""PyCrypto 3DES implementation.""" + +from cryptomath import * +from TripleDES import * + +if pycryptoLoaded: + import Crypto.Cipher.DES3 + + def new(key, mode, IV): + return PyCrypto_TripleDES(key, mode, IV) + + class PyCrypto_TripleDES(TripleDES): + + def __init__(self, key, mode, IV): + TripleDES.__init__(self, key, mode, IV, "pycrypto") + self.context = Crypto.Cipher.DES3.new(key, mode, IV) + + def encrypt(self, plaintext): + return self.context.encrypt(plaintext) + + def decrypt(self, ciphertext): + return self.context.decrypt(ciphertext) \ No newline at end of file diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/Python_AES.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/Python_AES.py new file mode 100755 index 0000000..657152f --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/Python_AES.py @@ -0,0 +1,68 @@ +"""Pure-Python AES implementation.""" + +from cryptomath import * + +from AES import * +from rijndael import rijndael + +def new(key, mode, IV): + return Python_AES(key, mode, IV) + +class Python_AES(AES): + def __init__(self, key, mode, IV): + AES.__init__(self, key, mode, IV, "python") + self.rijndael = rijndael(key, 16) + self.IV = IV + + def encrypt(self, plaintext): + AES.encrypt(self, plaintext) + + plaintextBytes = stringToBytes(plaintext) + chainBytes = stringToBytes(self.IV) + + #CBC Mode: For each block... 
+ for x in range(len(plaintextBytes)/16): + + #XOR with the chaining block + blockBytes = plaintextBytes[x*16 : (x*16)+16] + for y in range(16): + blockBytes[y] ^= chainBytes[y] + blockString = bytesToString(blockBytes) + + #Encrypt it + encryptedBytes = stringToBytes(self.rijndael.encrypt(blockString)) + + #Overwrite the input with the output + for y in range(16): + plaintextBytes[(x*16)+y] = encryptedBytes[y] + + #Set the next chaining block + chainBytes = encryptedBytes + + self.IV = bytesToString(chainBytes) + return bytesToString(plaintextBytes) + + def decrypt(self, ciphertext): + AES.decrypt(self, ciphertext) + + ciphertextBytes = stringToBytes(ciphertext) + chainBytes = stringToBytes(self.IV) + + #CBC Mode: For each block... + for x in range(len(ciphertextBytes)/16): + + #Decrypt it + blockBytes = ciphertextBytes[x*16 : (x*16)+16] + blockString = bytesToString(blockBytes) + decryptedBytes = stringToBytes(self.rijndael.decrypt(blockString)) + + #XOR with the chaining block and overwrite the input with output + for y in range(16): + decryptedBytes[y] ^= chainBytes[y] + ciphertextBytes[(x*16)+y] = decryptedBytes[y] + + #Set the next chaining block + chainBytes = blockBytes + + self.IV = bytesToString(chainBytes) + return bytesToString(ciphertextBytes) diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/Python_RC4.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/Python_RC4.py new file mode 100755 index 0000000..56ce5fb --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/Python_RC4.py @@ -0,0 +1,39 @@ +"""Pure-Python RC4 implementation.""" + +from RC4 import RC4 +from cryptomath import * + +def new(key): + return Python_RC4(key) + +class Python_RC4(RC4): + def __init__(self, key): + RC4.__init__(self, key, "python") + keyBytes = stringToBytes(key) + S = [i for i in range(256)] + j = 0 + for i in range(256): + j = (j + S[i] + keyBytes[i % len(keyBytes)]) % 256 + S[i], S[j] = S[j], S[i] + + self.S = S + self.i = 0 + self.j = 0 + + def encrypt(self, plaintext): + 
plaintextBytes = stringToBytes(plaintext) + S = self.S + i = self.i + j = self.j + for x in range(len(plaintextBytes)): + i = (i + 1) % 256 + j = (j + S[i]) % 256 + S[i], S[j] = S[j], S[i] + t = (S[i] + S[j]) % 256 + plaintextBytes[x] ^= S[t] + self.i = i + self.j = j + return bytesToString(plaintextBytes) + + def decrypt(self, ciphertext): + return self.encrypt(ciphertext) diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/Python_RSAKey.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/Python_RSAKey.py new file mode 100755 index 0000000..2c469b5 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/Python_RSAKey.py @@ -0,0 +1,209 @@ +"""Pure-Python RSA implementation.""" + +from cryptomath import * +import xmltools +from ASN1Parser import ASN1Parser +from RSAKey import * + +class Python_RSAKey(RSAKey): + def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0): + if (n and not e) or (e and not n): + raise AssertionError() + self.n = n + self.e = e + self.d = d + self.p = p + self.q = q + self.dP = dP + self.dQ = dQ + self.qInv = qInv + self.blinder = 0 + self.unblinder = 0 + + def hasPrivateKey(self): + return self.d != 0 + + def hash(self): + s = self.writeXMLPublicKey('\t\t') + return hashAndBase64(s.strip()) + + def _rawPrivateKeyOp(self, m): + #Create blinding values, on the first pass: + if not self.blinder: + self.unblinder = getRandomNumber(2, self.n) + self.blinder = powMod(invMod(self.unblinder, self.n), self.e, + self.n) + + #Blind the input + m = (m * self.blinder) % self.n + + #Perform the RSA operation + c = self._rawPrivateKeyOpHelper(m) + + #Unblind the output + c = (c * self.unblinder) % self.n + + #Update blinding values + self.blinder = (self.blinder * self.blinder) % self.n + self.unblinder = (self.unblinder * self.unblinder) % self.n + + #Return the output + return c + + + def _rawPrivateKeyOpHelper(self, m): + #Non-CRT version + #c = powMod(m, self.d, self.n) + + #CRT version (~3x faster) + s1 = powMod(m, self.dP, self.p) + s2 = 
powMod(m, self.dQ, self.q) + h = ((s1 - s2) * self.qInv) % self.p + c = s2 + self.q * h + return c + + def _rawPublicKeyOp(self, c): + m = powMod(c, self.e, self.n) + return m + + def acceptsPassword(self): return False + + def write(self, indent=''): + if self.d: + s = indent+'\n' + else: + s = indent+'\n' + s += indent+'\t%s\n' % numberToBase64(self.n) + s += indent+'\t%s\n' % numberToBase64(self.e) + if self.d: + s += indent+'\t%s\n' % numberToBase64(self.d) + s += indent+'\t

    %s

    \n' % numberToBase64(self.p) + s += indent+'\t%s\n' % numberToBase64(self.q) + s += indent+'\t%s\n' % numberToBase64(self.dP) + s += indent+'\t%s\n' % numberToBase64(self.dQ) + s += indent+'\t%s\n' % numberToBase64(self.qInv) + s += indent+'
    ' + else: + s += indent+'
    ' + #Only add \n if part of a larger structure + if indent != '': + s += '\n' + return s + + def writeXMLPublicKey(self, indent=''): + return Python_RSAKey(self.n, self.e).write(indent) + + def generate(bits): + key = Python_RSAKey() + p = getRandomPrime(bits/2, False) + q = getRandomPrime(bits/2, False) + t = lcm(p-1, q-1) + key.n = p * q + key.e = 3L #Needed to be long, for Java + key.d = invMod(key.e, t) + key.p = p + key.q = q + key.dP = key.d % (p-1) + key.dQ = key.d % (q-1) + key.qInv = invMod(q, p) + return key + generate = staticmethod(generate) + + def parsePEM(s, passwordCallback=None): + """Parse a string containing a or , or + PEM-encoded key.""" + + start = s.find("-----BEGIN PRIVATE KEY-----") + if start != -1: + end = s.find("-----END PRIVATE KEY-----") + if end == -1: + raise SyntaxError("Missing PEM Postfix") + s = s[start+len("-----BEGIN PRIVATE KEY -----") : end] + bytes = base64ToBytes(s) + return Python_RSAKey._parsePKCS8(bytes) + else: + start = s.find("-----BEGIN RSA PRIVATE KEY-----") + if start != -1: + end = s.find("-----END RSA PRIVATE KEY-----") + if end == -1: + raise SyntaxError("Missing PEM Postfix") + s = s[start+len("-----BEGIN RSA PRIVATE KEY -----") : end] + bytes = base64ToBytes(s) + return Python_RSAKey._parseSSLeay(bytes) + raise SyntaxError("Missing PEM Prefix") + parsePEM = staticmethod(parsePEM) + + def parseXML(s): + element = xmltools.parseAndStripWhitespace(s) + return Python_RSAKey._parseXML(element) + parseXML = staticmethod(parseXML) + + def _parsePKCS8(bytes): + p = ASN1Parser(bytes) + + version = p.getChild(0).value[0] + if version != 0: + raise SyntaxError("Unrecognized PKCS8 version") + + rsaOID = p.getChild(1).value + if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]: + raise SyntaxError("Unrecognized AlgorithmIdentifier") + + #Get the privateKey + privateKeyP = p.getChild(2) + + #Adjust for OCTET STRING encapsulation + privateKeyP = ASN1Parser(privateKeyP.value) + + return 
Python_RSAKey._parseASN1PrivateKey(privateKeyP) + _parsePKCS8 = staticmethod(_parsePKCS8) + + def _parseSSLeay(bytes): + privateKeyP = ASN1Parser(bytes) + return Python_RSAKey._parseASN1PrivateKey(privateKeyP) + _parseSSLeay = staticmethod(_parseSSLeay) + + def _parseASN1PrivateKey(privateKeyP): + version = privateKeyP.getChild(0).value[0] + if version != 0: + raise SyntaxError("Unrecognized RSAPrivateKey version") + n = bytesToNumber(privateKeyP.getChild(1).value) + e = bytesToNumber(privateKeyP.getChild(2).value) + d = bytesToNumber(privateKeyP.getChild(3).value) + p = bytesToNumber(privateKeyP.getChild(4).value) + q = bytesToNumber(privateKeyP.getChild(5).value) + dP = bytesToNumber(privateKeyP.getChild(6).value) + dQ = bytesToNumber(privateKeyP.getChild(7).value) + qInv = bytesToNumber(privateKeyP.getChild(8).value) + return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv) + _parseASN1PrivateKey = staticmethod(_parseASN1PrivateKey) + + def _parseXML(element): + try: + xmltools.checkName(element, "privateKey") + except SyntaxError: + xmltools.checkName(element, "publicKey") + + #Parse attributes + xmltools.getReqAttribute(element, "xmlns", "http://trevp.net/rsa\Z") + xmltools.checkNoMoreAttributes(element) + + #Parse public values ( and ) + n = base64ToNumber(xmltools.getText(xmltools.getChild(element, 0, "n"), xmltools.base64RegEx)) + e = base64ToNumber(xmltools.getText(xmltools.getChild(element, 1, "e"), xmltools.base64RegEx)) + d = 0 + p = 0 + q = 0 + dP = 0 + dQ = 0 + qInv = 0 + #Parse private values, if present + if element.childNodes.length>=3: + d = base64ToNumber(xmltools.getText(xmltools.getChild(element, 2, "d"), xmltools.base64RegEx)) + p = base64ToNumber(xmltools.getText(xmltools.getChild(element, 3, "p"), xmltools.base64RegEx)) + q = base64ToNumber(xmltools.getText(xmltools.getChild(element, 4, "q"), xmltools.base64RegEx)) + dP = base64ToNumber(xmltools.getText(xmltools.getChild(element, 5, "dP"), xmltools.base64RegEx)) + dQ = 
base64ToNumber(xmltools.getText(xmltools.getChild(element, 6, "dQ"), xmltools.base64RegEx)) + qInv = base64ToNumber(xmltools.getText(xmltools.getLastChild(element, 7, "qInv"), xmltools.base64RegEx)) + return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv) + _parseXML = staticmethod(_parseXML) diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/RC4.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/RC4.py new file mode 100755 index 0000000..5506923 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/RC4.py @@ -0,0 +1,17 @@ +"""Abstract class for RC4.""" + +from compat import * #For False + +class RC4: + def __init__(self, keyBytes, implementation): + if len(keyBytes) < 16 or len(keyBytes) > 256: + raise ValueError() + self.isBlockCipher = False + self.name = "rc4" + self.implementation = implementation + + def encrypt(self, plaintext): + raise NotImplementedError() + + def decrypt(self, ciphertext): + raise NotImplementedError() \ No newline at end of file diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/RSAKey.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/RSAKey.py new file mode 100755 index 0000000..37c292d --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/RSAKey.py @@ -0,0 +1,264 @@ +"""Abstract class for RSA.""" + +from cryptomath import * + + +class RSAKey: + """This is an abstract base class for RSA keys. + + Particular implementations of RSA keys, such as + L{OpenSSL_RSAKey.OpenSSL_RSAKey}, + L{Python_RSAKey.Python_RSAKey}, and + L{PyCrypto_RSAKey.PyCrypto_RSAKey}, + inherit from this. + + To create or parse an RSA key, don't use one of these classes + directly. Instead, use the factory functions in + L{tlslite.utils.keyfactory}. + """ + + def __init__(self, n=0, e=0): + """Create a new RSA key. + + If n and e are passed in, the new key will be initialized. + + @type n: int + @param n: RSA modulus. + + @type e: int + @param e: RSA public exponent. + """ + raise NotImplementedError() + + def __len__(self): + """Return the length of this key in bits. 
+ + @rtype: int + """ + return numBits(self.n) + + def hasPrivateKey(self): + """Return whether or not this key has a private component. + + @rtype: bool + """ + raise NotImplementedError() + + def hash(self): + """Return the cryptoID value corresponding to this + key. + + @rtype: str + """ + raise NotImplementedError() + + def getSigningAlgorithm(self): + """Return the cryptoID sigAlgo value corresponding to this key. + + @rtype: str + """ + return "pkcs1-sha1" + + def hashAndSign(self, bytes): + """Hash and sign the passed-in bytes. + + This requires the key to have a private component. It performs + a PKCS1-SHA1 signature on the passed-in data. + + @type bytes: str or L{array.array} of unsigned bytes + @param bytes: The value which will be hashed and signed. + + @rtype: L{array.array} of unsigned bytes. + @return: A PKCS1-SHA1 signature on the passed-in data. + """ + if not isinstance(bytes, type("")): + bytes = bytesToString(bytes) + hashBytes = stringToBytes(sha.sha(bytes).digest()) + prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes) + sigBytes = self.sign(prefixedHashBytes) + return sigBytes + + def hashAndVerify(self, sigBytes, bytes): + """Hash and verify the passed-in bytes with the signature. + + This verifies a PKCS1-SHA1 signature on the passed-in data. + + @type sigBytes: L{array.array} of unsigned bytes + @param sigBytes: A PKCS1-SHA1 signature. + + @type bytes: str or L{array.array} of unsigned bytes + @param bytes: The value which will be hashed and verified. + + @rtype: bool + @return: Whether the signature matches the passed-in data. + """ + if not isinstance(bytes, type("")): + bytes = bytesToString(bytes) + hashBytes = stringToBytes(sha.sha(bytes).digest()) + prefixedHashBytes = self._addPKCS1SHA1Prefix(hashBytes) + return self.verify(sigBytes, prefixedHashBytes) + + def sign(self, bytes): + """Sign the passed-in bytes. + + This requires the key to have a private component. It performs + a PKCS1 signature on the passed-in data. 
+ + @type bytes: L{array.array} of unsigned bytes + @param bytes: The value which will be signed. + + @rtype: L{array.array} of unsigned bytes. + @return: A PKCS1 signature on the passed-in data. + """ + if not self.hasPrivateKey(): + raise AssertionError() + paddedBytes = self._addPKCS1Padding(bytes, 1) + m = bytesToNumber(paddedBytes) + if m >= self.n: + raise ValueError() + c = self._rawPrivateKeyOp(m) + sigBytes = numberToBytes(c) + return sigBytes + + def verify(self, sigBytes, bytes): + """Verify the passed-in bytes with the signature. + + This verifies a PKCS1 signature on the passed-in data. + + @type sigBytes: L{array.array} of unsigned bytes + @param sigBytes: A PKCS1 signature. + + @type bytes: L{array.array} of unsigned bytes + @param bytes: The value which will be verified. + + @rtype: bool + @return: Whether the signature matches the passed-in data. + """ + paddedBytes = self._addPKCS1Padding(bytes, 1) + c = bytesToNumber(sigBytes) + if c >= self.n: + return False + m = self._rawPublicKeyOp(c) + checkBytes = numberToBytes(m) + return checkBytes == paddedBytes + + def encrypt(self, bytes): + """Encrypt the passed-in bytes. + + This performs PKCS1 encryption of the passed-in data. + + @type bytes: L{array.array} of unsigned bytes + @param bytes: The value which will be encrypted. + + @rtype: L{array.array} of unsigned bytes. + @return: A PKCS1 encryption of the passed-in data. + """ + paddedBytes = self._addPKCS1Padding(bytes, 2) + m = bytesToNumber(paddedBytes) + if m >= self.n: + raise ValueError() + c = self._rawPublicKeyOp(m) + encBytes = numberToBytes(c) + return encBytes + + def decrypt(self, encBytes): + """Decrypt the passed-in bytes. + + This requires the key to have a private component. It performs + PKCS1 decryption of the passed-in data. + + @type encBytes: L{array.array} of unsigned bytes + @param encBytes: The value which will be decrypted. + + @rtype: L{array.array} of unsigned bytes or None. 
+ @return: A PKCS1 decryption of the passed-in data or None if + the data is not properly formatted. + """ + if not self.hasPrivateKey(): + raise AssertionError() + c = bytesToNumber(encBytes) + if c >= self.n: + return None + m = self._rawPrivateKeyOp(c) + decBytes = numberToBytes(m) + if (len(decBytes) != numBytes(self.n)-1): #Check first byte + return None + if decBytes[0] != 2: #Check second byte + return None + for x in range(len(decBytes)-1): #Scan through for zero separator + if decBytes[x]== 0: + break + else: + return None + return decBytes[x+1:] #Return everything after the separator + + def _rawPrivateKeyOp(self, m): + raise NotImplementedError() + + def _rawPublicKeyOp(self, c): + raise NotImplementedError() + + def acceptsPassword(self): + """Return True if the write() method accepts a password for use + in encrypting the private key. + + @rtype: bool + """ + raise NotImplementedError() + + def write(self, password=None): + """Return a string containing the key. + + @rtype: str + @return: A string describing the key, in whichever format (PEM + or XML) is native to the implementation. + """ + raise NotImplementedError() + + def writeXMLPublicKey(self, indent=''): + """Return a string containing the key. + + @rtype: str + @return: A string describing the public key, in XML format. + """ + return Python_RSAKey(self.n, self.e).write(indent) + + def generate(bits): + """Generate a new key with the specified bit length. 
+ + @rtype: L{tlslite.utils.RSAKey.RSAKey} + """ + raise NotImplementedError() + generate = staticmethod(generate) + + + # ************************************************************************** + # Helper Functions for RSA Keys + # ************************************************************************** + + def _addPKCS1SHA1Prefix(self, bytes): + prefixBytes = createByteArraySequence(\ + [48,33,48,9,6,5,43,14,3,2,26,5,0,4,20]) + prefixedBytes = prefixBytes + bytes + return prefixedBytes + + def _addPKCS1Padding(self, bytes, blockType): + padLength = (numBytes(self.n) - (len(bytes)+3)) + if blockType == 1: #Signature padding + pad = [0xFF] * padLength + elif blockType == 2: #Encryption padding + pad = createByteArraySequence([]) + while len(pad) < padLength: + padBytes = getRandomBytes(padLength * 2) + pad = [b for b in padBytes if b != 0] + pad = pad[:padLength] + else: + raise AssertionError() + + #NOTE: To be proper, we should add [0,blockType]. However, + #the zero is lost when the returned padding is converted + #to a number, so we don't even bother with it. 
Also, + #adding it would cause a misalignment in verify() + padding = createByteArraySequence([blockType] + pad + [0]) + paddedBytes = padding + bytes + return paddedBytes diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/TripleDES.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/TripleDES.py new file mode 100755 index 0000000..2db4588 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/TripleDES.py @@ -0,0 +1,26 @@ +"""Abstract class for 3DES.""" + +from compat import * #For True + +class TripleDES: + def __init__(self, key, mode, IV, implementation): + if len(key) != 24: + raise ValueError() + if mode != 2: + raise ValueError() + if len(IV) != 8: + raise ValueError() + self.isBlockCipher = True + self.block_size = 8 + self.implementation = implementation + self.name = "3des" + + #CBC-Mode encryption, returns ciphertext + #WARNING: *MAY* modify the input as well + def encrypt(self, plaintext): + assert(len(plaintext) % 8 == 0) + + #CBC-Mode decryption, returns plaintext + #WARNING: *MAY* modify the input as well + def decrypt(self, ciphertext): + assert(len(ciphertext) % 8 == 0) diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/__init__.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/__init__.py new file mode 100755 index 0000000..e96b4be --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/__init__.py @@ -0,0 +1,31 @@ +"""Toolkit for crypto and other stuff.""" + +__all__ = ["AES", + "ASN1Parser", + "cipherfactory", + "codec", + "Cryptlib_AES", + "Cryptlib_RC4", + "Cryptlib_TripleDES", + "cryptomath: cryptomath module", + "dateFuncs", + "hmac", + "JCE_RSAKey", + "compat", + "keyfactory", + "OpenSSL_AES", + "OpenSSL_RC4", + "OpenSSL_RSAKey", + "OpenSSL_TripleDES", + "PyCrypto_AES", + "PyCrypto_RC4", + "PyCrypto_RSAKey", + "PyCrypto_TripleDES", + "Python_AES", + "Python_RC4", + "Python_RSAKey", + "RC4", + "rijndael", + "RSAKey", + "TripleDES", + "xmltools"] diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/cipherfactory.py 
b/gdata.py-1.2.3/src/gdata/tlslite/utils/cipherfactory.py new file mode 100755 index 0000000..ccbb6b5 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/cipherfactory.py @@ -0,0 +1,111 @@ +"""Factory functions for symmetric cryptography.""" + +import os + +import Python_AES +import Python_RC4 + +import cryptomath + +tripleDESPresent = False + +if cryptomath.m2cryptoLoaded: + import OpenSSL_AES + import OpenSSL_RC4 + import OpenSSL_TripleDES + tripleDESPresent = True + +if cryptomath.cryptlibpyLoaded: + import Cryptlib_AES + import Cryptlib_RC4 + import Cryptlib_TripleDES + tripleDESPresent = True + +if cryptomath.pycryptoLoaded: + import PyCrypto_AES + import PyCrypto_RC4 + import PyCrypto_TripleDES + tripleDESPresent = True + +# ************************************************************************** +# Factory Functions for AES +# ************************************************************************** + +def createAES(key, IV, implList=None): + """Create a new AES object. + + @type key: str + @param key: A 16, 24, or 32 byte string. + + @type IV: str + @param IV: A 16 byte string + + @rtype: L{tlslite.utils.AES} + @return: An AES object. + """ + if implList == None: + implList = ["cryptlib", "openssl", "pycrypto", "python"] + + for impl in implList: + if impl == "cryptlib" and cryptomath.cryptlibpyLoaded: + return Cryptlib_AES.new(key, 2, IV) + elif impl == "openssl" and cryptomath.m2cryptoLoaded: + return OpenSSL_AES.new(key, 2, IV) + elif impl == "pycrypto" and cryptomath.pycryptoLoaded: + return PyCrypto_AES.new(key, 2, IV) + elif impl == "python": + return Python_AES.new(key, 2, IV) + raise NotImplementedError() + +def createRC4(key, IV, implList=None): + """Create a new RC4 object. + + @type key: str + @param key: A 16 to 32 byte string. + + @type IV: object + @param IV: Ignored, whatever it is. + + @rtype: L{tlslite.utils.RC4} + @return: An RC4 object. 
+ """ + if implList == None: + implList = ["cryptlib", "openssl", "pycrypto", "python"] + + if len(IV) != 0: + raise AssertionError() + for impl in implList: + if impl == "cryptlib" and cryptomath.cryptlibpyLoaded: + return Cryptlib_RC4.new(key) + elif impl == "openssl" and cryptomath.m2cryptoLoaded: + return OpenSSL_RC4.new(key) + elif impl == "pycrypto" and cryptomath.pycryptoLoaded: + return PyCrypto_RC4.new(key) + elif impl == "python": + return Python_RC4.new(key) + raise NotImplementedError() + +#Create a new TripleDES instance +def createTripleDES(key, IV, implList=None): + """Create a new 3DES object. + + @type key: str + @param key: A 24 byte string. + + @type IV: str + @param IV: An 8 byte string + + @rtype: L{tlslite.utils.TripleDES} + @return: A 3DES object. + """ + if implList == None: + implList = ["cryptlib", "openssl", "pycrypto"] + + for impl in implList: + if impl == "cryptlib" and cryptomath.cryptlibpyLoaded: + return Cryptlib_TripleDES.new(key, 2, IV) + elif impl == "openssl" and cryptomath.m2cryptoLoaded: + return OpenSSL_TripleDES.new(key, 2, IV) + elif impl == "pycrypto" and cryptomath.pycryptoLoaded: + return PyCrypto_TripleDES.new(key, 2, IV) + raise NotImplementedError() \ No newline at end of file diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/codec.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/codec.py new file mode 100755 index 0000000..13022a0 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/codec.py @@ -0,0 +1,94 @@ +"""Classes for reading/writing binary data (such as TLS records).""" + +from compat import * + +class Writer: + def __init__(self, length=0): + #If length is zero, then this is just a "trial run" to determine length + self.index = 0 + self.bytes = createByteArrayZeros(length) + + def add(self, x, length): + if self.bytes: + newIndex = self.index+length-1 + while newIndex >= self.index: + self.bytes[newIndex] = x & 0xFF + x >>= 8 + newIndex -= 1 + self.index += length + + def addFixSeq(self, seq, length): + if 
class Parser:
    """Sequential reader over a byte sequence (TLS record parsing).

    All parse failures are reported as SyntaxError, matching the
    convention used by the surrounding TLS record code.
    """

    def __init__(self, bytes):
        self.bytes = bytes
        self.index = 0

    def get(self, length):
        """Read a big-endian unsigned integer occupying `length` bytes."""
        if self.index + length > len(self.bytes):
            raise SyntaxError()
        value = 0
        for _ in range(length):
            value = (value << 8) | self.bytes[self.index]
            self.index += 1
        return value

    def getFixBytes(self, lengthBytes):
        """Read a fixed-size run of raw bytes."""
        chunk = self.bytes[self.index : self.index + lengthBytes]
        self.index += lengthBytes
        return chunk

    def getVarBytes(self, lengthLength):
        """Read a length prefix, then that many raw bytes."""
        return self.getFixBytes(self.get(lengthLength))

    def getFixList(self, length, lengthList):
        """Read `lengthList` integers of `length` bytes each."""
        return [self.get(length) for _ in range(lengthList)]

    def getVarList(self, length, lengthLength):
        """Read a byte-count prefix, then a list of `length`-byte ints."""
        totalBytes = self.get(lengthLength)
        #Prefix must cover a whole number of elements.
        if totalBytes % length != 0:
            raise SyntaxError()
        return [self.get(length) for _ in range(totalBytes // length)]

    def startLengthCheck(self, lengthLength):
        """Read a length prefix and mark where the counted region begins."""
        self.lengthCheck = self.get(lengthLength)
        self.indexCheck = self.index

    def setLengthCheck(self, length):
        """Set an explicit expected length for the region starting here."""
        self.lengthCheck = length
        self.indexCheck = self.index

    def stopLengthCheck(self):
        """Verify exactly `lengthCheck` bytes were consumed since the mark."""
        if (self.index - self.indexCheck) != self.lengthCheck:
            raise SyntaxError()

    def atLengthCheck(self):
        """False before the region end, True exactly at it; error past it."""
        consumed = self.index - self.indexCheck
        if consumed < self.lengthCheck:
            return False
        if consumed == self.lengthCheck:
            return True
        raise SyntaxError()
class Set:
    """Minimal substitute for the Python 2.3 built-in set type
    (used only on pre-2.3 interpreters).

    Elements are stored as the keys of a dict; values are ignored.
    """

    def __init__(self, seq=None):
        self.values = {}
        if seq:
            for e in seq:
                self.values[e] = None

    def add(self, e):
        self.values[e] = None

    def discard(self, e):
        #Like set.discard: remove if present, silently ignore otherwise.
        if e in self.values.keys():
            del(self.values[e])

    def union(self, s):
        ret = Set()
        for e in self.values.keys():
            ret.values[e] = None
        for e in s.values.keys():
            ret.values[e] = None
        return ret

    def issubset(self, other):
        for e in self.values.keys():
            if e not in other.values.keys():
                return False
        return True

    def __nonzero__(self):
        return len(self.values.keys())

    def __contains__(self, e):
        return e in self.values.keys()

    def __iter__(self):
        #BUG FIX: the original returned iter(set.values.keys()), naming the
        #global/builtin 'set' instead of this instance, which raised
        #AttributeError whenever a Set was iterated.
        return iter(self.values.keys())
#Adjust the string to an array of bytes
def stringToJavaByteArray(s):
    """Convert a Python string into a Java byte[] (Jython only).

    Java bytes are signed, so ordinals >= 128 are folded into the
    negative half of the signed-byte range.
    """
    result = jarray.zeros(len(s), 'b')
    for position, ch in enumerate(s):
        value = ord(ch)
        if value >= 128:
            value -= 256  # map 128..255 onto -128..-1
        result[position] = value
    return result
#Try to load GMPY (optional bignum accelerator used by powMod);
#record whether it is available.
try:
    import gmpy
except ImportError:
    gmpyLoaded = False
else:
    gmpyLoaded = True
def bytesToNumber(bytes):
    """Interpret a byte sequence as a big-endian unsigned integer.

    An empty sequence yields 0.
    """
    total = 0
    for byte in bytes:
        total = (total * 256) + byte
    return total
def gcd(a,b):
    """Return the greatest common divisor of a and b (Euclid's algorithm)."""
    #Order the pair so the larger value leads.
    if a < b:
        a, b = b, a
    while b != 0:
        a, b = b, a % b
    return a
#Pre-calculate a sieve of the ~100 primes < 1000:
def makeSieve(n):
    """Return the list of primes below n (sieve of Eratosthenes).

    Fixes two defects in the original:
      * the outer loop ran to int(sqrt(n)) EXCLUSIVE, so squares of
        primes just below sqrt(n) were never struck out — e.g. for
        n=1000, 961 = 31*31 leaked into the "prime" list;
      * range(n) is materialized with list() so item assignment also
        works under Python 3.
    """
    sieve = list(range(n))
    #Must include int(sqrt(n)) itself, hence the +1.
    for count in range(2, int(math.sqrt(n)) + 1):
        if sieve[count] == 0:
            continue  # already struck out as composite
        x = sieve[count] * 2
        while x < len(sieve):
            sieve[x] = 0
            x += sieve[count]
    #Drop 0 and 1, and every zeroed (composite) entry.
    sieve = [x for x in sieve[2:] if x]
    return sieve

sieve = makeSieve(1000)
#Trial division with sieve + for x in sieve: + if x >= n: return True + if n % x == 0: return False + #Passed trial division, proceed to Rabin-Miller + #Rabin-Miller implemented per Ferguson & Schneier + #Compute s, t for Rabin-Miller + if display: print "*", + s, t = n-1, 0 + while s % 2 == 0: + s, t = s/2, t+1 + #Repeat Rabin-Miller x times + a = 2 #Use 2 as a base for first iteration speedup, per HAC + for count in range(iterations): + v = powMod(a, s, n) + if v==1: + continue + i = 0 + while v != n-1: + if i == t-1: + return False + else: + v, i = powMod(v, 2, n), i+1 + a = getRandomNumber(2, n) + return True + +def getRandomPrime(bits, display=False): + if bits < 10: + raise AssertionError() + #The 1.5 ensures the 2 MSBs are set + #Thus, when used for p,q in RSA, n will have its MSB set + # + #Since 30 is lcm(2,3,5), we'll set our test numbers to + #29 % 30 and keep them there + low = (2L ** (bits-1)) * 3/2 + high = 2L ** bits - 30 + p = getRandomNumber(low, high) + p += 29 - (p % 30) + while 1: + if display: print ".", + p += 30 + if p >= high: + p = getRandomNumber(low, high) + p += 29 - (p % 30) + if isPrime(p, display=display): + return p + +#Unused at the moment... 
#Functions for manipulating datetime objects
#CCYY-MM-DDThh:mm:ssZ
def parseDateClass(s):
    """Parse a 'CCYY-MM-DDThh:mm:ssZ' string into a platform date object."""
    year, month, rest = s.split("-")
    day, tail = rest[:2], rest[2:]
    hour, minute, second = tail[1:].split(":")
    second = second[:2]  # strip the trailing 'Z'
    return createDateClass(int(year), int(month), int(day),
                           int(hour), int(minute), int(second))


# CPython implementations (the module's jython branch provides java.util
# Calendar based equivalents for os.name == "java").
from datetime import datetime, timedelta

def createDateClass(year, month, day, hour, minute, second):
    """Build a naive datetime (treated as UTC by this module)."""
    return datetime(year, month, day, hour, minute, second)

def printDateClass(d):
    # ISO format without fractional seconds, with a 'Z' suffix for UTC
    return d.isoformat().split(".")[0] + "Z"

def getNow():
    """Current UTC time."""
    return datetime.utcnow()

def getHoursFromNow(hours):
    """UTC time `hours` hours in the future."""
    return datetime.utcnow() + timedelta(hours=hours)

def getMinutesFromNow(minutes):
    """UTC time `minutes` minutes in the future."""
    return datetime.utcnow() + timedelta(minutes=minutes)
def isDateClassExpired(d):
    """Return True when *d* lies strictly in the past (compared to UTC now)."""
    now = datetime.utcnow()
    return d < now

def isDateClassBefore(d1, d2):
    """Return True when *d1* is strictly earlier than *d2*."""
    return d1 < d2
I don't think it hurts anything.*/ +#define _WIN32_WINNT 0x0400 + +#include + + +typedef BOOL (WINAPI *CRYPTACQUIRECONTEXTA)(HCRYPTPROV *phProv,\ + LPCSTR pszContainer, LPCSTR pszProvider, DWORD dwProvType,\ + DWORD dwFlags ); +typedef BOOL (WINAPI *CRYPTGENRANDOM)(HCRYPTPROV hProv, DWORD dwLen,\ + BYTE *pbBuffer ); +typedef BOOL (WINAPI *CRYPTRELEASECONTEXT)(HCRYPTPROV hProv,\ + DWORD dwFlags); + + +static PyObject* entropy(PyObject *self, PyObject *args) +{ + int howMany = 0; + HINSTANCE hAdvAPI32 = NULL; + CRYPTACQUIRECONTEXTA pCryptAcquireContextA = NULL; + CRYPTGENRANDOM pCryptGenRandom = NULL; + CRYPTRELEASECONTEXT pCryptReleaseContext = NULL; + HCRYPTPROV hCryptProv = 0; + unsigned char* bytes = NULL; + PyObject* returnVal = NULL; + + + /* Read arguments */ + if (!PyArg_ParseTuple(args, "i", &howMany)) + return(NULL); + + /* Obtain handle to the DLL containing CryptoAPI + This should not fail */ + if( (hAdvAPI32 = GetModuleHandle("advapi32.dll")) == NULL) { + PyErr_Format(PyExc_SystemError, + "Advapi32.dll not found"); + return NULL; + } + + /* Obtain pointers to the CryptoAPI functions + This will fail on some early version of Win95 */ + pCryptAcquireContextA = (CRYPTACQUIRECONTEXTA)GetProcAddress(hAdvAPI32,\ + "CryptAcquireContextA"); + pCryptGenRandom = (CRYPTGENRANDOM)GetProcAddress(hAdvAPI32,\ + "CryptGenRandom"); + pCryptReleaseContext = (CRYPTRELEASECONTEXT) GetProcAddress(hAdvAPI32,\ + "CryptReleaseContext"); + if (pCryptAcquireContextA == NULL || pCryptGenRandom == NULL || + pCryptReleaseContext == NULL) { + PyErr_Format(PyExc_NotImplementedError, + "CryptoAPI not available on this version of Windows"); + return NULL; + } + + /* Allocate bytes */ + if ((bytes = (unsigned char*)PyMem_Malloc(howMany)) == NULL) + return PyErr_NoMemory(); + + + /* Acquire context */ + if(!pCryptAcquireContextA(&hCryptProv, NULL, NULL, PROV_RSA_FULL, + CRYPT_VERIFYCONTEXT)) { + PyErr_Format(PyExc_SystemError, + "CryptAcquireContext failed, error %d", GetLastError()); + 
PyMem_Free(bytes); + return NULL; + } + + /* Get random data */ + if(!pCryptGenRandom(hCryptProv, howMany, bytes)) { + PyErr_Format(PyExc_SystemError, + "CryptGenRandom failed, error %d", GetLastError()); + PyMem_Free(bytes); + CryptReleaseContext(hCryptProv, 0); + return NULL; + } + + /* Build return value */ + returnVal = Py_BuildValue("s#", bytes, howMany); + PyMem_Free(bytes); + + /* Release context */ + if (!pCryptReleaseContext(hCryptProv, 0)) { + PyErr_Format(PyExc_SystemError, + "CryptReleaseContext failed, error %d", GetLastError()); + return NULL; + } + + return returnVal; +} + +#elif defined(HAVE_UNISTD_H) && defined(HAVE_FCNTL_H) + +#include +#include + +static PyObject* entropy(PyObject *self, PyObject *args) +{ + int howMany; + int fd; + unsigned char* bytes = NULL; + PyObject* returnVal = NULL; + + + /* Read arguments */ + if (!PyArg_ParseTuple(args, "i", &howMany)) + return(NULL); + + /* Allocate bytes */ + if ((bytes = (unsigned char*)PyMem_Malloc(howMany)) == NULL) + return PyErr_NoMemory(); + + /* Open device */ + if ((fd = open("/dev/urandom", O_RDONLY, 0)) == -1) { + PyErr_Format(PyExc_NotImplementedError, + "No entropy source found"); + PyMem_Free(bytes); + return NULL; + } + + /* Get random data */ + if (read(fd, bytes, howMany) < howMany) { + PyErr_Format(PyExc_SystemError, + "Reading from /dev/urandom failed"); + PyMem_Free(bytes); + close(fd); + return NULL; + } + + /* Build return value */ + returnVal = Py_BuildValue("s#", bytes, howMany); + PyMem_Free(bytes); + + /* Close device */ + close(fd); + + return returnVal; +} + +#else + +static PyObject* entropy(PyObject *self, PyObject *args) +{ + PyErr_Format(PyExc_NotImplementedError, + "Function not supported"); + return NULL; +} + +#endif + + + +/* List of functions exported by this module */ + +static struct PyMethodDef entropy_functions[] = { + {"entropy", (PyCFunction)entropy, METH_VARARGS, "Return a string of random bytes produced by a platform-specific\nentropy source."}, + {NULL, 
"""HMAC (Keyed-Hashing for Message Authentication) Python module.

Implements the HMAC algorithm as described by RFC 2104.

(This file is modified from the standard library version to do faster
copying)
"""

def _strxor(s1, s2):
    """Utility method. XOR the two strings s1 and s2 (must have same length).
    """
    return "".join(map(lambda x, y: chr(ord(x) ^ ord(y)), s1, s2))

# The size of the digests returned by HMAC depends on the underlying
# hashing module used.
digest_size = None

class HMAC:
    """RFC2104 HMAC class.

    This supports the API for Cryptographic Hash Functions (PEP 247).
    """

    def __init__(self, key, msg = None, digestmod = None):
        """Create a new HMAC object.

        key: key for the keyed hash object, or None to create the empty
             shell that copy() fills in (the TREVNEW fast-copy path).
        msg: Initial input for the hash, if provided.
        digestmod: A module supporting PEP 247. Defaults to the md5 module.
        """
        # Fixed: was `key == None`; `is None` is the correct identity test.
        # Checking it before the digestmod default also avoids a pointless
        # `import md5` when only building the copy() shell.
        if key is None:  #TREVNEW - for faster copying
            return       #TREVNEW

        if digestmod is None:
            import md5   # legacy Python 2 default hash module
            digestmod = md5

        self.digestmod = digestmod
        self.outer = digestmod.new()
        self.inner = digestmod.new()
        self.digest_size = digestmod.digest_size

        blocksize = 64   # HMAC block size for MD5/SHA-1, per RFC 2104
        ipad = "\x36" * blocksize
        opad = "\x5C" * blocksize

        if len(key) > blocksize:
            # keys longer than one block are first hashed down
            key = digestmod.new(key).digest()

        key = key + chr(0) * (blocksize - len(key))
        self.outer.update(_strxor(key, opad))
        self.inner.update(_strxor(key, ipad))
        if msg is not None:
            self.update(msg)

    def update(self, msg):
        """Update this hashing object with the string msg.
        """
        self.inner.update(msg)

    def copy(self):
        """Return a separate copy of this hashing object.

        An update to this copy won't affect the original object.
        """
        other = HMAC(None)  #TREVNEW - for faster copying
        other.digest_size = self.digest_size  #TREVNEW
        other.digestmod = self.digestmod
        other.inner = self.inner.copy()
        other.outer = self.outer.copy()
        return other

    def digest(self):
        """Return the hash value of this hashing object.

        This returns a string containing 8-bit data.  The object is
        not altered in any way by this function; you can continue
        updating the object after calling this function.
        """
        h = self.outer.copy()
        h.update(self.inner.digest())
        return h.digest()

    def hexdigest(self):
        """Like digest(), but returns a string of hexadecimal digits instead.
        """
        return "".join([hex(ord(x))[2:].zfill(2)
                        for x in tuple(self.digest())])

def new(key, msg = None, digestmod = None):
    """Create a new hashing object and return it.

    key: The starting key for the hash.
    msg: if available, will immediately be hashed into the object's starting
    state.

    You can now feed arbitrary strings into the object using its update()
    method, and can ask for the hash value at any time by calling its digest()
    method.
    """
    return HMAC(key, msg, digestmod)
"""Miscellaneous functions to mask Python/Jython differences (CPython side)."""

import os
import array
import math
import sys
import traceback

# Pre-2.5 compatibility alias used throughout tlslite (`except BaseException`).
BaseException = Exception

# sets.Set for Python 2.3+; fall back to the builtin on interpreters where
# the `sets` module no longer exists.  Behavior on Python 2 is unchanged.
try:
    from sets import Set
except ImportError:
    Set = set

def createByteArraySequence(seq):
    """Unsigned-byte array built from an iterable of ints 0-255."""
    return array.array('B', seq)

def createByteArrayZeros(howMany):
    """Unsigned-byte array of `howMany` zeros."""
    return array.array('B', [0] * howMany)

def concatArrays(a1, a2):
    """Concatenation of two byte arrays."""
    return a1 + a2

def bytesToString(bytes):
    # NOTE(review): array.tostring() was removed in Python 3.9 (tobytes);
    # kept as-is because this module targets Python 2 — confirm before reuse.
    return bytes.tostring()

def stringToBytes(s):
    """Convert an 8-bit string into a byte array."""
    bytes = createByteArrayZeros(0)
    bytes.fromstring(s)  # see note above: py2-era array API
    return bytes

def numBits(n):
    """Number of significant bits in non-negative integer n (0 -> 0).

    Fixed: the original used math.floor(math.log(n, 2)) + 1, which can give
    a wrong answer for large n because of float rounding; counting hex
    digits is exact for arbitrary-precision ints.
    """
    if n == 0:
        return 0
    digits = "%x" % n
    # bits contributed by the (non-zero) leading hex digit, plus 4 per rest
    leading = {'1': 1, '2': 2, '3': 2, '4': 3, '5': 3, '6': 3, '7': 3}
    return (len(digits) - 1) * 4 + leading.get(digits[0], 4)

# Marker base classes specialized elsewhere in the package.
class CertChainBase: pass
class SelfTestBase: pass
class ReportFuncBase: pass

#Helper functions for working with sets (from Python 2.3)
def iterSet(set):
    return iter(set)

def getListFromSet(set):
    return list(set)

def getSHA1(s):
    """Factory function for getting a SHA1 object."""
    # The py2-only `sha` module is imported lazily so that merely importing
    # this compat module never fails; module caching keeps this cheap.
    import sha
    return sha.sha(s)

def formatExceptionTrace(e):
    """Render the exception currently being handled as a traceback string."""
    # sys.exc_info() is the documented API and works on both Python 2 and 3
    # (the original read the py2-only sys.exc_type/exc_value/exc_traceback).
    exc_type, exc_value, exc_tb = sys.exc_info()
    return "".join(traceback.format_exception(exc_type, exc_value, exc_tb))
class Set:
    """Minimal stand-in for the Python 2.3 sets.Set type (Jython 2.1 has
    neither the builtin set nor the sets module).

    Elements are stored as dictionary keys; the values are always None.
    """

    def __init__(self, seq=None):
        self.values = {}
        if seq:
            for item in seq:
                self.values[item] = None

    def add(self, item):
        """Insert item (a no-op if already present)."""
        self.values[item] = None

    def discard(self, item):
        """Remove item if present; silently ignore it otherwise."""
        if item in self.values.keys():
            del(self.values[item])

    def union(self, other):
        """Return a new Set holding every element of self and other."""
        result = Set()
        for item in self.values.keys():
            result.values[item] = None
        for item in other.values.keys():
            result.values[item] = None
        return result

    def issubset(self, other):
        """True when every element of self is also in other."""
        for item in self.values.keys():
            if item not in other.values.keys():
                return False
        return True

    def __nonzero__(self):
        return len(self.values.keys())

    def __contains__(self, item):
        return item in self.values.keys()

def iterSet(set):
    return set.values.keys()

def getListFromSet(set):
    return set.values.keys()
#Factory function for getting a SHA1 object.
#The JCE_SHA1 class (kept above, commented out) was far too slow; jython's
#patched sha module is used instead.
def getSHA1(s):
    #return JCE_SHA1(s)
    return sha.sha(s)


def stringToJavaByteArray(s):
    """Convert a python string into a java signed-byte array."""
    result = jarray.zeros(len(s), 'b')
    for index, ch in enumerate(s):
        value = ord(ch)
        if value >= 128:
            value -= 256  # java bytes are signed: map 128..255 to -128..-1
        result[index] = value
    return result

import sys
import traceback

def formatExceptionTrace(e):
    """Render the current exception's traceback as a string (py2 globals)."""
    newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
    return newStr
def generateRSAKey(bits, implementations=["openssl", "python"]):
    """Generate an RSA key with the specified bit length.

    @type bits: int
    @param bits: Desired bit length of the new key's modulus.

    @rtype: L{tlslite.utils.RSAKey.RSAKey}
    @return: A new RSA private key.
    """
    # Try each requested backend in order; the first available one wins.
    for choice in implementations:
        if choice == "openssl" and cryptomath.m2cryptoLoaded:
            return OpenSSL_RSAKey.generate(bits)
        if choice == "python":
            return Python_RSAKey.generate(bits)
    raise ValueError("No acceptable implementations")

    5PreIj6z6ldIGL1V4+1C36dQFHNCQHJvW52GXc... + /E/wDit8YXPCxx126zTq2ilQ3IcW54NJYyNjiZ... + mKc+wX8inDowEH45Qp4slRo1YveBgExKPROu6... + qDVKtBz9lk0shL5PR3ickXDgkwS576zbl2ztB... + j6E8EA7dNsTImaXexAmLA1DoeArsYeFAInr... + + + @type s: str + @param s: A string containing an XML public or private key. + + @type private: bool + @param private: If True, a L{SyntaxError} will be raised if the private + key component is not present. + + @type public: bool + @param public: If True, the private key component (if present) will be + discarded, so this function will always return a public key. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: An RSA key. + + @raise SyntaxError: If the key is not properly formatted. + """ + for implementation in implementations: + if implementation == "python": + key = Python_RSAKey.parseXML(s) + break + else: + raise ValueError("No acceptable implementations") + + return _parseKeyHelper(key, private, public) + +#Parse as an OpenSSL or Python key +def parsePEMKey(s, private=False, public=False, passwordCallback=None, + implementations=["openssl", "python"]): + """Parse a PEM-format key. + + The PEM format is used by OpenSSL and other tools. The + format is typically used to store both the public and private + components of a key. 
For example:: + + -----BEGIN RSA PRIVATE KEY----- + MIICXQIBAAKBgQDYscuoMzsGmW0pAYsmyHltxB2TdwHS0dImfjCMfaSDkfLdZY5+ + dOWORVns9etWnr194mSGA1F0Pls/VJW8+cX9+3vtJV8zSdANPYUoQf0TP7VlJxkH + dSRkUbEoz5bAAs/+970uos7n7iXQIni+3erUTdYEk2iWnMBjTljfgbK/dQIDAQAB + AoGAJHoJZk75aKr7DSQNYIHuruOMdv5ZeDuJvKERWxTrVJqE32/xBKh42/IgqRrc + esBN9ZregRCd7YtxoL+EVUNWaJNVx2mNmezEznrc9zhcYUrgeaVdFO2yBF1889zO + gCOVwrO8uDgeyj6IKa25H6c1N13ih/o7ZzEgWbGG+ylU1yECQQDv4ZSJ4EjSh/Fl + aHdz3wbBa/HKGTjC8iRy476Cyg2Fm8MZUe9Yy3udOrb5ZnS2MTpIXt5AF3h2TfYV + VoFXIorjAkEA50FcJmzT8sNMrPaV8vn+9W2Lu4U7C+K/O2g1iXMaZms5PC5zV5aV + CKXZWUX1fq2RaOzlbQrpgiolhXpeh8FjxwJBAOFHzSQfSsTNfttp3KUpU0LbiVvv + i+spVSnA0O4rq79KpVNmK44Mq67hsW1P11QzrzTAQ6GVaUBRv0YS061td1kCQHnP + wtN2tboFR6lABkJDjxoGRvlSt4SOPr7zKGgrWjeiuTZLHXSAnCY+/hr5L9Q3ZwXG + 6x6iBdgLjVIe4BZQNtcCQQDXGv/gWinCNTN3MPWfTW/RGzuMYVmyBFais0/VrgdH + h1dLpztmpQqfyH/zrBXQ9qL/zR4ojS6XYneO/U18WpEe + -----END RSA PRIVATE KEY----- + + To generate a key like this with OpenSSL, run:: + + openssl genrsa 2048 > key.pem + + This format also supports password-encrypted private keys. TLS + Lite can only handle password-encrypted private keys when OpenSSL + and M2Crypto are installed. In this case, passwordCallback will be + invoked to query the user for the password. + + @type s: str + @param s: A string containing a PEM-encoded public or private key. + + @type private: bool + @param private: If True, a L{SyntaxError} will be raised if the + private key component is not present. + + @type public: bool + @param public: If True, the private key component (if present) will + be discarded, so this function will always return a public key. + + @type passwordCallback: callable + @param passwordCallback: This function will be called, with no + arguments, if the PEM-encoded private key is password-encrypted. + The callback should return the password string. If the password is + incorrect, SyntaxError will be raised. 
If no callback is passed + and the key is password-encrypted, a prompt will be displayed at + the console. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: An RSA key. + + @raise SyntaxError: If the key is not properly formatted. + """ + for implementation in implementations: + if implementation == "openssl" and cryptomath.m2cryptoLoaded: + key = OpenSSL_RSAKey.parse(s, passwordCallback) + break + elif implementation == "python": + key = Python_RSAKey.parsePEM(s) + break + else: + raise ValueError("No acceptable implementations") + + return _parseKeyHelper(key, private, public) + + +def _parseKeyHelper(key, private, public): + if private: + if not key.hasPrivateKey(): + raise SyntaxError("Not a private key!") + + if public: + return _createPublicKey(key) + + if private: + if hasattr(key, "d"): + return _createPrivateKey(key) + else: + return key + + return key + +def parseAsPublicKey(s): + """Parse an XML or PEM-formatted public key. + + @type s: str + @param s: A string containing an XML or PEM-encoded public or private key. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: An RSA public key. + + @raise SyntaxError: If the key is not properly formatted. + """ + try: + return parsePEMKey(s, public=True) + except: + return parseXMLKey(s, public=True) + +def parsePrivateKey(s): + """Parse an XML or PEM-formatted private key. + + @type s: str + @param s: A string containing an XML or PEM-encoded private key. + + @rtype: L{tlslite.utils.RSAKey.RSAKey} + @return: An RSA private key. + + @raise SyntaxError: If the key is not properly formatted. + """ + try: + return parsePEMKey(s, private=True) + except: + return parseXMLKey(s, private=True) + +def _createPublicKey(key): + """ + Create a new public key. Discard any private component, + and return the most efficient key possible. + """ + if not isinstance(key, RSAKey): + raise AssertionError() + return _createPublicRSAKey(key.n, key.e) + +def _createPrivateKey(key): + """ + Create a new private key. 
def _createPrivateKey(key):
    """Return the most efficient private-key object equivalent to *key*."""
    if not isinstance(key, RSAKey):
        raise AssertionError()
    if not key.hasPrivateKey():
        raise AssertionError()
    return _createPrivateRSAKey(key.n, key.e, key.d, key.p, key.q, key.dP,
                                key.dQ, key.qInv)

def _createPublicRSAKey(n, e, implementations = ["openssl", "pycrypto",
                                                 "python"]):
    """Instantiate a public RSA key with the first available backend."""
    for backend in implementations:
        if backend == "openssl" and cryptomath.m2cryptoLoaded:
            return OpenSSL_RSAKey(n, e)
        if backend == "pycrypto" and cryptomath.pycryptoLoaded:
            return PyCrypto_RSAKey(n, e)
        if backend == "python":
            return Python_RSAKey(n, e)
    raise ValueError("No acceptable implementations")

def _createPrivateRSAKey(n, e, d, p, q, dP, dQ, qInv,
                         implementations = ["pycrypto", "python"]):
    """Instantiate a private RSA key with the first available backend."""
    for backend in implementations:
        if backend == "pycrypto" and cryptomath.pycryptoLoaded:
            return PyCrypto_RSAKey(n, e, d, p, q, dP, dQ, qInv)
        if backend == "python":
            return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
    raise ValueError("No acceptable implementations")
# ported from the Java reference code by Bram Cohen, bram@gawth.com, April 2001
# this code is public domain, unless someone makes
# an intellectual property claim against the reference
# code, in which case it can be made public domain by
# deleting all the comments and renaming all the variables

import copy
import string   # retained for API compatibility; no longer used directly

#-----------------------
#TREV - ADDED BECAUSE THERE'S WARNINGS ABOUT INT OVERFLOW BEHAVIOR CHANGING IN
#2.4.....
import os
if os.name != "java":
    try:
        import exceptions
        if hasattr(exceptions, "FutureWarning"):
            import warnings
            warnings.filterwarnings("ignore", category=FutureWarning, append=1)
    except ImportError:
        pass  # Python 3: the `exceptions` module (and the warning) are gone
#-----------------------

shifts = [[[0, 0], [1, 3], [2, 2], [3, 1]],
          [[0, 0], [1, 5], [2, 4], [3, 3]],
          [[0, 0], [1, 7], [3, 5], [4, 4]]]

# [keysize][block_size]
num_rounds = {16: {16: 10, 24: 12, 32: 14},
              24: {16: 12, 24: 12, 32: 14},
              32: {16: 14, 24: 14, 32: 14}}

A = [[1, 1, 1, 1, 1, 0, 0, 0],
     [0, 1, 1, 1, 1, 1, 0, 0],
     [0, 0, 1, 1, 1, 1, 1, 0],
     [0, 0, 0, 1, 1, 1, 1, 1],
     [1, 0, 0, 0, 1, 1, 1, 1],
     [1, 1, 0, 0, 0, 1, 1, 1],
     [1, 1, 1, 0, 0, 0, 1, 1],
     [1, 1, 1, 1, 0, 0, 0, 1]]

# produce log and alog tables, needed for multiplying in the
# field GF(2^m) (generator = 3)
alog = [1]
for i in range(255):
    j = (alog[-1] << 1) ^ alog[-1]
    if j & 0x100 != 0:
        j ^= 0x11B
    alog.append(j)

log = [0] * 256
for i in range(1, 255):
    log[alog[i]] = i

# multiply two elements of GF(2^m)
def mul(a, b):
    if a == 0 or b == 0:
        return 0
    return alog[(log[a & 0xFF] + log[b & 0xFF]) % 255]

# substitution box based on F^{-1}(x)
box = [[0] * 8 for i in range(256)]
box[1][7] = 1
for i in range(2, 256):
    j = alog[255 - log[i]]
    for t in range(8):
        box[i][t] = (j >> (7 - t)) & 0x01

B = [0, 1, 1, 0, 0, 0, 1, 1]

# affine transform: box[i] <- B + A*box[i]
cox = [[0] * 8 for i in range(256)]
for i in range(256):
    for t in range(8):
        cox[i][t] = B[t]
        for j in range(8):
            cox[i][t] ^= A[t][j] * box[i][j]

# S-boxes and inverse S-boxes
S = [0] * 256
Si = [0] * 256
for i in range(256):
    S[i] = cox[i][0] << 7
    for t in range(1, 8):
        S[i] ^= cox[i][t] << (7 - t)
    Si[S[i] & 0xFF] = i

# T-boxes
G = [[2, 1, 1, 3],
     [3, 2, 1, 1],
     [1, 3, 2, 1],
     [1, 1, 3, 2]]

AA = [[0] * 8 for i in range(4)]

for i in range(4):
    for j in range(4):
        AA[i][j] = G[i][j]
    AA[i][i + 4] = 1

# invert G by Gaussian elimination over GF(2^8)
for i in range(4):
    pivot = AA[i][i]
    if pivot == 0:
        t = i + 1
        while AA[t][i] == 0 and t < 4:
            t += 1
        assert t != 4, 'G matrix must be invertible'
        for j in range(8):
            AA[i][j], AA[t][j] = AA[t][j], AA[i][j]
        pivot = AA[i][i]
    for j in range(8):
        if AA[i][j] != 0:
            AA[i][j] = alog[(255 + log[AA[i][j] & 0xFF] - log[pivot & 0xFF]) % 255]
    for t in range(4):
        if i != t:
            for j in range(i + 1, 8):
                AA[t][j] ^= mul(AA[i][j], AA[t][i])
            AA[t][i] = 0

iG = [[0] * 4 for i in range(4)]
for i in range(4):
    for j in range(4):
        iG[i][j] = AA[i][j + 4]

def mul4(a, bs):
    # multiply GF element a against each entry of 4-vector bs, packing the
    # four products into one 32-bit int
    if a == 0:
        return 0
    r = 0
    for b in bs:
        r <<= 8
        if b != 0:
            r = r | mul(a, b)
    return r

T1 = []
T2 = []
T3 = []
T4 = []
T5 = []
T6 = []
T7 = []
T8 = []
U1 = []
U2 = []
U3 = []
U4 = []

for t in range(256):
    s = S[t]
    T1.append(mul4(s, G[0]))
    T2.append(mul4(s, G[1]))
    T3.append(mul4(s, G[2]))
    T4.append(mul4(s, G[3]))

    s = Si[t]
    T5.append(mul4(s, iG[0]))
    T6.append(mul4(s, iG[1]))
    T7.append(mul4(s, iG[2]))
    T8.append(mul4(s, iG[3]))

    U1.append(mul4(t, iG[0]))
    U2.append(mul4(t, iG[1]))
    U3.append(mul4(t, iG[2]))
    U4.append(mul4(t, iG[3]))

# round constants
rcon = [1]
r = 1
for t in range(1, 30):
    r = mul(2, r)
    rcon.append(r)

# drop the scaffolding used only to build the tables
del A
del AA
del pivot
del B
del G
del box
del log
del alog
del i
del j
del r
del s
del t
del mul
del mul4
del cox
del iG

class rijndael:
    """Rijndael/AES block cipher.

    key is a str of 8-bit characters, length 16, 24 or 32; block_size must
    be 16, 24 or 32 (16 is AES).  Changes vs. the original: Py2-only `/`
    became `//` (identical on ints), `xrange` became `range`, and
    `string.join` became `''.join` — all behavior-preserving.
    """

    def __init__(self, key, block_size = 16):
        if block_size != 16 and block_size != 24 and block_size != 32:
            raise ValueError('Invalid block size: ' + str(block_size))
        if len(key) != 16 and len(key) != 24 and len(key) != 32:
            raise ValueError('Invalid key size: ' + str(len(key)))
        self.block_size = block_size

        ROUNDS = num_rounds[len(key)][block_size]
        BC = block_size // 4
        # encryption round keys
        Ke = [[0] * BC for i in range(ROUNDS + 1)]
        # decryption round keys
        Kd = [[0] * BC for i in range(ROUNDS + 1)]
        ROUND_KEY_COUNT = (ROUNDS + 1) * BC
        KC = len(key) // 4

        # copy user material bytes into temporary ints
        tk = []
        for i in range(0, KC):
            tk.append((ord(key[i * 4]) << 24) | (ord(key[i * 4 + 1]) << 16) |
                      (ord(key[i * 4 + 2]) << 8) | ord(key[i * 4 + 3]))

        # copy values into round key arrays
        t = 0
        j = 0
        while j < KC and t < ROUND_KEY_COUNT:
            Ke[t // BC][t % BC] = tk[j]
            Kd[ROUNDS - (t // BC)][t % BC] = tk[j]
            j += 1
            t += 1
        tt = 0
        rconpointer = 0
        while t < ROUND_KEY_COUNT:
            # extrapolate using phi (the round key evolution function)
            tt = tk[KC - 1]
            tk[0] ^= (S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ \
                     (S[(tt >>  8) & 0xFF] & 0xFF) << 16 ^ \
                     (S[ tt        & 0xFF] & 0xFF) <<  8 ^ \
                     (S[(tt >> 24) & 0xFF] & 0xFF)       ^ \
                     (rcon[rconpointer]    & 0xFF) << 24
            rconpointer += 1
            if KC != 8:
                for i in range(1, KC):
                    tk[i] ^= tk[i - 1]
            else:
                # 256-bit keys get an extra S-box pass halfway through
                for i in range(1, KC // 2):
                    tk[i] ^= tk[i - 1]
                tt = tk[KC // 2 - 1]
                tk[KC // 2] ^= (S[ tt        & 0xFF] & 0xFF)       ^ \
                               (S[(tt >>  8) & 0xFF] & 0xFF) <<  8 ^ \
                               (S[(tt >> 16) & 0xFF] & 0xFF) << 16 ^ \
                               (S[(tt >> 24) & 0xFF] & 0xFF) << 24
                for i in range(KC // 2 + 1, KC):
                    tk[i] ^= tk[i - 1]
            # copy values into round key arrays
            j = 0
            while j < KC and t < ROUND_KEY_COUNT:
                Ke[t // BC][t % BC] = tk[j]
                Kd[ROUNDS - (t // BC)][t % BC] = tk[j]
                j += 1
                t += 1
        # inverse MixColumn where needed
        for r in range(1, ROUNDS):
            for j in range(BC):
                tt = Kd[r][j]
                Kd[r][j] = U1[(tt >> 24) & 0xFF] ^ \
                           U2[(tt >> 16) & 0xFF] ^ \
                           U3[(tt >>  8) & 0xFF] ^ \
                           U4[ tt        & 0xFF]
        self.Ke = Ke
        self.Kd = Kd

    def encrypt(self, plaintext):
        """Encrypt one block; len(plaintext) must equal block_size."""
        if len(plaintext) != self.block_size:
            raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(plaintext)))
        Ke = self.Ke

        BC = self.block_size // 4
        ROUNDS = len(Ke) - 1
        if BC == 4:
            SC = 0
        elif BC == 6:
            SC = 1
        else:
            SC = 2
        s1 = shifts[SC][1][0]
        s2 = shifts[SC][2][0]
        s3 = shifts[SC][3][0]
        a = [0] * BC
        # temporary work array
        t = []
        # plaintext to ints + key
        for i in range(BC):
            t.append((ord(plaintext[i * 4    ]) << 24 |
                      ord(plaintext[i * 4 + 1]) << 16 |
                      ord(plaintext[i * 4 + 2]) <<  8 |
                      ord(plaintext[i * 4 + 3])        ) ^ Ke[0][i])
        # apply round transforms
        for r in range(1, ROUNDS):
            for i in range(BC):
                a[i] = (T1[(t[ i           ] >> 24) & 0xFF] ^
                        T2[(t[(i + s1) % BC] >> 16) & 0xFF] ^
                        T3[(t[(i + s2) % BC] >>  8) & 0xFF] ^
                        T4[ t[(i + s3) % BC]        & 0xFF]) ^ Ke[r][i]
            t = copy.copy(a)
        # last round is special
        result = []
        for i in range(BC):
            tt = Ke[ROUNDS][i]
            result.append((S[(t[ i           ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
            result.append((S[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
            result.append((S[(t[(i + s2) % BC] >>  8) & 0xFF] ^ (tt >>  8)) & 0xFF)
            result.append((S[ t[(i + s3) % BC]        & 0xFF] ^  tt       ) & 0xFF)
        # ''.join replaces the removed-in-py3 string.join; identical output
        return ''.join(map(chr, result))

    def decrypt(self, ciphertext):
        """Decrypt one block; len(ciphertext) must equal block_size."""
        if len(ciphertext) != self.block_size:
            # BUG FIX: the original formatted this message with the undefined
            # name `plaintext`, raising NameError instead of ValueError.
            raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(ciphertext)))
        Kd = self.Kd

        BC = self.block_size // 4
        ROUNDS = len(Kd) - 1
        if BC == 4:
            SC = 0
        elif BC == 6:
            SC = 1
        else:
            SC = 2
        s1 = shifts[SC][1][1]
        s2 = shifts[SC][2][1]
        s3 = shifts[SC][3][1]
        a = [0] * BC
        # temporary work array
        t = [0] * BC
        # ciphertext to ints + key
        for i in range(BC):
            t[i] = (ord(ciphertext[i * 4    ]) << 24 |
                    ord(ciphertext[i * 4 + 1]) << 16 |
                    ord(ciphertext[i * 4 + 2]) <<  8 |
                    ord(ciphertext[i * 4 + 3])        ) ^ Kd[0][i]
        # apply round transforms
        for r in range(1, ROUNDS):
            for i in range(BC):
                a[i] = (T5[(t[ i           ] >> 24) & 0xFF] ^
                        T6[(t[(i + s1) % BC] >> 16) & 0xFF] ^
                        T7[(t[(i + s2) % BC] >>  8) & 0xFF] ^
                        T8[ t[(i + s3) % BC]        & 0xFF]) ^ Kd[r][i]
            t = copy.copy(a)
        # last round is special
        result = []
        for i in range(BC):
            tt = Kd[ROUNDS][i]
            result.append((Si[(t[ i           ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
            result.append((Si[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
            result.append((Si[(t[(i + s2) % BC] >>  8) & 0xFF] ^ (tt >>  8)) & 0xFF)
            result.append((Si[ t[(i + s3) % BC]        & 0xFF] ^  tt       ) & 0xFF)
        return ''.join(map(chr, result))


def encrypt(key, block):
    """One-shot helper: encrypt a single block with a fresh key schedule."""
    return rijndael(key, len(block)).encrypt(block)

def decrypt(key, block):
    """One-shot helper: decrypt a single block with a fresh key schedule."""
    return rijndael(key, len(block)).decrypt(block)

def test():
    """Round-trip self-test across all key/block size combinations."""
    def t(kl, bl):
        b = 'b' * bl
        r = rijndael('a' * kl, bl)
        assert r.decrypt(r.encrypt(b)) == b
    t(16, 16)
    t(16, 24)
    t(16, 32)
    t(24, 16)
    t(24, 24)
    t(24, 32)
    t(32, 16)
    t(32, 24)
    t(32, 32)
if(CryptGenRandom( + hCryptProv, + howMany, + bytes) == 0) + returnVal = Py_BuildValue("s#", NULL, 0); + else + returnVal = Py_BuildValue("s#", bytes, howMany); + + free(bytes); + CryptReleaseContext(hCryptProv, 0); + + return returnVal; +} + + + +/* List of functions exported by this module */ + +static struct PyMethodDef win32prng_functions[] = { + {"getRandomBytes", (PyCFunction)getRandomBytes, METH_VARARGS}, + {NULL, NULL} /* Sentinel */ +}; + + +/* Initialize this module. */ + +DL_EXPORT(void) initwin32prng(void) +{ + Py_InitModule("win32prng", win32prng_functions); +} diff --git a/gdata.py-1.2.3/src/gdata/tlslite/utils/xmltools.py b/gdata.py-1.2.3/src/gdata/tlslite/utils/xmltools.py new file mode 100755 index 0000000..06f2e43 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/tlslite/utils/xmltools.py @@ -0,0 +1,201 @@ +"""Helper functions for XML. + +This module has misc. helper functions for working with XML DOM nodes.""" + +import re +from compat import * + +import os +if os.name != "java": + from xml.dom import minidom + from xml.sax import saxutils + + def parseDocument(s): + return minidom.parseString(s) +else: + from javax.xml.parsers import * + import java + + builder = DocumentBuilderFactory.newInstance().newDocumentBuilder() + + def parseDocument(s): + stream = java.io.ByteArrayInputStream(java.lang.String(s).getBytes()) + return builder.parse(stream) + +def parseAndStripWhitespace(s): + try: + element = parseDocument(s).documentElement + except BaseException, e: + raise SyntaxError(str(e)) + stripWhitespace(element) + return element + +#Goes through a DOM tree and removes whitespace besides child elements, +#as long as this whitespace is correctly tab-ified +def stripWhitespace(element, tab=0): + element.normalize() + + lastSpacer = "\n" + ("\t"*tab) + spacer = lastSpacer + "\t" + + #Zero children aren't allowed (i.e. 
) + #This makes writing output simpler, and matches Canonical XML + if element.childNodes.length==0: #DON'T DO len(element.childNodes) - doesn't work in Jython + raise SyntaxError("Empty XML elements not allowed") + + #If there's a single child, it must be text context + if element.childNodes.length==1: + if element.firstChild.nodeType == element.firstChild.TEXT_NODE: + #If it's an empty element, remove + if element.firstChild.data == lastSpacer: + element.removeChild(element.firstChild) + return + #If not text content, give an error + elif element.firstChild.nodeType == element.firstChild.ELEMENT_NODE: + raise SyntaxError("Bad whitespace under '%s'" % element.tagName) + else: + raise SyntaxError("Unexpected node type in XML document") + + #Otherwise there's multiple child element + child = element.firstChild + while child: + if child.nodeType == child.ELEMENT_NODE: + stripWhitespace(child, tab+1) + child = child.nextSibling + elif child.nodeType == child.TEXT_NODE: + if child == element.lastChild: + if child.data != lastSpacer: + raise SyntaxError("Bad whitespace under '%s'" % element.tagName) + elif child.data != spacer: + raise SyntaxError("Bad whitespace under '%s'" % element.tagName) + next = child.nextSibling + element.removeChild(child) + child = next + else: + raise SyntaxError("Unexpected node type in XML document") + + +def checkName(element, name): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Missing element: '%s'" % name) + + if name == None: + return + + if element.tagName != name: + raise SyntaxError("Wrong element name: should be '%s', is '%s'" % (name, element.tagName)) + +def getChild(element, index, name=None): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getChild()") + + child = element.childNodes.item(index) + if child == None: + raise SyntaxError("Missing child: '%s'" % name) + checkName(child, name) + return child + +def getChildIter(element, index): + class ChildIter: + def 
__init__(self, element, index): + self.element = element + self.index = index + + def next(self): + if self.index < len(self.element.childNodes): + retVal = self.element.childNodes.item(self.index) + self.index += 1 + else: + retVal = None + return retVal + + def checkEnd(self): + if self.index != len(self.element.childNodes): + raise SyntaxError("Too many elements under: '%s'" % self.element.tagName) + return ChildIter(element, index) + +def getChildOrNone(element, index): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getChild()") + child = element.childNodes.item(index) + return child + +def getLastChild(element, index, name=None): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getLastChild()") + + child = element.childNodes.item(index) + if child == None: + raise SyntaxError("Missing child: '%s'" % name) + if child != element.lastChild: + raise SyntaxError("Too many elements under: '%s'" % element.tagName) + checkName(child, name) + return child + +#Regular expressions for syntax-checking attribute and element content +nsRegEx = "http://trevp.net/cryptoID\Z" +cryptoIDRegEx = "([a-km-z3-9]{5}\.){3}[a-km-z3-9]{5}\Z" +urlRegEx = "http(s)?://.{1,100}\Z" +sha1Base64RegEx = "[A-Za-z0-9+/]{27}=\Z" +base64RegEx = "[A-Za-z0-9+/]+={0,4}\Z" +certsListRegEx = "(0)?(1)?(2)?(3)?(4)?(5)?(6)?(7)?(8)?(9)?\Z" +keyRegEx = "[A-Z]\Z" +keysListRegEx = "(A)?(B)?(C)?(D)?(E)?(F)?(G)?(H)?(I)?(J)?(K)?(L)?(M)?(N)?(O)?(P)?(Q)?(R)?(S)?(T)?(U)?(V)?(W)?(X)?(Y)?(Z)?\Z" +dateTimeRegEx = "\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\dZ\Z" +shortStringRegEx = ".{1,100}\Z" +exprRegEx = "[a-zA-Z0-9 ,()]{1,200}\Z" +notAfterDeltaRegEx = "0|([1-9][0-9]{0,8})\Z" #A number from 0 to (1 billion)-1 +booleanRegEx = "(true)|(false)" + +def getReqAttribute(element, attrName, regEx=""): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getReqAttribute()") + + value = element.getAttribute(attrName) + if not 
value: + raise SyntaxError("Missing Attribute: " + attrName) + if not re.match(regEx, value): + raise SyntaxError("Bad Attribute Value for '%s': '%s' " % (attrName, value)) + element.removeAttribute(attrName) + return str(value) #de-unicode it; this is needed for bsddb, for example + +def getAttribute(element, attrName, regEx=""): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in getAttribute()") + + value = element.getAttribute(attrName) + if value: + if not re.match(regEx, value): + raise SyntaxError("Bad Attribute Value for '%s': '%s' " % (attrName, value)) + element.removeAttribute(attrName) + return str(value) #de-unicode it; this is needed for bsddb, for example + +def checkNoMoreAttributes(element): + if element.nodeType != element.ELEMENT_NODE: + raise SyntaxError("Wrong node type in checkNoMoreAttributes()") + + if element.attributes.length!=0: + raise SyntaxError("Extra attributes on '%s'" % element.tagName) + +def getText(element, regEx=""): + textNode = element.firstChild + if textNode == None: + raise SyntaxError("Empty element '%s'" % element.tagName) + if textNode.nodeType != textNode.TEXT_NODE: + raise SyntaxError("Non-text node: '%s'" % element.tagName) + if not re.match(regEx, textNode.data): + raise SyntaxError("Bad Text Value for '%s': '%s' " % (element.tagName, textNode.data)) + return str(textNode.data) #de-unicode it; this is needed for bsddb, for example + +#Function for adding tabs to a string +def indent(s, steps, ch="\t"): + tabs = ch*steps + if s[-1] != "\n": + s = tabs + s.replace("\n", "\n"+tabs) + else: + s = tabs + s.replace("\n", "\n"+tabs) + s = s[ : -len(tabs)] + return s + +def escape(s): + return saxutils.escape(s) diff --git a/gdata.py-1.2.3/src/gdata/urlfetch.py b/gdata.py-1.2.3/src/gdata/urlfetch.py new file mode 100644 index 0000000..890b257 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/urlfetch.py @@ -0,0 +1,247 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Provides HTTP functions for gdata.service to use on Google App Engine + +AppEngineHttpClient: Provides an HTTP request method which uses App Engine's + urlfetch API. Set the http_client member of a GDataService object to an + instance of an AppEngineHttpClient to allow the gdata library to run on + Google App Engine. + +run_on_appengine: Function which will modify an existing GDataService object + to allow it to run on App Engine. It works by creating a new instance of + the AppEngineHttpClient and replacing the GDataService object's + http_client. + +HttpRequest: Function that wraps google.appengine.api.urlfetch.Fetch in a + common interface which is used by gdata.service.GDataService. In other + words, this module can be used as the gdata service request handler so + that all HTTP requests will be performed by the hosting Google App Engine + server. +""" + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import StringIO +import atom.service +import atom.http_interface +from google.appengine.api import urlfetch + + +def run_on_appengine(gdata_service): + """Modifies a GDataService object to allow it to run on App Engine. + + Args: + gdata_service: An instance of AtomService, GDataService, or any + of their subclasses which has an http_client member. 
+ """ + gdata_service.http_client = AppEngineHttpClient() + + +class AppEngineHttpClient(atom.http_interface.GenericHttpClient): + def __init__(self, headers=None): + self.debug = False + self.headers = headers or {} + + def request(self, operation, url, data=None, headers=None): + """Performs an HTTP call to the server, supports GET, POST, PUT, and + DELETE. + + Usage example, perform and HTTP GET on http://www.google.com/: + import atom.http + client = atom.http.HttpClient() + http_response = client.request('GET', 'http://www.google.com/') + + Args: + operation: str The HTTP operation to be performed. This is usually one + of 'GET', 'POST', 'PUT', or 'DELETE' + data: filestream, list of parts, or other object which can be converted + to a string. Should be set to None when performing a GET or DELETE. + If data is a file-like object which can be read, this method will + read a chunk of 100K bytes at a time and send them. + If the data is a list of parts to be sent, each part will be + evaluated and sent. + url: The full URL to which the request should be sent. Can be a string + or atom.url.Url. + headers: dict of strings. HTTP headers which should be sent + in the request. + """ + all_headers = self.headers.copy() + if headers: + all_headers.update(headers) + + # Construct the full payload. + # Assume that data is None or a string. + data_str = data + if data: + if isinstance(data, list): + # If data is a list of different objects, convert them all to strings + # and join them together. + converted_parts = [__ConvertDataPart(x) for x in data] + data_str = ''.join(converted_parts) + else: + data_str = __ConvertDataPart(data) + + # If the list of headers does not include a Content-Length, attempt to + # calculate it based on the data object. + if data and 'Content-Length' not in all_headers: + all_headers['Content-Length'] = len(data_str) + + # Set the content type to the default value if none was set. 
+ if 'Content-Type' not in all_headers: + all_headers['Content-Type'] = 'application/atom+xml' + + # Lookup the urlfetch operation which corresponds to the desired HTTP verb. + if operation == 'GET': + method = urlfetch.GET + elif operation == 'POST': + method = urlfetch.POST + elif operation == 'PUT': + method = urlfetch.PUT + elif operation == 'DELETE': + method = urlfetch.DELETE + else: + method = None + return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str, + method=method, headers=all_headers)) + + +def HttpRequest(service, operation, data, uri, extra_headers=None, + url_params=None, escape_params=True, content_type='application/atom+xml'): + """Performs an HTTP call to the server, supports GET, POST, PUT, and DELETE. + + This function is deprecated, use AppEngineHttpClient.request instead. + + To use this module with gdata.service, you can set this module to be the + http_request_handler so that HTTP requests use Google App Engine's urlfetch. + import gdata.service + import gdata.urlfetch + gdata.service.http_request_handler = gdata.urlfetch + + Args: + service: atom.AtomService object which contains some of the parameters + needed to make the request. The following members are used to + construct the HTTP call: server (str), additional_headers (dict), + port (int), and ssl (bool). + operation: str The HTTP operation to be performed. This is usually one of + 'GET', 'POST', 'PUT', or 'DELETE' + data: filestream, list of parts, or other object which can be + converted to a string. + Should be set to None when performing a GET or PUT. + If data is a file-like object which can be read, this method will read + a chunk of 100K bytes at a time and send them. + If the data is a list of parts to be sent, each part will be evaluated + and sent. + uri: The beginning of the URL to which the request should be sent. + Examples: '/', '/base/feeds/snippets', + '/m8/feeds/contacts/default/base' + extra_headers: dict of strings. 
HTTP headers which should be sent + in the request. These headers are in addition to those stored in + service.additional_headers. + url_params: dict of strings. Key value pairs to be added to the URL as + URL parameters. For example {'foo':'bar', 'test':'param'} will + become ?foo=bar&test=param. + escape_params: bool default True. If true, the keys and values in + url_params will be URL escaped when the form is constructed + (Special characters converted to %XX form.) + content_type: str The MIME type for the data being sent. Defaults to + 'application/atom+xml', this is only used if data is set. + """ + full_uri = atom.service.BuildUri(uri, url_params, escape_params) + (server, port, ssl, partial_uri) = atom.service.ProcessUrl(service, full_uri) + # Construct the full URL for the request. + if ssl: + full_url = 'https://%s%s' % (server, partial_uri) + else: + full_url = 'http://%s%s' % (server, partial_uri) + + # Construct the full payload. + # Assume that data is None or a string. + data_str = data + if data: + if isinstance(data, list): + # If data is a list of different objects, convert them all to strings + # and join them together. + converted_parts = [__ConvertDataPart(x) for x in data] + data_str = ''.join(converted_parts) + else: + data_str = __ConvertDataPart(data) + + # Construct the dictionary of HTTP headers. + headers = {} + if isinstance(service.additional_headers, dict): + headers = service.additional_headers.copy() + if isinstance(extra_headers, dict): + for header, value in extra_headers.iteritems(): + headers[header] = value + # Add the content type header (we don't need to calculate content length, + # since urlfetch.Fetch will calculate for us). + if content_type: + headers['Content-Type'] = content_type + + # Lookup the urlfetch operation which corresponds to the desired HTTP verb. 
+ if operation == 'GET': + method = urlfetch.GET + elif operation == 'POST': + method = urlfetch.POST + elif operation == 'PUT': + method = urlfetch.PUT + elif operation == 'DELETE': + method = urlfetch.DELETE + else: + method = None + return HttpResponse(urlfetch.Fetch(url=full_url, payload=data_str, + method=method, headers=headers)) + + +def __ConvertDataPart(data): + if not data or isinstance(data, str): + return data + elif hasattr(data, 'read'): + # data is a file like object, so read it completely. + return data.read() + # The data object was not a file. + # Try to convert to a string and send the data. + return str(data) + + +class HttpResponse(object): + """Translates a urlfetch resoinse to look like an hhtplib resoinse. + + Used to allow the resoinse from HttpRequest to be usable by gdata.service + methods. + """ + + def __init__(self, urlfetch_response): + self.body = StringIO.StringIO(urlfetch_response.content) + self.headers = urlfetch_response.headers + self.status = urlfetch_response.status_code + self.reason = '' + + def read(self, length=None): + if not length: + return self.body.read() + else: + return self.body.read(length) + + def getheader(self, name): + if not self.headers.has_key(name): + return self.headers[name.lower()] + return self.headers[name] + diff --git a/gdata.py-1.2.3/src/gdata/webmastertools/__init__.py b/gdata.py-1.2.3/src/gdata/webmastertools/__init__.py new file mode 100644 index 0000000..c40a641 --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/webmastertools/__init__.py @@ -0,0 +1,542 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Yu-Jie Lin +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains extensions to Atom objects used with Google Webmaster Tools.""" + + +__author__ = 'livibetter (Yu-Jie Lin)' + + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import atom +import gdata + + +# XML namespaces which are often used in Google Webmaster Tools entities. +GWEBMASTERTOOLS_NAMESPACE = 'http://schemas.google.com/webmasters/tools/2007' +GWEBMASTERTOOLS_TEMPLATE = '{http://schemas.google.com/webmasters/tools/2007}%s' + + +class Indexed(atom.AtomBase): + _tag = 'indexed' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def IndexedFromString(xml_string): + return atom.CreateClassFromXMLString(Indexed, xml_string) + + +class Crawled(atom.Date): + _tag = 'crawled' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def CrawledFromString(xml_string): + return atom.CreateClassFromXMLString(Crawled, xml_string) + + +class GeoLocation(atom.AtomBase): + _tag = 'geolocation' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def GeoLocationFromString(xml_string): + return atom.CreateClassFromXMLString(GeoLocation, xml_string) + + +class PreferredDomain(atom.AtomBase): + _tag = 'preferred-domain' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def PreferredDomainFromString(xml_string): + return atom.CreateClassFromXMLString(PreferredDomain, xml_string) + + +class CrawlRate(atom.AtomBase): + _tag = 'crawl-rate' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def 
CrawlRateFromString(xml_string): + return atom.CreateClassFromXMLString(CrawlRate, xml_string) + + +class EnhancedImageSearch(atom.AtomBase): + _tag = 'enhanced-image-search' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def EnhancedImageSearchFromString(xml_string): + return atom.CreateClassFromXMLString(EnhancedImageSearch, xml_string) + + +class Verified(atom.AtomBase): + _tag = 'verified' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def VerifiedFromString(xml_string): + return atom.CreateClassFromXMLString(Verified, xml_string) + + +class VerificationMethodMeta(atom.AtomBase): + _tag = 'meta' + _namespace = atom.ATOM_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _attributes['name'] = 'name' + _attributes['content'] = 'content' + + def __init__(self, text=None, name=None, content=None, + extension_elements=None, extension_attributes=None): + self.text = text + self.name = name + self.content = content + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def VerificationMethodMetaFromString(xml_string): + return atom.CreateClassFromXMLString(VerificationMethodMeta, xml_string) + + +class VerificationMethod(atom.AtomBase): + _tag = 'verification-method' + _namespace = GWEBMASTERTOOLS_NAMESPACE + _children = atom.Text._children.copy() + _attributes = atom.Text._attributes.copy() + _children['{%s}meta' % atom.ATOM_NAMESPACE] = ( + 'meta', VerificationMethodMeta) + _attributes['in-use'] = 'in_use' + + def __init__(self, text=None, in_use=None, meta=None, + extension_elements=None, extension_attributes=None): + self.text = text + self.in_use = in_use + self.meta = meta + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def VerificationMethodFromString(xml_string): + return atom.CreateClassFromXMLString(VerificationMethod, xml_string) + + +class MarkupLanguage(atom.AtomBase): + _tag = 
'markup-language' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def MarkupLanguageFromString(xml_string): + return atom.CreateClassFromXMLString(MarkupLanguage, xml_string) + + +class SitemapMobile(atom.AtomBase): + _tag = 'sitemap-mobile' + _namespace = GWEBMASTERTOOLS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}markup-language' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'markup_language', [MarkupLanguage]) + + def __init__(self, markup_language=None, + extension_elements=None, extension_attributes=None, text=None): + + self.markup_language = markup_language or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SitemapMobileFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapMobile, xml_string) + + +class SitemapMobileMarkupLanguage(atom.AtomBase): + _tag = 'sitemap-mobile-markup-language' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def SitemapMobileMarkupLanguageFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapMobileMarkupLanguage, xml_string) + + +class PublicationLabel(atom.AtomBase): + _tag = 'publication-label' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def PublicationLabelFromString(xml_string): + return atom.CreateClassFromXMLString(PublicationLabel, xml_string) + + +class SitemapNews(atom.AtomBase): + _tag = 'sitemap-news' + _namespace = GWEBMASTERTOOLS_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}publication-label' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'publication_label', [PublicationLabel]) + + def __init__(self, publication_label=None, + extension_elements=None, extension_attributes=None, text=None): + + self.publication_label = publication_label or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + 
+ +def SitemapNewsFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapNews, xml_string) + + +class SitemapNewsPublicationLabel(atom.AtomBase): + _tag = 'sitemap-news-publication-label' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def SitemapNewsPublicationLabelFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapNewsPublicationLabel, xml_string) + + +class SitemapLastDownloaded(atom.Date): + _tag = 'sitemap-last-downloaded' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def SitemapLastDownloadedFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapLastDownloaded, xml_string) + + +class SitemapType(atom.AtomBase): + _tag = 'sitemap-type' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def SitemapTypeFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapType, xml_string) + + +class SitemapStatus(atom.AtomBase): + _tag = 'sitemap-status' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def SitemapStatusFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapStatus, xml_string) + + +class SitemapUrlCount(atom.AtomBase): + _tag = 'sitemap-url-count' + _namespace = GWEBMASTERTOOLS_NAMESPACE + + +def SitemapUrlCountFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapUrlCount, xml_string) + + +class LinkFinder(atom.LinkFinder): + """An "interface" providing methods to find link elements + + SitesEntry elements often contain multiple links which differ in the rel + attribute or content type. Often, developers are interested in a specific + type of link so this class provides methods to find specific classes of links. + + This class is used as a mixin in SitesEntry. 
+ """ + + def GetSelfLink(self): + """Find the first link with rel set to 'self' + + Returns: + An atom.Link or none if none of the links had rel equal to 'self' + """ + + for a_link in self.link: + if a_link.rel == 'self': + return a_link + return None + + def GetEditLink(self): + for a_link in self.link: + if a_link.rel == 'edit': + return a_link + return None + + def GetPostLink(self): + """Get a link containing the POST target URL. + + The POST target URL is used to insert new entries. + + Returns: + A link object with a rel matching the POST type. + """ + for a_link in self.link: + if a_link.rel == 'http://schemas.google.com/g/2005#post': + return a_link + return None + + def GetFeedLink(self): + for a_link in self.link: + if a_link.rel == 'http://schemas.google.com/g/2005#feed': + return a_link + return None + + +class SitesEntry(atom.Entry, LinkFinder): + """A Google Webmaster Tools meta Entry flavor of an Atom Entry """ + + _tag = atom.Entry._tag + _namespace = atom.Entry._namespace + _children = atom.Entry._children.copy() + _attributes = atom.Entry._attributes.copy() + _children['{%s}entryLink' % gdata.GDATA_NAMESPACE] = ( + 'entry_link', [gdata.EntryLink]) + _children['{%s}indexed' % GWEBMASTERTOOLS_NAMESPACE] = ('indexed', Indexed) + _children['{%s}crawled' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'crawled', Crawled) + _children['{%s}geolocation' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'geolocation', GeoLocation) + _children['{%s}preferred-domain' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'preferred_domain', PreferredDomain) + _children['{%s}crawl-rate' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'crawl_rate', CrawlRate) + _children['{%s}enhanced-image-search' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'enhanced_image_search', EnhancedImageSearch) + _children['{%s}verified' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'verified', Verified) + _children['{%s}verification-method' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'verification_method', [VerificationMethod]) + + def __GetId(self): + return self.__id 
+ + # This method was created to strip the unwanted whitespace from the id's + # text node. + def __SetId(self, id): + self.__id = id + if id is not None and id.text is not None: + self.__id.text = id.text.strip() + + id = property(__GetId, __SetId) + + def __init__(self, category=None, content=None, + atom_id=None, link=None, title=None, updated=None, + entry_link=None, indexed=None, crawled=None, + geolocation=None, preferred_domain=None, crawl_rate=None, + enhanced_image_search=None, + verified=None, verification_method=None, + extension_elements=None, extension_attributes=None, text=None): + atom.Entry.__init__(self, category=category, + content=content, atom_id=atom_id, link=link, + title=title, updated=updated, text=text) + + self.entry_link = entry_link or [] + self.indexed = indexed + self.crawled = crawled + self.geolocation = geolocation + self.preferred_domain = preferred_domain + self.crawl_rate = crawl_rate + self.enhanced_image_search = enhanced_image_search + self.verified = verified + self.verification_method = verification_method or [] + + +def SitesEntryFromString(xml_string): + return atom.CreateClassFromXMLString(SitesEntry, xml_string) + + +class SitesFeed(atom.Feed, LinkFinder): + """A Google Webmaster Tools meta Sites feed flavor of an Atom Feed""" + + _tag = atom.Feed._tag + _namespace = atom.Feed._namespace + _children = atom.Feed._children.copy() + _attributes = atom.Feed._attributes.copy() + _children['{%s}startIndex' % gdata.OPENSEARCH_NAMESPACE] = ( + 'start_index', gdata.StartIndex) + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [SitesEntry]) + del _children['{%s}generator' % atom.ATOM_NAMESPACE] + del _children['{%s}author' % atom.ATOM_NAMESPACE] + del _children['{%s}contributor' % atom.ATOM_NAMESPACE] + del _children['{%s}logo' % atom.ATOM_NAMESPACE] + del _children['{%s}icon' % atom.ATOM_NAMESPACE] + del _children['{%s}rights' % atom.ATOM_NAMESPACE] + del _children['{%s}subtitle' % atom.ATOM_NAMESPACE] + + def 
__GetId(self): + return self.__id + + def __SetId(self, id): + self.__id = id + if id is not None and id.text is not None: + self.__id.text = id.text.strip() + + id = property(__GetId, __SetId) + + def __init__(self, start_index=None, atom_id=None, title=None, entry=None, + category=None, link=None, updated=None, + extension_elements=None, extension_attributes=None, text=None): + """Constructor for Source + + Args: + category: list (optional) A list of Category instances + id: Id (optional) The entry's Id element + link: list (optional) A list of Link instances + title: Title (optional) the entry's title element + updated: Updated (optional) the entry's updated element + entry: list (optional) A list of the Entry instances contained in the + feed. + text: String (optional) The text contents of the element. This is the + contents of the Entry's XML text node. + (Example: This is the text) + extension_elements: list (optional) A list of ExtensionElement instances + which are children of this element. + extension_attributes: dict (optional) A dictionary of strings which are + the values for additional XML attributes of this element. 
+ """ + + self.start_index = start_index + self.category = category or [] + self.id = atom_id + self.link = link or [] + self.title = title + self.updated = updated + self.entry = entry or [] + self.text = text + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SitesFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SitesFeed, xml_string) + + +class SitemapsEntry(atom.Entry, LinkFinder): + """A Google Webmaster Tools meta Sitemaps Entry flavor of an Atom Entry """ + + _tag = atom.Entry._tag + _namespace = atom.Entry._namespace + _children = atom.Entry._children.copy() + _attributes = atom.Entry._attributes.copy() + _children['{%s}sitemap-type' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_type', SitemapType) + _children['{%s}sitemap-status' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_status', SitemapStatus) + _children['{%s}sitemap-last-downloaded' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_last_downloaded', SitemapLastDownloaded) + _children['{%s}sitemap-url-count' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_url_count', SitemapUrlCount) + _children['{%s}sitemap-mobile-markup-language' % GWEBMASTERTOOLS_NAMESPACE] \ + = ('sitemap_mobile_markup_language', SitemapMobileMarkupLanguage) + _children['{%s}sitemap-news-publication-label' % GWEBMASTERTOOLS_NAMESPACE] \ + = ('sitemap_news_publication_label', SitemapNewsPublicationLabel) + + def __GetId(self): + return self.__id + + # This method was created to strip the unwanted whitespace from the id's + # text node. 
+ def __SetId(self, id): + self.__id = id + if id is not None and id.text is not None: + self.__id.text = id.text.strip() + + id = property(__GetId, __SetId) + + def __init__(self, category=None, content=None, + atom_id=None, link=None, title=None, updated=None, + sitemap_type=None, sitemap_status=None, sitemap_last_downloaded=None, + sitemap_url_count=None, sitemap_mobile_markup_language=None, + sitemap_news_publication_label=None, + extension_elements=None, extension_attributes=None, text=None): + atom.Entry.__init__(self, category=category, + content=content, atom_id=atom_id, link=link, + title=title, updated=updated, text=text) + + self.sitemap_type = sitemap_type + self.sitemap_status = sitemap_status + self.sitemap_last_downloaded = sitemap_last_downloaded + self.sitemap_url_count = sitemap_url_count + self.sitemap_mobile_markup_language = sitemap_mobile_markup_language + self.sitemap_news_publication_label = sitemap_news_publication_label + + +def SitemapsEntryFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapsEntry, xml_string) + + +class SitemapsFeed(atom.Feed, LinkFinder): + """A Google Webmaster Tools meta Sitemaps feed flavor of an Atom Feed""" + + _tag = atom.Feed._tag + _namespace = atom.Feed._namespace + _children = atom.Feed._children.copy() + _attributes = atom.Feed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [SitemapsEntry]) + _children['{%s}sitemap-mobile' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_mobile', SitemapMobile) + _children['{%s}sitemap-news' % GWEBMASTERTOOLS_NAMESPACE] = ( + 'sitemap_news', SitemapNews) + del _children['{%s}generator' % atom.ATOM_NAMESPACE] + del _children['{%s}author' % atom.ATOM_NAMESPACE] + del _children['{%s}contributor' % atom.ATOM_NAMESPACE] + del _children['{%s}logo' % atom.ATOM_NAMESPACE] + del _children['{%s}icon' % atom.ATOM_NAMESPACE] + del _children['{%s}rights' % atom.ATOM_NAMESPACE] + del _children['{%s}subtitle' % atom.ATOM_NAMESPACE] + + def 
__GetId(self): + return self.__id + + def __SetId(self, id): + self.__id = id + if id is not None and id.text is not None: + self.__id.text = id.text.strip() + + id = property(__GetId, __SetId) + + def __init__(self, category=None, content=None, + atom_id=None, link=None, title=None, updated=None, + entry=None, sitemap_mobile=None, sitemap_news=None, + extension_elements=None, extension_attributes=None, text=None): + + self.category = category or [] + self.id = atom_id + self.link = link or [] + self.title = title + self.updated = updated + self.entry = entry or [] + self.text = text + self.sitemap_mobile = sitemap_mobile + self.sitemap_news = sitemap_news + self.extension_elements = extension_elements or [] + self.extension_attributes = extension_attributes or {} + + +def SitemapsFeedFromString(xml_string): + return atom.CreateClassFromXMLString(SitemapsFeed, xml_string) diff --git a/gdata.py-1.2.3/src/gdata/webmastertools/service.py b/gdata.py-1.2.3/src/gdata/webmastertools/service.py new file mode 100644 index 0000000..cc51acd --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/webmastertools/service.py @@ -0,0 +1,507 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Yu-Jie Lin +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""GWebmasterToolsService extends the GDataService to streamline +Google Webmaster Tools operations. + + GWebmasterToolsService: Provides methods to query feeds and manipulate items. + Extends GDataService. 
+""" + +__author__ = 'livibetter (Yu-Jie Lin)' + +import urllib +import gdata +import atom.service +import gdata.service +import gdata.webmastertools as webmastertools +import atom + + +FEED_BASE = 'https://www.google.com/webmasters/tools/feeds/' +SITES_FEED = FEED_BASE + 'sites/' +SITE_TEMPLATE = SITES_FEED + '%s' +SITEMAPS_FEED_TEMPLATE = FEED_BASE + '%(site_id)s/sitemaps/' +SITEMAP_TEMPLATE = SITEMAPS_FEED_TEMPLATE + '%(sitemap_id)s' + + +class Error(Exception): + pass + + +class RequestError(Error): + pass + + +class GWebmasterToolsService(gdata.service.GDataService): + """Client for the Google Webmaster Tools service.""" + + def __init__(self, email=None, password=None, source=None, + server='www.google.com', api_key=None, + additional_headers=None, handler=None): + gdata.service.GDataService.__init__(self, email=email, password=password, + service='sitemaps', source=source, + server=server, + additional_headers=additional_headers, + handler=handler) + + def GetSitesFeed(self, uri=SITES_FEED, + converter=webmastertools.SitesFeedFromString): + """Gets sites feed. + + Args: + uri: str (optional) URI to retrieve sites feed. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitesFeedFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesFeed object. + """ + return self.Get(uri, converter=converter) + + def AddSite(self, site_uri, uri=SITES_FEED, + url_params=None, escape_params=True, converter=None): + """Adds a site to Google Webmaster Tools. + + Args: + site_uri: str URI of which site to add. + uri: str (optional) URI to add a site. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. 
+ escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitesEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. + """ + + site_entry = webmastertools.SitesEntry() + site_entry.content = atom.Content(src=site_uri) + response = self.Post(site_entry, uri, + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + def DeleteSite(self, site_uri, uri=SITE_TEMPLATE, + url_params=None, escape_params=True): + """Removes a site from Google Webmaster Tools. + + Args: + site_uri: str URI of which site to remove. + uri: str (optional) A URI template to send DELETE request. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + True if the delete succeeded. + """ + + return self.Delete( + uri % urllib.quote_plus(site_uri), + url_params=url_params, escape_params=escape_params) + + def VerifySite(self, site_uri, verification_method, uri=SITE_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Requests a verification of a site. + + Args: + site_uri: str URI of which site to add sitemap for. + verification_method: str The method to verify a site. Valid values are + 'htmlpage', and 'metatag'. + uri: str (optional) URI template to update a site. + Default SITE_TEMPLATE. 
+ url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. + """ + + site_entry = webmastertools.SitesEntry( + atom_id=atom.Id(text=site_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sites-info'), + verification_method=webmastertools.VerificationMethod( + type=verification_method, in_user='true') + ) + response = self.Put( + site_entry, + uri % urllib.quote_plus(site_uri), + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + + def UpdateGeoLocation(self, site_uri, geolocation, uri=SITE_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Updates geolocation setting of a site. + + Args: + site_uri: str URI of which site to add sitemap for. + geolocation: str The geographic location. Valid values are listed in + http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 + uri: str (optional) URI template to update a site. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. 
Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. + """ + + site_entry = webmastertools.SitesEntry( + atom_id=atom.Id(text=site_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sites-info'), + geolocation=webmastertools.GeoLocation(text=geolocation) + ) + response = self.Put( + site_entry, + uri % urllib.quote_plus(site_uri), + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + def UpdateCrawlRate(self, site_uri, crawl_rate, uri=SITE_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Updates crawl rate setting of a site. + + Args: + site_uri: str URI of which site to add sitemap for. + crawl_rate: str The crawl rate for a site. Valid values are 'slower', + 'normal', and 'faster'. + uri: str (optional) URI template to update a site. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. 
+ """ + + site_entry = webmastertools.SitesEntry( + atom_id=atom.Id(text=site_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sites-info'), + crawl_rate=webmastertools.CrawlRate(text=crawl_rate) + ) + response = self.Put( + site_entry, + uri % urllib.quote_plus(site_uri), + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + def UpdatePreferredDomain(self, site_uri, preferred_domain, uri=SITE_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Updates preferred domain setting of a site. + + Note that if using 'preferwww', will also need www.example.com in account to + take effect. + + Args: + site_uri: str URI of which site to add sitemap for. + preferred_domain: str The preferred domain for a site. Valid values are 'none', + 'preferwww', and 'prefernowww'. + uri: str (optional) URI template to update a site. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. 
+ """ + + site_entry = webmastertools.SitesEntry( + atom_id=atom.Id(text=site_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sites-info'), + preferred_domain=webmastertools.PreferredDomain(text=preferred_domain) + ) + response = self.Put( + site_entry, + uri % urllib.quote_plus(site_uri), + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + def UpdateEnhancedImageSearch(self, site_uri, enhanced_image_search, + uri=SITE_TEMPLATE, url_params=None, escape_params=True, converter=None): + """Updates enhanced image search setting of a site. + + Args: + site_uri: str URI of which site to add sitemap for. + enhanced_image_search: str The enhanced image search setting for a site. + Valid values are 'true', and 'false'. + uri: str (optional) URI template to update a site. + Default SITE_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitesEntry object. 
+ """ + + site_entry = webmastertools.SitesEntry( + atom_id=atom.Id(text=site_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sites-info'), + enhanced_image_search=webmastertools.EnhancedImageSearch( + text=enhanced_image_search) + ) + response = self.Put( + site_entry, + uri % urllib.quote_plus(site_uri), + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitesEntryFromString(response.ToString()) + return response + + def GetSitemapsFeed(self, site_uri, uri=SITEMAPS_FEED_TEMPLATE, + converter=webmastertools.SitemapsFeedFromString): + """Gets sitemaps feed of a site. + + Args: + site_uri: str (optional) URI of which site to retrieve its sitemaps feed. + uri: str (optional) URI to retrieve sites feed. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsFeedFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitemapsFeed object. + """ + return self.Get(uri % {'site_id': urllib.quote_plus(site_uri)}, + converter=converter) + + def AddSitemap(self, site_uri, sitemap_uri, sitemap_type='WEB', + uri=SITEMAPS_FEED_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Adds a regular sitemap to a site. + + Args: + site_uri: str URI of which site to add sitemap for. + sitemap_uri: str URI of sitemap to add to a site. + sitemap_type: str Type of added sitemap. Valid types: WEB, VIDEO, or CODE. + uri: str (optional) URI template to add a sitemap. + Default SITEMAP_FEED_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. 
+ escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitemapsEntry object. + """ + + sitemap_entry = webmastertools.SitemapsEntry( + atom_id=atom.Id(text=sitemap_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sitemap-regular'), + sitemap_type=webmastertools.SitemapType(text=sitemap_type)) + response = self.Post( + sitemap_entry, + uri % {'site_id': urllib.quote_plus(site_uri)}, + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitemapsEntryFromString(response.ToString()) + return response + + def AddMobileSitemap(self, site_uri, sitemap_uri, + sitemap_mobile_markup_language='XHTML', uri=SITEMAPS_FEED_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Adds a mobile sitemap to a site. + + Args: + site_uri: str URI of which site to add sitemap for. + sitemap_uri: str URI of sitemap to add to a site. + sitemap_mobile_markup_language: str Format of added sitemap. Valid types: + XHTML, WML, or cHTML. + uri: str (optional) URI template to add a sitemap. + Default SITEMAP_FEED_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. 
Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. + + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitemapsEntry object. + """ + # FIXME + sitemap_entry = webmastertools.SitemapsEntry( + atom_id=atom.Id(text=sitemap_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sitemap-mobile'), + sitemap_mobile_markup_language=\ + webmastertools.SitemapMobileMarkupLanguage( + text=sitemap_mobile_markup_language)) + print sitemap_entry + response = self.Post( + sitemap_entry, + uri % {'site_id': urllib.quote_plus(site_uri)}, + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitemapsEntryFromString(response.ToString()) + return response + + def AddNewsSitemap(self, site_uri, sitemap_uri, + sitemap_news_publication_label, uri=SITEMAPS_FEED_TEMPLATE, + url_params=None, escape_params=True, converter=None): + """Adds a news sitemap to a site. + + Args: + site_uri: str URI of which site to add sitemap for. + sitemap_uri: str URI of sitemap to add to a site. + sitemap_news_publication_label: str, list of str Publication Labels for + sitemap. + uri: str (optional) URI template to add a sitemap. + Default SITEMAP_FEED_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + converter: func (optional) Function which is executed on the server's + response before it is returned. Usually this is a function like + SitemapsEntryFromString which will parse the response and turn it into + an object. 
+ + Returns: + If converter is defined, the results of running converter on the server's + response. Otherwise, it will be a SitemapsEntry object. + """ + + sitemap_entry = webmastertools.SitemapsEntry( + atom_id=atom.Id(text=sitemap_uri), + category=atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/webmasters/tools/2007#sitemap-news'), + sitemap_news_publication_label=[], + ) + if isinstance(sitemap_news_publication_label, str): + sitemap_news_publication_label = [sitemap_news_publication_label] + for label in sitemap_news_publication_label: + sitemap_entry.sitemap_news_publication_label.append( + webmastertools.SitemapNewsPublicationLabel(text=label)) + print sitemap_entry + response = self.Post( + sitemap_entry, + uri % {'site_id': urllib.quote_plus(site_uri)}, + url_params=url_params, + escape_params=escape_params, converter=converter) + if not converter and isinstance(response, atom.Entry): + return webmastertools.SitemapsEntryFromString(response.ToString()) + return response + + def DeleteSitemap(self, site_uri, sitemap_uri, uri=SITEMAP_TEMPLATE, + url_params=None, escape_params=True): + """Removes a sitemap from a site. + + Args: + site_uri: str URI of which site to remove a sitemap from. + sitemap_uri: str URI of sitemap to remove from a site. + uri: str (optional) A URI template to send DELETE request. + Default SITEMAP_TEMPLATE. + url_params: dict (optional) Additional URL parameters to be included + in the insertion request. + escape_params: boolean (optional) If true, the url_parameters will be + escaped before they are included in the request. + + Returns: + True if the delete succeeded. 
+ """ + + return self.Delete( + uri % {'site_id': urllib.quote_plus(site_uri), + 'sitemap_id': urllib.quote_plus(sitemap_uri)}, + url_params=url_params, escape_params=escape_params) diff --git a/gdata.py-1.2.3/src/gdata/youtube/__init__.py b/gdata.py-1.2.3/src/gdata/youtube/__init__.py new file mode 100755 index 0000000..c41aaea --- /dev/null +++ b/gdata.py-1.2.3/src/gdata/youtube/__init__.py @@ -0,0 +1,684 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__author__ = ('api.stephaniel@gmail.com (Stephanie Liu)' + ', api.jhartmann@gmail.com (Jochen Hartmann)') + +import atom +import gdata +import gdata.media as Media +import gdata.geo as Geo + +YOUTUBE_NAMESPACE = 'http://gdata.youtube.com/schemas/2007' +YOUTUBE_FORMAT = '{http://gdata.youtube.com/schemas/2007}format' +YOUTUBE_DEVELOPER_TAG_SCHEME = '%s/%s' % (YOUTUBE_NAMESPACE, + 'developertags.cat') +YOUTUBE_SUBSCRIPTION_TYPE_SCHEME = '%s/%s' % (YOUTUBE_NAMESPACE, + 'subscriptiontypes.cat') + +class Username(atom.AtomBase): + """The YouTube Username element""" + _tag = 'username' + _namespace = YOUTUBE_NAMESPACE + +class QueryString(atom.AtomBase): + """The YouTube QueryString element""" + _tag = 'queryString' + _namespace = YOUTUBE_NAMESPACE + + +class FirstName(atom.AtomBase): + """The YouTube FirstName element""" + _tag = 'firstName' + _namespace = YOUTUBE_NAMESPACE + + +class LastName(atom.AtomBase): + """The YouTube LastName element""" + _tag = 'lastName' + _namespace = YOUTUBE_NAMESPACE + + +class Age(atom.AtomBase): + """The YouTube Age element""" + _tag = 'age' + _namespace = YOUTUBE_NAMESPACE + + +class Books(atom.AtomBase): + """The YouTube Books element""" + _tag = 'books' + _namespace = YOUTUBE_NAMESPACE + + +class Gender(atom.AtomBase): + """The YouTube Gender element""" + _tag = 'gender' + _namespace = YOUTUBE_NAMESPACE + + +class Company(atom.AtomBase): + """The YouTube Company element""" + _tag = 'company' + _namespace = YOUTUBE_NAMESPACE + + +class Hobbies(atom.AtomBase): + """The YouTube Hobbies element""" + _tag = 'hobbies' + _namespace = YOUTUBE_NAMESPACE + + +class Hometown(atom.AtomBase): + """The YouTube Hometown element""" + _tag = 'hometown' + _namespace = YOUTUBE_NAMESPACE + + +class Location(atom.AtomBase): + """The YouTube Location element""" + _tag = 'location' + _namespace = YOUTUBE_NAMESPACE + + +class Movies(atom.AtomBase): + """The YouTube Movies element""" + _tag = 'movies' + _namespace = YOUTUBE_NAMESPACE + + +class 
Music(atom.AtomBase): + """The YouTube Music element""" + _tag = 'music' + _namespace = YOUTUBE_NAMESPACE + + +class Occupation(atom.AtomBase): + """The YouTube Occupation element""" + _tag = 'occupation' + _namespace = YOUTUBE_NAMESPACE + + +class School(atom.AtomBase): + """The YouTube School element""" + _tag = 'school' + _namespace = YOUTUBE_NAMESPACE + + +class Relationship(atom.AtomBase): + """The YouTube Relationship element""" + _tag = 'relationship' + _namespace = YOUTUBE_NAMESPACE + + +class Recorded(atom.AtomBase): + """The YouTube Recorded element""" + _tag = 'recorded' + _namespace = YOUTUBE_NAMESPACE + + +class Statistics(atom.AtomBase): + """The YouTube Statistics element.""" + _tag = 'statistics' + _namespace = YOUTUBE_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['viewCount'] = 'view_count' + _attributes['videoWatchCount'] = 'video_watch_count' + _attributes['subscriberCount'] = 'subscriber_count' + _attributes['lastWebAccess'] = 'last_web_access' + _attributes['favoriteCount'] = 'favorite_count' + + def __init__(self, view_count=None, video_watch_count=None, + favorite_count=None, subscriber_count=None, last_web_access=None, + extension_elements=None, extension_attributes=None, text=None): + + self.view_count = view_count + self.video_watch_count = video_watch_count + self.subscriber_count = subscriber_count + self.last_web_access = last_web_access + self.favorite_count = favorite_count + + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class Status(atom.AtomBase): + """The YouTube Status element""" + _tag = 'status' + _namespace = YOUTUBE_NAMESPACE + + +class Position(atom.AtomBase): + """The YouTube Position element. 
The position in a playlist feed.""" + _tag = 'position' + _namespace = YOUTUBE_NAMESPACE + + +class Racy(atom.AtomBase): + """The YouTube Racy element.""" + _tag = 'racy' + _namespace = YOUTUBE_NAMESPACE + +class Description(atom.AtomBase): + """The YouTube Description element.""" + _tag = 'description' + _namespace = YOUTUBE_NAMESPACE + + +class Private(atom.AtomBase): + """The YouTube Private element.""" + _tag = 'private' + _namespace = YOUTUBE_NAMESPACE + + +class NoEmbed(atom.AtomBase): + """The YouTube VideoShare element. Whether a video can be embedded or not.""" + _tag = 'noembed' + _namespace = YOUTUBE_NAMESPACE + + +class Comments(atom.AtomBase): + """The GData Comments element""" + _tag = 'comments' + _namespace = gdata.GDATA_NAMESPACE + _children = atom.AtomBase._children.copy() + _attributes = atom.AtomBase._attributes.copy() + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + + def __init__(self, feed_link=None, extension_elements=None, + extension_attributes=None, text=None): + + self.feed_link = feed_link + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class Rating(atom.AtomBase): + """The GData Rating element""" + _tag = 'rating' + _namespace = gdata.GDATA_NAMESPACE + _attributes = atom.AtomBase._attributes.copy() + _attributes['min'] = 'min' + _attributes['max'] = 'max' + _attributes['numRaters'] = 'num_raters' + _attributes['average'] = 'average' + + def __init__(self, min=None, max=None, + num_raters=None, average=None, extension_elements=None, + extension_attributes=None, text=None): + + self.min = min + self.max = max + self.num_raters = num_raters + self.average = average + + atom.AtomBase.__init__(self, extension_elements=extension_elements, + extension_attributes=extension_attributes, text=text) + + +class YouTubePlaylistVideoEntry(gdata.GDataEntry): + """Represents a YouTubeVideoEntry on a YouTubePlaylist.""" + _tag = 
gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + _children['{%s}description' % YOUTUBE_NAMESPACE] = ('description', + Description) + _children['{%s}rating' % gdata.GDATA_NAMESPACE] = ('rating', Rating) + _children['{%s}comments' % gdata.GDATA_NAMESPACE] = ('comments', Comments) + _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics) + _children['{%s}location' % YOUTUBE_NAMESPACE] = ('location', Location) + _children['{%s}position' % YOUTUBE_NAMESPACE] = ('position', Position) + _children['{%s}group' % gdata.media.MEDIA_NAMESPACE] = ('media', Media.Group) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, title=None, + updated=None, feed_link=None, description=None, + rating=None, comments=None, statistics=None, + location=None, position=None, media=None, + extension_elements=None, extension_attributes=None): + + self.feed_link = feed_link + self.description = description + self.rating = rating + self.comments = comments + self.statistics = statistics + self.location = location + self.position = position + self.media = media + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, + link=link, published=published, title=title, + updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + +class YouTubeVideoCommentEntry(gdata.GDataEntry): + """Represents a comment on YouTube.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + + +class YouTubeSubscriptionEntry(gdata.GDataEntry): + """Represents a subscription entry on YouTube.""" + _tag = gdata.GDataEntry._tag + 
_namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}username' % YOUTUBE_NAMESPACE] = ('username', Username) + _children['{%s}queryString' % YOUTUBE_NAMESPACE] = ( + 'query_string', QueryString) + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, title=None, + updated=None, username=None, query_string=None, feed_link=None, + extension_elements=None, extension_attributes=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, updated=updated) + + self.username = username + self.query_string = query_string + self.feed_link = feed_link + + + def GetSubscriptionType(self): + """Retrieve the type of this subscription. + + Returns: + A string that is either 'channel, 'query' or 'favorites' + """ + for category in self.category: + if category.scheme == YOUTUBE_SUBSCRIPTION_TYPE_SCHEME: + return category.term + + +class YouTubeVideoResponseEntry(gdata.GDataEntry): + """Represents a video response. 
""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}rating' % gdata.GDATA_NAMESPACE] = ('rating', Rating) + _children['{%s}noembed' % YOUTUBE_NAMESPACE] = ('noembed', NoEmbed) + _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics) + _children['{%s}racy' % YOUTUBE_NAMESPACE] = ('racy', Racy) + _children['{%s}group' % gdata.media.MEDIA_NAMESPACE] = ('media', Media.Group) + + def __init__(self, author=None, category=None, content=None, atom_id=None, + link=None, published=None, title=None, updated=None, rating=None, + noembed=None, statistics=None, racy=None, media=None, + extension_elements=None, extension_attributes=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, updated=updated) + + self.rating = rating + self.noembed = noembed + self.statistics = statistics + self.racy = racy + self.media = media or Media.Group() + + +class YouTubeContactEntry(gdata.GDataEntry): + """Represents a contact entry.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}username' % YOUTUBE_NAMESPACE] = ('username', Username) + _children['{%s}status' % YOUTUBE_NAMESPACE] = ('status', Status) + + + def __init__(self, author=None, category=None, content=None, atom_id=None, + link=None, published=None, title=None, updated=None, + username=None, status=None, extension_elements=None, + extension_attributes=None, text=None): + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, updated=updated) + + self.username = username + self.status = status + + +class 
YouTubeVideoEntry(gdata.GDataEntry): + """Represents a video on YouTube.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}rating' % gdata.GDATA_NAMESPACE] = ('rating', Rating) + _children['{%s}comments' % gdata.GDATA_NAMESPACE] = ('comments', Comments) + _children['{%s}noembed' % YOUTUBE_NAMESPACE] = ('noembed', NoEmbed) + _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics) + _children['{%s}recorded' % YOUTUBE_NAMESPACE] = ('recorded', Recorded) + _children['{%s}racy' % YOUTUBE_NAMESPACE] = ('racy', Racy) + _children['{%s}group' % gdata.media.MEDIA_NAMESPACE] = ('media', Media.Group) + _children['{%s}where' % gdata.geo.GEORSS_NAMESPACE] = ('geo', Geo.Where) + + def __init__(self, author=None, category=None, content=None, atom_id=None, + link=None, published=None, title=None, updated=None, rating=None, + noembed=None, statistics=None, racy=None, media=None, geo=None, + recorded=None, comments=None, extension_elements=None, + extension_attributes=None): + + self.rating = rating + self.noembed = noembed + self.statistics = statistics + self.racy = racy + self.comments = comments + self.media = media or Media.Group() + self.geo = geo + self.recorded = recorded + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, link=link, + published=published, title=title, updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + def GetSwfUrl(self): + """Return the URL for the embeddable Video + + Returns: + URL of the embeddable video + """ + if self.media.content: + for content in self.media.content: + if content.extension_attributes[YOUTUBE_FORMAT] == '5': + return content.url + else: + return None + + def AddDeveloperTags(self, developer_tags): + """Add a developer tag for this entry. 
+ + Developer tags can only be set during the initial upload. + + Arguments: + developer_tags: A list of developer tags as strings. + + Returns: + A list of all developer tags for this video entry. + """ + for tag_text in developer_tags: + self.media.category.append(gdata.media.Category( + text=tag_text, label=tag_text, scheme=YOUTUBE_DEVELOPER_TAG_SCHEME)) + + return self.GetDeveloperTags() + + def GetDeveloperTags(self): + """Retrieve developer tags for this video entry.""" + developer_tags = [] + for category in self.media.category: + if category.scheme == YOUTUBE_DEVELOPER_TAG_SCHEME: + developer_tags.append(category) + if len(developer_tags) > 0: + return developer_tags + + def GetYouTubeCategoryAsString(self): + """Convenience method to return the YouTube category as string. + + YouTubeVideoEntries can contain multiple Category objects with differing + schemes. This method returns only the category with the correct + scheme, ignoring developer tags. + """ + for category in self.media.category: + if category.scheme != YOUTUBE_DEVELOPER_TAG_SCHEME: + return category.text + +class YouTubeUserEntry(gdata.GDataEntry): + """Represents a user on YouTube.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}username' % YOUTUBE_NAMESPACE] = ('username', Username) + _children['{%s}firstName' % YOUTUBE_NAMESPACE] = ('first_name', FirstName) + _children['{%s}lastName' % YOUTUBE_NAMESPACE] = ('last_name', LastName) + _children['{%s}age' % YOUTUBE_NAMESPACE] = ('age', Age) + _children['{%s}books' % YOUTUBE_NAMESPACE] = ('books', Books) + _children['{%s}gender' % YOUTUBE_NAMESPACE] = ('gender', Gender) + _children['{%s}company' % YOUTUBE_NAMESPACE] = ('company', Company) + _children['{%s}description' % YOUTUBE_NAMESPACE] = ('description', + Description) + _children['{%s}hobbies' % YOUTUBE_NAMESPACE] = ('hobbies', Hobbies) + 
_children['{%s}hometown' % YOUTUBE_NAMESPACE] = ('hometown', Hometown) + _children['{%s}location' % YOUTUBE_NAMESPACE] = ('location', Location) + _children['{%s}movies' % YOUTUBE_NAMESPACE] = ('movies', Movies) + _children['{%s}music' % YOUTUBE_NAMESPACE] = ('music', Music) + _children['{%s}occupation' % YOUTUBE_NAMESPACE] = ('occupation', Occupation) + _children['{%s}school' % YOUTUBE_NAMESPACE] = ('school', School) + _children['{%s}relationship' % YOUTUBE_NAMESPACE] = ('relationship', + Relationship) + _children['{%s}statistics' % YOUTUBE_NAMESPACE] = ('statistics', Statistics) + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + _children['{%s}thumbnail' % gdata.media.MEDIA_NAMESPACE] = ('thumbnail', + Media.Thumbnail) + + def __init__(self, author=None, category=None, content=None, atom_id=None, + link=None, published=None, title=None, updated=None, + username=None, first_name=None, last_name=None, age=None, + books=None, gender=None, company=None, description=None, + hobbies=None, hometown=None, location=None, movies=None, + music=None, occupation=None, school=None, relationship=None, + statistics=None, feed_link=None, extension_elements=None, + extension_attributes=None, text=None): + + self.username = username + self.first_name = first_name + self.last_name = last_name + self.age = age + self.books = books + self.gender = gender + self.company = company + self.description = description + self.hobbies = hobbies + self.hometown = hometown + self.location = location + self.movies = movies + self.music = music + self.occupation = occupation + self.school = school + self.relationship = relationship + self.statistics = statistics + self.feed_link = feed_link + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, + link=link, published=published, + title=title, updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes, + text=text) + + 
+class YouTubeVideoFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a video feed on YouTube.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [YouTubeVideoEntry]) + +class YouTubePlaylistEntry(gdata.GDataEntry): + """Represents a playlist in YouTube.""" + _tag = gdata.GDataEntry._tag + _namespace = gdata.GDataEntry._namespace + _children = gdata.GDataEntry._children.copy() + _attributes = gdata.GDataEntry._attributes.copy() + _children['{%s}description' % YOUTUBE_NAMESPACE] = ('description', + Description) + _children['{%s}private' % YOUTUBE_NAMESPACE] = ('private', + Private) + _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link', + [gdata.FeedLink]) + + def __init__(self, author=None, category=None, content=None, + atom_id=None, link=None, published=None, title=None, + updated=None, private=None, feed_link=None, + description=None, extension_elements=None, + extension_attributes=None): + + self.description = description + self.private = private + self.feed_link = feed_link + + gdata.GDataEntry.__init__(self, author=author, category=category, + content=content, atom_id=atom_id, + link=link, published=published, title=title, + updated=updated, + extension_elements=extension_elements, + extension_attributes=extension_attributes) + + + +class YouTubePlaylistFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of a user's playlists """ + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubePlaylistEntry]) + + +class YouTubePlaylistVideoFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of video entry on a playlist.""" + _tag = gdata.GDataFeed._tag + _namespace = 
gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubePlaylistVideoEntry]) + + +class YouTubeContactFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of a users contacts.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubeContactEntry]) + + +class YouTubeSubscriptionFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of a users subscriptions.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubeSubscriptionEntry]) + + +class YouTubeVideoCommentFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of comments for a video.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubeVideoCommentEntry]) + + +class YouTubeVideoResponseFeed(gdata.GDataFeed, gdata.LinkFinder): + """Represents a feed of video responses.""" + _tag = gdata.GDataFeed._tag + _namespace = gdata.GDataFeed._namespace + _children = gdata.GDataFeed._children.copy() + _attributes = gdata.GDataFeed._attributes.copy() + _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', + [YouTubeVideoResponseEntry]) + + +def YouTubeVideoFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoFeed, xml_string) + + +def YouTubeVideoEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoEntry, xml_string) + + +def 
YouTubeContactFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeContactFeed, xml_string) + + +def YouTubeContactEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeContactEntry, xml_string) + + +def YouTubeVideoCommentFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoCommentFeed, xml_string) + + +def YouTubeVideoCommentEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoCommentEntry, xml_string) + + +def YouTubeUserFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoFeed, xml_string) + + +def YouTubeUserEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeUserEntry, xml_string) + + +def YouTubePlaylistFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubePlaylistFeed, xml_string) + + +def YouTubePlaylistVideoFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubePlaylistVideoFeed, xml_string) + + +def YouTubePlaylistEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubePlaylistEntry, xml_string) + + +def YouTubePlaylistVideoEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubePlaylistVideoEntry, xml_string) + + +def YouTubeSubscriptionFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeSubscriptionFeed, xml_string) + + +def YouTubeSubscriptionEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeSubscriptionEntry, xml_string) + + +def YouTubeVideoResponseFeedFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoResponseFeed, xml_string) + + +def YouTubeVideoResponseEntryFromString(xml_string): + return atom.CreateClassFromXMLString(YouTubeVideoResponseEntry, xml_string) diff --git a/gdata.py-1.2.3/src/gdata/youtube/service.py b/gdata.py-1.2.3/src/gdata/youtube/service.py new file mode 100644 index 0000000..ef7c0f1 --- /dev/null +++ 
b/gdata.py-1.2.3/src/gdata/youtube/service.py @@ -0,0 +1,1545 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""YouTubeService extends GDataService to streamline YouTube operations. + + YouTubeService: Provides methods to perform CRUD operations on YouTube feeds. + Extends GDataService. +""" + +__author__ = ('api.stephaniel@gmail.com (Stephanie Liu), ' + 'api.jhartmann@gmail.com (Jochen Hartmann)') + +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import os +import atom +import gdata +import gdata.service +import gdata.youtube + +YOUTUBE_SERVER = 'gdata.youtube.com' +YOUTUBE_SERVICE = 'youtube' +YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL = 'https://www.google.com/youtube/accounts/ClientLogin' +YOUTUBE_SUPPORTED_UPLOAD_TYPES = ('mov', 'avi', 'wmv', 'mpg', 'quicktime', + 'flv') +YOUTUBE_QUERY_VALID_TIME_PARAMETERS = ('today', 'this_week', 'this_month', + 'all_time') +YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS = ('published', 'viewCount', 'rating', + 'relevance') +YOUTUBE_QUERY_VALID_RACY_PARAMETERS = ('include', 'exclude') +YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS = ('1', '5', '6') +YOUTUBE_STANDARDFEEDS = ('most_recent', 'recently_featured', + 'top_rated', 'most_viewed','watch_on_mobile') +YOUTUBE_UPLOAD_URI = 
'http://uploads.gdata.youtube.com/feeds/api/users' +YOUTUBE_UPLOAD_TOKEN_URI = 'http://gdata.youtube.com/action/GetUploadToken' +YOUTUBE_VIDEO_URI = 'http://gdata.youtube.com/feeds/api/videos' +YOUTUBE_USER_FEED_URI = 'http://gdata.youtube.com/feeds/api/users' +YOUTUBE_PLAYLIST_FEED_URI = 'http://gdata.youtube.com/feeds/api/playlists' + +YOUTUBE_STANDARD_FEEDS = 'http://gdata.youtube.com/feeds/api/standardfeeds' +YOUTUBE_STANDARD_TOP_RATED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, 'top_rated') +YOUTUBE_STANDARD_MOST_VIEWED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'most_viewed') +YOUTUBE_STANDARD_RECENTLY_FEATURED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'recently_featured') +YOUTUBE_STANDARD_WATCH_ON_MOBILE_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'watch_on_mobile') +YOUTUBE_STANDARD_TOP_FAVORITES_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'top_favorites') +YOUTUBE_STANDARD_MOST_RECENT_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'most_recent') +YOUTUBE_STANDARD_MOST_DISCUSSED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'most_discussed') +YOUTUBE_STANDARD_MOST_LINKED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'most_linked') +YOUTUBE_STANDARD_MOST_RESPONDED_URI = '%s/%s' % (YOUTUBE_STANDARD_FEEDS, + 'most_responded') +YOUTUBE_SCHEMA = 'http://gdata.youtube.com/schemas' + +YOUTUBE_RATING_LINK_REL = '%s#video.ratings' % YOUTUBE_SCHEMA + +YOUTUBE_COMPLAINT_CATEGORY_SCHEME = '%s/%s' % (YOUTUBE_SCHEMA, + 'complaint-reasons.cat') +YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME = '%s/%s' % (YOUTUBE_SCHEMA, + 'subscriptiontypes.cat') + +YOUTUBE_COMPLAINT_CATEGORY_TERMS = ('PORN', 'VIOLENCE', 'HATE', 'DANGEROUS', + 'RIGHTS', 'SPAM') +YOUTUBE_CONTACT_STATUS = ('accepted', 'rejected') +YOUTUBE_CONTACT_CATEGORY = ('Friends', 'Family') + +UNKOWN_ERROR = 1000 +YOUTUBE_BAD_REQUEST = 400 +YOUTUBE_CONFLICT = 409 +YOUTUBE_INTERNAL_SERVER_ERROR = 500 +YOUTUBE_INVALID_ARGUMENT = 601 +YOUTUBE_INVALID_CONTENT_TYPE = 602 +YOUTUBE_NOT_A_VIDEO = 603 +YOUTUBE_INVALID_KIND = 604 + + +class 
Error(Exception): + """Base class for errors within the YouTube service.""" + pass + +class RequestError(Error): + """Error class that is thrown in response to an invalid HTTP Request.""" + pass + +class YouTubeError(Error): + """YouTube service specific error class.""" + pass + +class YouTubeService(gdata.service.GDataService): + + """Client for the YouTube service. + + Performs all documented Google Data YouTube API functions, such as inserting, + updating and deleting videos, comments, playlist, subscriptions etc. + YouTube Service requires authentication for any write, update or delete + actions. + + Attributes: + email: An optional string identifying the user. Required only for + authenticated actions. + password: An optional string identifying the user's password. + source: An optional string identifying the name of your application. + server: An optional address of the YouTube API server. gdata.youtube.com + is provided as the default value. + additional_headers: An optional dictionary containing additional headers + to be passed along with each request. Use to store developer key. + client_id: An optional string identifying your application, required for + authenticated requests, along with a developer key. + developer_key: An optional string value. Register your application at + http://code.google.com/apis/youtube/dashboard to obtain a (free) key. 
+ """ + + def __init__(self, email=None, password=None, source=None, + server=YOUTUBE_SERVER, additional_headers=None, client_id=None, + developer_key=None): + self.additional_headers = {} + if client_id is not None and developer_key is not None: + self.additional_headers = {'X-Gdata-Client': client_id, + 'X-GData-Key': 'key=%s' % developer_key} + + gdata.service.GDataService.__init__( + self, email=email, password=password, service=YOUTUBE_SERVICE, + source=source, server=server, + additional_headers=self.additional_headers) + elif developer_key and not client_id: + raise YouTubeError('You must also specify the clientId') + else: + gdata.service.GDataService.__init__( + self, email=email, password=password, service=YOUTUBE_SERVICE, + source=source, server=server, additional_headers=additional_headers) + self.auth_service_url = YOUTUBE_CLIENTLOGIN_AUTHENTICATION_URL + + def GetYouTubeVideoFeed(self, uri): + """Retrieve a YouTubeVideoFeed. + + Args: + uri: A string representing the URI of the feed that is to be retrieved. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.Get(uri, converter=gdata.youtube.YouTubeVideoFeedFromString) + + def GetYouTubeVideoEntry(self, uri=None, video_id=None): + """Retrieve a YouTubeVideoEntry. + + Either a uri or a video_id must be provided. + + Args: + uri: An optional string representing the URI of the entry that is to + be retrieved. + video_id: An optional string representing the ID of the video. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a video_id to the + GetYouTubeVideoEntry() method. 
+ """ + if uri is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the GetYouTubeVideoEntry() method') + elif video_id and not uri: + uri = '%s/%s' % (YOUTUBE_VIDEO_URI, video_id) + return self.Get(uri, converter=gdata.youtube.YouTubeVideoEntryFromString) + + def GetYouTubeContactFeed(self, uri=None, username='default'): + """Retrieve a YouTubeContactFeed. + + Either a uri or a username must be provided. + + Args: + uri: An optional string representing the URI of the contact feed that + is to be retrieved. + username: An optional string representing the username. Defaults to the + currently authenticated user. + + Returns: + A YouTubeContactFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a username to the + GetYouTubeContactFeed() method. + """ + if uri is None: + uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'contacts') + return self.Get(uri, converter=gdata.youtube.YouTubeContactFeedFromString) + + def GetYouTubeContactEntry(self, uri): + """Retrieve a YouTubeContactEntry. + + Args: + uri: A string representing the URI of the contact entry that is to + be retrieved. + + Returns: + A YouTubeContactEntry if successfully retrieved. + """ + return self.Get(uri, converter=gdata.youtube.YouTubeContactEntryFromString) + + def GetYouTubeVideoCommentFeed(self, uri=None, video_id=None): + """Retrieve a YouTubeVideoCommentFeed. + + Either a uri or a video_id must be provided. + + Args: + uri: An optional string representing the URI of the comment feed that + is to be retrieved. + video_id: An optional string representing the ID of the video for which + to retrieve the comment feed. + + Returns: + A YouTubeVideoCommentFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a video_id to the + GetYouTubeVideoCommentFeed() method. 
+ """ + if uri is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the GetYouTubeVideoCommentFeed() method') + elif video_id and not uri: + uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'comments') + return self.Get( + uri, converter=gdata.youtube.YouTubeVideoCommentFeedFromString) + + def GetYouTubeVideoCommentEntry(self, uri): + """Retrieve a YouTubeVideoCommentEntry. + + Args: + uri: A string representing the URI of the comment entry that is to + be retrieved. + + Returns: + A YouTubeCommentEntry if successfully retrieved. + """ + return self.Get( + uri, converter=gdata.youtube.YouTubeVideoCommentEntryFromString) + + def GetYouTubeUserFeed(self, uri=None, username=None): + """Retrieve a YouTubeUserFeed. + + Either a uri or a username must be provided. + + Args: + uri: An optional string representing the URI of the user feed that is + to be retrieved. + username: An optional string representing the username. + + Returns: + A YouTubeUserFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a username to the + GetYouTubeUserFeed() method. + """ + if uri is None and username is None: + raise YouTubeError('You must provide at least a uri or a username ' + 'to the GetYouTubeUserFeed() method') + elif username and not uri: + uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'uploads') + return self.Get(uri, converter=gdata.youtube.YouTubeUserFeedFromString) + + def GetYouTubeUserEntry(self, uri=None, username=None): + """Retrieve a YouTubeUserEntry. + + Either a uri or a username must be provided. + + Args: + uri: An optional string representing the URI of the user entry that is + to be retrieved. + username: An optional string representing the username. + + Returns: + A YouTubeUserEntry if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a username to the + GetYouTubeUserEntry() method. 
+ """ + if uri is None and username is None: + raise YouTubeError('You must provide at least a uri or a username ' + 'to the GetYouTubeUserEntry() method') + elif username and not uri: + uri = '%s/%s' % (YOUTUBE_USER_FEED_URI, username) + return self.Get(uri, converter=gdata.youtube.YouTubeUserEntryFromString) + + def GetYouTubePlaylistFeed(self, uri=None, username='default'): + """Retrieve a YouTubePlaylistFeed (a feed of playlists for a user). + + Either a uri or a username must be provided. + + Args: + uri: An optional string representing the URI of the playlist feed that + is to be retrieved. + username: An optional string representing the username. Defaults to the + currently authenticated user. + + Returns: + A YouTubePlaylistFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a username to the + GetYouTubePlaylistFeed() method. + """ + if uri is None: + uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'playlists') + return self.Get(uri, converter=gdata.youtube.YouTubePlaylistFeedFromString) + + def GetYouTubePlaylistEntry(self, uri): + """Retrieve a YouTubePlaylistEntry. + + Args: + uri: A string representing the URI of the playlist feed that is to + be retrieved. + + Returns: + A YouTubePlaylistEntry if successfully retrieved. + """ + return self.Get(uri, converter=gdata.youtube.YouTubePlaylistEntryFromString) + + def GetYouTubePlaylistVideoFeed(self, uri=None, playlist_id=None): + """Retrieve a YouTubePlaylistVideoFeed (a feed of videos on a playlist). + + Either a uri or a playlist_id must be provided. + + Args: + uri: An optional string representing the URI of the playlist video feed + that is to be retrieved. + playlist_id: An optional string representing the Id of the playlist whose + playlist video feed is to be retrieved. + + Returns: + A YouTubePlaylistVideoFeed if successfully retrieved. 
+ + Raises: + YouTubeError: You must provide at least a uri or a playlist_id to the + GetYouTubePlaylistVideoFeed() method. + """ + if uri is None and playlist_id is None: + raise YouTubeError('You must provide at least a uri or a playlist_id ' + 'to the GetYouTubePlaylistVideoFeed() method') + elif playlist_id and not uri: + uri = '%s/%s' % (YOUTUBE_PLAYLIST_FEED_URI, playlist_id) + return self.Get( + uri, converter=gdata.youtube.YouTubePlaylistVideoFeedFromString) + + def GetYouTubeVideoResponseFeed(self, uri=None, video_id=None): + """Retrieve a YouTubeVideoResponseFeed. + + Either a uri or a playlist_id must be provided. + + Args: + uri: An optional string representing the URI of the video response feed + that is to be retrieved. + video_id: An optional string representing the ID of the video whose + response feed is to be retrieved. + + Returns: + A YouTubeVideoResponseFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a video_id to the + GetYouTubeVideoResponseFeed() method. + """ + if uri is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the GetYouTubeVideoResponseFeed() method') + elif video_id and not uri: + uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'responses') + return self.Get( + uri, converter=gdata.youtube.YouTubeVideoResponseFeedFromString) + + def GetYouTubeVideoResponseEntry(self, uri): + """Retrieve a YouTubeVideoResponseEntry. + + Args: + uri: A string representing the URI of the video response entry that + is to be retrieved. + + Returns: + A YouTubeVideoResponseEntry if successfully retrieved. + """ + return self.Get( + uri, converter=gdata.youtube.YouTubeVideoResponseEntryFromString) + + def GetYouTubeSubscriptionFeed(self, uri=None, username='default'): + """Retrieve a YouTubeSubscriptionFeed. + + Either the uri of the feed or a username must be provided. 
+ + Args: + uri: An optional string representing the URI of the feed that is to + be retrieved. + username: An optional string representing the username whose subscription + feed is to be retrieved. Defaults to the currently authenticted user. + + Returns: + A YouTubeVideoSubscriptionFeed if successfully retrieved. + """ + if uri is None: + uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'subscriptions') + return self.Get( + uri, converter=gdata.youtube.YouTubeSubscriptionFeedFromString) + + def GetYouTubeSubscriptionEntry(self, uri): + """Retrieve a YouTubeSubscriptionEntry. + + Args: + uri: A string representing the URI of the entry that is to be retrieved. + + Returns: + A YouTubeVideoSubscriptionEntry if successfully retrieved. + """ + return self.Get( + uri, converter=gdata.youtube.YouTubeSubscriptionEntryFromString) + + def GetYouTubeRelatedVideoFeed(self, uri=None, video_id=None): + """Retrieve a YouTubeRelatedVideoFeed. + + Either a uri for the feed or a video_id is required. + + Args: + uri: An optional string representing the URI of the feed that is to + be retrieved. + video_id: An optional string representing the ID of the video for which + to retrieve the related video feed. + + Returns: + A YouTubeRelatedVideoFeed if successfully retrieved. + + Raises: + YouTubeError: You must provide at least a uri or a video_id to the + GetYouTubeRelatedVideoFeed() method. + """ + if uri is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the GetYouTubeRelatedVideoFeed() method') + elif video_id and not uri: + uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'related') + return self.Get( + uri, converter=gdata.youtube.YouTubeVideoFeedFromString) + + def GetTopRatedVideoFeed(self): + """Retrieve the 'top_rated' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. 
+ """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_TOP_RATED_URI) + + def GetMostViewedVideoFeed(self): + """Retrieve the 'most_viewed' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_VIEWED_URI) + + def GetRecentlyFeaturedVideoFeed(self): + """Retrieve the 'recently_featured' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_RECENTLY_FEATURED_URI) + + def GetWatchOnMobileVideoFeed(self): + """Retrieve the 'watch_on_mobile' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_WATCH_ON_MOBILE_URI) + + def GetTopFavoritesVideoFeed(self): + """Retrieve the 'top_favorites' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_TOP_FAVORITES_URI) + + def GetMostRecentVideoFeed(self): + """Retrieve the 'most_recent' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_RECENT_URI) + + def GetMostDiscussedVideoFeed(self): + """Retrieve the 'most_discussed' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_DISCUSSED_URI) + + def GetMostLinkedVideoFeed(self): + """Retrieve the 'most_linked' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_LINKED_URI) + + def GetMostRespondedVideoFeed(self): + """Retrieve the 'most_responded' standard video feed. + + Returns: + A YouTubeVideoFeed if successfully retrieved. 
+ """ + return self.GetYouTubeVideoFeed(YOUTUBE_STANDARD_MOST_RESPONDED_URI) + + def GetUserFavoritesFeed(self, username='default'): + """Retrieve the favorites feed for a given user. + + Args: + username: An optional string representing the username whose favorites + feed is to be retrieved. Defaults to the currently authenticated user. + + Returns: + A YouTubeVideoFeed if successfully retrieved. + """ + favorites_feed_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, + 'favorites') + return self.GetYouTubeVideoFeed(favorites_feed_uri) + + def InsertVideoEntry(self, video_entry, filename_or_handle, + youtube_username='default', + content_type='video/quicktime'): + """Upload a new video to YouTube using the direct upload mechanism. + + Needs authentication. + + Args: + video_entry: The YouTubeVideoEntry to upload. + filename_or_handle: A file-like object or file name where the video + will be read from. + youtube_username: An optional string representing the username into whose + account this video is to be uploaded to. Defaults to the currently + authenticated user. + content_type: An optional string representing internet media type + (a.k.a. mime type) of the media object. Currently the YouTube API + supports these types: + o video/mpeg + o video/quicktime + o video/x-msvideo + o video/mp4 + o video/x-flv + + Returns: + The newly created YouTubeVideoEntry if successful. + + Raises: + AssertionError: video_entry must be a gdata.youtube.VideoEntry instance. + YouTubeError: An error occurred trying to read the video file provided. + gdata.service.RequestError: An error occurred trying to upload the video + to the API server. + """ + + # We need to perform a series of checks on the video_entry and on the + # file that we plan to upload, such as checking whether we have a valid + # video_entry and that the file is the correct type and readable, prior + # to performing the actual POST request. 
+ + try: + assert(isinstance(video_entry, gdata.youtube.YouTubeVideoEntry)) + except AssertionError: + raise YouTubeError({'status':YOUTUBE_INVALID_ARGUMENT, + 'body':'`video_entry` must be a gdata.youtube.VideoEntry instance', + 'reason':'Found %s, not VideoEntry' % type(video_entry) + }) + majtype, mintype = content_type.split('/') + + try: + assert(mintype in YOUTUBE_SUPPORTED_UPLOAD_TYPES) + except (ValueError, AssertionError): + raise YouTubeError({'status':YOUTUBE_INVALID_CONTENT_TYPE, + 'body':'This is not a valid content type: %s' % content_type, + 'reason':'Accepted content types: %s' % + ['video/%s' % (t) for t in YOUTUBE_SUPPORTED_UPLOAD_TYPES]}) + + if (isinstance(filename_or_handle, (str, unicode)) + and os.path.exists(filename_or_handle)): + mediasource = gdata.MediaSource() + mediasource.setFile(filename_or_handle, content_type) + elif hasattr(filename_or_handle, 'read'): + import StringIO + if hasattr(filename_or_handle, 'seek'): + filename_or_handle.seek(0) + file_handle = StringIO.StringIO(filename_or_handle.read()) + name = 'video' + if hasattr(filename_or_handle, 'name'): + name = filename_or_handle.name + mediasource = gdata.MediaSource(file_handle, content_type, + content_length=file_handle.len, file_name=name) + else: + raise YouTubeError({'status':YOUTUBE_INVALID_ARGUMENT, 'body': + '`filename_or_handle` must be a path name or a file-like object', + 'reason': ('Found %s, not path name or object ' + 'with a .read() method' % type(filename_or_handle))}) + upload_uri = '%s/%s/%s' % (YOUTUBE_UPLOAD_URI, youtube_username, + 'uploads') + self.additional_headers['Slug'] = mediasource.file_name + + # Using a nested try statement to retain Python 2.4 compatibility + try: + try: + return self.Post(video_entry, uri=upload_uri, media_source=mediasource, + converter=gdata.youtube.YouTubeVideoEntryFromString) + except gdata.service.RequestError, e: + raise YouTubeError(e.args[0]) + finally: + del(self.additional_headers['Slug']) + + def 
CheckUploadStatus(self, video_entry=None, video_id=None): + """Check upload status on a recently uploaded video entry. + + Needs authentication. Either video_entry or video_id must be provided. + + Args: + video_entry: An optional YouTubeVideoEntry whose upload status to check + video_id: An optional string representing the ID of the uploaded video + whose status is to be checked. + + Returns: + A tuple containing (video_upload_state, detailed_message) or None if + no status information is found. + + Raises: + YouTubeError: You must provide at least a video_entry or a video_id to the + CheckUploadStatus() method. + """ + if video_entry is None and video_id is None: + raise YouTubeError('You must provide at least a uri or a video_id ' + 'to the CheckUploadStatus() method') + elif video_id and not video_entry: + video_entry = self.GetYouTubeVideoEntry(video_id=video_id) + + control = video_entry.control + if control is not None: + draft = control.draft + if draft is not None: + if draft.text == 'yes': + yt_state = control.extension_elements[0] + if yt_state is not None: + state_value = yt_state.attributes['name'] + message = '' + if yt_state.text is not None: + message = yt_state.text + + return (state_value, message) + + def GetFormUploadToken(self, video_entry, uri=YOUTUBE_UPLOAD_TOKEN_URI): + """Receives a YouTube Token and a YouTube PostUrl from a YouTubeVideoEntry. + + Needs authentication. + + Args: + video_entry: The YouTubeVideoEntry to upload (meta-data only). + uri: An optional string representing the URI from where to fetch the + token information. Defaults to the YOUTUBE_UPLOADTOKEN_URI. + + Returns: + A tuple containing the URL to which to post your video file, along + with the youtube token that must be included with your upload in the + form of: (post_url, youtube_token). 
+ """ + try: + response = self.Post(video_entry, uri) + except gdata.service.RequestError, e: + raise YouTubeError(e.args[0]) + + tree = ElementTree.fromstring(response) + + for child in tree: + if child.tag == 'url': + post_url = child.text + elif child.tag == 'token': + youtube_token = child.text + return (post_url, youtube_token) + + def UpdateVideoEntry(self, video_entry): + """Updates a video entry's meta-data. + + Needs authentication. + + Args: + video_entry: The YouTubeVideoEntry to update, containing updated + meta-data. + + Returns: + An updated YouTubeVideoEntry on success or None. + """ + for link in video_entry.link: + if link.rel == 'edit': + edit_uri = link.href + return self.Put(video_entry, uri=edit_uri, + converter=gdata.youtube.YouTubeVideoEntryFromString) + + def DeleteVideoEntry(self, video_entry): + """Deletes a video entry. + + Needs authentication. + + Args: + video_entry: The YouTubeVideoEntry to be deleted. + + Returns: + True if entry was deleted successfully. + """ + for link in video_entry.link: + if link.rel == 'edit': + edit_uri = link.href + return self.Delete(edit_uri) + + def AddRating(self, rating_value, video_entry): + """Add a rating to a video entry. + + Needs authentication. + + Args: + rating_value: The integer value for the rating (between 1 and 5). + video_entry: The YouTubeVideoEntry to be rated. + + Returns: + True if the rating was added successfully. + + Raises: + YouTubeError: rating_value must be between 1 and 5 in AddRating(). 
+ """ + if rating_value < 1 or rating_value > 5: + raise YouTubeError('rating_value must be between 1 and 5 in AddRating()') + + entry = gdata.GDataEntry() + rating = gdata.youtube.Rating(min='1', max='5') + rating.extension_attributes['name'] = 'value' + rating.extension_attributes['value'] = str(rating_value) + entry.extension_elements.append(rating) + + for link in video_entry.link: + if link.rel == YOUTUBE_RATING_LINK_REL: + rating_uri = link.href + + return self.Post(entry, uri=rating_uri) + + def AddComment(self, comment_text, video_entry): + """Add a comment to a video entry. + + Needs authentication. Note that each comment that is posted must contain + the video entry that it is to be posted to. + + Args: + comment_text: A string representing the text of the comment. + video_entry: The YouTubeVideoEntry to be commented on. + + Returns: + True if the comment was added successfully. + """ + content = atom.Content(text=comment_text) + comment_entry = gdata.youtube.YouTubeVideoCommentEntry(content=content) + comment_post_uri = video_entry.comments.feed_link[0].href + + return self.Post(comment_entry, uri=comment_post_uri) + + def AddVideoResponse(self, video_id_to_respond_to, video_response): + """Add a video response. + + Needs authentication. + + Args: + video_id_to_respond_to: A string representing the ID of the video to be + responded to. + video_response: YouTubeVideoEntry to be posted as a response. + + Returns: + True if video response was posted successfully. + """ + post_uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id_to_respond_to, + 'responses') + return self.Post(video_response, uri=post_uri) + + def DeleteVideoResponse(self, video_id, response_video_id): + """Delete a video response. + + Needs authentication. + + Args: + video_id: A string representing the ID of video that contains the + response. + response_video_id: A string representing the ID of the video that was + posted as a response. 
+ + Returns: + True if video response was deleted succcessfully. + """ + delete_uri = '%s/%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'responses', + response_video_id) + return self.Delete(delete_uri) + + def AddComplaint(self, complaint_text, complaint_term, video_id): + """Add a complaint for a particular video entry. + + Needs authentication. + + Args: + complaint_text: A string representing the complaint text. + complaint_term: A string representing the complaint category term. + video_id: A string representing the ID of YouTubeVideoEntry to + complain about. + + Returns: + True if posted successfully. + + Raises: + YouTubeError: Your complaint_term is not valid. + """ + if complaint_term not in YOUTUBE_COMPLAINT_CATEGORY_TERMS: + raise YouTubeError('Your complaint_term is not valid') + + content = atom.Content(text=complaint_text) + category = atom.Category(term=complaint_term, + scheme=YOUTUBE_COMPLAINT_CATEGORY_SCHEME) + + complaint_entry = gdata.GDataEntry(content=content, category=[category]) + post_uri = '%s/%s/%s' % (YOUTUBE_VIDEO_URI, video_id, 'complaints') + + return self.Post(complaint_entry, post_uri) + + def AddVideoEntryToFavorites(self, video_entry, username='default'): + """Add a video entry to a users favorite feed. + + Needs authentication. + + Args: + video_entry: The YouTubeVideoEntry to add. + username: An optional string representing the username to whose favorite + feed you wish to add the entry. Defaults to the currently + authenticated user. + Returns: + The posted YouTubeVideoEntry if successfully posted. + """ + post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'favorites') + + return self.Post(video_entry, post_uri, + converter=gdata.youtube.YouTubeVideoEntryFromString) + + def DeleteVideoEntryFromFavorites(self, video_id, username='default'): + """Delete a video entry from the users favorite feed. + + Needs authentication. 
+ + Args: + video_id: A string representing the ID of the video that is to be removed + username: An optional string representing the username of the user's + favorite feed. Defaults to the currently authenticated user. + + Returns: + True if entry was successfully deleted. + """ + edit_link = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, username, 'favorites', + video_id) + return self.Delete(edit_link) + + def AddPlaylist(self, playlist_title, playlist_description, + playlist_private=None): + """Add a new playlist to the currently authenticated users account. + + Needs authentication. + + Args: + playlist_title: A string representing the title for the new playlist. + playlist_description: A string representing the description of the + playlist. + playlist_private: An optional boolean, set to True if the playlist is + to be private. + + Returns: + The YouTubePlaylistEntry if successfully posted. + """ + playlist_entry = gdata.youtube.YouTubePlaylistEntry( + title=atom.Title(text=playlist_title), + description=gdata.youtube.Description(text=playlist_description)) + if playlist_private: + playlist_entry.private = gdata.youtube.Private() + + playlist_post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, 'default', + 'playlists') + return self.Post(playlist_entry, playlist_post_uri, + converter=gdata.youtube.YouTubePlaylistEntryFromString) + + def UpdatePlaylist(self, playlist_id, new_playlist_title, + new_playlist_description, playlist_private=None, + username='default'): + """Update a playlist with new meta-data. + + Needs authentication. + + Args: + playlist_id: A string representing the ID of the playlist to be updated. + new_playlist_title: A string representing a new title for the playlist. + new_playlist_description: A string representing a new description for the + playlist. + playlist_private: An optional boolean, set to True if the playlist is + to be private. + username: An optional string representing the username whose playlist is + to be updated. 
Defaults to the currently authenticated user. + + Returns: + A YouTubePlaylistEntry if the update was successful. + """ + updated_playlist = gdata.youtube.YouTubePlaylistEntry( + title=atom.Title(text=new_playlist_title), + description=gdata.youtube.Description(text=new_playlist_description)) + if playlist_private: + updated_playlist.private = gdata.youtube.Private() + + playlist_put_uri = '%s/%s/playlists/%s' % (YOUTUBE_USER_FEED_URI, username, + playlist_id) + + return self.Put(updated_playlist, playlist_put_uri, + converter=gdata.youtube.YouTubePlaylistEntryFromString) + + def DeletePlaylist(self, playlist_uri): + """Delete a playlist from the currently authenticated users playlists. + + Needs authentication. + + Args: + playlist_uri: A string representing the URI of the playlist that is + to be deleted. + + Returns: + True if successfully deleted. + """ + return self.Delete(playlist_uri) + + def AddPlaylistVideoEntryToPlaylist( + self, playlist_uri, video_id, custom_video_title=None, + custom_video_description=None): + """Add a video entry to a playlist, optionally providing a custom title + and description. + + Needs authentication. + + Args: + playlist_uri: A string representing the URI of the playlist to which this + video entry is to be added. + video_id: A string representing the ID of the video entry to add. + custom_video_title: An optional string representing a custom title for + the video (only shown on the playlist). + custom_video_description: An optional string representing a custom + description for the video (only shown on the playlist). + + Returns: + A YouTubePlaylistVideoEntry if successfully posted. 
+ """ + playlist_video_entry = gdata.youtube.YouTubePlaylistVideoEntry( + atom_id=atom.Id(text=video_id)) + if custom_video_title: + playlist_video_entry.title = atom.Title(text=custom_video_title) + if custom_video_description: + playlist_video_entry.description = gdata.youtube.Description( + text=custom_video_description) + + return self.Post(playlist_video_entry, playlist_uri, + converter=gdata.youtube.YouTubePlaylistVideoEntryFromString) + + def UpdatePlaylistVideoEntryMetaData( + self, playlist_uri, playlist_entry_id, new_video_title, + new_video_description, new_video_position): + """Update the meta data for a YouTubePlaylistVideoEntry. + + Needs authentication. + + Args: + playlist_uri: A string representing the URI of the playlist that contains + the entry to be updated. + playlist_entry_id: A string representing the ID of the entry to be + updated. + new_video_title: A string representing the new title for the video entry. + new_video_description: A string representing the new description for + the video entry. + new_video_position: An integer representing the new position on the + playlist for the video. + + Returns: + A YouTubePlaylistVideoEntry if the update was successful. + """ + playlist_video_entry = gdata.youtube.YouTubePlaylistVideoEntry( + title=atom.Title(text=new_video_title), + description=gdata.youtube.Description(text=new_video_description), + position=gdata.youtube.Position(text=str(new_video_position))) + + playlist_put_uri = playlist_uri + '/' + playlist_entry_id + + return self.Put(playlist_video_entry, playlist_put_uri, + converter=gdata.youtube.YouTubePlaylistVideoEntryFromString) + + def DeletePlaylistVideoEntry(self, playlist_uri, playlist_video_entry_id): + """Delete a playlist video entry from a playlist. + + Needs authentication. + + Args: + playlist_uri: A URI representing the playlist from which the playlist + video entry is to be removed from. 
+ playlist_video_entry_id: A string representing id of the playlist video + entry that is to be removed. + + Returns: + True if entry was successfully deleted. + """ + delete_uri = '%s/%s' % (playlist_uri, playlist_video_entry_id) + return self.Delete(delete_uri) + + def AddSubscriptionToChannel(self, username_to_subscribe_to, + my_username = 'default'): + """Add a new channel subscription to the currently authenticated users + account. + + Needs authentication. + + Args: + username_to_subscribe_to: A string representing the username of the + channel to which we want to subscribe to. + my_username: An optional string representing the name of the user which + we want to subscribe. Defaults to currently authenticated user. + + Returns: + A new YouTubeSubscriptionEntry if successfully posted. + """ + subscription_category = atom.Category( + scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME, + term='channel') + subscription_username = gdata.youtube.Username( + text=username_to_subscribe_to) + + subscription_entry = gdata.youtube.YouTubeSubscriptionEntry( + category=subscription_category, + username=subscription_username) + + post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'subscriptions') + + return self.Post(subscription_entry, post_uri, + converter=gdata.youtube.YouTubeSubscriptionEntryFromString) + + def AddSubscriptionToFavorites(self, username, my_username = 'default'): + """Add a new subscription to a users favorites to the currently + authenticated user's account. + + Needs authentication + + Args: + username: A string representing the username of the user's favorite feed + to subscribe to. + my_username: An optional string representing the username of the user + that is to be subscribed. Defaults to currently authenticated user. + + Returns: + A new YouTubeSubscriptionEntry if successful. 
+ """ + subscription_category = atom.Category( + scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME, + term='favorites') + subscription_username = gdata.youtube.Username(text=username) + + subscription_entry = gdata.youtube.YouTubeSubscriptionEntry( + category=subscription_category, + username=subscription_username) + + post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'subscriptions') + + return self.Post(subscription_entry, post_uri, + converter=gdata.youtube.YouTubeSubscriptionEntryFromString) + + def AddSubscriptionToQuery(self, query, my_username = 'default'): + """Add a new subscription to a specific keyword query to the currently + authenticated user's account. + + Needs authentication + + Args: + query: A string representing the keyword query to subscribe to. + my_username: An optional string representing the username of the user + that is to be subscribed. Defaults to currently authenticated user. + + Returns: + A new YouTubeSubscriptionEntry if successful. + """ + subscription_category = atom.Category( + scheme=YOUTUBE_SUBSCRIPTION_CATEGORY_SCHEME, + term='query') + subscription_query_string = gdata.youtube.QueryString(text=query) + + subscription_entry = gdata.youtube.YouTubeSubscriptionEntry( + category=subscription_category, + query_string=subscription_query_string) + + post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'subscriptions') + + return self.Post(subscription_entry, post_uri, + converter=gdata.youtube.YouTubeSubscriptionEntryFromString) + + + + def DeleteSubscription(self, subscription_uri): + """Delete a subscription from the currently authenticated user's account. + + Needs authentication. + + Args: + subscription_uri: A string representing the URI of the subscription that + is to be deleted. + + Returns: + True if deleted successfully. + """ + return self.Delete(subscription_uri) + + def AddContact(self, contact_username, my_username='default'): + """Add a new contact to the currently authenticated user's contact feed. 
+ + Needs authentication. + + Args: + contact_username: A string representing the username of the contact + that you wish to add. + my_username: An optional string representing the username to whose + contact the new contact is to be added. + + Returns: + A YouTubeContactEntry if added successfully. + """ + contact_category = atom.Category( + scheme = 'http://gdata.youtube.com/schemas/2007/contact.cat', + term = 'Friends') + contact_username = gdata.youtube.Username(text=contact_username) + contact_entry = gdata.youtube.YouTubeContactEntry( + category=contact_category, + username=contact_username) + + contact_post_uri = '%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'contacts') + + return self.Post(contact_entry, contact_post_uri, + converter=gdata.youtube.YouTubeContactEntryFromString) + + def UpdateContact(self, contact_username, new_contact_status, + new_contact_category, my_username='default'): + """Update a contact, providing a new status and a new category. + + Needs authentication. + + Args: + contact_username: A string representing the username of the contact + that is to be updated. + new_contact_status: A string representing the new status of the contact. + This can either be set to 'accepted' or 'rejected'. + new_contact_category: A string representing the new category for the + contact, either 'Friends' or 'Family'. + my_username: An optional string representing the username of the user + whose contact feed we are modifying. Defaults to the currently + authenticated user. + + Returns: + A YouTubeContactEntry if updated succesfully. + + Raises: + YouTubeError: New contact status must be within the accepted values. Or + new contact category must be within the accepted categories. 
+ """ + if new_contact_status not in YOUTUBE_CONTACT_STATUS: + raise YouTubeError('New contact status must be one of %s' % + (' '.join(YOUTUBE_CONTACT_STATUS))) + if new_contact_category not in YOUTUBE_CONTACT_CATEGORY: + raise YouTubeError('New contact category must be one of %s' % + (' '.join(YOUTUBE_CONTACT_CATEGORY))) + + contact_category = atom.Category( + scheme='http://gdata.youtube.com/schemas/2007/contact.cat', + term=new_contact_category) + + contact_status = gdata.youtube.Status(text=new_contact_status) + contact_entry = gdata.youtube.YouTubeContactEntry( + category=contact_category, + status=contact_status) + + contact_put_uri = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'contacts', contact_username) + + return self.Put(contact_entry, contact_put_uri, + converter=gdata.youtube.YouTubeContactEntryFromString) + + def DeleteContact(self, contact_username, my_username='default'): + """Delete a contact from a users contact feed. + + Needs authentication. + + Args: + contact_username: A string representing the username of the contact + that is to be deleted. + my_username: An optional string representing the username of the user's + contact feed from which to delete the contact. Defaults to the + currently authenticated user. + + Returns: + True if the contact was deleted successfully + """ + contact_edit_uri = '%s/%s/%s/%s' % (YOUTUBE_USER_FEED_URI, my_username, + 'contacts', contact_username) + return self.Delete(contact_edit_uri) + + def _GetDeveloperKey(self): + """Getter for Developer Key property. + + Returns: + If the developer key has been set, a string representing the developer key + is returned or None. + """ + if 'X-GData-Key' in self.additional_headers: + return self.additional_headers['X-GData-Key'][4:] + else: + return None + + def _SetDeveloperKey(self, developer_key): + """Setter for Developer Key property. + + Sets the developer key in the 'X-GData-Key' header. 
The actual value that + is set is 'key=' plus the developer_key that was passed. + """ + self.additional_headers['X-GData-Key'] = 'key=' + developer_key + + developer_key = property(_GetDeveloperKey, _SetDeveloperKey, + doc="""The Developer Key property""") + + def _GetClientId(self): + """Getter for Client Id property. + + Returns: + If the client_id has been set, a string representing it is returned + or None. + """ + if 'X-Gdata-Client' in self.additional_headers: + return self.additional_headers['X-Gdata-Client'] + else: + return None + + def _SetClientId(self, client_id): + """Setter for Client Id property. + + Sets the 'X-Gdata-Client' header. + """ + self.additional_headers['X-Gdata-Client'] = client_id + + client_id = property(_GetClientId, _SetClientId, + doc="""The ClientId property""") + + def Query(self, uri): + """Performs a query and returns a resulting feed or entry. + + Args: + uri: A string representing the URI of the feed that is to be queried. + + Returns: + On success, a tuple in the form: + (boolean succeeded=True, ElementTree._Element result) + On failure, a tuple in the form: + (boolean succeeded=False, {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server's response}) + """ + result = self.Get(uri) + return result + + def YouTubeQuery(self, query): + """Performs a YouTube specific query and returns a resulting feed or entry. + + Args: + query: A Query object or one if its sub-classes (YouTubeVideoQuery, + YouTubeUserQuery or YouTubePlaylistQuery). + + Returns: + Depending on the type of Query object submitted returns either a + YouTubeVideoFeed, a YouTubeUserFeed, a YouTubePlaylistFeed. If the + Query object provided was not YouTube-related, a tuple is returned. 
+ On success the tuple will be in this form: + (boolean succeeded=True, ElementTree._Element result) + On failure, the tuple will be in this form: + (boolean succeeded=False, {'status': HTTP status code from server, + 'reason': HTTP reason from the server, + 'body': HTTP body of the server response}) + """ + result = self.Query(query.ToUri()) + if isinstance(query, YouTubeVideoQuery): + return gdata.youtube.YouTubeVideoFeedFromString(result.ToString()) + elif isinstance(query, YouTubeUserQuery): + return gdata.youtube.YouTubeUserFeedFromString(result.ToString()) + elif isinstance(query, YouTubePlaylistQuery): + return gdata.youtube.YouTubePlaylistFeedFromString(result.ToString()) + else: + return result + +class YouTubeVideoQuery(gdata.service.Query): + + """Subclasses gdata.service.Query to represent a YouTube Data API query. + + Attributes are set dynamically via properties. Properties correspond to + the standard Google Data API query parameters with YouTube Data API + extensions. Please refer to the API documentation for details. + + Attributes: + vq: The vq parameter, which is only supported for video feeds, specifies a + search query term. Refer to API documentation for further details. + orderby: The orderby parameter, which is only supported for video feeds, + specifies the value that will be used to sort videos in the search + result set. Valid values for this parameter are relevance, published, + viewCount and rating. + time: The time parameter, which is only available for the top_rated, + top_favorites, most_viewed, most_discussed, most_linked and + most_responded standard feeds, restricts the search to videos uploaded + within the specified time. Valid values for this parameter are today + (1 day), this_week (7 days), this_month (1 month) and all_time. + The default value for this parameter is all_time. + format: The format parameter specifies that videos must be available in a + particular video format. Refer to the API documentation for details. 
+ racy: The racy parameter allows a search result set to include restricted + content as well as standard content. Valid values for this parameter + are include and exclude. By default, restricted content is excluded. + lr: The lr parameter restricts the search to videos that have a title, + description or keywords in a specific language. Valid values for the lr + parameter are ISO 639-1 two-letter language codes. + restriction: The restriction parameter identifies the IP address that + should be used to filter videos that can only be played in specific + countries. + location: A string of geo coordinates. Note that this is not used when the + search is performed but rather to filter the returned videos for ones + that match to the location entered. + """ + + def __init__(self, video_id=None, feed_type=None, text_query=None, + params=None, categories=None): + + if feed_type in YOUTUBE_STANDARDFEEDS: + feed = 'http://%s/feeds/standardfeeds/%s' % (YOUTUBE_SERVER, feed_type) + elif feed_type is 'responses' or feed_type is 'comments' and video_id: + feed = 'http://%s/feeds/videos/%s/%s' % (YOUTUBE_SERVER, video_id, + feed_type) + else: + feed = 'http://%s/feeds/videos' % (YOUTUBE_SERVER) + + gdata.service.Query.__init__(self, feed, text_query=text_query, + params=params, categories=categories) + + def _GetVideoQuery(self): + if 'vq' in self: + return self['vq'] + else: + return None + + def _SetVideoQuery(self, val): + self['vq'] = val + + vq = property(_GetVideoQuery, _SetVideoQuery, + doc="""The video query (vq) query parameter""") + + def _GetOrderBy(self): + if 'orderby' in self: + return self['orderby'] + else: + return None + + def _SetOrderBy(self, val): + if val not in YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS: + if val.startswith('relevance_lang_') is False: + raise YouTubeError('OrderBy must be one of: %s ' % + ' '.join(YOUTUBE_QUERY_VALID_ORDERBY_PARAMETERS)) + self['orderby'] = val + + orderby = property(_GetOrderBy, _SetOrderBy, + doc="""The orderby query 
parameter""") + + def _GetTime(self): + if 'time' in self: + return self['time'] + else: + return None + + def _SetTime(self, val): + if val not in YOUTUBE_QUERY_VALID_TIME_PARAMETERS: + raise YouTubeError('Time must be one of: %s ' % + ' '.join(YOUTUBE_QUERY_VALID_TIME_PARAMETERS)) + self['time'] = val + + time = property(_GetTime, _SetTime, + doc="""The time query parameter""") + + def _GetFormat(self): + if 'format' in self: + return self['format'] + else: + return None + + def _SetFormat(self, val): + if val not in YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS: + raise YouTubeError('Format must be one of: %s ' % + ' '.join(YOUTUBE_QUERY_VALID_FORMAT_PARAMETERS)) + self['format'] = val + + format = property(_GetFormat, _SetFormat, + doc="""The format query parameter""") + + def _GetRacy(self): + if 'racy' in self: + return self['racy'] + else: + return None + + def _SetRacy(self, val): + if val not in YOUTUBE_QUERY_VALID_RACY_PARAMETERS: + raise YouTubeError('Racy must be one of: %s ' % + ' '.join(YOUTUBE_QUERY_VALID_RACY_PARAMETERS)) + self['racy'] = val + + racy = property(_GetRacy, _SetRacy, + doc="""The racy query parameter""") + + def _GetLanguageRestriction(self): + if 'lr' in self: + return self['lr'] + else: + return None + + def _SetLanguageRestriction(self, val): + self['lr'] = val + + lr = property(_GetLanguageRestriction, _SetLanguageRestriction, + doc="""The lr (language restriction) query parameter""") + + def _GetIPRestriction(self): + if 'restriction' in self: + return self['restriction'] + else: + return None + + def _SetIPRestriction(self, val): + self['restriction'] = val + + restriction = property(_GetIPRestriction, _SetIPRestriction, + doc="""The restriction query parameter""") + + def _GetLocation(self): + if 'location' in self: + return self['location'] + else: + return None + + def _SetLocation(self, val): + self['location'] = val + + location = property(_GetLocation, _SetLocation, + doc="""The location query parameter""") + + + +class 
YouTubeUserQuery(YouTubeVideoQuery): + + """Subclasses YouTubeVideoQuery to perform user-specific queries. + + Attributes are set dynamically via properties. Properties correspond to + the standard Google Data API query parameters with YouTube Data API + extensions. + """ + + def __init__(self, username=None, feed_type=None, subscription_id=None, + text_query=None, params=None, categories=None): + + uploads_favorites_playlists = ('uploads', 'favorites', 'playlists') + + if feed_type is 'subscriptions' and subscription_id and username: + feed = "http://%s/feeds/users/%s/%s/%s" % (YOUTUBE_SERVER, username, + feed_type, subscription_id) + elif feed_type is 'subscriptions' and not subscription_id and username: + feed = "http://%s/feeds/users/%s/%s" % (YOUTUBE_SERVER, username, + feed_type) + elif feed_type in uploads_favorites_playlists: + feed = "http://%s/feeds/users/%s/%s" % (YOUTUBE_SERVER, username, + feed_type) + else: + feed = "http://%s/feeds/users" % (YOUTUBE_SERVER) + + YouTubeVideoQuery.__init__(self, feed, text_query=text_query, + params=params, categories=categories) + + +class YouTubePlaylistQuery(YouTubeVideoQuery): + + """Subclasses YouTubeVideoQuery to perform playlist-specific queries. + + Attributes are set dynamically via properties. Properties correspond to + the standard Google Data API query parameters with YouTube Data API + extensions. 
+ """ + + def __init__(self, playlist_id, text_query=None, params=None, + categories=None): + if playlist_id: + feed = "http://%s/feeds/playlists/%s" % (YOUTUBE_SERVER, playlist_id) + else: + feed = "http://%s/feeds/playlists" % (YOUTUBE_SERVER) + + YouTubeVideoQuery.__init__(self, feed, text_query=text_query, + params=params, categories=categories) diff --git a/gdata.py-1.2.3/tests/__init__.py b/gdata.py-1.2.3/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gdata.py-1.2.3/tests/atom_test.py b/gdata.py-1.2.3/tests/atom_test.py new file mode 100755 index 0000000..38b8f75 --- /dev/null +++ b/gdata.py-1.2.3/tests/atom_test.py @@ -0,0 +1,601 @@ +#!/usr/bin/python +# -*-*- encoding: utf-8 -*-*- +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +__author__ = 'api.jscudder@gmail.com (Jeff Scudder)' + + +import sys +import unittest +try: + from xml.etree import ElementTree +except ImportError: + from elementtree import ElementTree +import atom +from gdata import test_data + + +class AuthorTest(unittest.TestCase): + + def setUp(self): + self.author = atom.Author() + + def testEmptyAuthorShouldHaveEmptyExtensionsList(self): + self.assert_(isinstance(self.author.extension_elements, list)) + self.assert_(len(self.author.extension_elements) == 0) + + def testNormalAuthorShouldHaveNoExtensionElements(self): + self.author.name = atom.Name(text='Jeff Scudder') + self.assert_(self.author.name.text == 'Jeff Scudder') + self.assert_(len(self.author.extension_elements) == 0) + new_author = atom.AuthorFromString(self.author.ToString()) + self.assert_(len(self.author.extension_elements) == 0) + + self.author.extension_elements.append(atom.ExtensionElement( + 'foo', text='bar')) + self.assert_(len(self.author.extension_elements) == 1) + self.assert_(self.author.name.text == 'Jeff Scudder') + new_author = atom.AuthorFromString(self.author.ToString()) + self.assert_(len(self.author.extension_elements) == 1) + self.assert_(new_author.name.text == 'Jeff Scudder') + + def testEmptyAuthorToAndFromStringShouldMatch(self): + string_from_author = self.author.ToString() + new_author = atom.AuthorFromString(string_from_author) + string_from_new_author = new_author.ToString() + self.assert_(string_from_author == string_from_new_author) + + def testAuthorWithNameToAndFromStringShouldMatch(self): + self.author.name = atom.Name() + self.author.name.text = 'Jeff Scudder' + string_from_author = self.author.ToString() + new_author = atom.AuthorFromString(string_from_author) + string_from_new_author = new_author.ToString() + self.assert_(string_from_author == string_from_new_author) + self.assert_(self.author.name.text == new_author.name.text) + + def testExtensionElements(self): + self.author.extension_attributes['foo1'] = 'bar' + 
self.author.extension_attributes['foo2'] = 'rab' + self.assert_(self.author.extension_attributes['foo1'] == 'bar') + self.assert_(self.author.extension_attributes['foo2'] == 'rab') + new_author = atom.AuthorFromString(self.author.ToString()) + self.assert_(new_author.extension_attributes['foo1'] == 'bar') + self.assert_(new_author.extension_attributes['foo2'] == 'rab') + + def testConvertFullAuthorToAndFromString(self): + author = atom.AuthorFromString(test_data.TEST_AUTHOR) + self.assert_(author.name.text == 'John Doe') + self.assert_(author.email.text == 'johndoes@someemailadress.com') + self.assert_(author.uri.text == 'http://www.google.com') + + +class EmailTest(unittest.TestCase): + + def setUp(self): + self.email = atom.Email() + + def testEmailToAndFromString(self): + self.email.text = 'This is a test' + new_email = atom.EmailFromString(self.email.ToString()) + self.assert_(self.email.text == new_email.text) + self.assert_(self.email.extension_elements == + new_email.extension_elements) + + +class NameTest(unittest.TestCase): + + def setUp(self): + self.name = atom.Name() + + def testEmptyNameToAndFromStringShouldMatch(self): + string_from_name = self.name.ToString() + new_name = atom.NameFromString(string_from_name) + string_from_new_name = new_name.ToString() + self.assert_(string_from_name == string_from_new_name) + + def testText(self): + self.assert_(self.name.text is None) + self.name.text = 'Jeff Scudder' + self.assert_(self.name.text == 'Jeff Scudder') + new_name = atom.NameFromString(self.name.ToString()) + self.assert_(new_name.text == self.name.text) + + def testExtensionElements(self): + self.name.extension_attributes['foo'] = 'bar' + self.assert_(self.name.extension_attributes['foo'] == 'bar') + new_name = atom.NameFromString(self.name.ToString()) + self.assert_(new_name.extension_attributes['foo'] == 'bar') + + +class ExtensionElementTest(unittest.TestCase): + + def setUp(self): + self.ee = atom.ExtensionElement('foo') + + def 
testEmptyEEShouldProduceEmptyString(self): + pass + + def testEEParsesTreeCorrectly(self): + deep_tree = atom.ExtensionElementFromString(test_data.EXTENSION_TREE) + self.assert_(deep_tree.tag == 'feed') + self.assert_(deep_tree.namespace == 'http://www.w3.org/2005/Atom') + self.assert_(deep_tree.children[0].tag == 'author') + self.assert_(deep_tree.children[0].namespace == 'http://www.google.com') + self.assert_(deep_tree.children[0].children[0].tag == 'name') + self.assert_(deep_tree.children[0].children[0].namespace == + 'http://www.google.com') + self.assert_(deep_tree.children[0].children[0].text.strip() == 'John Doe') + self.assert_(deep_tree.children[0].children[0].children[0].text.strip() == + 'Bar') + foo = deep_tree.children[0].children[0].children[0] + self.assert_(foo.tag == 'foo') + self.assert_(foo.namespace == 'http://www.google.com') + self.assert_(foo.attributes['up'] == 'down') + self.assert_(foo.attributes['yes'] == 'no') + self.assert_(foo.children == []) + + def testEEToAndFromStringShouldMatch(self): + string_from_ee = self.ee.ToString() + new_ee = atom.ExtensionElementFromString(string_from_ee) + string_from_new_ee = new_ee.ToString() + self.assert_(string_from_ee == string_from_new_ee) + + deep_tree = atom.ExtensionElementFromString(test_data.EXTENSION_TREE) + string_from_deep_tree = deep_tree.ToString() + new_deep_tree = atom.ExtensionElementFromString(string_from_deep_tree) + string_from_new_deep_tree = new_deep_tree.ToString() + self.assert_(string_from_deep_tree == string_from_new_deep_tree) + + +class LinkTest(unittest.TestCase): + + def setUp(self): + self.link = atom.Link() + + def testLinkToAndFromString(self): + self.link.href = 'test href' + self.link.hreflang = 'english' + self.link.type = 'text/html' + self.link.extension_attributes['foo'] = 'bar' + self.assert_(self.link.href == 'test href') + self.assert_(self.link.hreflang == 'english') + self.assert_(self.link.type == 'text/html') + 
self.assert_(self.link.extension_attributes['foo'] == 'bar') + new_link = atom.LinkFromString(self.link.ToString()) + self.assert_(self.link.href == new_link.href) + self.assert_(self.link.type == new_link.type) + self.assert_(self.link.hreflang == new_link.hreflang) + self.assert_(self.link.extension_attributes['foo'] == + new_link.extension_attributes['foo']) + + def testLinkType(self): + test_link = atom.Link(link_type='text/html') + self.assert_(test_link.type == 'text/html') + + +class GeneratorTest(unittest.TestCase): + + def setUp(self): + self.generator = atom.Generator() + + def testGeneratorToAndFromString(self): + self.generator.uri = 'www.google.com' + self.generator.version = '1.0' + self.generator.extension_attributes['foo'] = 'bar' + self.assert_(self.generator.uri == 'www.google.com') + self.assert_(self.generator.version == '1.0') + self.assert_(self.generator.extension_attributes['foo'] == 'bar') + new_generator = atom.GeneratorFromString(self.generator.ToString()) + self.assert_(self.generator.uri == new_generator.uri) + self.assert_(self.generator.version == new_generator.version) + self.assert_(self.generator.extension_attributes['foo'] == + new_generator.extension_attributes['foo']) + + +class TitleTest(unittest.TestCase): + + def setUp(self): + self.title = atom.Title() + + def testTitleToAndFromString(self): + self.title.type = 'text' + self.title.text = 'Less: <' + self.assert_(self.title.type == 'text') + self.assert_(self.title.text == 'Less: <') + new_title = atom.TitleFromString(self.title.ToString()) + self.assert_(self.title.type == new_title.type) + self.assert_(self.title.text == new_title.text) + + +class SubtitleTest(unittest.TestCase): + + def setUp(self): + self.subtitle = atom.Subtitle() + + def testTitleToAndFromString(self): + self.subtitle.type = 'text' + self.subtitle.text = 'sub & title' + self.assert_(self.subtitle.type == 'text') + self.assert_(self.subtitle.text == 'sub & title') + new_subtitle = 
atom.SubtitleFromString(self.subtitle.ToString()) + self.assert_(self.subtitle.type == new_subtitle.type) + self.assert_(self.subtitle.text == new_subtitle.text) + + + +class SummaryTest(unittest.TestCase): + + def setUp(self): + self.summary = atom.Summary() + + def testTitleToAndFromString(self): + self.summary.type = 'text' + self.summary.text = 'Less: <' + self.assert_(self.summary.type == 'text') + self.assert_(self.summary.text == 'Less: <') + new_summary = atom.SummaryFromString(self.summary.ToString()) + self.assert_(self.summary.type == new_summary.type) + self.assert_(self.summary.text == new_summary.text) + + +class CategoryTest(unittest.TestCase): + + def setUp(self): + self.category = atom.Category() + + def testCategoryToAndFromString(self): + self.category.term = 'x' + self.category.scheme = 'y' + self.category.label = 'z' + self.assert_(self.category.term == 'x') + self.assert_(self.category.scheme == 'y') + self.assert_(self.category.label == 'z') + new_category = atom.CategoryFromString(self.category.ToString()) + self.assert_(self.category.term == new_category.term) + self.assert_(self.category.scheme == new_category.scheme) + self.assert_(self.category.label == new_category.label) + + +class ContributorTest(unittest.TestCase): + + def setUp(self): + self.contributor = atom.Contributor() + + def testContributorToAndFromString(self): + self.contributor.name = atom.Name(text='J Scud') + self.contributor.email = atom.Email(text='nobody@nowhere') + self.contributor.uri = atom.Uri(text='http://www.google.com') + self.assert_(self.contributor.name.text == 'J Scud') + self.assert_(self.contributor.email.text == 'nobody@nowhere') + self.assert_(self.contributor.uri.text == 'http://www.google.com') + new_contributor = atom.ContributorFromString(self.contributor.ToString()) + self.assert_(self.contributor.name.text == new_contributor.name.text) + self.assert_(self.contributor.email.text == new_contributor.email.text) + 
self.assert_(self.contributor.uri.text == new_contributor.uri.text) + + +class IdTest(unittest.TestCase): + + def setUp(self): + self.my_id = atom.Id() + + def testIdToAndFromString(self): + self.my_id.text = 'my nifty id' + self.assert_(self.my_id.text == 'my nifty id') + new_id = atom.IdFromString(self.my_id.ToString()) + self.assert_(self.my_id.text == new_id.text) + + +class IconTest(unittest.TestCase): + + def setUp(self): + self.icon = atom.Icon() + + def testIconToAndFromString(self): + self.icon.text = 'my picture' + self.assert_(self.icon.text == 'my picture') + new_icon = atom.IconFromString(str(self.icon)) + self.assert_(self.icon.text == new_icon.text) + + +class LogoTest(unittest.TestCase): + + def setUp(self): + self.logo = atom.Logo() + + def testLogoToAndFromString(self): + self.logo.text = 'my logo' + self.assert_(self.logo.text == 'my logo') + new_logo = atom.LogoFromString(self.logo.ToString()) + self.assert_(self.logo.text == new_logo.text) + + +class RightsTest(unittest.TestCase): + + def setUp(self): + self.rights = atom.Rights() + + def testContributorToAndFromString(self): + self.rights.text = 'you have the right to remain silent' + self.rights.type = 'text' + self.assert_(self.rights.text == 'you have the right to remain silent') + self.assert_(self.rights.type == 'text') + new_rights = atom.RightsFromString(self.rights.ToString()) + self.assert_(self.rights.text == new_rights.text) + self.assert_(self.rights.type == new_rights.type) + + +class UpdatedTest(unittest.TestCase): + + def setUp(self): + self.updated = atom.Updated() + + def testUpdatedToAndFromString(self): + self.updated.text = 'my time' + self.assert_(self.updated.text == 'my time') + new_updated = atom.UpdatedFromString(self.updated.ToString()) + self.assert_(self.updated.text == new_updated.text) + + +class PublishedTest(unittest.TestCase): + + def setUp(self): + self.published = atom.Published() + + def testPublishedToAndFromString(self): + self.published.text = 'pub time' 
+ self.assert_(self.published.text == 'pub time') + new_published = atom.PublishedFromString(self.published.ToString()) + self.assert_(self.published.text == new_published.text) + + +class FeedEntryParentTest(unittest.TestCase): + """The test accesses hidden methods in atom.FeedEntryParent""" + + def testConvertToAndFromElementTree(self): + # Use entry because FeedEntryParent doesn't have a tag or namespace. + original = atom.Entry() + copy = atom.FeedEntryParent() + + original.author.append(atom.Author(name=atom.Name(text='J Scud'))) + self.assert_(original.author[0].name.text == 'J Scud') + self.assert_(copy.author == []) + + original.id = atom.Id(text='test id') + self.assert_(original.id.text == 'test id') + self.assert_(copy.id is None) + + copy._HarvestElementTree(original._ToElementTree()) + self.assert_(original.author[0].name.text == copy.author[0].name.text) + self.assert_(original.id.text == copy.id.text) + + +class EntryTest(unittest.TestCase): + + def testConvertToAndFromString(self): + entry = atom.Entry() + entry.author.append(atom.Author(name=atom.Name(text='js'))) + entry.title = atom.Title(text='my test entry') + self.assert_(entry.author[0].name.text == 'js') + self.assert_(entry.title.text == 'my test entry') + new_entry = atom.EntryFromString(entry.ToString()) + self.assert_(new_entry.author[0].name.text == 'js') + self.assert_(new_entry.title.text == 'my test entry') + + def testEntryCorrectlyConvertsActualData(self): + entry = atom.EntryFromString(test_data.XML_ENTRY_1) + self.assert_(entry.category[0].scheme == + 'http://base.google.com/categories/itemtypes') + self.assert_(entry.category[0].term == 'products') + self.assert_(entry.id.text == ' http://www.google.com/test/id/url ') + self.assert_(entry.title.text == 'Testing 2000 series laptop') + self.assert_(entry.title.type == 'text') + self.assert_(entry.content.type == 'xhtml') + #TODO check all other values for the test entry + + def testAppControl(self): + entry = 
atom.EntryFromString(test_data.TEST_BASE_ENTRY) + self.assertEquals(entry.control.draft.text, 'yes') + self.assertEquals(len(entry.control.extension_elements), 1) + self.assertEquals(entry.control.extension_elements[0].tag, 'disapproved') + + +class ControlTest(unittest.TestCase): + + def testConvertToAndFromString(self): + control = atom.Control() + control.text = 'some text' + control.draft = atom.Draft(text='yes') + self.assertEquals(control.draft.text, 'yes') + self.assertEquals(control.text, 'some text') + self.assertEquals(isinstance(control.draft, atom.Draft), True) + new_control = atom.ControlFromString(str(control)) + self.assertEquals(control.draft.text, new_control.draft.text) + self.assertEquals(control.text, new_control.text) + self.assertEquals(isinstance(new_control.draft, atom.Draft), True) + + +class DraftTest(unittest.TestCase): + + def testConvertToAndFromString(self): + draft = atom.Draft() + draft.text = 'maybe' + draft.extension_attributes['foo'] = 'bar' + self.assertEquals(draft.text, 'maybe') + self.assertEquals(draft.extension_attributes['foo'], 'bar') + new_draft = atom.DraftFromString(str(draft)) + self.assertEquals(draft.text, new_draft.text) + self.assertEquals(draft.extension_attributes['foo'], + new_draft.extension_attributes['foo']) + + + +class SourceTest(unittest.TestCase): + + def testConvertToAndFromString(self): + source = atom.Source() + source.author.append(atom.Author(name=atom.Name(text='js'))) + source.title = atom.Title(text='my test source') + source.generator = atom.Generator(text='gen') + self.assert_(source.author[0].name.text == 'js') + self.assert_(source.title.text == 'my test source') + self.assert_(source.generator.text == 'gen') + new_source = atom.SourceFromString(source.ToString()) + self.assert_(new_source.author[0].name.text == 'js') + self.assert_(new_source.title.text == 'my test source') + self.assert_(new_source.generator.text == 'gen') + + +class FeedTest(unittest.TestCase): + + def 
testConvertToAndFromString(self): + feed = atom.Feed() + feed.author.append(atom.Author(name=atom.Name(text='js'))) + feed.title = atom.Title(text='my test source') + feed.generator = atom.Generator(text='gen') + feed.entry.append(atom.Entry(author=[atom.Author(name=atom.Name(text='entry author'))])) + self.assert_(feed.author[0].name.text == 'js') + self.assert_(feed.title.text == 'my test source') + self.assert_(feed.generator.text == 'gen') + self.assert_(feed.entry[0].author[0].name.text == 'entry author') + new_feed = atom.FeedFromString(feed.ToString()) + self.assert_(new_feed.author[0].name.text == 'js') + self.assert_(new_feed.title.text == 'my test source') + self.assert_(new_feed.generator.text == 'gen') + self.assert_(new_feed.entry[0].author[0].name.text == 'entry author') + + +class ContentEntryParentTest(unittest.TestCase): + """The test accesses hidden methods in atom.FeedEntryParent""" + + def setUp(self): + self.content = atom.Content() + + def testConvertToAndFromElementTree(self): + self.content.text = 'my content' + self.content.type = 'text' + self.content.src = 'my source' + self.assert_(self.content.text == 'my content') + self.assert_(self.content.type == 'text') + self.assert_(self.content.src == 'my source') + new_content = atom.ContentFromString(self.content.ToString()) + self.assert_(self.content.text == new_content.text) + self.assert_(self.content.type == new_content.type) + self.assert_(self.content.src == new_content.src) + + def testContentConstructorSetsSrc(self): + new_content = atom.Content(src='abcd') + self.assertEquals(new_content.src, 'abcd') + + +class PreserveUnkownElementTest(unittest.TestCase): + """Tests correct preservation of XML elements which are non Atom""" + + def setUp(self): + self.feed = atom.FeedFromString(test_data.GBASE_ATTRIBUTE_FEED) + + def testCaptureOpenSearchElements(self): + self.assertEquals(self.feed.FindExtensions('totalResults')[0].tag, + 'totalResults') + 
self.assertEquals(self.feed.FindExtensions('totalResults')[0].namespace, + 'http://a9.com/-/spec/opensearchrss/1.0/') + open_search_extensions = self.feed.FindExtensions( + namespace='http://a9.com/-/spec/opensearchrss/1.0/') + self.assertEquals(len(open_search_extensions), 3) + for element in open_search_extensions: + self.assertEquals(element.namespace, + 'http://a9.com/-/spec/opensearchrss/1.0/') + + def testCaptureMetaElements(self): + meta_elements = self.feed.entry[0].FindExtensions( + namespace='http://base.google.com/ns-metadata/1.0') + self.assertEquals(len(meta_elements), 1) + self.assertEquals(meta_elements[0].attributes['count'], '4416629') + self.assertEquals(len(meta_elements[0].children), 10) + + def testCaptureMetaChildElements(self): + meta_elements = self.feed.entry[0].FindExtensions( + namespace='http://base.google.com/ns-metadata/1.0') + meta_children = meta_elements[0].FindChildren( + namespace='http://base.google.com/ns-metadata/1.0') + self.assertEquals(len(meta_children), 10) + for child in meta_children: + self.assertEquals(child.tag, 'value') + + +class LinkFinderTest(unittest.TestCase): + + def setUp(self): + self.entry = atom.EntryFromString(test_data.XML_ENTRY_1) + + def testLinkFinderGetsLicenseLink(self): + self.assertEquals(isinstance(self.entry.GetLicenseLink(), atom.Link), + True) + self.assertEquals(self.entry.GetLicenseLink().href, + 'http://creativecommons.org/licenses/by-nc/2.5/rdf') + self.assertEquals(self.entry.GetLicenseLink().rel, 'license') + + def testLinkFinderGetsAlternateLink(self): + self.assertEquals(isinstance(self.entry.GetAlternateLink(), atom.Link), + True) + self.assertEquals(self.entry.GetAlternateLink().href, + 'http://www.provider-host.com/123456789') + self.assertEquals(self.entry.GetAlternateLink().rel, 'alternate') + + +class AtomBaseTest(unittest.TestCase): + + def testAtomBaseConvertsExtensions(self): + # Using Id because it adds no additional members. 
+ atom_base = atom.Id() + extension_child = atom.ExtensionElement('foo', namespace='http://ns0.com') + extension_grandchild = atom.ExtensionElement('bar', namespace='http://ns0.com') + extension_child.children.append(extension_grandchild) + atom_base.extension_elements.append(extension_child) + self.assertEquals(len(atom_base.extension_elements), 1) + self.assertEquals(len(atom_base.extension_elements[0].children), 1) + self.assertEquals(atom_base.extension_elements[0].tag, 'foo') + self.assertEquals(atom_base.extension_elements[0].children[0].tag, 'bar') + + element_tree = atom_base._ToElementTree() + self.assert_(element_tree.find('{http://ns0.com}foo') is not None) + self.assert_(element_tree.find('{http://ns0.com}foo').find('{http://ns0.com}bar') is not None) + + +class UtfParsingTest(unittest.TestCase): + + def setUp(self): + self.test_xml = u""" + http://www.google.com/test/id/url + \u03B1\u03BB\u03C6\u03B1 +""" + + def testMemberStringEncoding(self): + atom_entry = atom.EntryFromString(self.test_xml) + self.assert_(atom_entry.title.type == u'\u03B1\u03BB\u03C6\u03B1'.encode('utf-8')) + self.assert_(atom_entry.title.text == u'\u03B1\u03BB\u03C6\u03B1'.encode('utf-8')) + + def testConvertExampleXML(self): + try: + entry = atom.CreateClassFromXMLString(atom.Entry, test_data.GBASE_STRING_ENCODING_ENTRY) + except UnicodeDecodeError: + self.fail('Error when converting XML') + + +if __name__ == '__main__': + unittest.main() diff --git a/gdata.py-1.2.3/tests/atom_tests/__init__.py b/gdata.py-1.2.3/tests/atom_tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gdata.py-1.2.3/tests/atom_tests/core_test.py b/gdata.py-1.2.3/tests/atom_tests/core_test.py new file mode 100755 index 0000000..06d2c21 --- /dev/null +++ b/gdata.py-1.2.3/tests/atom_tests/core_test.py @@ -0,0 +1,304 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'j.s@google.com (Jeff Scudder)' + + +import unittest +try: + from xml.etree import cElementTree as ElementTree +except ImportError: + try: + import cElementTree as ElementTree + except ImportError: + try: + from xml.etree import ElementTree + except ImportError: + from elementtree import ElementTree +import atom.core + + +SAMPLE_XML = ('' + '' + '' + '' + 'Some Test' + 'Different Namespace' + '' + '' + '') + + +NO_NAMESPACE_XML = ('Baz Text!') + + +V1_XML = ('' + '' + 'Greetings!' + '' + '') + + +V2_XML = ('' + '' + 'Greetings!' + '' + '') + + +class Child(atom.core.XmlElement): + _qname = ('{http://example.com/1}child', '{http://example.com/2}child') + + +class Foo(atom.core.XmlElement): + _qname = 'foo' + + +class Example(atom.core.XmlElement): + _qname = '{http://example.com}foo' + child = Child + foos = [Foo] + tag = 'tag' + versioned_attr = ('attr', '{http://new_ns}attr') + + +# Example XmlElement subclass declarations. 
+class Inner(atom.core.XmlElement): + _qname = '{http://example.com/xml/1}inner' + my_x = 'x' + + +class Outer(atom.core.XmlElement): + _qname = '{http://example.com/xml/1}outer' + innards = [Inner] + + +class XmlElementTest(unittest.TestCase): + + def testGetQName(self): + class Unversioned(atom.core.XmlElement): + _qname = '{http://example.com}foo' + + class Versioned(atom.core.XmlElement): + _qname = ('{http://example.com/1}foo', '{http://example.com/2}foo') + + self.assert_( + atom.core._get_qname(Unversioned, 1) == '{http://example.com}foo') + self.assert_( + atom.core._get_qname(Unversioned, 2) == '{http://example.com}foo') + self.assert_( + atom.core._get_qname(Versioned, 1) == '{http://example.com/1}foo') + self.assert_( + atom.core._get_qname(Versioned, 2) == '{http://example.com/2}foo') + + def testConstructor(self): + e = Example() + self.assert_(e.child is None) + self.assert_(e.tag is None) + self.assert_(e.versioned_attr is None) + self.assert_(e.foos == []) + self.assert_(e.text is None) + + def testGetRules(self): + rules1 = Example._get_rules(1) + self.assert_(rules1[0] == '{http://example.com}foo') + self.assert_(rules1[1]['{http://example.com/1}child'] == ('child', Child, + False)) + self.assert_(rules1[1]['foo'] == ('foos', Foo, True)) + self.assert_(rules1[2]['tag'] == 'tag') + self.assert_(rules1[2]['attr'] == 'versioned_attr') + # Check to make sure we don't recalculate the rules. 
+ self.assert_(rules1 == Example._get_rules(1)) + rules2 = Example._get_rules(2) + self.assert_(rules2[0] == '{http://example.com}foo') + self.assert_(rules2[1]['{http://example.com/2}child'] == ('child', Child, + False)) + self.assert_(rules2[1]['foo'] == ('foos', Foo, True)) + self.assert_(rules2[2]['tag'] == 'tag') + self.assert_(rules2[2]['{http://new_ns}attr'] == 'versioned_attr') + + def testGetElements(self): + e = Example() + e.child = Child() + e.child.text = 'child text' + e.foos.append(Foo()) + e.foos[0].text = 'foo1' + e.foos.append(Foo()) + e.foos[1].text = 'foo2' + e._other_elements.append(atom.core.XmlElement()) + e._other_elements[0]._qname = 'bar' + e._other_elements[0].text = 'other1' + e._other_elements.append(atom.core.XmlElement()) + e._other_elements[1]._qname = 'child' + e._other_elements[1].text = 'other2' + + self.contains_expected_elements(e.get_elements(), + ['foo1', 'foo2', 'child text', 'other1', 'other2']) + self.contains_expected_elements(e.get_elements('child'), + ['child text', 'other2']) + self.contains_expected_elements( + e.get_elements('child', 'http://example.com/1'), ['child text']) + self.contains_expected_elements( + e.get_elements('child', 'http://example.com/2'), []) + self.contains_expected_elements( + e.get_elements('child', 'http://example.com/2', 2), ['child text']) + self.contains_expected_elements( + e.get_elements('child', 'http://example.com/1', 2), []) + self.contains_expected_elements( + e.get_elements('child', 'http://example.com/2', 3), ['child text']) + self.contains_expected_elements(e.get_elements('bar'), ['other1']) + self.contains_expected_elements(e.get_elements('bar', version=2), + ['other1']) + self.contains_expected_elements(e.get_elements('bar', version=3), + ['other1']) + + def contains_expected_elements(self, elements, expected_texts): + self.assert_(len(elements) == len(expected_texts)) + for element in elements: + self.assert_(element.text in expected_texts) + + def testConstructorKwargs(self): + 
e = Example('hello', child=Child('world'), versioned_attr='1') + self.assert_(e.text == 'hello') + self.assert_(e.child.text == 'world') + self.assert_(e.versioned_attr == '1') + self.assert_(e.foos == []) + self.assert_(e.tag is None) + + e = Example(foos=[Foo('1', ignored=1), Foo(text='2')], tag='ok') + self.assert_(e.text is None) + self.assert_(e.child is None) + self.assert_(e.versioned_attr is None) + self.assert_(len(e.foos) == 2) + self.assert_(e.foos[0].text == '1') + self.assert_(e.foos[1].text == '2') + self.assert_('ignored' not in e.foos[0].__dict__) + self.assert_(e.tag == 'ok') + + def testParseBasicXmlElement(self): + element = atom.core.xml_element_from_string(SAMPLE_XML, + atom.core.XmlElement) + inners = element.get_elements('inner') + self.assert_(len(inners) == 3) + self.assert_(inners[0].get_attributes('x')[0].value == '123') + self.assert_(inners[0].get_attributes('y') == []) + self.assert_(inners[1].get_attributes('x')[0].value == '234') + self.assert_(inners[1].get_attributes('y')[0].value == 'abc') + self.assert_(inners[2].get_attributes('x') == []) + inners = element.get_elements('inner', 'http://example.com/xml/1') + self.assert_(len(inners) == 3) + inners = element.get_elements(None, 'http://example.com/xml/1') + self.assert_(len(inners) == 4) + inners = element.get_elements() + self.assert_(len(inners) == 4) + inners = element.get_elements('other') + self.assert_(len(inners) == 1) + self.assert_(inners[0].get_attributes( + 'z', 'http://example.com/xml/2')[0].value == 'true') + inners = element.get_elements('missing') + self.assert_(len(inners) == 0) + + def testBasicXmlElementPreservesMarkup(self): + element = atom.core.xml_element_from_string(SAMPLE_XML, + atom.core.XmlElement) + tree1 = ElementTree.fromstring(SAMPLE_XML) + tree2 = ElementTree.fromstring(element.to_string()) + self.assert_trees_similar(tree1, tree2) + + def testSchemaParse(self): + outer = atom.core.xml_element_from_string(SAMPLE_XML, Outer) + 
self.assert_(isinstance(outer.innards, list)) + self.assert_(len(outer.innards) == 3) + self.assert_(outer.innards[0].my_x == '123') + + def testSchemaParsePreservesMarkup(self): + outer = atom.core.xml_element_from_string(SAMPLE_XML, Outer) + tree1 = ElementTree.fromstring(SAMPLE_XML) + tree2 = ElementTree.fromstring(outer.to_string()) + self.assert_trees_similar(tree1, tree2) + found_x_and_y = False + found_x_123 = False + child = tree1.find('{http://example.com/xml/1}inner') + matching_children = tree2.findall(child.tag) + for match in matching_children: + if 'y' in match.attrib and match.attrib['y'] == 'abc': + if match.attrib['x'] == '234': + found_x_and_y = True + self.assert_(match.attrib['x'] == '234') + if 'x' in match.attrib and match.attrib['x'] == '123': + self.assert_('y' not in match.attrib) + found_x_123 = True + self.assert_(found_x_and_y) + self.assert_(found_x_123) + + def assert_trees_similar(self, a, b): + """Compares two XML trees for approximate matching.""" + for child in a: + self.assert_(len(a.findall(child.tag)) == len(b.findall(child.tag))) + for child in b: + self.assert_(len(a.findall(child.tag)) == len(b.findall(child.tag))) + self.assert_(len(a) == len(b)) + self.assert_(a.text == b.text) + self.assert_(a.attrib == b.attrib) + + +class UtilityFunctionTest(unittest.TestCase): + + def testMatchQnames(self): + self.assert_(atom.core._qname_matches( + 'foo', 'http://example.com', '{http://example.com}foo')) + self.assert_(atom.core._qname_matches( + None, None, '{http://example.com}foo')) + self.assert_(atom.core._qname_matches( + None, None, 'foo')) + self.assert_(atom.core._qname_matches( + None, None, None)) + self.assert_(atom.core._qname_matches( + None, None, '{http://example.com}')) + self.assert_(atom.core._qname_matches( + 'foo', None, '{http://example.com}foo')) + self.assert_(atom.core._qname_matches( + None, 'http://example.com', '{http://example.com}foo')) + self.assert_(atom.core._qname_matches( + None, '', 'foo')) + 
self.assert_(atom.core._qname_matches( + 'foo', '', 'foo')) + self.assert_(atom.core._qname_matches( + 'foo', '', 'foo')) + self.assert_(atom.core._qname_matches( + 'foo', 'http://google.com', '{http://example.com}foo') == False) + self.assert_(atom.core._qname_matches( + 'foo', 'http://example.com', '{http://example.com}bar') == False) + self.assert_(atom.core._qname_matches( + 'foo', 'http://example.com', '{http://google.com}foo') == False) + self.assert_(atom.core._qname_matches( + 'bar', 'http://example.com', '{http://google.com}foo') == False) + self.assert_(atom.core._qname_matches( + 'foo', None, '{http://example.com}bar') == False) + self.assert_(atom.core._qname_matches( + None, 'http://google.com', '{http://example.com}foo') == False) + self.assert_(atom.core._qname_matches( + None, '', '{http://example.com}foo') == False) + self.assert_(atom.core._qname_matches( + 'foo', '', 'bar') == False) + + +if __name__ == '__main__': + unittest.main() diff --git a/gdata.py-1.2.3/tests/atom_tests/http_interface_test.py b/gdata.py-1.2.3/tests/atom_tests/http_interface_test.py new file mode 100755 index 0000000..93b9b5b --- /dev/null +++ b/gdata.py-1.2.3/tests/atom_tests/http_interface_test.py @@ -0,0 +1,43 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import unittest +import atom.http_interface +import StringIO + + +class HttpResponseTest(unittest.TestCase): + + def testConstructorWithStrings(self): + resp = atom.http_interface.HttpResponse(body='Hi there!', status=200, + reason='OK', headers={'Content-Length':'9'}) + self.assert_(resp.read(amt=1) == 'H') + self.assert_(resp.read(amt=2) == 'i ') + self.assert_(resp.read() == 'there!') + self.assert_(resp.read() == '') + self.assert_(resp.reason == 'OK') + self.assert_(resp.status == 200) + self.assert_(resp.getheader('Content-Length') == '9') + self.assert_(resp.getheader('Missing') is None) + self.assert_(resp.getheader('Missing', default='yes') == 'yes') + + +if __name__ == '__main__': + unittest.main() diff --git a/gdata.py-1.2.3/tests/atom_tests/mock_client_test.py b/gdata.py-1.2.3/tests/atom_tests/mock_client_test.py new file mode 100755 index 0000000..45f6701 --- /dev/null +++ b/gdata.py-1.2.3/tests/atom_tests/mock_client_test.py @@ -0,0 +1,71 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import unittest +import atom.mock_http +import atom.http + + +class MockHttpClientUnitTest(unittest.TestCase): + + def setUp(self): + self.client = atom.mock_http.MockHttpClient() + + def testRepondToGet(self): + mock_response = atom.http_interface.HttpResponse(body='Hooray!', + status=200, reason='OK') + self.client.add_response(mock_response, 'GET', + 'http://example.com/hooray') + + response = self.client.request('GET', 'http://example.com/hooray') + + self.assertEquals(len(self.client.recordings), 1) + self.assertEquals(response.status, 200) + self.assertEquals(response.read(), 'Hooray!') + + def testRecordResponse(self): + # Turn on pass-through record mode. + self.client.real_client = atom.http.ProxiedHttpClient() + live_response = self.client.request('GET', + 'http://www.google.com/base/feeds/snippets?max-results=1') + live_response_body = live_response.read() + self.assertEquals(live_response.status, 200) + self.assertEquals(live_response_body.startswith('= len('/service/subservice?')) + self.assert_(path.find('newname=newvalue') >= len('/service/subservice?')) + + def testParseHttpsUrl(self): + as = atom.service.AtomService('code.google.com') + self.assertEquals(as.server, 'code.google.com') + (host, port, ssl, path) = atom.service.ProcessUrl(as, + 'https://www.google.com/service/subservice?name=value&newname=newvalue') + + self.assertEquals(ssl, True) + self.assertEquals(host, 'www.google.com') + self.assertEquals(port, 443) + self.assert_(path.startswith('/service/subservice?')) + self.assert_(path.find('name=value') >= len('/service/subservice?')) + self.assert_(path.find('newname=newvalue') >= len('/service/subservice?')) + + def testParseHttpsUrlWithPort(self): + as = atom.service.AtomService('code.google.com') + self.assertEquals(as.server, 'code.google.com') + (host, port, ssl, path) = atom.service.ProcessUrl(as, + 'https://www.google.com:13981/service/subservice?name=value&newname=newvalue') + + 
self.assertEquals(ssl, True) + self.assertEquals(host, 'www.google.com') + self.assertEquals(port, 13981) + self.assert_(path.startswith('/service/subservice?')) + self.assert_(path.find('name=value') >= len('/service/subservice?')) + self.assert_(path.find('newname=newvalue') >= len('/service/subservice?')) + + def testSetBasicAuth(self): + client = atom.service.AtomService() + client.UseBasicAuth('foo', 'bar') + token = client.token_store.find_token('http://') + self.assert_(isinstance(token, atom.service.BasicAuthToken)) + self.assertEquals(token.auth_header, 'Basic Zm9vOmJhcg==') + client.UseBasicAuth('','') + token = client.token_store.find_token('http://') + self.assert_(isinstance(token, atom.service.BasicAuthToken)) + self.assertEquals(token.auth_header, 'Basic Og==') + + def testProcessUrlWithStringForService(self): + (server, port, ssl, uri) = atom.service.ProcessUrl( + service='www.google.com', url='/base/feeds/items') + self.assertEquals(server, 'www.google.com') + self.assertEquals(port, 80) + self.assertEquals(ssl, False) + self.assert_(uri.startswith('/base/feeds/items')) + + client = atom.service.AtomService() + client.server = 'www.google.com' + client.ssl = True + (server, port, ssl, uri) = atom.service.ProcessUrl( + service=client, url='/base/feeds/items') + self.assertEquals(server, 'www.google.com') + self.assertEquals(ssl, True) + self.assert_(uri.startswith('/base/feeds/items')) + + (server, port, ssl, uri) = atom.service.ProcessUrl(service=None, + url='https://www.google.com/base/feeds/items') + self.assertEquals(server, 'www.google.com') + self.assertEquals(port, 443) + self.assertEquals(ssl, True) + self.assert_(uri.startswith('/base/feeds/items')) + + +if __name__ == '__main__': + unittest.main() diff --git a/gdata.py-1.2.3/tests/atom_tests/token_store_test.py b/gdata.py-1.2.3/tests/atom_tests/token_store_test.py new file mode 100755 index 0000000..5124212 --- /dev/null +++ b/gdata.py-1.2.3/tests/atom_tests/token_store_test.py @@ -0,0 
+1,74 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import unittest +import atom.token_store +import atom.http_interface +import atom.service +import atom.url + +class TokenStoreTest(unittest.TestCase): + + def setUp(self): + self.token = atom.service.BasicAuthToken('aaa1', scopes=[ + 'http://example.com/', 'http://example.org']) + self.tokens = atom.token_store.TokenStore() + self.tokens.add_token(self.token) + + def testAddAndFindTokens(self): + self.assert_(self.tokens.find_token('http://example.com/') == self.token) + self.assert_(self.tokens.find_token('http://example.org/') == self.token) + self.assert_(self.tokens.find_token('http://example.org/foo?ok=1') == ( + self.token)) + self.assert_(isinstance(self.tokens.find_token('http://example.net/'), + atom.http_interface.GenericToken)) + self.assert_(isinstance(self.tokens.find_token('example.com/'), + atom.http_interface.GenericToken)) + + def testFindTokenUsingMultipleUrls(self): + self.assert_(self.tokens.find_token( + 'http://example.com/') == self.token) + self.assert_(self.tokens.find_token( + 'http://example.org/bar') == self.token) + self.assert_(isinstance(self.tokens.find_token(''), + atom.http_interface.GenericToken)) + self.assert_(isinstance(self.tokens.find_token( + 'http://example.net/'), + atom.http_interface.GenericToken)) + + def testFindTokenWithPartialScopes(self): + token = 
atom.service.BasicAuthToken('aaa1', + scopes=[atom.url.Url(host='www.example.com', path='/foo'), + atom.url.Url(host='www.example.net')]) + token_store = atom.token_store.TokenStore() + token_store.add_token(token) + self.assert_(token_store.find_token( + 'http://www.example.com/foobar') == token) + self.assert_(token_store.find_token( + 'https://www.example.com:443/foobar') == token) + self.assert_(token_store.find_token( + 'http://www.example.net/xyz') == token) + self.assert_(token_store.find_token('http://www.example.org/') != token) + self.assert_(isinstance(token_store.find_token('http://example.org/'), + atom.http_interface.GenericToken)) + + +if __name__ == '__main__': + unittest.main() diff --git a/gdata.py-1.2.3/tests/atom_tests/url_test.py b/gdata.py-1.2.3/tests/atom_tests/url_test.py new file mode 100755 index 0000000..1e239f4 --- /dev/null +++ b/gdata.py-1.2.3/tests/atom_tests/url_test.py @@ -0,0 +1,91 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import unittest +import atom.url + + +class UrlTest(unittest.TestCase): + + def testParseUrl(self): + url = atom.url.parse_url('http://www.google.com/calendar/feeds') + self.assert_(url.protocol == 'http') + self.assert_(url.host == 'www.google.com') + self.assert_(url.path == '/calendar/feeds') + self.assert_(url.params == {}) + + url = atom.url.parse_url('http://example.com:6091/calendar/feeds') + self.assert_(url.protocol == 'http') + self.assert_(url.host == 'example.com') + self.assert_(url.port == '6091') + self.assert_(url.path == '/calendar/feeds') + self.assert_(url.params == {}) + + url = atom.url.parse_url('/calendar/feeds?foo=bar') + self.assert_(url.protocol is None) + self.assert_(url.host is None) + self.assert_(url.path == '/calendar/feeds') + self.assert_(len(url.params.keys()) == 1) + self.assert_('foo' in url.params) + self.assert_(url.params['foo'] == 'bar') + + url = atom.url.parse_url('/calendar/feeds?my+foo=bar%3Dx') + self.assert_(len(url.params.keys()) == 1) + self.assert_('my foo' in url.params) + self.assert_(url.params['my foo'] == 'bar=x') + + def testUrlToString(self): + url = atom.url.Url(port=80) + url.host = 'example.com' + self.assert_(str(url), '//example.com:80') + + url = atom.url.Url(protocol='http', host='example.com', path='/feed') + url.params['has spaces'] = 'sneaky=values?&!' + self.assert_(url.to_string() == ( + 'http://example.com/feed?has+spaces=sneaky%3Dvalues%3F%26%21')) + + def testGetRequestUri(self): + url = atom.url.Url(protocol='http', host='example.com', path='/feed') + url.params['has spaces'] = 'sneaky=values?&!' 
+ self.assert_(url.get_request_uri() == ( + '/feed?has+spaces=sneaky%3Dvalues%3F%26%21')) + self.assert_(url.get_param_string() == ( + 'has+spaces=sneaky%3Dvalues%3F%26%21')) + + def testComparistons(self): + url1 = atom.url.Url(protocol='http', host='example.com', path='/feed', + params={'x':'1', 'y':'2'}) + url2 = atom.url.Url(host='example.com', port=80, path='/feed', + params={'y':'2', 'x':'1'}) + self.assertEquals(url1, url2) + url3 = atom.url.Url(host='example.com', port=81, path='/feed', + params={'x':'1', 'y':'2'}) + self.assert_(url1 != url3) + self.assert_(url2 != url3) + url4 = atom.url.Url(protocol='ftp', host='example.com', path='/feed', + params={'x':'1', 'y':'2'}) + self.assert_(url1 != url4) + self.assert_(url2 != url4) + self.assert_(url3 != url4) + + + +if __name__ == '__main__': + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_test.py b/gdata.py-1.2.3/tests/gdata_test.py new file mode 100755 index 0000000..1600ef0 --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_test.py @@ -0,0 +1,348 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +__author__ = 'api.jscudder@gmail.com (Jeff Scudder)' + + +import unittest +try: + from xml.etree import ElementTree +except ImportError: + from elementtree import ElementTree +import gdata +import atom +from gdata import test_data + + +class StartIndexTest(unittest.TestCase): + + def setUp(self): + self.start_index = gdata.StartIndex() + + def testToAndFromString(self): + self.start_index.text = '1' + self.assert_(self.start_index.text == '1') + new_start_index = gdata.StartIndexFromString(self.start_index.ToString()) + self.assert_(self.start_index.text == new_start_index.text) + + +class ItemsPerPageTest(unittest.TestCase): + + def setUp(self): + self.items_per_page = gdata.ItemsPerPage() + + def testToAndFromString(self): + self.items_per_page.text = '10' + self.assert_(self.items_per_page.text == '10') + new_items_per_page = gdata.ItemsPerPageFromString( + self.items_per_page.ToString()) + self.assert_(self.items_per_page.text == new_items_per_page.text) + + +class GDataEntryTest(unittest.TestCase): + + def testIdShouldBeCleaned(self): + entry = gdata.GDataEntryFromString(test_data.XML_ENTRY_1) + element_tree = ElementTree.fromstring(test_data.XML_ENTRY_1) + self.assert_(element_tree.findall( + '{http://www.w3.org/2005/Atom}id')[0].text != entry.id.text) + self.assert_(entry.id.text == 'http://www.google.com/test/id/url') + + def testGeneratorShouldBeCleaned(self): + feed = gdata.GDataFeedFromString(test_data.GBASE_FEED) + element_tree = ElementTree.fromstring(test_data.GBASE_FEED) + self.assert_(element_tree.findall('{http://www.w3.org/2005/Atom}generator' + )[0].text != feed.generator.text) + self.assert_(feed.generator.text == 'GoogleBase') + + def testAllowsEmptyId(self): + entry = gdata.GDataEntry() + try: + entry.id = atom.Id() + except AttributeError: + self.fail('Empty id should not raise an attribute error.') + + +class LinkFinderTest(unittest.TestCase): + + def setUp(self): + self.entry = gdata.GDataEntryFromString(test_data.XML_ENTRY_1) + + def 
testLinkFinderGetsLicenseLink(self): + self.assertEquals(isinstance(self.entry.GetLicenseLink(), atom.Link), + True) + self.assertEquals(self.entry.GetLicenseLink().href, + 'http://creativecommons.org/licenses/by-nc/2.5/rdf') + self.assertEquals(self.entry.GetLicenseLink().rel, 'license') + + def testLinkFinderGetsAlternateLink(self): + self.assertEquals(isinstance(self.entry.GetAlternateLink(), atom.Link), + True) + self.assertEquals(self.entry.GetAlternateLink().href, + 'http://www.provider-host.com/123456789') + self.assertEquals(self.entry.GetAlternateLink().rel, 'alternate') + + +class GDataFeedTest(unittest.TestCase): + + def testCorrectConversionToElementTree(self): + test_feed = gdata.GDataFeedFromString(test_data.GBASE_FEED) + self.assert_(test_feed.total_results is not None) + element_tree = test_feed._ToElementTree() + feed = element_tree.find('{http://www.w3.org/2005/Atom}feed') + self.assert_(element_tree.find( + '{http://a9.com/-/spec/opensearchrss/1.0/}totalResults') is not None) + + def testAllowsEmptyId(self): + feed = gdata.GDataFeed() + try: + feed.id = atom.Id() + except AttributeError: + self.fail('Empty id should not raise an attribute error.') + + +class BatchEntryTest(unittest.TestCase): + + def testCorrectConversionFromAndToString(self): + batch_entry = gdata.BatchEntryFromString(test_data.BATCH_ENTRY) + + self.assertEquals(batch_entry.batch_id.text, 'itemB') + self.assertEquals(batch_entry.id.text, + 'http://www.google.com/base/feeds/items/' + '2173859253842813008') + self.assertEquals(batch_entry.batch_operation.type, 'insert') + self.assertEquals(batch_entry.batch_status.code, '201') + self.assertEquals(batch_entry.batch_status.reason, 'Created') + + new_entry = gdata.BatchEntryFromString(str(batch_entry)) + + self.assertEquals(batch_entry.batch_id.text, new_entry.batch_id.text) + self.assertEquals(batch_entry.id.text, new_entry.id.text) + self.assertEquals(batch_entry.batch_operation.type, + new_entry.batch_operation.type) + 
self.assertEquals(batch_entry.batch_status.code, + new_entry.batch_status.code) + self.assertEquals(batch_entry.batch_status.reason, + new_entry.batch_status.reason) + + +class BatchFeedTest(unittest.TestCase): + + def setUp(self): + self.batch_feed = gdata.BatchFeed() + self.example_entry = gdata.BatchEntry( + atom_id=atom.Id(text='http://example.com/1'), text='This is a test') + + def testConvertRequestFeed(self): + batch_feed = gdata.BatchFeedFromString(test_data.BATCH_FEED_REQUEST) + + self.assertEquals(len(batch_feed.entry), 4) + for entry in batch_feed.entry: + self.assert_(isinstance(entry, gdata.BatchEntry)) + self.assertEquals(batch_feed.title.text, 'My Batch Feed') + + new_feed = gdata.BatchFeedFromString(str(batch_feed)) + + self.assertEquals(len(new_feed.entry), 4) + for entry in new_feed.entry: + self.assert_(isinstance(entry, gdata.BatchEntry)) + self.assertEquals(new_feed.title.text, 'My Batch Feed') + + def testConvertResultFeed(self): + batch_feed = gdata.BatchFeedFromString(test_data.BATCH_FEED_RESULT) + + self.assertEquals(len(batch_feed.entry), 4) + for entry in batch_feed.entry: + self.assert_(isinstance(entry, gdata.BatchEntry)) + if entry.id.text == ('http://www.google.com/base/feeds/items/' + '2173859253842813008'): + self.assertEquals(entry.batch_operation.type, 'insert') + self.assertEquals(entry.batch_id.text, 'itemB') + self.assertEquals(entry.batch_status.code, '201') + self.assertEquals(entry.batch_status.reason, 'Created') + self.assertEquals(batch_feed.title.text, 'My Batch') + + new_feed = gdata.BatchFeedFromString(str(batch_feed)) + + self.assertEquals(len(new_feed.entry), 4) + for entry in new_feed.entry: + self.assert_(isinstance(entry, gdata.BatchEntry)) + if entry.id.text == ('http://www.google.com/base/feeds/items/' + '2173859253842813008'): + self.assertEquals(entry.batch_operation.type, 'insert') + self.assertEquals(entry.batch_id.text, 'itemB') + self.assertEquals(entry.batch_status.code, '201') + 
self.assertEquals(entry.batch_status.reason, 'Created') + self.assertEquals(new_feed.title.text, 'My Batch') + + def testAddBatchEntry(self): + try: + self.batch_feed.AddBatchEntry(batch_id_string='a') + self.fail('AddBatchEntry with neither entry or URL should raise Error') + except gdata.MissingRequiredParameters: + pass + + new_entry = self.batch_feed.AddBatchEntry( + id_url_string='http://example.com/1') + self.assertEquals(len(self.batch_feed.entry), 1) + self.assertEquals(self.batch_feed.entry[0].id.text, + 'http://example.com/1') + self.assertEquals(self.batch_feed.entry[0].batch_id.text, '0') + self.assertEquals(new_entry.id.text, 'http://example.com/1') + self.assertEquals(new_entry.batch_id.text, '0') + + to_add = gdata.BatchEntry(atom_id=atom.Id(text='originalId')) + new_entry = self.batch_feed.AddBatchEntry(entry=to_add, + batch_id_string='foo') + self.assertEquals(new_entry.batch_id.text, 'foo') + self.assertEquals(new_entry.id.text, 'originalId') + + to_add = gdata.BatchEntry(atom_id=atom.Id(text='originalId'), + batch_id=gdata.BatchId(text='bar')) + new_entry = self.batch_feed.AddBatchEntry(entry=to_add, + id_url_string='newId', + batch_id_string='foo') + self.assertEquals(new_entry.batch_id.text, 'foo') + self.assertEquals(new_entry.id.text, 'originalId') + + to_add = gdata.BatchEntry(atom_id=atom.Id(text='originalId'), + batch_id=gdata.BatchId(text='bar')) + new_entry = self.batch_feed.AddBatchEntry(entry=to_add, + id_url_string='newId') + self.assertEquals(new_entry.batch_id.text, 'bar') + self.assertEquals(new_entry.id.text, 'originalId') + + to_add = gdata.BatchEntry(atom_id=atom.Id(text='originalId'), + batch_id=gdata.BatchId(text='bar'), + batch_operation=gdata.BatchOperation( + op_type=gdata.BATCH_INSERT)) + self.assertEquals(to_add.batch_operation.type, gdata.BATCH_INSERT) + new_entry = self.batch_feed.AddBatchEntry(entry=to_add, + id_url_string='newId', batch_id_string='foo', + operation_string=gdata.BATCH_UPDATE) + 
self.assertEquals(new_entry.batch_operation.type, gdata.BATCH_UPDATE) + + + def testAddInsert(self): + + first_entry = gdata.BatchEntry( + atom_id=atom.Id(text='http://example.com/1'), text='This is a test1') + self.batch_feed.AddInsert(first_entry) + self.assertEquals(self.batch_feed.entry[0].batch_operation.type, + gdata.BATCH_INSERT) + self.assertEquals(self.batch_feed.entry[0].batch_id.text, '0') + + second_entry = gdata.BatchEntry( + atom_id=atom.Id(text='http://example.com/2'), text='This is a test2') + self.batch_feed.AddInsert(second_entry, batch_id_string='foo') + self.assertEquals(self.batch_feed.entry[1].batch_operation.type, + gdata.BATCH_INSERT) + self.assertEquals(self.batch_feed.entry[1].batch_id.text, 'foo') + + + third_entry = gdata.BatchEntry( + atom_id=atom.Id(text='http://example.com/3'), text='This is a test3') + third_entry.batch_operation = gdata.BatchOperation( + op_type=gdata.BATCH_DELETE) + # Add an entry with a delete operation already assigned. + self.batch_feed.AddInsert(third_entry) + # The batch entry should not have the original operation, it should + # have been changed to an insert. 
+ self.assertEquals(self.batch_feed.entry[2].batch_operation.type, + gdata.BATCH_INSERT) + self.assertEquals(self.batch_feed.entry[2].batch_id.text, '2') + + def testAddDelete(self): + # Try deleting an entry + delete_entry = gdata.BatchEntry( + atom_id=atom.Id(text='http://example.com/1'), text='This is a test') + self.batch_feed.AddDelete(entry=delete_entry) + self.assertEquals(self.batch_feed.entry[0].batch_operation.type, + gdata.BATCH_DELETE) + self.assertEquals(self.batch_feed.entry[0].id.text, + 'http://example.com/1') + self.assertEquals(self.batch_feed.entry[0].text, 'This is a test') + + # Try deleting a URL + self.batch_feed.AddDelete(url_string='http://example.com/2') + self.assertEquals(self.batch_feed.entry[0].batch_operation.type, + gdata.BATCH_DELETE) + self.assertEquals(self.batch_feed.entry[1].id.text, + 'http://example.com/2') + self.assert_(self.batch_feed.entry[1].text is None) + + def testAddQuery(self): + # Try querying with an existing batch entry + delete_entry = gdata.BatchEntry( + atom_id=atom.Id(text='http://example.com/1')) + self.batch_feed.AddQuery(entry=delete_entry) + self.assertEquals(self.batch_feed.entry[0].batch_operation.type, + gdata.BATCH_QUERY) + self.assertEquals(self.batch_feed.entry[0].id.text, + 'http://example.com/1') + + # Try querying a URL + self.batch_feed.AddQuery(url_string='http://example.com/2') + self.assertEquals(self.batch_feed.entry[0].batch_operation.type, + gdata.BATCH_QUERY) + self.assertEquals(self.batch_feed.entry[1].id.text, + 'http://example.com/2') + + def testAddUpdate(self): + # Try updating an entry + delete_entry = gdata.BatchEntry( + atom_id=atom.Id(text='http://example.com/1'), text='This is a test') + self.batch_feed.AddUpdate(entry=delete_entry) + self.assertEquals(self.batch_feed.entry[0].batch_operation.type, + gdata.BATCH_UPDATE) + self.assertEquals(self.batch_feed.entry[0].id.text, + 'http://example.com/1') + self.assertEquals(self.batch_feed.entry[0].text, 'This is a test') + + +class 
ExtendedPropertyTest(unittest.TestCase): + + def testXmlBlobRoundTrip(self): + ep = gdata.ExtendedProperty(name='blobby') + ep.SetXmlBlob('') + extension = ep.GetXmlBlobExtensionElement() + self.assertEquals(extension.tag, 'some_xml') + self.assert_(extension.namespace is None) + self.assertEquals(extension.attributes['attr'], 'test') + + ep2 = gdata.ExtendedPropertyFromString(ep.ToString()) + + extension = ep2.GetXmlBlobExtensionElement() + self.assertEquals(extension.tag, 'some_xml') + self.assert_(extension.namespace is None) + self.assertEquals(extension.attributes['attr'], 'test') + + def testGettersShouldReturnNoneWithNoBlob(self): + ep = gdata.ExtendedProperty(name='no blob') + self.assert_(ep.GetXmlBlobExtensionElement() is None) + self.assert_(ep.GetXmlBlobString() is None) + + def testGettersReturnCorrectTypes(self): + ep = gdata.ExtendedProperty(name='has blob') + ep.SetXmlBlob('') + self.assert_(isinstance(ep.GetXmlBlobExtensionElement(), + atom.ExtensionElement)) + self.assert_(isinstance(ep.GetXmlBlobString(), str)) + + +if __name__ == '__main__': + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/__init__.py b/gdata.py-1.2.3/tests/gdata_tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gdata.py-1.2.3/tests/gdata_tests/apps/__init__.py b/gdata.py-1.2.3/tests/gdata_tests/apps/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gdata.py-1.2.3/tests/gdata_tests/apps/emailsettings/__init__.py b/gdata.py-1.2.3/tests/gdata_tests/apps/emailsettings/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gdata.py-1.2.3/tests/gdata_tests/apps/emailsettings/service_test.py b/gdata.py-1.2.3/tests/gdata_tests/apps/emailsettings/service_test.py new file mode 100755 index 0000000..a9224ba --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/apps/emailsettings/service_test.py @@ -0,0 +1,122 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google +# +# Licensed under the Apache License, Version 2.0 
(the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test for Email Settings service.""" + + +__author__ = 'google-apps-apis@googlegroups.com' + + +import getpass +import gdata.apps.emailsettings.service +import unittest + + +domain = '' +admin_email = '' +admin_password = '' +username = '' + + +class EmailSettingsTest(unittest.TestCase): + """Test for the EmailSettingsService.""" + + def setUp(self): + self.es = gdata.apps.emailsettings.service.EmailSettingsService( + email=admin_email, password=admin_password, domain=domain) + self.es.ProgrammaticLogin() + + def testCreateLabel(self): + result = self.es.CreateLabel(username, label='New label!!!') + self.assertEquals(result['label'], 'New label!!!') + + def testCreateFilter(self): + result = self.es.CreateFilter(username, + from_='from_foo', + to='to_foo', + subject='subject_foo', + has_the_word='has_the_words_foo', + does_not_have_the_word='doesnt_have_foo', + has_attachment=True, + label='label_foo', + should_mark_as_read=True, + should_archive=True) + self.assertEquals(result['from'], 'from_foo') + self.assertEquals(result['to'], 'to_foo') + self.assertEquals(result['subject'], 'subject_foo') + + def testCreateSendAsAlias(self): + result = self.es.CreateSendAsAlias(username, + name='Send-as Alias', + address='user2@sizzles.org', + reply_to='user3@sizzles.org', + make_default=True) + self.assertEquals(result['name'], 'Send-as Alias') + + def testUpdateForwarding(self): + result = self.es.UpdateForwarding(username, + enable=True, + forward_to='user4@sizzles.org', + 
action=gdata.apps.emailsettings.service.KEEP) + self.assertEquals(result['enable'], 'true') + + def testUpdatePop(self): + result = self.es.UpdatePop(username, + enable=True, + enable_for=gdata.apps.emailsettings.service.ALL_MAIL, + action=gdata.apps.emailsettings.service.ARCHIVE) + self.assertEquals(result['enable'], 'true') + + def testUpdateImap(self): + result = self.es.UpdateImap(username, enable=True) + self.assertEquals(result['enable'], 'true') + + def testUpdateVacation(self): + result = self.es.UpdateVacation(username, + enable=True, + subject='Hawaii', + message='Wish you were here!', + contacts_only=True) + self.assertEquals(result['subject'], 'Hawaii') + + def testUpdateSignature(self): + result = self.es.UpdateSignature(username, signature='Signature') + self.assertEquals(result['signature'], 'Signature') + + def testUpdateLanguage(self): + result = self.es.UpdateLanguage(username, language='fr') + self.assertEquals(result['language'], 'fr') + + def testUpdateGeneral(self): + result = self.es.UpdateGeneral(username, + page_size=100, + shortcuts=True, + arrows=True, + snippets=True, + unicode=True) + self.assertEquals(result['pageSize'], '100') + + +if __name__ == '__main__': + print("""Google Apps Email Settings Service Tests + +NOTE: Please run these tests only with a test user account. 
+""") + domain = raw_input('Google Apps domain: ') + admin_email = '%s@%s' % (raw_input('Administrator username: '), domain) + admin_password = getpass.getpass('Administrator password: ') + username = raw_input('Test username: ') + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/apps/migration/__init__.py b/gdata.py-1.2.3/tests/gdata_tests/apps/migration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gdata.py-1.2.3/tests/gdata_tests/apps/migration/service_test.py b/gdata.py-1.2.3/tests/gdata_tests/apps/migration/service_test.py new file mode 100755 index 0000000..4c9ef1f --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/apps/migration/service_test.py @@ -0,0 +1,72 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test for Email Migration service.""" + + +__author__ = 'google-apps-apis@googlegroups.com' + + +import getpass +import gdata.apps.migration.service +import unittest + + +domain = '' +admin_email = '' +admin_password = '' +username = '' +MESS="""From: joe@blow.com +To: jane@doe.com +Date: Mon, 29 Sep 2008 20:00:34 -0500 (CDT) +Subject: %s + +%s""" + + +class MigrationTest(unittest.TestCase): + """Test for the MigrationService.""" + + def setUp(self): + self.ms = gdata.apps.migration.service.MigrationService( + email=admin_email, password=admin_password, domain=domain) + self.ms.ProgrammaticLogin() + + + def testImportMail(self): + self.ms.ImportMail(user_name=username, + mail_message=MESS%('Test subject', 'Test body'), + mail_item_properties=['IS_STARRED'], + mail_labels=['Test']) + + def testBatch(self): + for i in xrange(1,10): + self.ms.AddBatchEntry(mail_message=MESS%('Test batch %d'%i, 'Test batch'), + mail_item_properties=['IS_INBOX'], + mail_labels=['Test', 'Batch']) + self.ms.SubmitBatch(user_name=username) + + +if __name__ == '__main__': + print("""Google Apps Email Migration Service Tests + +NOTE: Please run these tests only with a test user account. +""") + domain = raw_input('Google Apps domain: ') + admin_email = '%s@%s' % (raw_input('Administrator username: '), domain) + admin_password = getpass.getpass('Administrator password: ') + username = raw_input('Test username: ') + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/apps/service_test.py b/gdata.py-1.2.3/tests/gdata_tests/apps/service_test.py new file mode 100755 index 0000000..df8b9c0 --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/apps/service_test.py @@ -0,0 +1,502 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 SIOS Technology, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__author__ = 'tmatsuo@sios.com (Takashi Matsuo)' + +import unittest +try: + from xml.etree import ElementTree +except ImportError: + from elementtree import ElementTree +import atom +import gdata.apps +import gdata.apps.service +import getpass +import time + + +apps_domain = '' +apps_username = '' +apps_password = '' + + +class AppsServiceUnitTest01(unittest.TestCase): + + def setUp(self): + self.postfix = time.strftime("%Y%m%d%H%M%S") + email = apps_username + '@' + apps_domain + self.apps_client = gdata.apps.service.AppsService( + email=email, domain=apps_domain, password=apps_password, + source='AppsClient "Unit" Tests') + self.apps_client.ProgrammaticLogin() + self.created_user = None + + def tearDown(self): + if self.created_user is not None: + try: + self.apps_client.DeleteUser(self.created_user.login.user_name) + except Exception, e: + pass + + def test001RetrieveUser(self): + """Tests RetrieveUser method""" + + try: + self_user_entry = self.apps_client.RetrieveUser(apps_username) + except: + self.fail('Unexpected exception occurred') + self.assert_(isinstance(self_user_entry, gdata.apps.UserEntry), + "The return value of RetrieveUser() must be an instance of " + + "apps.UserEntry: %s" % self_user_entry) + self.assertEquals(self_user_entry.login.user_name, apps_username) + + def test002RetrieveUserRaisesException(self): + """Tests if RetrieveUser() raises AppsForYourDomainException with + appropriate error code""" + + try: + non_existance = self.apps_client.RetrieveUser('nobody-' + self.postfix) + except 
gdata.apps.service.AppsForYourDomainException, e: + self.assertEquals(e.error_code, gdata.apps.service.ENTITY_DOES_NOT_EXIST) + except Exception, e: + self.fail('Unexpected exception occurred: %s' % e) + else: + self.fail('No exception occurred') + + def testSuspendAndRestoreUser(self): + # Create a test user + user_name = 'an-apps-service-test-account-' + self.postfix + family_name = 'Tester' + given_name = 'Apps' + password = '123$$abc' + suspended = 'false' + + created_user = self.apps_client.CreateUser( + user_name=user_name, family_name=family_name, given_name=given_name, + password=password, suspended=suspended) + + # Suspend then restore the new user. + entry = self.apps_client.SuspendUser(created_user.login.user_name) + self.assertEquals(entry.login.suspended, 'true') + entry = self.apps_client.RestoreUser(created_user.login.user_name) + self.assertEquals(entry.login.suspended, 'false') + + # Clean up, delete the test user. + self.apps_client.DeleteUser(user_name) + + def test003MethodsForUser(self): + """Tests methods for user""" + + user_name = 'TakashiMatsuo-' + self.postfix + family_name = 'Matsuo' + given_name = 'Takashi' + password = '123$$abc' + suspended = 'false' + + try: + created_user = self.apps_client.CreateUser( + user_name=user_name, family_name=family_name, given_name=given_name, + password=password, suspended=suspended) + except Exception, e: + self.assert_(False, 'Unexpected exception occurred: %s' % e) + + self.created_user = created_user + self.assertEquals(created_user.login.user_name, user_name) + self.assertEquals(created_user.login.suspended, suspended) + self.assertEquals(created_user.name.family_name, family_name) + self.assertEquals(created_user.name.given_name, given_name) + + # self.assertEquals(created_user.quota.limit, + # gdata.apps.service.DEFAULT_QUOTA_LIMIT) + + """Tests RetrieveAllUsers method""" + + try: + user_feed = self.apps_client.RetrieveAllUsers() + except Exception, e: + self.assert_(False, 'Unexpected exception 
occurred: %s' % e) + + succeed = False + for a_entry in user_feed.entry: + if a_entry.login.user_name == user_name: + succeed = True + self.assert_(succeed, 'There must be a user: %s' % user_name) + + """Tests UpdateUser method""" + + new_family_name = 'NewFamilyName' + new_given_name = 'NewGivenName' + new_quota = '4096' + + created_user.name.family_name = new_family_name + created_user.name.given_name = new_given_name + created_user.quota.limit = new_quota + created_user.login.suspended = 'true' + + try: + new_user_entry = self.apps_client.UpdateUser(user_name, created_user) + except Exception, e: + self.fail('Unexpected exception occurred: %s' % e) + + self.assert_(isinstance(new_user_entry, gdata.apps.UserEntry), + "new user entry must be an instance of gdata.apps.UserEntry: %s" + % new_user_entry) + self.assertEquals(new_user_entry.name.family_name, new_family_name) + self.assertEquals(new_user_entry.name.given_name, new_given_name) + self.assertEquals(new_user_entry.login.suspended, 'true') + + # quota limit update does not always success. + # self.assertEquals(new_user_entry.quota.limit, new_quota) + + nobody = gdata.apps.UserEntry() + nobody.login = gdata.apps.Login(user_name='nobody-' + self.postfix) + nobody.name = gdata.apps.Name(family_name='nobody', given_name='nobody') + + # make sure that there is no account with nobody- + self.postfix + try: + tmp_entry = self.apps_client.RetrieveUser('nobody-' + self.postfix) + except gdata.apps.service.AppsForYourDomainException, e: + self.assertEquals(e.error_code, gdata.apps.service.ENTITY_DOES_NOT_EXIST) + except Exception, e: + self.fail('Unexpected exception occurred: %s' % e) + else: + self.fail('No exception occurred') + + # make sure that UpdateUser fails with AppsForYourDomainException. 
+ try: + new_user_entry = self.apps_client.UpdateUser('nobody-' + self.postfix, + nobody) + except gdata.apps.service.AppsForYourDomainException, e: + self.assertEquals(e.error_code, gdata.apps.service.ENTITY_DOES_NOT_EXIST) + except Exception, e: + self.fail('Unexpected exception occurred: %s' % e) + else: + self.fail('No exception occurred') + + """Tests DeleteUser method""" + + try: + self.apps_client.DeleteUser(user_name) + except Exception, e: + self.assert_(False, 'Unexpected exception occurred: %s' % e) + + # make sure that the account deleted + try: + self.apps_client.RetrieveUser(user_name) + except gdata.apps.service.AppsForYourDomainException, e: + self.assertEquals(e.error_code, gdata.apps.service.ENTITY_DOES_NOT_EXIST) + except Exception, e: + self.fail('Unexpected exception occurred: %s' % e) + else: + self.fail('No exception occurred') + self.created_user = None + + # make sure that DeleteUser fails with AppsForYourDomainException. + try: + self.apps_client.DeleteUser(user_name) + except gdata.apps.service.AppsForYourDomainException, e: + self.assertEquals(e.error_code, gdata.apps.service.ENTITY_DOES_NOT_EXIST) + except Exception, e: + self.fail('Unexpected exception occurred: %s' % e) + else: + self.fail('No exception occurred') + + def test004MethodsForNickname(self): + """Tests methods for nickname""" + + # first create a user account + user_name = 'EmmyMatsuo-' + self.postfix + family_name = 'Matsuo' + given_name = 'Emmy' + password = '123$$abc' + suspended = 'false' + + try: + created_user = self.apps_client.CreateUser( + user_name=user_name, family_name=family_name, given_name=given_name, + password=password, suspended=suspended) + except Exception, e: + self.fail('Unexpected exception occurred: %s' % e) + + self.created_user = created_user + # tests CreateNickname method + nickname = 'emmy-' + self.postfix + try: + created_nickname = self.apps_client.CreateNickname(user_name, nickname) + except Exception, e: + self.fail('Unexpected exception 
occurred: %s' % e) + + self.assert_(isinstance(created_nickname, gdata.apps.NicknameEntry), + "Return value of CreateNickname method must be an instance of " + + "gdata.apps.NicknameEntry: %s" % created_nickname) + self.assertEquals(created_nickname.login.user_name, user_name) + self.assertEquals(created_nickname.nickname.name, nickname) + + # tests RetrieveNickname method + retrieved_nickname = self.apps_client.RetrieveNickname(nickname) + self.assert_(isinstance(retrieved_nickname, gdata.apps.NicknameEntry), + "Return value of RetrieveNickname method must be an instance of " + + "gdata.apps.NicknameEntry: %s" % retrieved_nickname) + self.assertEquals(retrieved_nickname.login.user_name, user_name) + self.assertEquals(retrieved_nickname.nickname.name, nickname) + + # tests RetrieveNicknames method + nickname_feed = self.apps_client.RetrieveNicknames(user_name) + self.assert_(isinstance(nickname_feed, gdata.apps.NicknameFeed), + "Return value of RetrieveNicknames method must be an instance of " + + "gdata.apps.NicknameFeed: %s" % nickname_feed) + self.assertEquals(nickname_feed.entry[0].login.user_name, user_name) + self.assertEquals(nickname_feed.entry[0].nickname.name, nickname) + + # tests RetrieveAllNicknames method + nickname_feed = self.apps_client.RetrieveAllNicknames() + self.assert_(isinstance(nickname_feed, gdata.apps.NicknameFeed), + "Return value of RetrieveAllNicknames method must be an instance of " + + "gdata.apps.NicknameFeed: %s" % nickname_feed) + succeed = False + for a_entry in nickname_feed.entry: + if a_entry.login.user_name == user_name and \ + a_entry.nickname.name == nickname: + succeed = True + self.assert_(succeed, + "There must be a nickname entry named %s." 
% nickname) + + # tests DeleteNickname method + self.apps_client.DeleteNickname(nickname) + try: + non_existence = self.apps_client.RetrieveNickname(nickname) + except gdata.apps.service.AppsForYourDomainException, e: + self.assertEquals(e.error_code, gdata.apps.service.ENTITY_DOES_NOT_EXIST) + except Exception, e: + self.fail('Unexpected exception occurred: %s' % e) + else: + self.fail('No exception occurred') + +class AppsServiceUnitTest02(unittest.TestCase): + + def setUp(self): + self.postfix = time.strftime("%Y%m%d%H%M%S") + email = apps_username + '@' + apps_domain + self.apps_client = gdata.apps.service.AppsService( + email=email, domain=apps_domain, password=apps_password, + source='AppsClient "Unit" Tests') + self.apps_client.ProgrammaticLogin() + self.created_users = [] + self.created_email_lists = [] + + def tearDown(self): + for user in self.created_users: + try: + self.apps_client.DeleteUser(user.login.user_name) + except Exception, e: + print e + for email_list in self.created_email_lists: + try: + self.apps_client.DeleteEmailList(email_list.email_list.name) + except Exception, e: + print e + + def test001MethodsForEmaillist(self): + """Tests methods for emaillist """ + + user_name = 'YujiMatsuo-' + self.postfix + family_name = 'Matsuo' + given_name = 'Yuji' + password = '123$$abc' + suspended = 'false' + + try: + user_yuji = self.apps_client.CreateUser( + user_name=user_name, family_name=family_name, given_name=given_name, + password=password, suspended=suspended) + except Exception, e: + self.fail('Unexpected exception occurred: %s' % e) + + self.created_users.append(user_yuji) + + user_name = 'TaroMatsuo-' + self.postfix + family_name = 'Matsuo' + given_name = 'Taro' + password = '123$$abc' + suspended = 'false' + + try: + user_taro = self.apps_client.CreateUser( + user_name=user_name, family_name=family_name, given_name=given_name, + password=password, suspended=suspended) + except Exception, e: + self.fail('Unexpected exception occurred: %s' % e) 
+ + self.created_users.append(user_taro) + + # tests CreateEmailList method + list_name = 'list01-' + self.postfix + try: + created_email_list = self.apps_client.CreateEmailList(list_name) + except Exception, e: + self.fail('Unexpected exception occurred: %s' % e) + + self.assert_(isinstance(created_email_list, gdata.apps.EmailListEntry), + "Return value of CreateEmailList method must be an instance of " + + "EmailListEntry: %s" % created_email_list) + self.assertEquals(created_email_list.email_list.name, list_name) + self.created_email_lists.append(created_email_list) + + # tests AddRecipientToEmailList method + try: + recipient = self.apps_client.AddRecipientToEmailList( + user_yuji.login.user_name + '@' + apps_domain, + list_name) + except Exception, e: + self.fail('Unexpected exception occurred: %s' % e) + + self.assert_(isinstance(recipient, gdata.apps.EmailListRecipientEntry), + "Return value of AddRecipientToEmailList method must be an instance " + + "of EmailListRecipientEntry: %s" % recipient) + self.assertEquals(recipient.who.email, + user_yuji.login.user_name + '@' + apps_domain) + + try: + recipient = self.apps_client.AddRecipientToEmailList( + user_taro.login.user_name + '@' + apps_domain, + list_name) + except Exception, e: + self.fail('Unexpected exception occurred: %s' % e) + + # tests RetrieveAllRecipients method + try: + recipient_feed = self.apps_client.RetrieveAllRecipients(list_name) + except Exception, e: + self.fail('Unexpected exception occurred: %s' % e) + + self.assert_(isinstance(recipient_feed, gdata.apps.EmailListRecipientFeed), + "Return value of RetrieveAllRecipients method must be an instance " + + "of EmailListRecipientFeed: %s" % recipient_feed) + self.assertEquals(len(recipient_feed.entry), 2) + + # tests RemoveRecipientFromEmailList method + try: + self.apps_client.RemoveRecipientFromEmailList( + user_taro.login.user_name + '@' + apps_domain, list_name) + except Exception, e: + self.fail('Unexpected exception occurred: %s' % e) + 
+ # make sure that removal succeeded. + try: + recipient_feed = self.apps_client.RetrieveAllRecipients(list_name) + except Exception, e: + self.fail('Unexpected exception occurred: %s' % e) + + self.assert_(isinstance(recipient_feed, gdata.apps.EmailListRecipientFeed), + "Return value of RetrieveAllRecipients method must be an instance " + + "of EmailListRecipientFeed: %s" % recipient_feed) + self.assertEquals(len(recipient_feed.entry), 1) + + # tests RetrieveAllEmailLists + try: + list_feed = self.apps_client.RetrieveAllEmailLists() + except Exception, e: + self.fail('Unexpected exception occurred: %s' % e) + self.assert_(isinstance(list_feed, gdata.apps.EmailListFeed), + "Return value of RetrieveAllEmailLists method must be an instance" + + "of EmailListFeed: %s" % list_feed) + succeed = False + for email_list in list_feed.entry: + if email_list.email_list.name == list_name: + succeed = True + self.assert_(succeed, "There must be an email list named %s" % list_name) + + # tests RetrieveEmailLists method. 
+ try: + list_feed = self.apps_client.RetrieveEmailLists( + user_yuji.login.user_name + '@' + apps_domain) + except Exception, e: + self.fail('Unexpected exception occurred: %s' % e) + self.assert_(isinstance(list_feed, gdata.apps.EmailListFeed), + "Return value of RetrieveEmailLists method must be an instance" + + "of EmailListFeed: %s" % list_feed) + succeed = False + for email_list in list_feed.entry: + if email_list.email_list.name == list_name: + succeed = True + self.assert_(succeed, "There must be an email list named %s" % list_name) + + def testRetrieveEmailList(self): + new_list = self.apps_client.CreateEmailList('my_testing_email_list') + retrieved_list = self.apps_client.RetrieveEmailList('my_testing_email_list') + self.assertEquals(new_list.title.text, retrieved_list.title.text) + self.assertEquals(new_list.id.text, retrieved_list.id.text) + self.assertEquals(new_list.email_list.name, retrieved_list.email_list.name) + + self.apps_client.DeleteEmailList('my_testing_email_list') + + # Should not be able to retrieve the deleted list. + try: + removed_list = self.apps_client.RetrieveEmailList('my_testing_email_list') + self.fail() + except gdata.apps.service.AppsForYourDomainException: + pass + + +class AppsServiceUnitTest03(unittest.TestCase): + + def setUp(self): + self.postfix = time.strftime("%Y%m%d%H%M%S") + email = apps_username + '@' + apps_domain + self.apps_client = gdata.apps.service.AppsService( + email=email, domain=apps_domain, password=apps_password, + source='AppsClient "Unit" Tests') + self.apps_client.ProgrammaticLogin() + self.created_users = [] + self.created_email_lists = [] + + def tearDown(self): + for user in self.created_users: + try: + self.apps_client.DeleteUser(user.login.user_name) + except Exception, e: + print e + for email_list in self.created_email_lists: + try: + self.apps_client.DeleteEmailList(email_list.email_list.name) + except Exception, e: + print e + + def test001Pagenation(self): + """Tests for pagination. 
It takes toooo long.""" + + list_feed = self.apps_client.RetrieveAllEmailLists() + quantity = len(list_feed.entry) + list_nums = 101 + for i in range(list_nums): + list_name = 'list%03d-' % i + self.postfix + try: + created_email_list = self.apps_client.CreateEmailList(list_name) + except Exception, e: + self.fail('Unexpected exception occurred: %s' % e) + self.created_email_lists.append(created_email_list) + + list_feed = self.apps_client.RetrieveAllEmailLists() + self.assertEquals(len(list_feed.entry), list_nums + quantity) + +if __name__ == '__main__': + print ('Google Apps Service Tests\nNOTE: Please run these tests only with ' + 'a test domain. The tests may delete or update your domain\'s ' + 'account data.') + apps_domain = raw_input('Please enter your domain: ') + apps_username = raw_input('Please enter your username of admin account: ') + apps_password = getpass.getpass() + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/apps_test.py b/gdata.py-1.2.3/tests/gdata_tests/apps_test.py new file mode 100755 index 0000000..0299232 --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/apps_test.py @@ -0,0 +1,583 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 SIOS Technology, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +__author__ = 'tmatsuo@sios.com (Takashi MATSUO)' + +import unittest +try: + from xml.etree import ElementTree +except ImportError: + from elementtree import ElementTree +import atom +import gdata +from gdata import test_data +import gdata.apps + +class AppsEmailListRecipientFeedTest(unittest.TestCase): + + def setUp(self): + self.rcpt_feed = gdata.apps.EmailListRecipientFeedFromString( + test_data.EMAIL_LIST_RECIPIENT_FEED) + + def testEmailListRecipientEntryCount(self): + """Count EmailListRecipient entries in EmailListRecipientFeed""" + + self.assertEquals(len(self.rcpt_feed.entry), 2) + + def testLinkFinderFindsHtmlLink(self): + """Tests the return value of GetXXXLink() methods""" + + self.assert_(self.rcpt_feed.GetSelfLink() is not None) + self.assert_(self.rcpt_feed.GetNextLink() is not None) + self.assert_(self.rcpt_feed.GetEditLink() is None) + self.assert_(self.rcpt_feed.GetHtmlLink() is None) + + def testStartItem(self): + """Tests the existence of in + EmailListRecipientFeed and verifies the value""" + + self.assert_(isinstance(self.rcpt_feed.start_index, gdata.StartIndex), + "EmailListRecipient feed element must be " + + "an instance of gdata.OpenSearch: %s" % self.rcpt_feed.start_index) + self.assertEquals(self.rcpt_feed.start_index.text, "1") + + def testEmailListRecipientEntries(self): + """Tests the existence of in EmailListRecipientFeed + and simply verifies the value""" + + for a_entry in self.rcpt_feed.entry: + self.assert_(isinstance(a_entry, gdata.apps.EmailListRecipientEntry), + "EmailListRecipient Feed must be an instance of " + + "apps.EmailListRecipientEntry: %s" % a_entry) + + self.assertEquals(self.rcpt_feed.entry[0].who.email, "joe@example.com") + self.assertEquals(self.rcpt_feed.entry[1].who.email, "susan@example.com") + +class AppsEmailListFeedTest(unittest.TestCase): + + def setUp(self): + self.list_feed = gdata.apps.EmailListFeedFromString( + test_data.EMAIL_LIST_FEED) + + def testEmailListEntryCount(self): + """Count EmailList 
entries in EmailListFeed""" + + self.assertEquals(len(self.list_feed.entry), 2) + + def testLinkFinderFindsHtmlLink(self): + """Tests the return value of GetXXXLink() methods""" + + self.assert_(self.list_feed.GetSelfLink() is not None) + self.assert_(self.list_feed.GetNextLink() is not None) + self.assert_(self.list_feed.GetEditLink() is None) + self.assert_(self.list_feed.GetHtmlLink() is None) + + def testStartItem(self): + """Tests the existence of in EmailListFeed + and verifies the value""" + + self.assert_(isinstance(self.list_feed.start_index, gdata.StartIndex), + "EmailList feed element must be an instance " + + "of gdata.OpenSearch: %s" % self.list_feed.start_index) + self.assertEquals(self.list_feed.start_index.text, "1") + + def testUserEntries(self): + """Tests the existence of in EmailListFeed and simply + verifies the value""" + + for a_entry in self.list_feed.entry: + self.assert_(isinstance(a_entry, gdata.apps.EmailListEntry), + "EmailList Feed must be an instance of " + + "apps.EmailListEntry: %s" % a_entry) + + self.assertEquals(self.list_feed.entry[0].email_list.name, "us-sales") + self.assertEquals(self.list_feed.entry[1].email_list.name, "us-eng") + +class AppsUserFeedTest(unittest.TestCase): + + def setUp(self): + self.user_feed = gdata.apps.UserFeedFromString(test_data.USER_FEED) + + def testUserEntryCount(self): + """Count User entries in UserFeed""" + + self.assertEquals(len(self.user_feed.entry), 2) + + def testLinkFinderFindsHtmlLink(self): + """Tests the return value of GetXXXLink() methods""" + + self.assert_(self.user_feed.GetSelfLink() is not None) + self.assert_(self.user_feed.GetNextLink() is not None) + self.assert_(self.user_feed.GetEditLink() is None) + self.assert_(self.user_feed.GetHtmlLink() is None) + + def testStartItem(self): + """Tests the existence of in UserFeed and + verifies the value""" + + self.assert_(isinstance(self.user_feed.start_index, gdata.StartIndex), + "User feed element must be an instance " + + "of 
gdata.OpenSearch: %s" % self.user_feed.start_index) + self.assertEquals(self.user_feed.start_index.text, "1") + + def testUserEntries(self): + """Tests the existence of in UserFeed and simply + verifies the value""" + + for a_entry in self.user_feed.entry: + self.assert_(isinstance(a_entry, gdata.apps.UserEntry), + "User Feed must be an instance of " + + "apps.UserEntry: %s" % a_entry) + + self.assertEquals(self.user_feed.entry[0].login.user_name, "TestUser") + self.assertEquals(self.user_feed.entry[0].who.email, + "TestUser@example.com") + self.assertEquals(self.user_feed.entry[1].login.user_name, "JohnSmith") + self.assertEquals(self.user_feed.entry[1].who.email, + "JohnSmith@example.com") + +class AppsNicknameFeedTest(unittest.TestCase): + + def setUp(self): + self.nick_feed = gdata.apps.NicknameFeedFromString(test_data.NICK_FEED) + + def testNicknameEntryCount(self): + """Count Nickname entries in NicknameFeed""" + + self.assertEquals(len(self.nick_feed.entry), 2) + + def testId(self): + """Tests the existence of in NicknameFeed and verifies + the value""" + + self.assert_(isinstance(self.nick_feed.id, atom.Id), + "Nickname feed element must be an instance of " + + "atom.Id: %s" % self.nick_feed.id) + + self.assertEquals(self.nick_feed.id.text, + "http://apps-apis.google.com/a/feeds/example.com/nickname/2.0") + + def testStartItem(self): + """Tests the existence of in NicknameFeed + and verifies the value""" + + self.assert_(isinstance(self.nick_feed.start_index, gdata.StartIndex), + "Nickname feed element must be an instance " + + "of gdata.OpenSearch: %s" % self.nick_feed.start_index) + self.assertEquals(self.nick_feed.start_index.text, "1") + + def testItemsPerPage(self): + """Tests the existence of in + NicknameFeed and verifies the value""" + + self.assert_(isinstance(self.nick_feed.items_per_page, gdata.ItemsPerPage), + "Nickname feed element must be an " + + "instance of gdata.ItemsPerPage: %s" % self.nick_feed.items_per_page) + + 
self.assertEquals(self.nick_feed.items_per_page.text, "2") + + def testLinkFinderFindsHtmlLink(self): + """Tests the return value of GetXXXLink() methods""" + + self.assert_(self.nick_feed.GetSelfLink() is not None) + self.assert_(self.nick_feed.GetEditLink() is None) + self.assert_(self.nick_feed.GetHtmlLink() is None) + + def testNicknameEntries(self): + """Tests the existence of in NicknameFeed and simply + verifies the value""" + + for a_entry in self.nick_feed.entry: + self.assert_(isinstance(a_entry, gdata.apps.NicknameEntry), + "Nickname Feed must be an instance of " + + "apps.NicknameEntry: %s" % a_entry) + + self.assertEquals(self.nick_feed.entry[0].nickname.name, "Foo") + self.assertEquals(self.nick_feed.entry[1].nickname.name, "Bar") + +class AppsEmailListRecipientEntryTest(unittest.TestCase): + + def setUp(self): + + self.rcpt_entry = gdata.apps.EmailListRecipientEntryFromString( + test_data.EMAIL_LIST_RECIPIENT_ENTRY) + + def testId(self): + """Tests the existence of in EmailListRecipientEntry and + verifies the value""" + + self.assert_( + isinstance(self.rcpt_entry.id, atom.Id), + "EmailListRecipient entry element must be an instance of " + + "atom.Id: %s" % + self.rcpt_entry.id) + + self.assertEquals( + self.rcpt_entry.id.text, + 'https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/us-sales/' + + 'recipient/TestUser%40example.com') + + def testUpdated(self): + """Tests the existence of in + EmailListRecipientEntry and verifies the value""" + + self.assert_( + isinstance(self.rcpt_entry.updated, atom.Updated), + "EmailListRecipient entry element must be an instance " + + "of atom.Updated: %s" % self.rcpt_entry.updated) + + self.assertEquals(self.rcpt_entry.updated.text, + '1970-01-01T00:00:00.000Z') + + def testCategory(self): + """Tests the existence of in + EmailListRecipientEntry and verifies the value""" + + for a_category in self.rcpt_entry.category: + self.assert_( + isinstance(a_category, atom.Category), + "EmailListRecipient entry 
element must be an " + + "instance of atom.Category: %s" % a_category) + + self.assertEquals(a_category.scheme, + "http://schemas.google.com/g/2005#kind") + + self.assertEquals(a_category.term, + "http://schemas.google.com/apps/2006#" + + "emailList.recipient") + + def testTitle(self): + """Tests the existence of in EmailListRecipientEntry + and verifies the value""" + + self.assert_( + isinstance(self.rcpt_entry.title, atom.Title), + "EmailListRecipient entry element must be an instance of " + + "atom.Title: %s" % self.rcpt_entry.title) + + self.assertEquals(self.rcpt_entry.title.text, 'TestUser') + + def testLinkFinderFindsHtmlLink(self): + """Tests the return value of GetXXXLink() methods""" + + self.assert_(self.rcpt_entry.GetSelfLink() is not None) + self.assert_(self.rcpt_entry.GetEditLink() is not None) + self.assert_(self.rcpt_entry.GetHtmlLink() is None) + + def testWho(self): + """Tests the existence of a in EmailListRecipientEntry + and verifies the value""" + + self.assert_(isinstance(self.rcpt_entry.who, gdata.apps.Who), + "EmailListRecipient entry must be an instance of " + + "apps.Who: %s" % self.rcpt_entry.who) + self.assertEquals(self.rcpt_entry.who.email, 'TestUser@example.com') + +class AppsEmailListEntryTest(unittest.TestCase): + + def setUp(self): + + self.list_entry = gdata.apps.EmailListEntryFromString( + test_data.EMAIL_LIST_ENTRY) + + def testId(self): + """Tests the existence of in EmailListEntry and verifies + the value""" + + self.assert_( + isinstance(self.list_entry.id, atom.Id), + "EmailList entry element must be an instance of atom.Id: %s" % + self.list_entry.id) + + self.assertEquals( + self.list_entry.id.text, + 'https://apps-apis.google.com/a/feeds/example.com/emailList/2.0/testlist') + + def testUpdated(self): + """Tests the existence of in EmailListEntry and + verifies the value""" + + self.assert_( + isinstance(self.list_entry.updated, atom.Updated), + "EmailList entry element must be an instance of " + + "atom.Updated: %s" % 
self.list_entry.updated) + + self.assertEquals(self.list_entry.updated.text, + '1970-01-01T00:00:00.000Z') + + def testCategory(self): + """Tests the existence of in EmailListEntry and + verifies the value""" + + for a_category in self.list_entry.category: + self.assert_( + isinstance(a_category, atom.Category), + "EmailList entry element must be an instance " + + "of atom.Category: %s" % a_category) + + self.assertEquals(a_category.scheme, + "http://schemas.google.com/g/2005#kind") + + self.assertEquals(a_category.term, + "http://schemas.google.com/apps/2006#emailList") + + def testTitle(self): + """Tests the existence of in EmailListEntry and verifies + the value""" + + self.assert_( + isinstance(self.list_entry.title, atom.Title), + "EmailList entry element must be an instance of " + + "atom.Title: %s" % self.list_entry.title) + + self.assertEquals(self.list_entry.title.text, 'testlist') + + def testLinkFinderFindsHtmlLink(self): + """Tests the return value of GetXXXLink() methods""" + + self.assert_(self.list_entry.GetSelfLink() is not None) + self.assert_(self.list_entry.GetEditLink() is not None) + self.assert_(self.list_entry.GetHtmlLink() is None) + + def testEmailList(self): + """Tests the existence of a in EmailListEntry and + verifies the value""" + + self.assert_(isinstance(self.list_entry.email_list, gdata.apps.EmailList), + "EmailList entry must be an instance of " + + "apps.EmailList: %s" % self.list_entry.email_list) + self.assertEquals(self.list_entry.email_list.name, 'testlist') + + def testFeedLink(self): + """Test the existence of a in EmailListEntry and + verifies the value""" + + for an_feed_link in self.list_entry.feed_link: + self.assert_(isinstance(an_feed_link, gdata.FeedLink), + "EmailList entry must be an instance of " + + "gdata.FeedLink: %s" % an_feed_link) + self.assertEquals(self.list_entry.feed_link[0].rel, + 'http://schemas.google.com/apps/2006#' + + 'emailList.recipients') + self.assertEquals(self.list_entry.feed_link[0].href, + 
'http://apps-apis.google.com/a/feeds/example.com/emailList/' + + '2.0/testlist/recipient/') + +class AppsNicknameEntryTest(unittest.TestCase): + + def setUp(self): + self.nick_entry = gdata.apps.NicknameEntryFromString(test_data.NICK_ENTRY) + + def testId(self): + """Tests the existence of in NicknameEntry and verifies + the value""" + + self.assert_( + isinstance(self.nick_entry.id, atom.Id), + "Nickname entry element must be an instance of atom.Id: %s" % + self.nick_entry.id) + + self.assertEquals( + self.nick_entry.id.text, + 'https://apps-apis.google.com/a/feeds/example.com/nickname/2.0/Foo') + + def testCategory(self): + """Tests the existence of in NicknameEntry and + verifies the value""" + + for a_category in self.nick_entry.category: + self.assert_( + isinstance(a_category, atom.Category), + "Nickname entry element must be an instance " + + "of atom.Category: %s" % a_category) + + self.assertEquals(a_category.scheme, + "http://schemas.google.com/g/2005#kind") + + self.assertEquals(a_category.term, + "http://schemas.google.com/apps/2006#nickname") + + def testTitle(self): + """Tests the existence of in NicknameEntry and + verifies the value""" + + self.assert_(isinstance(self.nick_entry.title, atom.Title), + "Nickname entry element must be an instance " + + "of atom.Title: %s" % self.nick_entry.title) + + self.assertEquals(self.nick_entry.title.text, "Foo") + + def testLogin(self): + """Tests the existence of in NicknameEntry and + verifies the value""" + + self.assert_(isinstance(self.nick_entry.login, gdata.apps.Login), + "Nickname entry element must be an instance " + + "of apps.Login: %s" % self.nick_entry.login) + self.assertEquals(self.nick_entry.login.user_name, "TestUser") + + def testNickname(self): + """Tests the existence of in NicknameEntry and + verifies the value""" + + self.assert_(isinstance(self.nick_entry.nickname, gdata.apps.Nickname), + "Nickname entry element must be an instance " + + "of apps.Nickname: %s" % self.nick_entry.nickname) + 
self.assertEquals(self.nick_entry.nickname.name, "Foo") + + def testLinkFinderFindsHtmlLink(self): + """Tests the return value of GetXXXLink() methods""" + + self.assert_(self.nick_entry.GetSelfLink() is not None) + self.assert_(self.nick_entry.GetEditLink() is not None) + self.assert_(self.nick_entry.GetHtmlLink() is None) + +class AppsUserEntryTest(unittest.TestCase): + + def setUp(self): + self.user_entry = gdata.apps.UserEntryFromString(test_data.USER_ENTRY) + + def testId(self): + """Tests the existence of in UserEntry and verifies the + value""" + + self.assert_( + isinstance(self.user_entry.id, atom.Id), + "User entry element must be an instance of atom.Id: %s" % + self.user_entry.id) + + self.assertEquals( + self.user_entry.id.text, + 'https://apps-apis.google.com/a/feeds/example.com/user/2.0/TestUser') + + def testUpdated(self): + """Tests the existence of in UserEntry and verifies + the value""" + + self.assert_( + isinstance(self.user_entry.updated, atom.Updated), + "User entry element must be an instance of " + + "atom.Updated: %s" % self.user_entry.updated) + + self.assertEquals(self.user_entry.updated.text, + '1970-01-01T00:00:00.000Z') + + def testCategory(self): + """Tests the existence of in UserEntry and + verifies the value""" + + for a_category in self.user_entry.category: + self.assert_( + isinstance(a_category, atom.Category), + "User entry element must be an instance " + + "of atom.Category: %s" % a_category) + + self.assertEquals(a_category.scheme, + "http://schemas.google.com/g/2005#kind") + + self.assertEquals(a_category.term, + "http://schemas.google.com/apps/2006#user") + + def testTitle(self): + """Tests the existence of in UserEntry and verifies + the value""" + + self.assert_( + isinstance(self.user_entry.title, atom.Title), + "User entry element must be an instance of atom.Title: %s" % + self.user_entry.title) + + self.assertEquals(self.user_entry.title.text, 'TestUser') + + def testLinkFinderFindsHtmlLink(self): + """Tests the 
return value of GetXXXLink() methods""" + + self.assert_(self.user_entry.GetSelfLink() is not None) + self.assert_(self.user_entry.GetEditLink() is not None) + self.assert_(self.user_entry.GetHtmlLink() is None) + + def testLogin(self): + """Tests the existence of in UserEntry and verifies + the value""" + + self.assert_(isinstance(self.user_entry.login, gdata.apps.Login), + "User entry element must be an instance of apps.Login: %s" + % self.user_entry.login) + + self.assertEquals(self.user_entry.login.user_name, 'TestUser') + self.assertEquals(self.user_entry.login.password, 'password') + self.assertEquals(self.user_entry.login.suspended, 'false') + self.assertEquals(self.user_entry.login.ip_whitelisted, 'false') + self.assertEquals(self.user_entry.login.hash_function_name, 'SHA-1') + + def testName(self): + """Tests the existence of in UserEntry and verifies + the value""" + + self.assert_(isinstance(self.user_entry.name, gdata.apps.Name), + "User entry element must be an instance of apps.Name: %s" + % self.user_entry.name) + self.assertEquals(self.user_entry.name.family_name, 'Test') + self.assertEquals(self.user_entry.name.given_name, 'User') + + def testQuota(self): + """Tests the existence of in UserEntry and verifies + the value""" + + self.assert_(isinstance(self.user_entry.quota, gdata.apps.Quota), + "User entry element must be an instance of apps.Quota: %s" + % self.user_entry.quota) + self.assertEquals(self.user_entry.quota.limit, '1024') + + def testFeedLink(self): + """Test the existence of a in UserEntry and + verifies the value""" + + for an_feed_link in self.user_entry.feed_link: + self.assert_(isinstance(an_feed_link, gdata.FeedLink), + "User entry must be an instance of gdata.FeedLink" + + ": %s" % an_feed_link) + self.assertEquals(self.user_entry.feed_link[0].rel, + 'http://schemas.google.com/apps/2006#user.nicknames') + self.assertEquals(self.user_entry.feed_link[0].href, + 'https://apps-apis.google.com/a/feeds/example.com/nickname/' + + 
'2.0?username=Test-3121') + self.assertEquals(self.user_entry.feed_link[1].rel, + 'http://schemas.google.com/apps/2006#user.emailLists') + self.assertEquals(self.user_entry.feed_link[1].href, + 'https://apps-apis.google.com/a/feeds/example.com/emailList/' + + '2.0?recipient=testlist@example.com') + + def testUpdate(self): + """Tests for modifing attributes of UserEntry""" + + self.user_entry.name.family_name = 'ModifiedFamilyName' + self.user_entry.name.given_name = 'ModifiedGivenName' + self.user_entry.quota.limit = '2048' + self.user_entry.login.password = 'ModifiedPassword' + self.user_entry.login.suspended = 'true' + modified = gdata.apps.UserEntryFromString(self.user_entry.ToString()) + + self.assertEquals(modified.name.family_name, 'ModifiedFamilyName') + self.assertEquals(modified.name.given_name, 'ModifiedGivenName') + self.assertEquals(modified.quota.limit, '2048') + self.assertEquals(modified.login.password, 'ModifiedPassword') + self.assertEquals(modified.login.suspended, 'true') + +if __name__ == '__main__': + unittest.main() + diff --git a/gdata.py-1.2.3/tests/gdata_tests/auth_test.py b/gdata.py-1.2.3/tests/gdata_tests/auth_test.py new file mode 100755 index 0000000..f17f028 --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/auth_test.py @@ -0,0 +1,569 @@ +#!/usr/bin/python +# +# Copyright (C) 2007, 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import re +import unittest +import urllib +import gdata.auth + + +CONSUMER_KEY = 'www.yourwebapp.com' +CONSUMER_SECRET = 'qB1P2kCFDpRjF+/Iww4' + +RSA_KEY = """-----BEGIN RSA PRIVATE KEY----- +MIICXAIBAAKBgQDVbOaFW+KXecfFJn1PIzYHnNXFxhaQ36QM0K5uSb0Y8NeQUlD2 +6t8aKgnm6mcb4vaopHjjdIGWgAzM5Dt0oPIiDXo+jSQbvCIXRduuAt+0cFGb2d+L +hALk4AwB8IVIkDJWwgo5Z2OLsP2r/wQlUYKm/tnvQaevK24jNYMLWVJl2QIDAQAB +AoGAU93ERBlUVEPFjaJPUX67p4gotNvfWDSZiXOjZ7FQPnG9s3e1WyH2Y5irZXMs +61dnp+NhobfRiGtvHEB/YJgyLRk/CJDnMKslo95e7o65IE9VkcyY6Yvt7YTslsRX +Eu7T0xLEA7ON46ypCwNLeWxpJ9SWisEKu2yZJnWauCXEsgUCQQD7b2ZuhGx3msoP +YEnwvucp0UxneCvb68otfERZ1J6NfNP47QJw6OwD3r1sWCJ27QZmpvtQH1f8sCk9 +t22anGG7AkEA2UzXdtQ8H1uLAN/XXX2qoLuvJK5jRswHS4GeOg4pnnDSiHg3Vbva +AxmMIL93ufvIy/xdoENwDPfcI4CbYlrDewJAGWy7W+OSIEoLsqBW+bwkHetnIXNa +ZAOkzxKoyrigS8hamupEe+xhqUaFuwXyfjobkpfCA+kXeZrKoM4CjEbR7wJAHMbf +Vd4/ZAu0edYq6DenLAgO5rWtcge9A5PTx25utovMZcQ917273mM4unGAwoGEkvcF +0x57LUx5u73hVgIdFwJBAKWGuHRwGPgTWYvhpHM0qveH+8KdU9BUt/kV4ONxIVDB +ftetEmJirqOGLECbImoLcUwQrgfMW4ZCxOioJMz/gY0= +-----END RSA PRIVATE KEY----- +""" + + +class AuthModuleUtilitiesTest(unittest.TestCase): + + def testGenerateClientLoginRequestBody(self): + body = gdata.auth.GenerateClientLoginRequestBody('jo@gmail.com', + 'password', 'test service', 'gdata.auth test') + expected_parameters = {'Email':r'jo%40gmail.com', 'Passwd':'password', + 'service':'test+service', 'source':'gdata.auth+test', + 'accountType':'HOSTED_OR_GOOGLE'} + self.__matchBody(body, expected_parameters) + + body = gdata.auth.GenerateClientLoginRequestBody('jo@gmail.com', + 'password', 'test service', 'gdata.auth test', account_type='A TEST', + captcha_token='12345', captcha_response='test') + expected_parameters['accountType'] = 'A+TEST' + expected_parameters['logintoken'] = '12345' + expected_parameters['logincaptcha'] = 'test' + self.__matchBody(body, expected_parameters) + + def __matchBody(self, body, expected_name_value_pairs): + 
parameters = body.split('&') + for param in parameters: + (name, value) = param.split('=') + self.assert_(expected_name_value_pairs[name] == value) + + def testGenerateClientLoginAuthToken(self): + http_body = ('SID=DQAAAGgA7Zg8CTN\r\n' + 'LSID=DQAAAGsAlk8BBbG\r\n' + 'Auth=DQAAAGgAdk3fA5N') + self.assert_(gdata.auth.GenerateClientLoginAuthToken(http_body) == + 'GoogleLogin auth=DQAAAGgAdk3fA5N') + + +class GenerateClientLoginRequestBodyTest(unittest.TestCase): + + def testPostBodyShouldMatchShortExample(self): + auth_body = gdata.auth.GenerateClientLoginRequestBody('johndoe@gmail.com', + 'north23AZ', 'cl', 'Gulp-CalGulp-1.05') + self.assert_(-1 < auth_body.find('Email=johndoe%40gmail.com')) + self.assert_(-1 < auth_body.find('Passwd=north23AZ')) + self.assert_(-1 < auth_body.find('service=cl')) + self.assert_(-1 < auth_body.find('source=Gulp-CalGulp-1.05')) + + def testPostBodyShouldMatchLongExample(self): + auth_body = gdata.auth.GenerateClientLoginRequestBody('johndoe@gmail.com', + 'north23AZ', 'cl', 'Gulp-CalGulp-1.05', + captcha_token='DQAAAGgA...dkI1', captcha_response='brinmar') + self.assert_(-1 < auth_body.find('logintoken=DQAAAGgA...dkI1')) + self.assert_(-1 < auth_body.find('logincaptcha=brinmar')) + + def testEquivalenceWithOldLogic(self): + email = 'jo@gmail.com' + password = 'password' + account_type = 'HOSTED' + service = 'test' + source = 'auth test' + old_request_body = urllib.urlencode({'Email': email, + 'Passwd': password, + 'accountType': account_type, + 'service': service, + 'source': source}) + new_request_body = gdata.auth.GenerateClientLoginRequestBody(email, + password, service, source, account_type=account_type) + for parameter in old_request_body.split('&'): + self.assert_(-1 < new_request_body.find(parameter)) + + +class GenerateAuthSubUrlTest(unittest.TestCase): + + def testDefaultParameters(self): + url = gdata.auth.GenerateAuthSubUrl('http://example.com/xyz?x=5', + 'http://www.google.com/test/feeds') + self.assert_(-1 < url.find( + 
r'scope=http%3A%2F%2Fwww.google.com%2Ftest%2Ffeeds')) + self.assert_(-1 < url.find( + r'next=http%3A%2F%2Fexample.com%2Fxyz%3Fx%3D5')) + self.assert_(-1 < url.find('secure=0')) + self.assert_(-1 < url.find('session=1')) + + def testAllParameters(self): + url = gdata.auth.GenerateAuthSubUrl('http://example.com/xyz?x=5', + 'http://www.google.com/test/feeds', secure=True, session=False, + request_url='https://example.com/auth') + self.assert_(-1 < url.find( + r'scope=http%3A%2F%2Fwww.google.com%2Ftest%2Ffeeds')) + self.assert_(-1 < url.find( + r'next=http%3A%2F%2Fexample.com%2Fxyz%3Fx%3D5')) + self.assert_(-1 < url.find('secure=1')) + self.assert_(-1 < url.find('session=0')) + self.assert_(url.startswith('https://example.com/auth')) + + +class GenerateOAuthRequestTokenUrlTest(unittest.TestCase): + + def testDefaultParameters(self): + oauth_input_params = gdata.auth.OAuthInputParams( + gdata.auth.OAuthSignatureMethod.RSA_SHA1, CONSUMER_KEY, + rsa_key=RSA_KEY) + scopes = [ + 'http://abcd.example.com/feeds', + 'http://www.example.com/abcd/feeds' + ] + url = gdata.auth.GenerateOAuthRequestTokenUrl( + oauth_input_params, scopes=scopes) + self.assertEquals('https', url.protocol) + self.assertEquals('www.google.com', url.host) + self.assertEquals('/accounts/OAuthGetRequestToken', url.path) + self.assertEquals('1.0', url.params['oauth_version']) + self.assertEquals('RSA-SHA1', url.params['oauth_signature_method']) + self.assert_(url.params['oauth_nonce']) + self.assert_(url.params['oauth_timestamp']) + actual_scopes = url.params['scope'].split(' ') + self.assertEquals(2, len(actual_scopes)) + for scope in actual_scopes: + self.assert_(scope in scopes) + self.assertEquals(CONSUMER_KEY, url.params['oauth_consumer_key']) + self.assert_(url.params['oauth_signature']) + + def testAllParameters(self): + oauth_input_params = gdata.auth.OAuthInputParams( + gdata.auth.OAuthSignatureMethod.HMAC_SHA1, CONSUMER_KEY, + consumer_secret=CONSUMER_SECRET) + scopes = 
['http://abcd.example.com/feeds'] + url = gdata.auth.GenerateOAuthRequestTokenUrl( + oauth_input_params, scopes=scopes, + request_token_url='https://www.example.com/accounts/OAuthRequestToken', + extra_parameters={'oauth_version': '2.0', 'my_param': 'my_value'}) + self.assertEquals('https', url.protocol) + self.assertEquals('www.example.com', url.host) + self.assertEquals('/accounts/OAuthRequestToken', url.path) + self.assertEquals('2.0', url.params['oauth_version']) + self.assertEquals('HMAC-SHA1', url.params['oauth_signature_method']) + self.assert_(url.params['oauth_nonce']) + self.assert_(url.params['oauth_timestamp']) + actual_scopes = url.params['scope'].split(' ') + self.assertEquals(1, len(actual_scopes)) + for scope in actual_scopes: + self.assert_(scope in scopes) + self.assertEquals(CONSUMER_KEY, url.params['oauth_consumer_key']) + self.assert_(url.params['oauth_signature']) + self.assertEquals('my_value', url.params['my_param']) + + +class GenerateOAuthAuthorizationUrlTest(unittest.TestCase): + + def testDefaultParameters(self): + token_key = 'ABCDDSFFDSG' + token_secret = 'SDFDSGSDADADSAF' + request_token = gdata.auth.OAuthToken(key=token_key, secret=token_secret) + url = gdata.auth.GenerateOAuthAuthorizationUrl(request_token) + self.assertEquals('https', url.protocol) + self.assertEquals('www.google.com', url.host) + self.assertEquals('/accounts/OAuthAuthorizeToken', url.path) + self.assertEquals(token_key, url.params['oauth_token']) + + def testAllParameters(self): + token_key = 'ABCDDSFFDSG' + token_secret = 'SDFDSGSDADADSAF' + scopes = [ + 'http://abcd.example.com/feeds', + 'http://www.example.com/abcd/feeds' + ] + request_token = gdata.auth.OAuthToken(key=token_key, secret=token_secret, + scopes=scopes) + url = gdata.auth.GenerateOAuthAuthorizationUrl( + request_token, + authorization_url='https://www.example.com/accounts/OAuthAuthToken', + callback_url='http://www.yourwebapp.com/print', + extra_params={'permission': '1'}, + 
include_scopes_in_callback=True, scopes_param_prefix='token_scope') + self.assertEquals('https', url.protocol) + self.assertEquals('www.example.com', url.host) + self.assertEquals('/accounts/OAuthAuthToken', url.path) + self.assertEquals(token_key, url.params['oauth_token']) + expected_callback_url = ('http://www.yourwebapp.com/print?' + 'token_scope=http%3A%2F%2Fabcd.example.com%2Ffeeds' + '+http%3A%2F%2Fwww.example.com%2Fabcd%2Ffeeds') + self.assertEquals(expected_callback_url, url.params['oauth_callback']) + + +class GenerateOAuthAccessTokenUrlTest(unittest.TestCase): + + def testDefaultParameters(self): + token_key = 'ABCDDSFFDSG' + token_secret = 'SDFDSGSDADADSAF' + authorized_request_token = gdata.auth.OAuthToken(key=token_key, + secret=token_secret) + oauth_input_params = gdata.auth.OAuthInputParams( + gdata.auth.OAuthSignatureMethod.HMAC_SHA1, CONSUMER_KEY, + consumer_secret=CONSUMER_SECRET) + url = gdata.auth.GenerateOAuthAccessTokenUrl(authorized_request_token, + oauth_input_params) + self.assertEquals('https', url.protocol) + self.assertEquals('www.google.com', url.host) + self.assertEquals('/accounts/OAuthGetAccessToken', url.path) + self.assertEquals(token_key, url.params['oauth_token']) + self.assertEquals('1.0', url.params['oauth_version']) + self.assertEquals('HMAC-SHA1', url.params['oauth_signature_method']) + self.assert_(url.params['oauth_nonce']) + self.assert_(url.params['oauth_timestamp']) + self.assertEquals(CONSUMER_KEY, url.params['oauth_consumer_key']) + self.assert_(url.params['oauth_signature']) + + def testAllParameters(self): + token_key = 'ABCDDSFFDSG' + authorized_request_token = gdata.auth.OAuthToken(key=token_key) + oauth_input_params = gdata.auth.OAuthInputParams( + gdata.auth.OAuthSignatureMethod.RSA_SHA1, CONSUMER_KEY, + rsa_key=RSA_KEY) + url = gdata.auth.GenerateOAuthAccessTokenUrl( + authorized_request_token, oauth_input_params, + access_token_url='https://www.example.com/accounts/OAuthGetAccessToken', + oauth_version= '2.0') 
+ self.assertEquals('https', url.protocol) + self.assertEquals('www.example.com', url.host) + self.assertEquals('/accounts/OAuthGetAccessToken', url.path) + self.assertEquals(token_key, url.params['oauth_token']) + self.assertEquals('2.0', url.params['oauth_version']) + self.assertEquals('RSA-SHA1', url.params['oauth_signature_method']) + self.assert_(url.params['oauth_nonce']) + self.assert_(url.params['oauth_timestamp']) + self.assertEquals(CONSUMER_KEY, url.params['oauth_consumer_key']) + self.assert_(url.params['oauth_signature']) + + +class ExtractAuthSubTokensTest(unittest.TestCase): + + def testGetTokenFromUrl(self): + url = 'http://www.yourwebapp.com/showcalendar.html?token=CKF50YzIH' + self.assert_(gdata.auth.AuthSubTokenFromUrl(url) == + 'AuthSub token=CKF50YzIH') + self.assert_(gdata.auth.TokenFromUrl(url) == 'CKF50YzIH') + url = 'http://www.yourwebapp.com/showcalendar.html?token==tokenCKF50YzIH=' + self.assert_(gdata.auth.AuthSubTokenFromUrl(url) == + 'AuthSub token==tokenCKF50YzIH=') + self.assert_(gdata.auth.TokenFromUrl(url) == '=tokenCKF50YzIH=') + + def testGetTokenFromHttpResponse(self): + response_body = ('Token=DQAA...7DCTN\r\n' + 'Expiration=20061004T123456Z') + self.assert_(gdata.auth.AuthSubTokenFromHttpBody(response_body) == + 'AuthSub token=DQAA...7DCTN') + +class CreateAuthSubTokenFlowTest(unittest.TestCase): + + def testGenerateRequest(self): + request_url = gdata.auth.generate_auth_sub_url(next='http://example.com', + scopes=['http://www.blogger.com/feeds/', + 'http://www.google.com/base/feeds/']) + self.assertEquals(request_url.protocol, 'https') + self.assertEquals(request_url.host, 'www.google.com') + self.assertEquals(request_url.params['scope'], + 'http://www.blogger.com/feeds/ http://www.google.com/base/feeds/') + self.assertEquals(request_url.params['hd'], 'default') + self.assert_(request_url.params['next'].find('auth_sub_scopes') > -1) + self.assert_(request_url.params['next'].startswith('http://example.com')) + + # Use a more 
complicated 'next' URL. + request_url = gdata.auth.generate_auth_sub_url( + next='http://example.com/?token_scope=http://www.blogger.com/feeds/', + scopes=['http://www.blogger.com/feeds/', + 'http://www.google.com/base/feeds/']) + self.assert_(request_url.params['next'].find('auth_sub_scopes') > -1) + self.assert_(request_url.params['next'].find('token_scope') > -1) + self.assert_(request_url.params['next'].startswith('http://example.com/')) + + def testParseNextUrl(self): + url = ('http://example.com/?auth_sub_scopes=http%3A%2F%2Fwww.blogger.com' + '%2Ffeeds%2F+http%3A%2F%2Fwww.google.com%2Fbase%2Ffeeds%2F&' + 'token=my_nifty_token') + token = gdata.auth.extract_auth_sub_token_from_url(url) + self.assertEquals(token.get_token_string(), 'my_nifty_token') + self.assert_(isinstance(token, gdata.auth.AuthSubToken)) + self.assert_(token.valid_for_scope('http://www.blogger.com/feeds/')) + self.assert_(token.valid_for_scope('http://www.google.com/base/feeds/')) + self.assert_( + not token.valid_for_scope('http://www.google.com/calendar/feeds/')) + + # Parse a more complicated response. 
+ url = ('http://example.com/?auth_sub_scopes=http%3A%2F%2Fwww.blogger.com' + '%2Ffeeds%2F+http%3A%2F%2Fwww.google.com%2Fbase%2Ffeeds%2F&' + 'token_scope=http%3A%2F%2Fwww.blogger.com%2Ffeeds%2F&' + 'token=second_token') + token = gdata.auth.extract_auth_sub_token_from_url(url) + self.assertEquals(token.get_token_string(), 'second_token') + self.assert_(isinstance(token, gdata.auth.AuthSubToken)) + self.assert_(token.valid_for_scope('http://www.blogger.com/feeds/')) + self.assert_(token.valid_for_scope('http://www.google.com/base/feeds/')) + self.assert_( + not token.valid_for_scope('http://www.google.com/calendar/feeds/')) + + def testParseNextWithNoToken(self): + token = gdata.auth.extract_auth_sub_token_from_url('http://example.com/') + self.assert_(token is None) + token = gdata.auth.extract_auth_sub_token_from_url( + 'http://example.com/?no_token=foo&other=1') + self.assert_(token is None) + + +class ExtractClientLoginTokenTest(unittest.TestCase): + + def testExtractFromBodyWithScopes(self): + http_body_string = ('SID=DQAAAGgA7Zg8CTN\r\n' + 'LSID=DQAAAGsAlk8BBbG\r\n' + 'Auth=DQAAAGgAdk3fA5N') + token = gdata.auth.extract_client_login_token(http_body_string, + ['http://docs.google.com/feeds/']) + self.assertEquals(token.get_token_string(), 'DQAAAGgAdk3fA5N') + self.assert_(isinstance(token, gdata.auth.ClientLoginToken)) + self.assert_(token.valid_for_scope('http://docs.google.com/feeds/')) + self.assert_(not token.valid_for_scope('http://www.blogger.com/feeds')) + + +class ExtractOAuthTokensTest(unittest.TestCase): + + def testOAuthTokenFromUrl(self): + scope_1 = 'http://docs.google.com/feeds/' + scope_2 = 'http://www.blogger.com/feeds/' + # Case 1: token and scopes both are present. 
+ url = ('http://dummy.com/?oauth_token_scope=http%3A%2F%2Fwww.blogger.com' + '%2Ffeeds%2F+http%3A%2F%2Fdocs.google.com%2Ffeeds%2F&' + 'oauth_token=CMns6t7MCxDz__8B') + token = gdata.auth.OAuthTokenFromUrl(url) + self.assertEquals('CMns6t7MCxDz__8B', token.key) + self.assertEquals(2, len(token.scopes)) + self.assert_(scope_1 in token.scopes) + self.assert_(scope_2 in token.scopes) + # Case 2: token and scopes both are present but scope_param_prefix + # passed does not match the one present in the URL. + url = ('http://dummy.com/?oauth_token_scope=http%3A%2F%2Fwww.blogger.com' + '%2Ffeeds%2F+http%3A%2F%2Fdocs.google.com%2Ffeeds%2F&' + 'oauth_token=CMns6t7MCxDz__8B') + token = gdata.auth.OAuthTokenFromUrl(url, + scopes_param_prefix='token_scope') + self.assertEquals('CMns6t7MCxDz__8B', token.key) + self.assert_(not token.scopes) + # Case 3: None present. + url = ('http://dummy.com/?no_oauth_token_scope=http%3A%2F%2Fwww.blogger.com' + '%2Ffeeds%2F+http%3A%2F%2Fdocs.google.com%2Ffeeds%2F&' + 'no_oauth_token=CMns6t7MCxDz__8B') + token = gdata.auth.OAuthTokenFromUrl(url) + self.assert_(token is None) + + def testOAuthTokenFromHttpBody(self): + token_key = 'ABCD' + token_secret = 'XYZ' + # Case 1: token key and secret both present single time. 
+ http_body = 'oauth_token=%s&oauth_token_secret=%s' % (token_key, + token_secret) + token = gdata.auth.OAuthTokenFromHttpBody(http_body) + self.assertEquals(token_key, token.key) + self.assertEquals(token_secret, token.secret) + + +class OAuthInputParametersTest(unittest.TestCase): + + def setUp(self): + self.oauth_input_parameters_hmac = gdata.auth.OAuthInputParams( + gdata.auth.OAuthSignatureMethod.HMAC_SHA1, CONSUMER_KEY, + consumer_secret=CONSUMER_SECRET) + self.oauth_input_parameters_rsa = gdata.auth.OAuthInputParams( + gdata.auth.OAuthSignatureMethod.RSA_SHA1, CONSUMER_KEY, + rsa_key=RSA_KEY) + + def testGetSignatureMethod(self): + self.assertEquals( + 'HMAC-SHA1', + self.oauth_input_parameters_hmac.GetSignatureMethod().get_name()) + rsa_signature_method = self.oauth_input_parameters_rsa.GetSignatureMethod() + self.assertEquals('RSA-SHA1', rsa_signature_method.get_name()) + self.assertEquals(RSA_KEY, rsa_signature_method._fetch_private_cert(None)) + + def testGetConsumer(self): + self.assertEquals(CONSUMER_KEY, + self.oauth_input_parameters_hmac.GetConsumer().key) + self.assertEquals(CONSUMER_KEY, + self.oauth_input_parameters_rsa.GetConsumer().key) + self.assertEquals(CONSUMER_SECRET, + self.oauth_input_parameters_hmac.GetConsumer().secret) + self.assert_(self.oauth_input_parameters_rsa.GetConsumer().secret is None) + + +class TokenClassesTest(unittest.TestCase): + + def testClientLoginToAndFromString(self): + token = gdata.auth.ClientLoginToken() + token.set_token_string('foo') + self.assertEquals(token.get_token_string(), 'foo') + self.assertEquals(token.auth_header, '%s%s' % ( + gdata.auth.PROGRAMMATIC_AUTH_LABEL, 'foo')) + token.set_token_string(token.get_token_string()) + self.assertEquals(token.get_token_string(), 'foo') + + def testAuthSubToAndFromString(self): + token = gdata.auth.AuthSubToken() + token.set_token_string('foo') + self.assertEquals(token.get_token_string(), 'foo') + self.assertEquals(token.auth_header, '%s%s' % ( + 
gdata.auth.AUTHSUB_AUTH_LABEL, 'foo')) + token.set_token_string(token.get_token_string()) + self.assertEquals(token.get_token_string(), 'foo') + + def testSecureAuthSubToAndFromString(self): + # Case 1: no token. + token = gdata.auth.SecureAuthSubToken(RSA_KEY) + token.set_token_string('foo') + self.assertEquals(token.get_token_string(), 'foo') + token.set_token_string(token.get_token_string()) + self.assertEquals(token.get_token_string(), 'foo') + self.assertEquals(str(token), 'foo') + # Case 2: token is a string + token = gdata.auth.SecureAuthSubToken(RSA_KEY, token_string='foo') + self.assertEquals(token.get_token_string(), 'foo') + token.set_token_string(token.get_token_string()) + self.assertEquals(token.get_token_string(), 'foo') + self.assertEquals(str(token), 'foo') + + def testOAuthToAndFromString(self): + token_key = 'ABCD' + token_secret = 'XYZ' + # Case 1: token key and secret both present single time. + token_string = 'oauth_token=%s&oauth_token_secret=%s' % (token_key, + token_secret) + token = gdata.auth.OAuthToken() + token.set_token_string(token_string) + self.assert_(-1 < token.get_token_string().find(token_string.split('&')[0])) + self.assert_(-1 < token.get_token_string().find(token_string.split('&')[1])) + self.assertEquals(token_key, token.key) + self.assertEquals(token_secret, token.secret) + # Case 2: token key and secret both present multiple times with unwanted + # parameters. + token_string = ('oauth_token=%s&oauth_token_secret=%s&' + 'oauth_token=%s&ExtraParams=GarbageString' % (token_key, + token_secret, + 'LMNO')) + token = gdata.auth.OAuthToken() + token.set_token_string(token_string) + self.assert_(-1 < token.get_token_string().find(token_string.split('&')[0])) + self.assert_(-1 < token.get_token_string().find(token_string.split('&')[1])) + self.assertEquals(token_key, token.key) + self.assertEquals(token_secret, token.secret) + # Case 3: Only token key present. 
+ token_string = 'oauth_token=%s' % (token_key,) + token = gdata.auth.OAuthToken() + token.set_token_string(token_string) + self.assertEquals(token_string, token.get_token_string()) + self.assertEquals(token_key, token.key) + self.assert_(not token.secret) + # Case 4: Only token key present. + token_string = 'oauth_token_secret=%s' % (token_secret,) + token = gdata.auth.OAuthToken() + token.set_token_string(token_string) + self.assertEquals(token_string, token.get_token_string()) + self.assertEquals(token_secret, token.secret) + self.assert_(not token.key) + # Case 5: None present. + token_string = '' + token = gdata.auth.OAuthToken() + token.set_token_string(token_string) + self.assert_(token.get_token_string() is None) + self.assert_(not token.key) + self.assert_(not token.secret) + + def testSecureAuthSubGetAuthHeader(self): + # Case 1: Presence of OAuth token (in case of 3-legged OAuth) + url = 'http://dummy.com/?q=notebook&s=true' + token = gdata.auth.SecureAuthSubToken(RSA_KEY, token_string='foo') + auth_header = token.GetAuthHeader('GET', url) + self.assert_('Authorization' in auth_header) + header_value = auth_header['Authorization'] + self.assert_(header_value.startswith(r'AuthSub token="foo"')) + self.assert_(-1 < header_value.find(r'sigalg="rsa-sha1"')) + self.assert_(-1 < header_value.find(r'data="')) + self.assert_(-1 < header_value.find(r'sig="')) + m = re.search(r'data="(.*?)"', header_value) + self.assert_(m is not None) + data = m.group(1) + self.assert_(data.startswith('GET')) + self.assert_(-1 < data.find(url)) + + def testOAuthGetAuthHeader(self): + # Case 1: Presence of OAuth token (in case of 3-legged OAuth) + oauth_input_params = gdata.auth.OAuthInputParams( + gdata.auth.OAuthSignatureMethod.RSA_SHA1, CONSUMER_KEY, + rsa_key=RSA_KEY) + token = gdata.auth.OAuthToken(key='ABCDDSFFDSG', + oauth_input_params=oauth_input_params) + auth_header = token.GetAuthHeader('GET', + 'http://dummy.com/?q=notebook&s=true', + realm='http://dummy.com') + 
self.assert_('Authorization' in auth_header) + header_value = auth_header['Authorization'] + self.assert_(-1 < header_value.find(r'OAuth realm="http://dummy.com"')) + self.assert_(-1 < header_value.find(r'oauth_version="1.0"')) + self.assert_(-1 < header_value.find(r'oauth_token="ABCDDSFFDSG"')) + self.assert_(-1 < header_value.find(r'oauth_nonce="')) + self.assert_(-1 < header_value.find(r'oauth_timestamp="')) + self.assert_(-1 < header_value.find(r'oauth_signature="')) + self.assert_(-1 < header_value.find( + r'oauth_consumer_key="%s"' % CONSUMER_KEY)) + self.assert_(-1 < header_value.find(r'oauth_signature_method="RSA-SHA1"')) + # Case 2: Absence of OAuth token (in case of 2-legged OAuth) + oauth_input_params = gdata.auth.OAuthInputParams( + gdata.auth.OAuthSignatureMethod.HMAC_SHA1, CONSUMER_KEY, + consumer_secret=CONSUMER_SECRET) + token = gdata.auth.OAuthToken(oauth_input_params=oauth_input_params) + auth_header = token.GetAuthHeader( + 'GET', 'http://dummy.com/?xoauth_requestor_id=user@gmail.com&q=book') + self.assert_('Authorization' in auth_header) + header_value = auth_header['Authorization'] + self.assert_(-1 < header_value.find(r'OAuth realm=""')) + self.assert_(-1 < header_value.find(r'oauth_version="1.0"')) + self.assertEquals(-1, header_value.find(r'oauth_token=')) + self.assert_(-1 < header_value.find(r'oauth_nonce="')) + self.assert_(-1 < header_value.find(r'oauth_timestamp="')) + self.assert_(-1 < header_value.find(r'oauth_signature="')) + self.assert_(-1 < header_value.find( + r'oauth_consumer_key="%s"' % CONSUMER_KEY)) + self.assert_(-1 < header_value.find(r'oauth_signature_method="HMAC-SHA1"')) + + +if __name__ == '__main__': + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/base/__init__.py b/gdata.py-1.2.3/tests/gdata_tests/base/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gdata.py-1.2.3/tests/gdata_tests/base/service_test.py b/gdata.py-1.2.3/tests/gdata_tests/base/service_test.py new file mode 100755 
index 0000000..0e160e4 --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/base/service_test.py @@ -0,0 +1,224 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import unittest +import getpass +try: + from xml.etree import ElementTree +except ImportError: + from elementtree import ElementTree +import gdata.base.service +import gdata.service +import atom.service +import gdata.base +import atom +from gdata import test_data + + +username = '' +password = '' + + +class GBaseServiceUnitTest(unittest.TestCase): + + def setUp(self): + self.gd_client = gdata.base.service.GBaseService() + self.gd_client.email = username + self.gd_client.password = password + self.gd_client.source = 'BaseClient "Unit" Tests' + self.gd_client.api_key = 'ABQIAAAAoLioN3buSs9KqIIq9VmkFxT2yXp_ZAY8_ufC' +\ + '3CFXhHIE1NvwkxRK8C1Q8OWhsWA2AIKv-cVKlVrNhQ' + + def tearDown(self): + # No teardown needed + pass + + def testProperties(self): + email_string = 'Test Email' + password_string = 'Passwd' + api_key_string = 'my API key' + + self.gd_client.email = email_string + self.assertEquals(self.gd_client.email, email_string) + self.gd_client.password = password_string + self.assertEquals(self.gd_client.password, password_string) + self.gd_client.api_key = api_key_string + self.assertEquals(self.gd_client.api_key, api_key_string) + self.gd_client.api_key = None + self.assert_(self.gd_client.api_key 
is None) + + def testQuery(self): + my_query = gdata.base.service.BaseQuery(feed='/base/feeds/snippets') + my_query['max-results'] = '25' + my_query.bq = 'digital camera [item type: products]' + result = self.gd_client.Query(my_query.ToUri()) + self.assert_(isinstance(result, atom.Feed)) + + service = gdata.base.service.GBaseService(username, password) + query = gdata.base.service.BaseQuery() + query.feed = '/base/feeds/snippets' + query.bq = 'digital camera' + feed = service.Query(query.ToUri()) + + def testQueryWithConverter(self): + my_query = gdata.base.service.BaseQuery(feed='/base/feeds/snippets') + my_query['max-results'] = '1' + my_query.bq = 'digital camera [item type: products]' + result = self.gd_client.Query(my_query.ToUri(), + converter=gdata.base.GBaseSnippetFeedFromString) + self.assert_(isinstance(result, gdata.base.GBaseSnippetFeed)) + + def testCorrectReturnTypes(self): + q = gdata.base.service.BaseQuery() + q.feed = '/base/feeds/snippets' + q.bq = 'digital camera' + result = self.gd_client.QuerySnippetsFeed(q.ToUri()) + self.assert_(isinstance(result, gdata.base.GBaseSnippetFeed)) + + q.feed = '/base/feeds/attributes' + result = self.gd_client.QueryAttributesFeed(q.ToUri()) + self.assert_(isinstance(result, gdata.base.GBaseAttributesFeed)) + + q = gdata.base.service.BaseQuery() + q.feed = '/base/feeds/itemtypes/en_US' + result = self.gd_client.QueryItemTypesFeed(q.ToUri()) + self.assert_(isinstance(result, gdata.base.GBaseItemTypesFeed)) + + q = gdata.base.service.BaseQuery() + q.feed = '/base/feeds/locales' + result = self.gd_client.QueryLocalesFeed(q.ToUri()) + self.assert_(isinstance(result, gdata.base.GBaseLocalesFeed)) + + def testInsertItemUpdateItemAndDeleteItem(self): + try: + self.gd_client.ProgrammaticLogin() + self.assert_(self.gd_client.GetClientLoginToken() is not None) + self.assert_(self.gd_client.captcha_token is None) + self.assert_(self.gd_client.captcha_url is None) + except gdata.service.CaptchaRequired: + self.fail('Required 
Captcha') + + proposed_item = gdata.base.GBaseItemFromString(test_data.TEST_BASE_ENTRY) + result = self.gd_client.InsertItem(proposed_item) + + item_id = result.id.text + self.assertEquals(result.id.text != None, True) + + updated_item = gdata.base.GBaseItemFromString(test_data.TEST_BASE_ENTRY) + updated_item.label[0].text = 'Test Item' + result = self.gd_client.UpdateItem(item_id, updated_item) + + # Try to update an incorrect item_id. + try: + result = self.gd_client.UpdateItem(item_id + '2', updated_item) + self.fail() + except gdata.service.RequestError: + pass + + result = self.gd_client.DeleteItem(item_id) + self.assert_(result) + + # Delete and already deleted item. + try: + result = self.gd_client.DeleteItem(item_id) + self.fail() + except gdata.service.RequestError: + pass + + def testInsertItemUpdateItemAndDeleteItemWithConverter(self): + try: + self.gd_client.ProgrammaticLogin() + self.assert_(self.gd_client.GetClientLoginToken() is not None) + self.assert_(self.gd_client.captcha_token is None) + self.assert_(self.gd_client.captcha_url is None) + except gdata.service.CaptchaRequired: + self.fail('Required Captcha') + + proposed_item = gdata.base.GBaseItemFromString(test_data.TEST_BASE_ENTRY) + result = self.gd_client.InsertItem(proposed_item, + converter=atom.EntryFromString) + self.assertEquals(isinstance(result, atom.Entry), True) + self.assertEquals(isinstance(result, gdata.base.GBaseItem), False) + + item_id = result.id.text + self.assertEquals(result.id.text != None, True) + + updated_item = gdata.base.GBaseItemFromString(test_data.TEST_BASE_ENTRY) + updated_item.label[0].text = 'Test Item' + result = self.gd_client.UpdateItem(item_id, updated_item, + converter=atom.EntryFromString) + self.assertEquals(isinstance(result, atom.Entry), True) + self.assertEquals(isinstance(result, gdata.base.GBaseItem), False) + + result = self.gd_client.DeleteItem(item_id) + self.assertEquals(result, True) + + def testMakeBatchRequests(self): + try: + 
self.gd_client.ProgrammaticLogin() + self.assert_(self.gd_client.GetClientLoginToken() is not None) + self.assert_(self.gd_client.captcha_token is None) + self.assert_(self.gd_client.captcha_url is None) + except gdata.service.CaptchaRequired: + self.fail('Required Captcha') + + request_feed = gdata.base.GBaseItemFeed(atom_id=atom.Id( + text='test batch')) + entry1 = gdata.base.GBaseItemFromString(test_data.TEST_BASE_ENTRY) + entry1.title.text = 'first batch request item' + entry2 = gdata.base.GBaseItemFromString(test_data.TEST_BASE_ENTRY) + entry2.title.text = 'second batch request item' + request_feed.AddInsert(entry1) + request_feed.AddInsert(entry2) + + result_feed = self.gd_client.ExecuteBatch(request_feed) + self.assertEquals(result_feed.entry[0].batch_status.code, '201') + self.assertEquals(result_feed.entry[0].batch_status.reason, 'Created') + self.assertEquals(result_feed.entry[0].title.text, 'first batch request item') + self.assertEquals(result_feed.entry[0].item_type.text, 'products') + self.assertEquals(result_feed.entry[1].batch_status.code, '201') + self.assertEquals(result_feed.entry[1].batch_status.reason, 'Created') + self.assertEquals(result_feed.entry[1].title.text, 'second batch request item') + + # Now delete the newly created items. 
+ request_feed = gdata.base.GBaseItemFeed(atom_id=atom.Id( + text='test deletions')) + request_feed.AddDelete(entry=result_feed.entry[0]) + request_feed.AddDelete(entry=result_feed.entry[1]) + self.assertEquals(request_feed.entry[0].batch_operation.type, + gdata.BATCH_DELETE) + self.assertEquals(request_feed.entry[1].batch_operation.type, + gdata.BATCH_DELETE) + + result_feed = self.gd_client.ExecuteBatch(request_feed) + self.assertEquals(result_feed.entry[0].batch_status.code, '200') + self.assertEquals(result_feed.entry[0].batch_status.reason, 'Success') + self.assertEquals(result_feed.entry[0].title.text, 'first batch request item') + self.assertEquals(result_feed.entry[1].batch_status.code, '200') + self.assertEquals(result_feed.entry[1].batch_status.reason, 'Success') + self.assertEquals(result_feed.entry[1].title.text, 'second batch request item') + + +if __name__ == '__main__': + print ('Google Base Tests\nNOTE: Please run these tests only with a test ' + 'account. The tests may delete or update your data.') + username = raw_input('Please enter your username: ') + password = getpass.getpass() + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/base_test.py b/gdata.py-1.2.3/tests/gdata_tests/base_test.py new file mode 100755 index 0000000..e7171e5 --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/base_test.py @@ -0,0 +1,335 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +__author__ = 'api.jscudder@gmail.com (Jeff Scudder)' + + +import unittest +try: + from xml.etree import ElementTree +except ImportError: + from elementtree import ElementTree +import gdata +from gdata import test_data +import gdata.base + + +class LabelTest(unittest.TestCase): + + def setUp(self): + self.label = gdata.base.Label() + + def testToAndFromString(self): + self.label.text = 'test label' + self.assert_(self.label.text == 'test label') + new_label = gdata.base.LabelFromString(self.label.ToString()) + self.assert_(self.label.text == new_label.text) + + +class ItemTypeTest(unittest.TestCase): + + def setUp(self): + self.item_type = gdata.base.ItemType() + + def testToAndFromString(self): + self.item_type.text = 'product' + self.item_type.type = 'text' + self.assert_(self.item_type.text == 'product') + self.assert_(self.item_type.type == 'text') + new_item_type = gdata.base.ItemTypeFromString(self.item_type.ToString()) + self.assert_(self.item_type.text == new_item_type.text) + self.assert_(self.item_type.type == new_item_type.type) + + +class GBaseItemTest(unittest.TestCase): + + def setUp(self): + self.item = gdata.base.GBaseItem() + + def testToAndFromString(self): + self.item.label.append(gdata.base.Label(text='my label')) + self.assert_(self.item.label[0].text == 'my label') + self.item.item_type = gdata.base.ItemType(text='products') + self.assert_(self.item.item_type.text == 'products') + self.item.item_attributes.append(gdata.base.ItemAttribute('extra', text='foo')) + self.assert_(self.item.item_attributes[0].text == 'foo') + self.assert_(self.item.item_attributes[0].name == 'extra') + new_item = gdata.base.GBaseItemFromString(self.item.ToString()) + self.assert_(self.item.label[0].text == new_item.label[0].text) + self.assert_(self.item.item_type.text == new_item.item_type.text) + self.assert_(self.item.item_attributes[0].text == + new_item.item_attributes[0].text) + + def testCustomItemAttributes(self): + 
self.item.AddItemAttribute('test_attrib', 'foo') + self.assert_(self.item.FindItemAttribute('test_attrib') == 'foo') + self.item.SetItemAttribute('test_attrib', 'bar') + self.assert_(self.item.FindItemAttribute('test_attrib') == 'bar') + self.item.RemoveItemAttribute('test_attrib') + self.assert_(self.item.FindItemAttribute('test_attrib') is None) + + def testConvertActualData(self): + feed = gdata.base.GBaseSnippetFeedFromString(test_data.GBASE_FEED) + for an_entry in feed.entry: + if an_entry.author[0].email.text == 'anon-szot0wdsq0at@base.google.com': + for attrib in an_entry.item_attributes: + if attrib.name == 'payment_notes': + self.assert_(attrib.text == + 'PayPal & Bill Me Later credit available online only.') + if attrib.name == 'condition': + self.assert_(attrib.text == 'new') +# self.assert_(an_entry.item_attributes['condition'].text == 'new') + + def testModifyCustomItemAttributes(self): + self.item.AddItemAttribute('test_attrib', 'foo', value_type='test1') + self.item.AddItemAttribute('test_attrib', 'bar', value_type='test2') + self.assertEquals(self.item.item_attributes[0].name, 'test_attrib') + self.assertEquals(self.item.item_attributes[1].name, 'test_attrib') + self.assertEquals(self.item.item_attributes[0].text, 'foo') + self.assertEquals(self.item.item_attributes[1].text, 'bar') + + # Get one of the custom attributes from the item. + attributes = self.item.GetItemAttributes('test_attrib') + self.assertEquals(len(attributes), 2) + self.assertEquals(attributes[0].text, 'foo') + # Change the contents of the found item attribute. + attributes[0].text = 'new foo' + self.assertEquals(attributes[0].text, 'new foo') + # Make sure that the change is reflected in the item. 
+ self.assertEquals(self.item.item_attributes[0].text, 'new foo') + + +class GBaseItemFeedTest(unittest.TestCase): + + def setUp(self): + self.item_feed = gdata.base.GBaseItemFeedFromString(test_data.GBASE_FEED) + + def testToAndFromString(self): + self.assert_(len(self.item_feed.entry) == 3) + for an_entry in self.item_feed.entry: + self.assert_(isinstance(an_entry, gdata.base.GBaseItem)) + new_item_feed = gdata.base.GBaseItemFeedFromString(str(self.item_feed)) + for an_entry in new_item_feed.entry: + self.assert_(isinstance(an_entry, gdata.base.GBaseItem)) + + #self.item_feed.label.append(gdata.base.Label(text='my label')) + #self.assert_(self.item.label[0].text == 'my label') + #self.item.item_type = gdata.base.ItemType(text='products') + #self.assert_(self.item.item_type.text == 'products') + #new_item = gdata.base.GBaseItemFromString(self.item.ToString()) + #self.assert_(self.item.label[0].text == new_item.label[0].text) + #self.assert_(self.item.item_type.text == new_item.item_type.text) + + def testLinkFinderFindsHtmlLink(self): + for entry in self.item_feed.entry: + # All Base entries should have a self link + self.assert_(entry.GetSelfLink() is not None) + # All Base items should have an HTML link + self.assert_(entry.GetHtmlLink() is not None) + # None of the Base items should have an edit link + self.assert_(entry.GetEditLink() is None) + + +class GBaseSnippetFeedTest(unittest.TestCase): + + def setUp(self): + #self.item_feed = gdata.base.GBaseItemFeed() + self.snippet_feed = gdata.base.GBaseSnippetFeedFromString(test_data.GBASE_FEED) + + def testToAndFromString(self): + self.assert_(len(self.snippet_feed.entry) == 3) + for an_entry in self.snippet_feed.entry: + self.assert_(isinstance(an_entry, gdata.base.GBaseSnippet)) + new_snippet_feed = gdata.base.GBaseSnippetFeedFromString(str(self.snippet_feed)) + for an_entry in new_snippet_feed.entry: + self.assert_(isinstance(an_entry, gdata.base.GBaseSnippet)) + + +class ItemAttributeTest(unittest.TestCase): + 
+ def testToAndFromStirng(self): + attrib = gdata.base.ItemAttribute('price') + attrib.type = 'float' + self.assert_(attrib.name == 'price') + self.assert_(attrib.type == 'float') + new_attrib = gdata.base.ItemAttributeFromString(str(attrib)) + self.assert_(new_attrib.name == attrib.name) + self.assert_(new_attrib.type == attrib.type) + + def testClassConvertsActualData(self): + attrib = gdata.base.ItemAttributeFromString(test_data.TEST_GBASE_ATTRIBUTE) + self.assert_(attrib.name == 'brand') + self.assert_(attrib.type == 'text') + self.assert_(len(attrib.extension_elements) == 0) + + # Test conversion to en ElementTree + element = attrib._ToElementTree() + self.assert_(element.tag == gdata.base.GBASE_TEMPLATE % 'brand') + + +class AttributeTest(unittest.TestCase): + + def testAttributeToAndFromString(self): + attrib = gdata.base.Attribute() + attrib.type = 'float' + attrib.count = '44000' + attrib.name = 'test attribute' + attrib.value.append(gdata.base.Value(count='500', text='a value')) + self.assert_(attrib.type == 'float') + self.assert_(attrib.count == '44000') + self.assert_(attrib.name == 'test attribute') + self.assert_(attrib.value[0].count == '500') + self.assert_(attrib.value[0].text == 'a value') + new_attrib = gdata.base.AttributeFromString(str(attrib)) + self.assert_(attrib.type == new_attrib.type) + self.assert_(attrib.count == new_attrib.count) + self.assert_(attrib.value[0].count == new_attrib.value[0].count) + self.assert_(attrib.value[0].text == new_attrib.value[0].text) + self.assert_(attrib.name == new_attrib.name) + + +class ValueTest(unittest.TestCase): + + def testValueToAndFromString(self): + value = gdata.base.Value() + value.count = '5123' + value.text = 'super great' + self.assert_(value.count == '5123') + self.assert_(value.text == 'super great') + new_value = gdata.base.ValueFromString(str(value)) + self.assert_(new_value.count == value.count) + self.assert_(new_value.text == value.text) + + +class 
AttributeEntryTest(unittest.TestCase): + + def testAttributeEntryToAndFromString(self): + value = gdata.base.Value(count='500', text='happy') + attribute = gdata.base.Attribute(count='600', value=[value]) + a_entry = gdata.base.GBaseAttributeEntry(attribute=[attribute]) + self.assert_(a_entry.attribute[0].count == '600') + self.assert_(a_entry.attribute[0].value[0].count == '500') + self.assert_(a_entry.attribute[0].value[0].text == 'happy') + new_entry = gdata.base.GBaseAttributeEntryFromString(str(a_entry)) + self.assert_(new_entry.attribute[0].count == '600') + self.assert_(new_entry.attribute[0].value[0].count == '500') + self.assert_(new_entry.attribute[0].value[0].text == 'happy') + + +class GBaseAttributeEntryTest(unittest.TestCase): + + def testAttribteEntryFromExampleData(self): + entry = gdata.base.GBaseAttributeEntryFromString( + test_data.GBASE_ATTRIBUTE_ENTRY) + self.assert_(len(entry.attribute) == 1) + self.assert_(len(entry.attribute[0].value) == 10) + self.assert_(entry.attribute[0].name == 'job industry') + for val in entry.attribute[0].value: + if val.text == 'it internet': + self.assert_(val.count == '380772') + elif val.text == 'healthcare': + self.assert_(val.count == '261565') + + + +class GBaseAttributesFeedTest(unittest.TestCase): + + def testAttributesFeedExampleData(self): + feed = gdata.base.GBaseAttributesFeedFromString(test_data.GBASE_ATTRIBUTE_FEED) + self.assert_(len(feed.entry) == 1) + self.assert_(isinstance(feed.entry[0], gdata.base.GBaseAttributeEntry)) + + def testAttributesFeedToAndFromString(self): + value = gdata.base.Value(count='500', text='happy') + attribute = gdata.base.Attribute(count='600', value=[value]) + a_entry = gdata.base.GBaseAttributeEntry(attribute=[attribute]) + feed = gdata.base.GBaseAttributesFeed(entry=[a_entry]) + self.assert_(feed.entry[0].attribute[0].count == '600') + self.assert_(feed.entry[0].attribute[0].value[0].count == '500') + self.assert_(feed.entry[0].attribute[0].value[0].text == 'happy') + 
new_feed = gdata.base.GBaseAttributesFeedFromString(str(feed)) + self.assert_(new_feed.entry[0].attribute[0].count == '600') + self.assert_(new_feed.entry[0].attribute[0].value[0].count == '500') + self.assert_(new_feed.entry[0].attribute[0].value[0].text == 'happy') + + +class GBaseLocalesFeedTest(unittest.TestCase): + + def testLocatesFeedWithExampleData(self): + feed = gdata.base.GBaseLocalesFeedFromString(test_data.GBASE_LOCALES_FEED) + self.assert_(len(feed.entry) == 3) + self.assert_(feed.GetSelfLink().href == + 'http://www.google.com/base/feeds/locales/') + for an_entry in feed.entry: + if an_entry.title.text == 'en_US': + self.assert_(an_entry.category[0].term == 'en_US') + self.assert_(an_entry.title.text == an_entry.category[0].term) + + +class GBaseItemTypesFeedAndEntryTest(unittest.TestCase): + + def testItemTypesFeedToAndFromString(self): + feed = gdata.base.GBaseItemTypesFeed() + entry = gdata.base.GBaseItemTypeEntry() + entry.attribute.append(gdata.base.Attribute(name='location', + attribute_type='location')) + entry.item_type = gdata.base.ItemType(text='jobs') + feed.entry.append(entry) + self.assert_(len(feed.entry) == 1) + self.assert_(feed.entry[0].attribute[0].name == 'location') + new_feed = gdata.base.GBaseItemTypesFeedFromString(str(feed)) + self.assert_(len(new_feed.entry) == 1) + self.assert_(new_feed.entry[0].attribute[0].name == 'location') + +class GBaseImageLinkTest(unittest.TestCase): + + def testImageLinkToAndFromString(self): + image_link = gdata.base.ImageLink() + image_link.type = 'url' + image_link.text = 'example.com' + thumbnail = gdata.base.Thumbnail() + thumbnail.width = '60' + thumbnail.height = '80' + thumbnail.text = 'example text' + image_link.thumbnail.append(thumbnail) + xml = image_link.ToString() + parsed = gdata.base.ImageLinkFromString(xml) + + self.assert_(parsed.type == image_link.type) + self.assert_(parsed.text == image_link.text) + self.assert_(len(parsed.thumbnail) == 1) + self.assert_(parsed.thumbnail[0].width 
== thumbnail.width) + self.assert_(parsed.thumbnail[0].height == thumbnail.height) + self.assert_(parsed.thumbnail[0].text == thumbnail.text) + + +class GBaseItemAttributeAccessElement(unittest.TestCase): + + def testItemAttributeAccessAttribute(self): + item = gdata.base.GBaseItem() + item.AddItemAttribute('test', '1', value_type='int', access='private') + private_attribute = item.GetItemAttributes('test')[0] + self.assert_(private_attribute.access == 'private') + xml = item.ToString() + new_item = gdata.base.GBaseItemFromString(xml) + new_attributes = new_item.GetItemAttributes('test') + self.assert_(len(new_attributes) == 1) + #self.assert_(new_attributes[0].access == 'private') + + +if __name__ == '__main__': + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/blogger/__init__.py b/gdata.py-1.2.3/tests/gdata_tests/blogger/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gdata.py-1.2.3/tests/gdata_tests/blogger/service_test.py b/gdata.py-1.2.3/tests/gdata_tests/blogger/service_test.py new file mode 100755 index 0000000..f32f0aa --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/blogger/service_test.py @@ -0,0 +1,107 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +"""Unit tests to exercise server interactions for blogger.""" + +__author__ = 'api.jscudder (Jeffrey Scudder)' + +import unittest +import getpass +import atom +from gdata import test_data +import gdata.blogger +import gdata.blogger.service + +username = '' +password = '' +test_blog_id = '' + +class BloggerCrudTests(unittest.TestCase): + + def setUp(self): + self.client = gdata.blogger.service.BloggerService(email=username, + password=password, source='GoogleInc-PythonBloggerUnitTests-1') + # TODO: if the test_blog_id is not set, get the list of the user's blogs + # and prompt for which blog to add the test posts to. + self.client.ProgrammaticLogin() + + def testPostDraftUpdateAndDelete(self): + new_entry = gdata.blogger.BlogPostEntry(title=atom.Title( + text='Unit Test Post')) + new_entry.content = atom.Content('text', None, 'Hello World') + # Make this post a draft so it will not appear publicly on the blog. + new_entry.control = atom.Control(draft=atom.Draft(text='yes')) + new_entry.AddLabel('test') + + posted = self.client.AddPost(new_entry, blog_id=test_blog_id) + + self.assertEquals(posted.title.text, new_entry.title.text) + # Should be one category in the posted entry for the 'test' label. + self.assertEquals(len(posted.category), 1) + self.assert_(isinstance(posted, gdata.blogger.BlogPostEntry)) + + # Change the title and add more labels. + posted.title.text = 'Updated' + posted.AddLabel('second') + updated = self.client.UpdatePost(entry=posted) + + self.assertEquals(updated.title.text, 'Updated') + self.assertEquals(len(updated.category), 2) + + # Cleanup and delete the draft blog post. + self.client.DeletePost(entry=posted) + + def testAddComment(self): + # Create a test post to add comments to. 
+ new_entry = gdata.blogger.BlogPostEntry(title=atom.Title( + text='Comments Test Post')) + new_entry.content = atom.Content('text', None, 'Hello Comments') + target_post = self.client.AddPost(new_entry, blog_id=test_blog_id) + + blog_id = target_post.GetBlogId() + post_id = target_post.GetPostId() + + new_comment = gdata.blogger.CommentEntry() + new_comment.content = atom.Content(text='Test comment') + posted = self.client.AddComment(new_comment, blog_id=blog_id, + post_id=post_id) + self.assertEquals(posted.content.text, new_comment.content.text) + + # Cleanup and delete the comment test blog post. + self.client.DeletePost(entry=target_post) + + +class BloggerQueryTests(unittest.TestCase): + + def testConstructBlogQuery(self): + pass + + def testConstructBlogQuery(self): + pass + + def testConstructBlogQuery(self): + pass + + +if __name__ == '__main__': + print ('NOTE: Please run these tests only with a test account. ' + + 'The tests may delete or update your data.') + username = raw_input('Please enter your username: ') + password = getpass.getpass() + test_blog_id = raw_input('Please enter the blog id for the test blog: ') + unittest.main() + diff --git a/gdata.py-1.2.3/tests/gdata_tests/blogger_test.py b/gdata.py-1.2.3/tests/gdata_tests/blogger_test.py new file mode 100755 index 0000000..7e6a772 --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/blogger_test.py @@ -0,0 +1,95 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import unittest +from gdata import test_data +import gdata.blogger +import atom + + +class BlogEntryTest(unittest.TestCase): + + def testBlogEntryFromString(self): + entry = gdata.blogger.BlogEntryFromString(test_data.BLOG_ENTRY) + self.assertEquals(entry.GetBlogName(), 'blogName') + self.assertEquals(entry.GetBlogId(), 'blogID') + self.assertEquals(entry.title.text, 'Lizzy\'s Diary') + + def testBlogPostFeedFromString(self): + feed = gdata.blogger.BlogPostFeedFromString(test_data.BLOG_POSTS_FEED) + self.assertEquals(len(feed.entry), 1) + self.assert_(isinstance(feed, gdata.blogger.BlogPostFeed)) + self.assert_(isinstance(feed.entry[0], gdata.blogger.BlogPostEntry)) + self.assertEquals(feed.entry[0].GetPostId(), 'postID') + self.assertEquals(feed.entry[0].GetBlogId(), 'blogID') + self.assertEquals(feed.entry[0].title.text, 'Quite disagreeable') + + def testCommentFeedFromString(self): + feed = gdata.blogger.CommentFeedFromString(test_data.BLOG_COMMENTS_FEED) + self.assertEquals(len(feed.entry), 1) + self.assert_(isinstance(feed, gdata.blogger.CommentFeed)) + self.assert_(isinstance(feed.entry[0], gdata.blogger.CommentEntry)) + self.assertEquals(feed.entry[0].GetBlogId(), 'blogID') + self.assertEquals(feed.entry[0].GetCommentId(), 'commentID') + self.assertEquals(feed.entry[0].title.text, 'This is my first comment') + self.assertEquals(feed.entry[0].in_reply_to.source, + 'http://blogName.blogspot.com/feeds/posts/default/postID') + self.assertEquals(feed.entry[0].in_reply_to.ref, + 'tag:blogger.com,1999:blog-blogID.post-postID') + self.assertEquals(feed.entry[0].in_reply_to.href, + 'http://blogName.blogspot.com/2007/04/first-post.html') + self.assertEquals(feed.entry[0].in_reply_to.type, 'text/html') + + def testIdParsing(self): + entry = gdata.blogger.BlogEntry() + entry.id = atom.Id( + 
text='tag:blogger.com,1999:user-146606542.blog-4023408167658848') + self.assertEquals(entry.GetBlogId(), '4023408167658848') + entry.id = atom.Id(text='tag:blogger.com,1999:blog-4023408167658848') + self.assertEquals(entry.GetBlogId(), '4023408167658848') + + +class InReplyToTest(unittest.TestCase): + + def testToAndFromString(self): + in_reply_to = gdata.blogger.InReplyTo(href='http://example.com/href', + ref='http://example.com/ref', source='http://example.com/my_post', + type='text/html') + xml_string = str(in_reply_to) + parsed = gdata.blogger.InReplyToFromString(xml_string) + self.assertEquals(parsed.source, in_reply_to.source) + self.assertEquals(parsed.href, in_reply_to.href) + self.assertEquals(parsed.ref, in_reply_to.ref) + self.assertEquals(parsed.type, in_reply_to.type) + + +class CommentEntryTest(unittest.TestCase): + + def testToAndFromString(self): + comment = gdata.blogger.CommentEntry(content=atom.Content(text='Nifty!'), + in_reply_to=gdata.blogger.InReplyTo( + source='http://example.com/my_post')) + parsed = gdata.blogger.CommentEntryFromString(str(comment)) + self.assertEquals(parsed.in_reply_to.source, comment.in_reply_to.source) + self.assertEquals(parsed.content.text, comment.content.text) + + +if __name__ == '__main__': + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/calendar/__init__.py b/gdata.py-1.2.3/tests/gdata_tests/calendar/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gdata.py-1.2.3/tests/gdata_tests/calendar/calendar_acl_test.py b/gdata.py-1.2.3/tests/gdata_tests/calendar/calendar_acl_test.py new file mode 100644 index 0000000..4e7c3c6 --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/calendar/calendar_acl_test.py @@ -0,0 +1,201 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__author__ = 'api.lliabraa@google.com (Lane LiaBraaten)' + +import unittest +try: + from xml.etree import ElementTree +except ImportError: + from elementtree import ElementTree +import atom +import gdata.calendar +import gdata.calendar.service +import gdata.service +import random +import getpass +from gdata import test_data + +username = '' +password = '' + +class CalendarServiceAclUnitTest(unittest.TestCase): + _aclFeedUri = "/calendar/feeds/default/acl/full" + _aclEntryUri = "%s/user:%s" % (_aclFeedUri, "user@gmail.com",) + + def setUp(self): + self.cal_client = gdata.calendar.service.CalendarService() + self.cal_client.email = username + self.cal_client.password = password + self.cal_client.source = 'GCalendarClient ACL "Unit" Tests' + + def tearDown(self): + # No teardown needed + pass + + def _getRandomNumber(self): + """Return a random number as a string for testing""" + r = random.Random() + r.seed() + return str(r.randint(100000,1000000)) + + def _generateAclEntry(self, role="owner", scope_type="user", scope_value=None): + """Generates a ACL rule from parameters or makes a random user an owner by default""" + if (scope_type=="user" and scope_value is None): + scope_value = "user%s@gmail.com" % (self._getRandomNumber()) + rule = gdata.calendar.CalendarAclEntry() + rule.title = atom.Title(text=role) + rule.scope = gdata.calendar.Scope(value=scope_value, type="user") + rule.role = gdata.calendar.Role(value="http://schemas.google.com/gCal/2005#%s" % (role)) + return rule + + def assertEqualAclEntry(self, expected, actual): + """Compares the values of 
two ACL entries""" + self.assertEqual(expected.role.value, actual.role.value) + self.assertEqual(expected.scope.value, actual.scope.value) + self.assertEqual(expected.scope.type, actual.scope.type) + + def testGetAclFeedUnauthenticated(self): + """Fiendishly try to get an ACL feed without authenticating""" + try: + self.cal_client.GetCalendarAclFeed(self._aclFeedUri) + self.fail("Unauthenticated request should fail") + except gdata.service.RequestError, error: + self.assertEqual(error[0]['status'], 401) + self.assertEqual(error[0]['reason'], "Authorization required") + + def testGetAclFeed(self): + """Get an ACL feed""" + self.cal_client.ProgrammaticLogin() + feed = self.cal_client.GetCalendarAclFeed(self._aclFeedUri) + self.assertNotEqual(0,len(feed.entry)) + + def testGetAclEntryUnauthenticated(self): + """Fiendishly try to get an ACL entry without authenticating""" + try: + self.cal_client.GetCalendarAclEntry(self._aclEntryUri) + self.fail("Unauthenticated request should fail"); + except gdata.service.RequestError, error: + self.assertEqual(error[0]['status'], 401) + self.assertEqual(error[0]['reason'], "Authorization required") + + def testGetAclEntry(self): + """Get an ACL entry""" + self.cal_client.ProgrammaticLogin() + self.cal_client.GetCalendarAclEntry(self._aclEntryUri) + + def testCalendarAclFeedFromString(self): + """Create an ACL feed from a hard-coded string""" + aclFeed = gdata.calendar.CalendarAclFeedFromString(test_data.ACL_FEED) + self.assertEqual("Elizabeth Bennet's access control list", aclFeed.title.text) + self.assertEqual(2,len(aclFeed.entry)) + + def testCalendarAclEntryFromString(self): + """Create an ACL entry from a hard-coded string""" + aclEntry = gdata.calendar.CalendarAclEntryFromString(test_data.ACL_ENTRY) + self.assertEqual("owner", aclEntry.title.text) + self.assertEqual("user", aclEntry.scope.type) + self.assertEqual("liz@gmail.com", aclEntry.scope.value) + self.assertEqual("http://schemas.google.com/gCal/2005#owner", 
aclEntry.role.value) + + def testCreateAndDeleteAclEntry(self): + """Add an ACL rule and verify that is it returned in the ACL feed. Then delete the rule and + verify that the rule is no longer included in the ACL feed.""" + # Get the current number of ACL rules + self.cal_client.ProgrammaticLogin() + aclFeed = self.cal_client.GetCalendarAclFeed(self._aclFeedUri) + original_rule_count = len(aclFeed.entry) + + # Insert entry + rule = self._generateAclEntry() + returned_rule = self.cal_client.InsertAclEntry(rule, self._aclFeedUri) + + # Verify rule was added with correct ACL values + aclFeed = self.cal_client.GetCalendarAclFeed(self._aclFeedUri) + self.assertEqual(original_rule_count+1, len(aclFeed.entry)) + self.assertEqualAclEntry(rule, returned_rule) + + # Delete the event + self.cal_client.DeleteAclEntry(returned_rule.GetEditLink().href) + aclFeed = self.cal_client.GetCalendarAclFeed(self._aclFeedUri) + self.assertEquals(original_rule_count, len(aclFeed.entry)) + + def testUpdateAclChangeScopeValue(self): + """Fiendishly try to insert a test ACL rule and attempt to change the scope value (i.e. username). + Verify that an exception is thrown, then delete the test rule.""" + # Insert a user-scoped owner role ot random user + aclEntry = self._generateAclEntry("owner","user"); + self.cal_client.ProgrammaticLogin() + rule = self._generateAclEntry() + returned_rule = self.cal_client.InsertAclEntry(rule, self._aclFeedUri) + + # Change the scope value (i.e. 
what user is the owner) and update the entry + updated_rule = returned_rule + updated_rule.scope.value = "user_%s@gmail.com" % (self._getRandomNumber()) + try: + returned_rule = self.cal_client.UpdateAclEntry(returned_rule.GetEditLink().href, updated_rule) + except gdata.service.RequestError, error: + self.assertEqual(error[0]['status'], 403) + self.assertEqual(error[0]['reason'], "Forbidden") + + self.cal_client.DeleteAclEntry(updated_rule.GetEditLink().href) + + + def testUpdateAclChangeScopeType(self): + """Fiendishly try to insert a test ACL rule and attempt to change the scope type (i.e. from 'user' to 'domain'). + Verify that an exception is thrown, then delete the test rule.""" + # Insert a user-scoped owner role ot random user + aclEntry = self._generateAclEntry("owner","user"); + self.cal_client.ProgrammaticLogin() + rule = self._generateAclEntry() + returned_rule = self.cal_client.InsertAclEntry(rule, self._aclFeedUri) + + # Change the scope value (i.e. what user is the owner) and update the entry + updated_rule = returned_rule + updated_rule.scope.type = "domain" + try: + returned_rule = self.cal_client.UpdateAclEntry(returned_rule.GetEditLink().href, updated_rule) + except gdata.service.RequestError, error: + self.assertEqual(error[0]['status'], 403) + self.assertEqual(error[0]['reason'], "Forbidden") + + self.cal_client.DeleteAclEntry(updated_rule.GetEditLink().href) + + + def testUpdateAclChangeRoleValue(self): + """Insert a test ACL rule and attempt to change the scope type (i.e. from 'owner' to 'editor'). + Verify that an exception is thrown, then delete the test rule.""" + # Insert a user-scoped owner role ot random user + aclEntry = self._generateAclEntry("owner","user"); + self.cal_client.ProgrammaticLogin() + rule = self._generateAclEntry() + returned_rule = self.cal_client.InsertAclEntry(rule, self._aclFeedUri) + + # Change the scope value (i.e. 
what user is the owner) and update the entry + updated_rule = returned_rule + updated_rule.role.value = "http://schemas.google.com/gCal/2005#editor" + returned_rule = self.cal_client.UpdateAclEntry(returned_rule.GetEditLink().href, updated_rule) + self.assertEqualAclEntry(updated_rule, returned_rule) + + self.cal_client.DeleteAclEntry(updated_rule.GetEditLink().href) + +if __name__ == '__main__': + print ('NOTE: Please run these tests only with a test account. ' + + 'The tests may delete or update your data.') + username = raw_input('Please enter your username: ') + password = getpass.getpass() + unittest.main() + diff --git a/gdata.py-1.2.3/tests/gdata_tests/calendar/service_test.py b/gdata.py-1.2.3/tests/gdata_tests/calendar/service_test.py new file mode 100755 index 0000000..ac0a31a --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/calendar/service_test.py @@ -0,0 +1,380 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__author__ = 'api.rboyd@google.com (Ryan Boyd)' + +import unittest +try: + from xml.etree import ElementTree +except ImportError: + from elementtree import ElementTree +import atom +import atom.mock_http +import gdata.calendar +import gdata.calendar.service +import random +import getpass + +# Commented out as dateutil is not in this repository +#from dateutil.parser import parse + + +username = '' +password = '' + + +class CalendarServiceUnitTest(unittest.TestCase): + + def setUp(self): + self.cal_client = gdata.calendar.service.CalendarService() + self.cal_client.email = username + self.cal_client.password = password + self.cal_client.source = 'GCalendarClient "Unit" Tests' + + def tearDown(self): + # No teardown needed + pass + + def testPostUpdateAndDeleteSubscription(self): + """Test posting a new subscription, updating it, deleting it""" + self.cal_client.ProgrammaticLogin() + + subscription_id = 'c4o4i7m2lbamc4k26sc2vokh5g%40group.calendar.google.com' + subscription_url = '%s%s' % ( + 'http://www.google.com/calendar/feeds/default/allcalendars/full/', + subscription_id) + + # Subscribe to Google Doodles calendar + calendar = gdata.calendar.CalendarListEntry() + calendar.id = atom.Id(text=subscription_id) + returned_calendar = self.cal_client.InsertCalendarSubscription(calendar) + self.assertEquals(subscription_url, returned_calendar.id.text) + self.assertEquals('Google Doodles', returned_calendar.title.text) + + # Update subscription + calendar_to_update = self.cal_client.GetCalendarListEntry(subscription_url) + self.assertEquals('Google Doodles', calendar_to_update.title.text) + self.assertEquals('true', calendar_to_update.selected.value) + calendar_to_update.selected.value = 'false' + self.assertEquals('false', calendar_to_update.selected.value) + updated_calendar = self.cal_client.UpdateCalendar(calendar_to_update) + self.assertEquals('false', updated_calendar.selected.value) + + # Delete subscription + response = self.cal_client.DeleteCalendarEntry( + 
returned_calendar.GetEditLink().href) + self.assertEquals(True, response) + + def testPostUpdateAndDeleteCalendar(self): + """Test posting a new calendar, updating it, deleting it""" + self.cal_client.ProgrammaticLogin() + + # New calendar to create + title='Little League Schedule' + description='This calendar contains practice and game times' + time_zone='America/Los_Angeles' + hidden=False + location='Oakland' + color='#2952A3' + + # Calendar object + calendar = gdata.calendar.CalendarListEntry() + calendar.title = atom.Title(text=title) + calendar.summary = atom.Summary(text=description) + calendar.where = gdata.calendar.Where(value_string=location) + calendar.color = gdata.calendar.Color(value=color) + calendar.timezone = gdata.calendar.Timezone(value=time_zone) + if hidden: + calendar.hidden = gdata.calendar.Hidden(value='true') + else: + calendar.hidden = gdata.calendar.Hidden(value='false') + + # Create calendar + new_calendar = self.cal_client.InsertCalendar(new_calendar=calendar) + self.assertEquals(title, new_calendar.title.text) + self.assertEquals(description, new_calendar.summary.text) + self.assertEquals(location, new_calendar.where.value_string) + self.assertEquals(color, new_calendar.color.value) + self.assertEquals(time_zone, new_calendar.timezone.value) + if hidden: + self.assertEquals('true', new_calendar.hidden.value) + else: + self.assertEquals('false', new_calendar.hidden.value) + + # Update calendar + calendar_to_update = self.cal_client.GetCalendarListEntry( + new_calendar.id.text) + updated_title = 'This is the updated title' + calendar_to_update.title.text = updated_title + updated_calendar = self.cal_client.UpdateCalendar(calendar_to_update) + self.assertEquals(updated_title, updated_calendar.title.text) + + # Delete calendar + calendar_to_delete = self.cal_client.GetCalendarListEntry( + new_calendar.id.text) + self.cal_client.Delete(calendar_to_delete.GetEditLink().href) + + return new_calendar + + def 
testPostAndDeleteExtendedPropertyEvent(self): + """Test posting a new entry with an extended property, deleting it""" + # Get random data for creating event + r = random.Random() + r.seed() + random_event_number = str(r.randint(100000,1000000)) + random_event_title = 'My Random Extended Property Test Event %s' % ( + random_event_number) + + # Set event data + event = gdata.calendar.CalendarEventEntry() + event.author.append(atom.Author(name=atom.Name(text='GData Test user'))) + event.title = atom.Title(text=random_event_title) + event.content = atom.Content(text='Picnic with some lunch') + event.extended_property.append(gdata.calendar.ExtendedProperty( + name='prop test name', value='prop test value')) + + # Insert event + self.cal_client.ProgrammaticLogin() + new_event = self.cal_client.InsertEvent(event, + '/calendar/feeds/default/private/full') + + self.assertEquals(event.extended_property[0].value, + new_event.extended_property[0].value) + + # Delete the event + self.cal_client.DeleteEvent(new_event.GetEditLink().href) + + # WARNING: Due to server-side issues, this test takes a while (~60seconds) + def testPostEntryWithCommentAndDelete(self): + """Test posting a new entry with an extended property, deleting it""" + # Get random data for creating event + r = random.Random() + r.seed() + random_event_number = str(r.randint(100000,1000000)) + random_event_title = 'My Random Comments Test Event %s' % ( + random_event_number) + + # Set event data + event = gdata.calendar.CalendarEventEntry() + event.author.append(atom.Author(name=atom.Name(text='GData Test user'))) + event.title = atom.Title(text=random_event_title) + event.content = atom.Content(text='Picnic with some lunch') + + # Insert event + self.cal_client.ProgrammaticLogin() + new_event = self.cal_client.InsertEvent(event, + '/calendar/feeds/default/private/full') + + # Get comments feed + comments_url = new_event.comments.feed_link.href + comments_query = 
gdata.calendar.service.CalendarEventCommentQuery(comments_url) + comments_feed = self.cal_client.CalendarQuery(comments_query) + + # Add comment + comments_entry = gdata.calendar.CalendarEventCommentEntry() + comments_entry.content = atom.Content(text='Comments content') + comments_entry.author.append( + atom.Author(name=atom.Name(text='GData Test user'), + email=atom.Email(text='gdata.ops.demo@gmail.com'))) + new_comments_entry = self.cal_client.InsertEventComment(comments_entry, + comments_feed.GetPostLink().href) + + # Delete the event + event_to_delete = self.cal_client.GetCalendarEventEntry(new_event.id.text) + self.cal_client.DeleteEvent(event_to_delete.GetEditLink().href) + + def testPostQueryUpdateAndDeleteEvents(self): + """Test posting a new entry, updating it, deleting it, querying for it""" + + # Get random data for creating event + r = random.Random() + r.seed() + random_event_number = str(r.randint(100000,1000000)) + random_event_title = 'My Random Test Event %s' % random_event_number + + random_start_hour = (r.randint(1,1000000) % 23) + random_end_hour = random_start_hour + 1 + non_random_start_minute = 0 + non_random_end_minute = 0 + random_month = (r.randint(1,1000000) % 12 + 1) + random_day_of_month = (r.randint(1,1000000) % 28 + 1) + non_random_year = 2008 + start_time = '%04d-%02d-%02dT%02d:%02d:00.000-05:00' % ( + non_random_year, random_month, random_day_of_month, + random_start_hour, non_random_start_minute,) + end_time = '%04d-%02d-%02dT%02d:%02d:00.000-05:00' % ( + non_random_year, random_month, random_day_of_month, + random_end_hour, non_random_end_minute,) + + # Set event data + event = gdata.calendar.CalendarEventEntry() + event.author.append(atom.Author(name=atom.Name(text='GData Test user'))) + event.title = atom.Title(text=random_event_title) + event.content = atom.Content(text='Picnic with some lunch') + event.where.append(gdata.calendar.Where(value_string='Down by the river')) + 
event.when.append(gdata.calendar.When(start_time=start_time,end_time=end_time)) + + # Insert event + self.cal_client.ProgrammaticLogin() + new_event = self.cal_client.InsertEvent(event, + '/calendar/feeds/default/private/full') + + # Ensure that atom data returned from calendar server equals atom data sent + self.assertEquals(event.title.text, new_event.title.text) + self.assertEquals(event.content.text, new_event.content.text) + + # Ensure that gd:where data returned from calendar equals value sent + self.assertEquals(event.where[0].value_string, + new_event.where[0].value_string) + + # Commented out as dateutil is not in this repository + # Ensure that dates returned from calendar server equals dates sent + #start_time_py = parse(event.when[0].start_time) + #start_time_py_new = parse(new_event.when[0].start_time) + #self.assertEquals(start_time_py, start_time_py_new) + + #end_time_py = parse(event.when[0].end_time) + #end_time_py_new = parse(new_event.when[0].end_time) + #self.assertEquals(end_time_py, end_time_py_new) + + # Update event + event_to_update = new_event + updated_title_text = event_to_update.title.text + ' - UPDATED' + event_to_update.title = atom.Title(text=updated_title_text) + + updated_event = self.cal_client.UpdateEvent( + event_to_update.GetEditLink().href, event_to_update) + + # Ensure that updated title was set in the updated event + self.assertEquals(event_to_update.title.text, updated_event.title.text) + + # Delete the event + self.cal_client.DeleteEvent(updated_event.GetEditLink().href) + + # Ensure deleted event is marked as canceled in the feed + after_delete_query = gdata.calendar.service.CalendarEventQuery() + after_delete_query.updated_min = '2007-01-01' + after_delete_query.text_query = str(random_event_number) + after_delete_query.max_results = '1' + after_delete_query_result = self.cal_client.CalendarQuery( + after_delete_query) + + # Ensure feed returned at max after_delete_query.max_results events + self.assert_( + 
len(after_delete_query_result.entry) <= after_delete_query.max_results) + + # Ensure status of returned event is canceled + self.assertEquals(after_delete_query_result.entry[0].event_status.value, + 'CANCELED') + + def testCreateAndDeleteEventUsingBatch(self): + # Get random data for creating event + r = random.Random() + r.seed() + random_event_number = str(r.randint(100000,1000000)) + random_event_title = 'My Random Comments Test Event %s' % ( + random_event_number) + + # Set event data + event = gdata.calendar.CalendarEventEntry() + event.author.append(atom.Author(name=atom.Name(text='GData Test user'))) + event.title = atom.Title(text=random_event_title) + event.content = atom.Content(text='Picnic with some lunch') + + # Form a batch request + batch_request = gdata.calendar.CalendarEventFeed() + batch_request.AddInsert(entry=event) + + # Execute the batch request to insert the event. + self.cal_client.ProgrammaticLogin() + batch_result = self.cal_client.ExecuteBatch(batch_request, + gdata.calendar.service.DEFAULT_BATCH_URL) + + self.assertEquals(len(batch_result.entry), 1) + self.assertEquals(batch_result.entry[0].title.text, random_event_title) + self.assertEquals(batch_result.entry[0].batch_operation.type, + gdata.BATCH_INSERT) + self.assertEquals(batch_result.GetBatchLink().href, + gdata.calendar.service.DEFAULT_BATCH_URL) + + # Create a batch request to delete the newly created entry. 
+ batch_delete_request = gdata.calendar.CalendarEventFeed() + batch_delete_request.AddDelete(entry=batch_result.entry[0]) + + batch_delete_result = self.cal_client.ExecuteBatch(batch_delete_request, + batch_result.GetBatchLink().href) + self.assertEquals(len(batch_delete_result.entry), 1) + self.assertEquals(batch_delete_result.entry[0].batch_operation.type, + gdata.BATCH_DELETE) + + def testCorrectReturnTypesForGetMethods(self): + self.cal_client.ProgrammaticLogin() + + result = self.cal_client.GetCalendarEventFeed() + self.assertEquals(isinstance(result, gdata.calendar.CalendarEventFeed), + True) + + def testValidHostName(self): + mock_http = atom.mock_http.MockHttpClient() + response = atom.mock_http.MockResponse(body='', status=200, + reason='OK') + mock_http.add_response(response, 'GET', + 'https://www.google.com/calendar/feeds/default/allcalendars/full') + self.cal_client.ssl = True + self.cal_client.http_client = mock_http + self.cal_client.SetAuthSubToken('foo') + self.assertEquals(str(self.cal_client.token_store.find_token( + 'https://www.google.com/calendar/feeds/default/allcalendars/full')), + 'AuthSub token=foo') + resp = self.cal_client.Get('/calendar/feeds/default/allcalendars/full') + self.assert_(resp is not None) + + +class CalendarEventQueryUnitTest(unittest.TestCase): + + def setUp(self): + self.query = gdata.calendar.service.CalendarEventQuery() + + def testOrderByValidatesValues(self): + self.query.orderby = 'lastmodified' + self.assertEquals(self.query.orderby, 'lastmodified') + try: + self.query.orderby = 'illegal input' + self.fail() + except gdata.calendar.service.Error: + self.assertEquals(self.query.orderby, 'lastmodified') + + def testSortOrderValidatesValues(self): + self.query.sortorder = 'a' + self.assertEquals(self.query.sortorder, 'a') + try: + self.query.sortorder = 'illegal input' + self.fail() + except gdata.calendar.service.Error: + self.assertEquals(self.query.sortorder, 'a') + + def testTimezoneParameter(self): + 
self.query.ctz = 'America/Los_Angeles' + self.assertEquals(self.query['ctz'], 'America/Los_Angeles') + self.assert_(self.query.ToUri().find('America%2FLos_Angeles') > -1) + + +if __name__ == '__main__': + print ('Google Calendar Test\nNOTE: Please run these tests only with a ' + 'test account. The tests may delete or update your data.') + username = raw_input('Please enter your username: ') + password = getpass.getpass() + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/calendar_test.py b/gdata.py-1.2.3/tests/gdata_tests/calendar_test.py new file mode 100755 index 0000000..cd3e0c1 --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/calendar_test.py @@ -0,0 +1,878 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +__author__ = 'api.jscudder@gmail.com (Jeff Scudder)' + + +import unittest +try: + from xml.etree import ElementTree +except ImportError: + from elementtree import ElementTree +import atom +import gdata +from gdata import test_data +import gdata.calendar + + +class CalendarFeedTest(unittest.TestCase): + + def setUp(self): + self.calendar_feed = gdata.calendar.CalendarListFeedFromString( + test_data.CALENDAR_FEED) + + def testEntryCount(self): + # Assert the number of items in the feed of calendars + self.assertEquals(len(self.calendar_feed.entry),2) + + + + def testToAndFromString(self): + # Assert the appropriate type for each entry + for an_entry in self.calendar_feed.entry: + self.assert_(isinstance(an_entry, gdata.calendar.CalendarListEntry), + 'Entry must be an instance of CalendarListEntry') + + # Regenerate feed from xml text + new_calendar_feed = ( + gdata.calendar.CalendarListFeedFromString(str(self.calendar_feed))) + for an_entry in new_calendar_feed.entry: + self.assert_(isinstance(an_entry, gdata.calendar.CalendarListEntry), + 'Entry in regenerated feed must be an instance of CalendarListEntry') + + def testAuthor(self): + """Tests the existence of a and verifies the name and email""" + + # Assert that each element in the feed author list is an atom.Author + for an_author in self.calendar_feed.author: + self.assert_(isinstance(an_author, atom.Author), + "Calendar feed element must be an instance of " + + "atom.Author: %s" % an_author) + + # Assert the feed author name is as expected + self.assertEquals(self.calendar_feed.author[0].name.text, 'GData Ops Demo') + + # Assert the feed author name is as expected + self.assertEquals(self.calendar_feed.author[0].email.text, + 'gdata.ops.demo@gmail.com') + + # Assert one of the values for an entry author + self.assertEquals(self.calendar_feed.entry[0].author[0].name.text, + 'GData Ops Demo') + self.assertEquals(self.calendar_feed.entry[0].author[0].email.text, + 'gdata.ops.demo@gmail.com') + + def 
testId(self): + """Tests the existence of a in the feed and entries + and verifies the value""" + + # Assert the feed id exists and is an atom.Id + self.assert_(isinstance(self.calendar_feed.id, atom.Id), + "Calendar feed element must be an instance of atom.Id: %s" % ( + self.calendar_feed.id)) + + # Assert the feed id value is as expected + self.assertEquals(self.calendar_feed.id.text, + 'http://www.google.com/calendar/feeds/default') + + # Assert that each entry has an id which is an atom.Id + for an_entry in self.calendar_feed.entry: + self.assert_(isinstance(an_entry.id, atom.Id), + "Calendar entry element must be an instance of " + + "atom.Id: %s" % an_entry.id) + + # Assert one of the values for an id + self.assertEquals(self.calendar_feed.entry[1].id.text, + 'http://www.google.com/calendar/feeds/default/' + + 'jnh21ovnjgfph21h32gvms2758%40group.calendar.google.com') + + def testPublished(self): + """Tests the existence of a in the entries + and verifies the value""" + + # Assert that each entry has a published value which is an atom.Published + for an_entry in self.calendar_feed.entry: + self.assert_(isinstance(an_entry.published, atom.Published), + "Calendar entry element must be an instance of " + + "atom.Published: %s" % an_entry.published) + + # Assert one of the values for published is as expected + self.assertEquals(self.calendar_feed.entry[1].published.text, + '2007-03-20T22:48:57.837Z') + + def testUpdated(self): + """Tests the existence of a in the feed and the entries + and verifies the value""" + + # Assert that the feed updated element exists and is an atom.Updated + self.assert_(isinstance(self.calendar_feed.updated, atom.Updated), + "Calendar feed element must be an instance of " + + "atom.Updated: %s" % self.calendar_feed.updated) + + # Assert that each entry has a updated value which is an atom.Updated + for an_entry in self.calendar_feed.entry: + self.assert_(isinstance(an_entry.updated, atom.Updated), + "Calendar entry element must be an 
instance of" + + "atom.Updated: %s" % an_entry.updated) + + # Assert the feed updated value is as expected + self.assertEquals(self.calendar_feed.updated.text, + '2007-03-20T22:48:57.833Z') + + # Assert one of the values for updated + self.assertEquals(self.calendar_feed.entry[0].updated.text, + '2007-03-20T22:48:52.000Z') + + def testTitle(self): + """Tests the existence of a in the feed and the entries and + verifies the value""" + + # Assert that the feed title element exists and is an atom.Title + self.assert_(isinstance(self.calendar_feed.title, atom.Title), + "Calendar feed element must be an instance of " + + "atom.Title: %s" % self.calendar_feed.title) + + # Assert that each entry has a title value which is an atom.Title + for an_entry in self.calendar_feed.entry: + self.assert_(isinstance(an_entry.title, atom.Title), + "Calendar entry element must be an instance of " + + "atom.Title: %s" % an_entry.title) + + # Assert the feed title value is as expected + self.assertEquals(self.calendar_feed.title.text, + 'GData Ops Demo\'s Calendar List') + + # Assert one of the values for title + self.assertEquals(self.calendar_feed.entry[0].title.text, 'GData Ops Demo') + + def testColor(self): + """Tests the existence of a and verifies the value""" + + # Assert the color is present and is a gdata.calendar.Color + for an_entry in self.calendar_feed.entry: + self.assert_(isinstance(an_entry.color, gdata.calendar.Color), + "Calendar feed element must be an instance of " + + "gdata.calendar.Color: %s" % an_entry.color) + + # Assert the color value is as expected + self.assertEquals(self.calendar_feed.entry[0].color.value, '#2952A3') + + def testAccessLevel(self): + """Tests the existence of a element and verifies the + value""" + + # Assert the access_level is present and is a gdata.calendar.AccessLevel + for an_entry in self.calendar_feed.entry: + self.assert_(isinstance(an_entry.access_level, gdata.calendar.AccessLevel), + "Calendar feed element must be an instance of " 
+ + "gdata.calendar.AccessLevel: %s" % an_entry.access_level) + + # Assert the access_level value is as expected + self.assertEquals(self.calendar_feed.entry[0].access_level.value, 'owner') + + def testTimezone(self): + """Tests the existence of a element and verifies the + value""" + + # Assert the timezone is present and is a gdata.calendar.Timezone + for an_entry in self.calendar_feed.entry: + self.assert_(isinstance(an_entry.timezone, gdata.calendar.Timezone), + "Calendar feed element must be an instance of " + + "gdata.calendar.Timezone: %s" % an_entry.timezone) + + # Assert the timezone value is as expected + self.assertEquals(self.calendar_feed.entry[0].timezone.value, + 'America/Los_Angeles') + + def testHidden(self): + """Tests the existence of a element and verifies the + value""" + + # Assert the hidden is present and is a gdata.calendar.Hidden + for an_entry in self.calendar_feed.entry: + self.assert_(isinstance(an_entry.hidden, gdata.calendar.Hidden), + "Calendar feed element must be an instance of " + + "gdata.calendar.Hidden: %s" % an_entry.hidden) + + # Assert the hidden value is as expected + self.assertEquals(self.calendar_feed.entry[0].hidden.value, 'false') + + def testOpenSearch(self): + """Tests the existence of """ + # Assert that the elements exist and are the appropriate type + self.assert_(isinstance(self.calendar_feed.start_index, gdata.StartIndex), + "Calendar feed element must be an " + + "instance of gdata.StartIndex: %s" % self.calendar_feed.start_index) + + # Assert the values for each openSearch element are as expected + self.assertEquals(self.calendar_feed.start_index.text, '1') + + def testGenerator(self): + """Tests the existence of and verifies the value""" + + # Assert that the element exists and is of the appropriate type + self.assert_(isinstance(self.calendar_feed.generator, atom.Generator), + "Calendar feed element must be an instance of " + + "atom.Generator: %s" % self.calendar_feed.generator) + + # Assert the generator 
version, uri and text are as expected + self.assertEquals(self.calendar_feed.generator.text, 'Google Calendar') + self.assertEquals(self.calendar_feed.generator.version, '1.0') + self.assertEquals(self.calendar_feed.generator.uri, + 'http://www.google.com/calendar') + + def testEntryLink(self): + """Makes sure entry links in the private composite feed are parsed.""" + + entry = gdata.calendar.CalendarEventEntryFromString( + test_data.RECURRENCE_EXCEPTION_ENTRY) + + self.assert_(isinstance(entry.recurrence_exception, list)) + self.assert_(isinstance(entry.recurrence_exception[0].entry_link, + gdata.EntryLink)) + self.assert_(isinstance(entry.recurrence_exception[0].entry_link.entry, + gdata.calendar.CalendarEventEntry)) + self.assertEquals( + entry.recurrence_exception[0].entry_link.entry.author[0].name.text, + 'gdata ops') + + def testOriginalEntry(self): + """Make sure original entry in the private composite feed are parsed.""" + + entry = gdata.calendar.CalendarEventEntryFromString( + test_data.RECURRENCE_EXCEPTION_ENTRY) + + self.assertEquals( + entry.recurrence_exception[0].entry_link.entry.original_event.id, + 'i7lgfj69mjqjgnodklif3vbm7g') + + +class CalendarFeedTestRegenerated(CalendarFeedTest): + def setUp(self): + old_calendar_feed = ( + gdata.calendar.CalendarListFeedFromString(test_data.CALENDAR_FEED)) + self.calendar_feed = ( + gdata.calendar.CalendarListFeedFromString(str(old_calendar_feed))) + tree = ElementTree.fromstring(str(old_calendar_feed)) + + +class CalendarEventFeedTest(unittest.TestCase): + + def setUp(self): + self.calendar_event_feed = ( + gdata.calendar.CalendarEventFeedFromString( + test_data.CALENDAR_FULL_EVENT_FEED)) + + def testEntryCount(self): + # Assert the number of items in the feed of events + self.assertEquals(len(self.calendar_event_feed.entry),11) + + def testToAndFromString(self): + # Assert the appropriate type for each entry + for an_entry in self.calendar_event_feed.entry: + self.assert_(isinstance(an_entry, 
gdata.calendar.CalendarEventEntry), + "Entry must be an instance of a CalendarEventEntry") + + # Regenerate feed from xml text + new_calendar_event_feed = gdata.calendar.CalendarEventFeedFromString( + str(self.calendar_event_feed)) + for an_entry in new_calendar_event_feed.entry: + self.assert_(isinstance(an_entry, gdata.calendar.CalendarEventEntry), + "Entry in regenerated feed must be an instance of CalendarEventEntry") + + def testAuthor(self): + """Tests the existence of a and verifies the name and email""" + + # Assert that each element in the feed author list is an atom.Author + for an_author in self.calendar_event_feed.author: + self.assert_(isinstance(an_author, atom.Author), + "Calendar event feed element must be an instance of " + + "atom.Author: %s" % an_author) + + # Assert the feed author name is as expected + self.assertEquals(self.calendar_event_feed.author[0].name.text, + 'GData Ops Demo') + + # Assert the feed author name is as expected + self.assertEquals(self.calendar_event_feed.author[0].email.text, + 'gdata.ops.demo@gmail.com') + + # Assert one of the values for an entry author + self.assertEquals(self.calendar_event_feed.entry[0].author[0].name.text, + 'GData Ops Demo') + self.assertEquals(self.calendar_event_feed.entry[0].author[0].email.text, + 'gdata.ops.demo@gmail.com') + + def testId(self): + """Tests the existence of a in the feed and entries and + verifies the value""" + + # Assert the feed id exists and is an atom.Id + self.assert_(isinstance(self.calendar_event_feed.id, atom.Id), + "Calendar event feed element must be an instance of " + + "atom.Id: %s" % self.calendar_event_feed.id) + + # Assert the feed id value is as expected + self.assertEquals(self.calendar_event_feed.id.text, + 'http://www.google.com/calendar/feeds/default/private/full') + + # Assert that each entry has an id which is an atom.Id + for an_entry in self.calendar_event_feed.entry: + self.assert_(isinstance(an_entry.id, atom.Id), + "Calendar event entry element must 
be an " + + "instance of atom.Id: %s" % an_entry.id) + + # Assert one of the values for an id + self.assertEquals(self.calendar_event_feed.entry[1].id.text, + 'http://www.google.com/calendar/feeds/default/private/full/' + + '2qt3ao5hbaq7m9igr5ak9esjo0') + + def testPublished(self): + """Tests the existence of a in the entries and + verifies the value""" + + # Assert that each entry has a published value which is an atom.Published + for an_entry in self.calendar_event_feed.entry: + self.assert_(isinstance(an_entry.published, atom.Published), + "Calendar event entry element must be an instance " + + "of atom.Published: %s" % an_entry.published) + + # Assert one of the values for published is as expected + self.assertEquals(self.calendar_event_feed.entry[1].published.text, + '2007-03-20T21:26:04.000Z') + + def testUpdated(self): + """Tests the existence of a in the feed and the entries and + verifies the value""" + + # Assert that the feed updated element exists and is an atom.Updated + self.assert_(isinstance(self.calendar_event_feed.updated, atom.Updated), + "Calendar feed element must be an instance of " + + "atom.Updated: %s" % self.calendar_event_feed.updated) + + # Assert that each entry has a updated value which is an atom.Updated + for an_entry in self.calendar_event_feed.entry: + self.assert_(isinstance(an_entry.updated, atom.Updated), + "Calendar event entry element must be an instance " + + "of atom.Updated: %s" % an_entry.updated) + + # Assert the feed updated value is as expected + self.assertEquals(self.calendar_event_feed.updated.text, + '2007-03-20T21:29:57.000Z') + + # Assert one of the values for updated + self.assertEquals(self.calendar_event_feed.entry[3].updated.text, + '2007-03-20T21:25:46.000Z') + + def testTitle(self): + """Tests the existence of a in the feed and the entries + and verifies the value""" + + # Assert that the feed title element exists and is an atom.Title + self.assert_(isinstance(self.calendar_event_feed.title, atom.Title), + 
"Calendar feed element must be an instance of " + + "atom.Title: %s" % self.calendar_event_feed.title) + + # Assert that each entry has a title value which is an atom.Title + for an_entry in self.calendar_event_feed.entry: + self.assert_(isinstance(an_entry.title, atom.Title), + "Calendar event entry element must be an instance of " + + "atom.Title: %s" % an_entry.title) + + # Assert the feed title value is as expected + self.assertEquals(self.calendar_event_feed.title.text, 'GData Ops Demo') + + # Assert one of the values for title + self.assertEquals(self.calendar_event_feed.entry[0].title.text, + 'test deleted') + + def testPostLink(self): + """Tests the existence of a with a rel='...#post' + and verifies the value""" + + # Assert that each link in the feed is an atom.Link + for a_link in self.calendar_event_feed.link: + self.assert_(isinstance(a_link, atom.Link), + "Calendar event entry element must be an instance of " + + "atom.Link: %s" % a_link) + + # Assert post link exists + self.assert_(self.calendar_event_feed.GetPostLink() is not None) + + # Assert the post link value is as expected + self.assertEquals(self.calendar_event_feed.GetPostLink().href, + 'http://www.google.com/calendar/feeds/default/private/full') + + def testEditLink(self): + """Tests the existence of a with a rel='edit' in each entry + and verifies the value""" + + # Assert that each link in the feed is an atom.Link + for a_link in self.calendar_event_feed.link: + self.assert_(isinstance(a_link, atom.Link), + "Calendar event entry element must be an instance of " + + "atom.Link: %s" % a_link) + + # Assert edit link exists + for a_entry in self.calendar_event_feed.entry: + self.assert_(a_entry.GetEditLink() is not None) + + # Assert the edit link value is as expected + self.assertEquals(self.calendar_event_feed.entry[0].GetEditLink().href, + 'http://www.google.com/calendar/feeds/default/private/full/o99flmgm' + + 'kfkfrr8u745ghr3100/63310109397') + 
self.assertEquals(self.calendar_event_feed.entry[0].GetEditLink().type, + 'application/atom+xml') + + def testOpenSearch(self): + """Tests the existence of , + , """ + + # Assert that the elements exist and are the appropriate type + self.assert_(isinstance(self.calendar_event_feed.total_results, + gdata.TotalResults), + "Calendar event feed element must be an " + + "instance of gdata.TotalResults: %s" % ( + self.calendar_event_feed.total_results)) + self.assert_( + isinstance(self.calendar_event_feed.start_index, gdata.StartIndex), + "Calendar event feed element must be an " + + "instance of gdata.StartIndex: %s" % ( + self.calendar_event_feed.start_index)) + self.assert_( + isinstance(self.calendar_event_feed.items_per_page, gdata.ItemsPerPage), + "Calendar event feed element must be an " + + "instance of gdata.ItemsPerPage: %s" % ( + self.calendar_event_feed.items_per_page)) + + # Assert the values for each openSearch element are as expected + self.assertEquals(self.calendar_event_feed.total_results.text, '10') + self.assertEquals(self.calendar_event_feed.start_index.text, '1') + self.assertEquals(self.calendar_event_feed.items_per_page.text, '25') + + def testGenerator(self): + """Tests the existence of and verifies the value""" + + # Assert that the element exists and is of the appropriate type + self.assert_(isinstance(self.calendar_event_feed.generator, atom.Generator), + "Calendar event feed element must be an instance " + + "of atom.Generator: %s" % self.calendar_event_feed.generator) + + # Assert the generator version, uri and text are as expected + self.assertEquals(self.calendar_event_feed.generator.text, + 'Google Calendar') + self.assertEquals(self.calendar_event_feed.generator.version, '1.0') + self.assertEquals(self.calendar_event_feed.generator.uri, + 'http://www.google.com/calendar') + + def testCategory(self): + """Tests the existence of and verifies the value""" + + # Assert that the element exists and is of the appropriate type and value + for 
a_category in self.calendar_event_feed.category: + self.assert_(isinstance(a_category, atom.Category), + "Calendar event feed element must be an instance " + + "of atom.Category: %s" % a_category) + self.assertEquals(a_category.scheme, + 'http://schemas.google.com/g/2005#kind') + self.assertEquals(a_category.term, + 'http://schemas.google.com/g/2005#event') + + for an_event in self.calendar_event_feed.entry: + for a_category in an_event.category: + self.assert_(isinstance(a_category, atom.Category), + "Calendar event feed entry element must be an " + + "instance of atom.Category: %s" % a_category) + self.assertEquals(a_category.scheme, + 'http://schemas.google.com/g/2005#kind') + self.assertEquals(a_category.term, + 'http://schemas.google.com/g/2005#event') + + + def testSendEventNotifications(self): + """Test the existence of + and verifies the value""" + + # Assert that the element exists and is of the appropriate type and value + for an_event in self.calendar_event_feed.entry: + self.assert_(isinstance(an_event.send_event_notifications, + gdata.calendar.SendEventNotifications), + ("Calendar event feed entry element " + + "must be an instance of gdata.calendar.SendEventNotifications: %s") % ( + an_event.send_event_notifications,)) + + # Assert the are as expected + self.assertEquals( + self.calendar_event_feed.entry[0].send_event_notifications.value, + 'false') + + self.assertEquals( + self.calendar_event_feed.entry[2].send_event_notifications.value, + 'true') + + def testQuickAdd(self): + """Test the existence of + and verifies the value""" + + entry = gdata.calendar.CalendarEventEntry() + entry.quick_add = gdata.calendar.QuickAdd(value='true') + unmarshalled_entry = entry.ToString() + tag = '{%s}quickadd' % (gdata.calendar.GCAL_NAMESPACE) + marshalled_entry = ElementTree.fromstring(unmarshalled_entry).find(tag) + self.assert_(marshalled_entry.attrib['value'],'true') + self.assert_(marshalled_entry.tag,tag) + + def testEventStatus(self): + """Test the existence 
of + and verifies the value""" + + # Assert that the element exists and is of the appropriate type and value + for an_event in self.calendar_event_feed.entry: + self.assert_(isinstance(an_event.event_status, + gdata.calendar.EventStatus), + ("Calendar event feed entry element " + + "must be an instance of gdata.calendar.EventStatus: %s") % ( + an_event.event_status,)) + + # Assert the are as expected + self.assertEquals( + self.calendar_event_feed.entry[0].event_status.value, + 'CANCELED') + + self.assertEquals( + self.calendar_event_feed.entry[1].event_status.value, + 'CONFIRMED') + + def testComments(self): + """Tests the existence of and verifies the value""" + + # Assert that the element exists and is of the appropriate type and value + for an_event in self.calendar_event_feed.entry: + self.assert_(an_event.comments is None or isinstance(an_event.comments, + gdata.calendar.Comments), + ("Calendar event feed entry element " + + "must be an instance of gdata.calendar.Comments: %s") % ( + an_event.comments,)) + + def testVisibility(self): + """Test the existence of and verifies the value""" + + # Assert that the element exists and is of the appropriate type and value + for an_event in self.calendar_event_feed.entry: + self.assert_(isinstance(an_event.visibility, + gdata.calendar.Visibility), + ("Calendar event feed entry element " + + "must be an instance of gdata.calendar.Visibility: %s") % ( + an_event.visibility,)) + + # Assert the are as expected + self.assertEquals( + self.calendar_event_feed.entry[0].visibility.value, + 'DEFAULT') + + self.assertEquals( + self.calendar_event_feed.entry[1].visibility.value, + 'PRIVATE') + + self.assertEquals( + self.calendar_event_feed.entry[2].visibility.value, + 'PUBLIC') + + def testTransparency(self): + """Test the existence of and verifies the value""" + + # Assert that the element exists and is of the appropriate type and value + for an_event in self.calendar_event_feed.entry: + 
self.assert_(isinstance(an_event.transparency, + gdata.calendar.Transparency), + ("Calendar event feed entry element " + + "must be an instance of gdata.calendar.Transparency: %s") % ( + an_event.transparency,)) + + # Assert the are as expected + self.assertEquals( + self.calendar_event_feed.entry[0].transparency.value, + 'OPAQUE') + + self.assertEquals( + self.calendar_event_feed.entry[1].transparency.value, + 'OPAQUE') + + self.assertEquals( + self.calendar_event_feed.entry[2].transparency.value, + 'OPAQUE') + + # TODO: TEST VALUES OF VISIBILITY OTHER THAN OPAQUE + + def testWhere(self): + """Tests the existence of a in the entries + and verifies the value""" + + # Assert that each entry has a where value which is an gdata.calendar.Where + for an_entry in self.calendar_event_feed.entry: + for a_where in an_entry.where: + self.assert_(isinstance(a_where, gdata.calendar.Where), + "Calendar event entry element must be an instance of " + + "gdata.calendar.Where: %s" % a_where) + + # Assert one of the values for where is as expected + self.assertEquals(self.calendar_event_feed.entry[1].where[0].value_string, + 'Dolores Park with Kim') + + def testWhenAndReminder(self): + """Tests the existence of a and in the entries + and verifies the values""" + + # Assert that each entry's when value is a gdata.calendar.When + # Assert that each reminder is a gdata.calendar.Reminder + for an_entry in self.calendar_event_feed.entry: + for a_when in an_entry.when: + self.assert_(isinstance(a_when, gdata.calendar.When), + "Calendar event entry element must be an instance " + + "of gdata.calendar.When: %s" % a_when) + for a_reminder in a_when.reminder: + self.assert_(isinstance(a_reminder, gdata.calendar.Reminder), + "Calendar event entry element must be an " + + "instance of gdata.calendar.Reminder: %s" % a_reminder) + + # Assert one of the values for when is as expected + self.assertEquals(self.calendar_event_feed.entry[0].when[0].start_time, + '2007-03-23T12:00:00.000-07:00') + 
self.assertEquals(self.calendar_event_feed.entry[0].when[0].end_time, + '2007-03-23T13:00:00.000-07:00') + + # Assert the reminder child of when is as expected + self.assertEquals( + self.calendar_event_feed.entry[0].when[0].reminder[0].minutes, '10') + self.assertEquals( + self.calendar_event_feed.entry[1].when[0].reminder[0].minutes, '20') + + def testBatchRequestParsing(self): + batch_request = gdata.calendar.CalendarEventFeedFromString( + test_data.CALENDAR_BATCH_REQUEST) + self.assertEquals(len(batch_request.entry), 4) + # Iterate over the batch request entries and match the operation with + # the batch id. These values are hard coded to match the test data. + for entry in batch_request.entry: + if entry.batch_id.text == '1': + self.assertEquals(entry.batch_operation.type, 'insert') + if entry.batch_id.text == '2': + self.assertEquals(entry.batch_operation.type, 'query') + if entry.batch_id.text == '3': + self.assertEquals(entry.batch_operation.type, 'update') + self.assertEquals(entry.title.text, 'Event updated via batch') + if entry.batch_id.text == '4': + self.assertEquals(entry.batch_operation.type, 'delete') + self.assertEquals(entry.id.text, + 'http://www.google.com/calendar/feeds/default/' + 'private/full/d8qbg9egk1n6lhsgq1sjbqffqc') + self.assertEquals(entry.GetEditLink().href, + 'http://www.google.com/calendar/feeds/default/' + 'private/full/d8qbg9egk1n6lhsgq1sjbqffqc/' + '63326018324') + + def testBatchResponseParsing(self): + batch_response = gdata.calendar.CalendarEventFeedFromString( + test_data.CALENDAR_BATCH_RESPONSE) + self.assertEquals(len(batch_response.entry), 4) + for entry in batch_response.entry: + if entry.batch_id.text == '1': + self.assertEquals(entry.batch_operation.type, 'insert') + self.assertEquals(entry.batch_status.code, '201') + self.assertEquals(entry.batch_status.reason, 'Created') + self.assertEquals(entry.id.text, 'http://www.google.com/calendar/' + 'feeds/default/private/full/' + 'n9ug78gd9tv53ppn4hdjvk68ek') + if 
entry.batch_id.text == '2': + self.assertEquals(entry.batch_operation.type, 'query') + if entry.batch_id.text == '3': + self.assertEquals(entry.batch_operation.type, 'update') + if entry.batch_id.text == '4': + self.assertEquals(entry.batch_operation.type, 'delete') + self.assertEquals(entry.id.text, 'http://www.google.com/calendar/' + 'feeds/default/private/full/' + 'd8qbg9egk1n6lhsgq1sjbqffqc') + + # TODO add reminder tests for absolute_time and hours/seconds (if possible) + # TODO test recurrence and recurrenceexception + # TODO test originalEvent + +class CalendarWebContentTest(unittest.TestCase): + def setUp(self): + self.calendar_event_feed = ( + gdata.calendar.CalendarEventFeedFromString( + test_data.CALENDAR_FULL_EVENT_FEED)) + + def testAddSimpleWebContentEventEntry(self): + """Verifies that we can add a web content link to an event entry.""" + + title = "Al Einstein's Birthday!" + href = 'http://gdata.ops.demo.googlepages.com/birthdayicon.gif' + type = 'image/jpeg' + url = 'http://gdata.ops.demo.googlepages.com/einstein.jpg' + width = '300' + height = '225' + + # Create a web content event + event = gdata.calendar.CalendarEventEntry() + web_content = gdata.calendar.WebContent(url=url, width=width, height=height) + web_content_link = gdata.calendar.WebContentLink(title=title, + href=href, link_type=type, web_content=web_content) + event.link.append(web_content_link) + + # Verify the web content link exists and contains the expected data + web_content_link = event.GetWebContentLink() + self.assertValidWebContentLink(title, href, type, web_content_link) + + # Verify the web content element exists and contains the expected data + web_content_element = web_content_link.web_content + self.assertValidSimpleWebContent(url, width, height, web_content_element) + + def testAddWebContentGadgetEventEntry(self): + """Verifies that we can add a web content gadget link to an event entry.""" + + title = "Date and Time Gadget" + href = 
'http://gdata.ops.demo.googlepages.com/birthdayicon.gif' + url = 'http://google.com/ig/modules/datetime.xml' + type = 'application/x-google-gadgets+xml' + width = '300' + height = '200' + pref_name = 'color' + pref_value = 'green' + + # Create a web content event + event = gdata.calendar.CalendarEventEntry() + web_content = gdata.calendar.WebContent(url=url, width=width, height=height) + web_content.gadget_pref.append( + gdata.calendar.WebContentGadgetPref(name=pref_name, value=pref_value)) + web_content_link = gdata.calendar.WebContentLink(title=title, + href=href, web_content=web_content, link_type=type) + event.link.append(web_content_link) + + # Verify the web content link exists and contains the expected data + web_content_link = event.GetWebContentLink() + self.assertValidWebContentLink(title, href, type, web_content_link) + + # Verify the web content element exists and contains the expected data + web_content_element = web_content_link.web_content + self.assertValidWebContentGadget(url, width, height, + pref_name, pref_value, web_content_element) + + def testFromXmlToSimpleWebContent(self): + """Verifies that we can read a web content link from an event entry.""" + + # Expected values (from test_data.py file) + title = 'World Cup' + href = 'http://www.google.com/calendar/images/google-holiday.gif' + type = 'image/gif' + url = 'http://www.google.com/logos/worldcup06.gif' + width = '276' + height = '120' + + # Note: The tenth event entry contains web content + web_content_event = self.calendar_event_feed.entry[9] + + # Verify the web content link exists and contains the expected data + web_content_link = web_content_event.GetWebContentLink() + self.assertValidWebContentLink(title, href, type, web_content_link) + + # Verify the web content element exists and contains the expected data + web_content_element = web_content_link.web_content + self.assertValidSimpleWebContent(url, width, height, web_content_element) + + def testFromXmlToWebContentGadget(self): + 
"""Verifies that we can read a web content link from an event entry.""" + + # Expected values (from test_data.py file) + title = 'Date and Time Gadget' + href = 'http://gdata.ops.demo.googlepages.com/birthdayicon.gif' + url = 'http://google.com/ig/modules/datetime.xml' + type = 'application/x-google-gadgets+xml' + width = '300' + height = '136' + pref_name = 'color' + pref_value = 'green' + + # Note: The eleventh event entry contains web content + web_content_event = self.calendar_event_feed.entry[10] + + # Verify the web content link exists and contains the expected data + web_content_link = web_content_event.GetWebContentLink() + self.assertValidWebContentLink(title, href, type, web_content_link) + + # Verify the web content element exists and contains the expected data + web_content_element = web_content_link.web_content + self.assertValidWebContentGadget(url, width, height, pref_name, + pref_value, web_content_element) + + def assertValidWebContentLink(self, expected_title=None, expected_href=None, + expected_type=None, web_content_link=None): + """Asserts that the web content link is the correct type and contains the + expected values""" + + self.assert_(isinstance(web_content_link, gdata.calendar.WebContentLink), + "Web content link element must be an " + + "instance of gdata.calendar.WebContentLink: %s" % web_content_link) + expected_rel = '%s/%s' % (gdata.calendar.GCAL_NAMESPACE, 'webContent') + self.assertEquals(expected_rel, web_content_link.rel) + self.assertEqual(expected_title, web_content_link.title) + self.assertEqual(expected_href, web_content_link.href) + self.assertEqual(expected_type, web_content_link.type) + + def assertValidSimpleWebContent(self, expected_url=None, expected_width=None, + expected_height=None, web_content_element=None): + """Asserts that the web content element is the correct type and contains + the expected values""" + + self.assert_(isinstance(web_content_element, gdata.calendar.WebContent), + "Calendar event entry element 
must be an " + + "instance of gdata.calendar.WebContent: %s" % web_content_element) + self.assertEquals(expected_width, web_content_element.width) + self.assertEquals(expected_height, web_content_element.height) + self.assertEquals(expected_url, web_content_element.url) + + def assertValidWebContentGadget(self, expected_url=None, expected_width=None, + expected_height=None, expected_pref_name=None, expected_pref_value=None, + web_content_element=None): + """Asserts that the web content element is the correct type and contains + the expected values""" + + self.assert_(isinstance(web_content_element, gdata.calendar.WebContent), + "Calendar event entry element must be an " + + "instance of gdata.calendar.WebContent: %s" % web_content_element) + self.assertEquals(expected_width, web_content_element.width) + self.assertEquals(expected_height, web_content_element.height) + self.assertEquals(expected_url, web_content_element.url) + self.assertEquals(expected_pref_name, + web_content_element.gadget_pref[0].name) + self.assertEquals(expected_pref_value, + web_content_element.gadget_pref[0].value) + + +class ExtendedPropertyTest(unittest.TestCase): + + def testExtendedPropertyToAndFromXml(self): + ep = gdata.calendar.ExtendedProperty(name='test') + ep.value = 'val' + xml_string = ep.ToString() + ep2 = gdata.ExtendedPropertyFromString(xml_string) + self.assertEquals(ep.name, ep2.name) + self.assertEquals(ep.value, ep2.value) + + +if __name__ == '__main__': + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/client_online_test.py b/gdata.py-1.2.3/tests/gdata_tests/client_online_test.py new file mode 100755 index 0000000..d0dea3c --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/client_online_test.py @@ -0,0 +1,96 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import unittest +import getpass +import gdata.client +import gdata.service +import gdata + + +username = '' +password = '' + + +def Utf8String(my_string): + return unicode(my_string, 'UTF-8') + + +class ClientLiveTest(unittest.TestCase): + + def setUp(self): + self.client = gdata.client.GDataClient() + + def testUnauthenticatedReads(self): + feed_str = self.client.Get('http://www.google.com/base/feeds/snippets', + parser=Utf8String) + self.assert_(feed_str.startswith(' + Marie-Louise's chocolate butter + + Ingredients: +

      +
    • 250g margarine,
    • +
    • 200g sugar,
    • +
    • 2 eggs, and
    • +
    • approx. 8 tsp cacao.
    • +
    + + en + testrecipes + """ + new_entry = self.client.Post(entry, + 'http://www.google.com/base/feeds/items', + parser=gdata.GDataEntryFromString) + self.assert_(isinstance(new_entry, gdata.GDataEntry)) + self.client.Delete(new_entry.GetEditLink().href) + + +if __name__ == '__main__': + print ('GData Client Unit Tests\nNOTE: Please run these tests only ' + 'with a test account. The tests may delete or update your data.') + username = raw_input('Please enter your username: ') + password = getpass.getpass() + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/client_test.py b/gdata.py-1.2.3/tests/gdata_tests/client_test.py new file mode 100755 index 0000000..9aff83f --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/client_test.py @@ -0,0 +1,97 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +__author__ = 'api.jscudder (Jeff Scudder)' + + +import unittest +import getpass +import gdata.client +import gdata.auth +import gdata.service +import atom.http_interface + + +class AuthSubUrlTest(unittest.TestCase): + + def testGenerateNextWithScope(self): + next = 'http://example.com/test' + scope = 'http://www.google.com/calendar/feeds/' + request_url = gdata.client.GenerateAuthSubRequestUrl(next, scope) + self.assert_(request_url.find('example.com') > -1) + self.assert_(request_url.find('calendar') > -1) + + def testGenerateNextWithMultipleScopes(self): + next = 'http://example.com/test' + scope = ['http://www.google.com/calendar/feeds/', + 'http://spreadsheets.google.com/feeds/'] + request_url = gdata.client.GenerateAuthSubRequestUrl(next, scope) + self.assert_(request_url.find('example.com') > -1) + self.assert_(request_url.find('calendar') > -1) + self.assert_(request_url.find('spreadsheets') > -1) + + def testExtractTokenWithScope(self): + url = ('http://example.com/test?authsub_token_scope=http%3A%2F%2F' + 'www.google.com%2Fcalendar%2Ffeeds%2F&token=yeF3EE&foo=1') + (token, scopes) = gdata.client.ExtractToken(url) + self.assert_(token == 'AuthSub token=yeF3EE') + self.assert_(scopes[0] == 'http://www.google.com/calendar/feeds/') + + def testExtractTokenWithMultipleScopes(self): + url = ('http://example.com/test?authsub_token_scope=http%3A%2F%2F' + 'www.google.com%2Fcalendar%2Ffeeds%2F+http%3A%2F%2F' + 'spreadsheets.google.com%2Ffeeds%2F&token=KyeF3E6Mma') + (token, scopes) = gdata.client.ExtractToken(url) + self.assert_(token == 'AuthSub token=KyeF3E6Mma') + self.assert_(len(scopes) == 2) + self.assert_(scopes[0] == 'http://www.google.com/calendar/feeds/') + self.assert_(scopes[1] == 'http://spreadsheets.google.com/feeds/') + + +class GDataClientTest(unittest.TestCase): + + def setUp(self): + self.client = gdata.client.GDataClient() + + def testFindTokenForScope(self): + # Add a test token with two scopes + token = 'AuthSub token=KyeF3E6Mma' + scope1 = 
'http://www.google.com/calendar/feeds/' + scope2 = 'http://spreadsheets.google.com/feeds/' + auth_token = gdata.auth.AuthSubToken(token, [scope1, scope2]) + self.client.token_store.add_token(auth_token) + self.assert_(self.client.token_store.find_token(scope1) == auth_token) + self.assert_(self.client.token_store.find_token(scope2) == auth_token) + self.assert_(isinstance(self.client.token_store.find_token('foo'), + atom.http_interface.GenericToken)) + self.assert_( + self.client.token_store.find_token('foo%s' % scope1) != auth_token) + self.assert_(isinstance(self.client.token_store.find_token( + 'foo%s' % scope1), + atom.http_interface.GenericToken)) + self.assert_( + self.client.token_store.find_token('%sfoo' % scope1) == auth_token) + self.client.token_store.remove_token(auth_token) + self.assert_(self.client.token_store.find_token('%sfoo' % scope1) != auth_token) + self.assert_(isinstance(self.client.token_store.find_token( + '%sfoo' % scope1), + atom.http_interface.GenericToken)) + self.assert_(self.client.token_store.find_token(scope2) != auth_token) + + +if __name__ == '__main__': + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/codesearch_test.py b/gdata.py-1.2.3/tests/gdata_tests/codesearch_test.py new file mode 100755 index 0000000..d881c22 --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/codesearch_test.py @@ -0,0 +1,52 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +__author__ = 'api.jscudder (Jeffrey Scudder)' + + +import unittest +import gdata.codesearch +import gdata.test_data + +class CodeSearchDataTest(unittest.TestCase): + + def setUp(self): + self.feed = gdata.codesearch.CodesearchFeedFromString( + gdata.test_data.CODE_SEARCH_FEED) + + def testCorrectXmlConversion(self): + self.assert_(self.feed.id.text == + 'http://www.google.com/codesearch/feeds/search?q=malloc') + self.assert_(len(self.feed.entry) == 10) + for entry in self.feed.entry: + if entry.id.text == ('http://www.google.com/codesearch?hl=en&q=+ma' + 'lloc+show:LDjwp-Iqc7U:84hEYaYsZk8:xDGReDhvNi0&sa=N&ct=rx&cd=1' + '&cs_p=http://www.gnu.org&cs_f=software/autoconf/manual/autoco' + 'nf-2.60/autoconf.html-002&cs_p=http://www.gnu.org&cs_f=softwa' + 're/autoconf/manual/autoconf-2.60/autoconf.html-002#first'): + self.assert_(len(entry.match) == 4) + for match in entry.match: + if match.line_number == '4': + self.assert_(match.type == 'text/html') + self.assert_(entry.file.name == + 'software/autoconf/manual/autoconf-2.60/autoconf.html-002') + self.assert_(entry.package.name == 'http://www.gnu.org') + self.assert_(entry.package.uri == 'http://www.gnu.org') + + +if __name__ == '__main__': + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/contacts/__init__.py b/gdata.py-1.2.3/tests/gdata_tests/contacts/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gdata.py-1.2.3/tests/gdata_tests/contacts/service_test.py b/gdata.py-1.2.3/tests/gdata_tests/contacts/service_test.py new file mode 100755 index 0000000..510aec6 --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/contacts/service_test.py @@ -0,0 +1,217 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__author__ = 'api.jscudder (Jeff Scudder)' + +import getpass +import random +import re +import unittest +import urllib +import atom +import gdata.contacts.service + + +username = '' +password = '' +test_image_location = '../../testimage.jpg' +test_image_name = 'testimage.jpg' + + +class ContactsServiceTest(unittest.TestCase): + + def setUp(self): + self.gd_client = gdata.contacts.service.ContactsService() + self.gd_client.email = username + self.gd_client.password = password + self.gd_client.source = 'GoogleInc-ContactsPythonTest-1' + self.gd_client.ProgrammaticLogin() + + def testGetContactsFeed(self): + feed = self.gd_client.GetContactsFeed() + self.assert_(isinstance(feed, gdata.contacts.ContactsFeed)) + + def testCreateUpdateDeleteContactAndUpdatePhoto(self): + #DeleteTestContact(self.gd_client) + + # Create a new entry + new_entry = gdata.contacts.ContactEntry() + new_entry.title = atom.Title(text='Elizabeth Bennet') + new_entry.content = atom.Content(text='Test Notes') + new_entry.email.append(gdata.contacts.Email( + rel='http://schemas.google.com/g/2005#work', + primary='true', + address='liz@gmail.com')) + new_entry.phone_number.append(gdata.contacts.PhoneNumber( + rel='http://schemas.google.com/g/2005#work', text='(206)555-1212')) + new_entry.organization = gdata.contacts.Organization( + org_name=gdata.contacts.OrgName(text='TestCo.'), + rel='http://schemas.google.com/g/2005#work') + + entry = self.gd_client.CreateContact(new_entry, + '/m8/feeds/contacts/%s/full' % username) + + # Generate and parse the XML for the new entry. 
+ self.assertEquals(entry.title.text, new_entry.title.text) + self.assertEquals(entry.content.text, 'Test Notes') + self.assertEquals(len(entry.email), 1) + self.assertEquals(entry.email[0].rel, new_entry.email[0].rel) + self.assertEquals(entry.email[0].address, 'liz@gmail.com') + self.assertEquals(len(entry.phone_number), 1) + self.assertEquals(entry.phone_number[0].rel, + new_entry.phone_number[0].rel) + self.assertEquals(entry.phone_number[0].text, '(206)555-1212') + self.assertEquals(entry.organization.org_name.text, 'TestCo.') + + # Edit the entry. + entry.phone_number[0].text = '(555)555-1212' + updated = self.gd_client.UpdateContact(entry.GetEditLink().href, entry) + self.assertEquals(updated.content.text, 'Test Notes') + self.assertEquals(len(updated.phone_number), 1) + self.assertEquals(updated.phone_number[0].rel, + entry.phone_number[0].rel) + self.assertEquals(updated.phone_number[0].text, '(555)555-1212') + + # Change the contact's photo. + updated_photo = self.gd_client.ChangePhoto(test_image_location, updated, + content_type='image/jpeg') + + # Refetch the contact so that it has the new photo link + updated = self.gd_client.GetContact(updated.GetSelfLink().href) + self.assert_(updated.GetPhotoLink() is not None) + + # Fetch the photo data. + hosted_image = self.gd_client.GetPhoto(updated) + self.assert_(hosted_image is not None) + + # Delete the entry. 
+ self.gd_client.DeleteContact(updated.GetEditLink().href) + + def testCreateAndDeleteContactUsingBatch(self): + # Get random data for creating contact + r = random.Random() + random_contact_number = str(r.randint(100000, 1000000)) + random_contact_title = 'Random Contact %s' % ( + random_contact_number) + + # Set contact data + contact = gdata.contacts.ContactEntry() + contact.title = atom.Title(text=random_contact_title) + contact.email = gdata.contacts.Email( + address='user%s@example.com' % random_contact_number, + primary='true', + rel=gdata.contacts.REL_WORK) + contact.content = atom.Content(text='Contact created by ' + 'gdata-python-client automated test ' + 'suite.') + + # Form a batch request + batch_request = gdata.contacts.ContactsFeed() + batch_request.AddInsert(entry=contact) + + # Execute the batch request to insert the contact. + self.gd_client.ProgrammaticLogin() + default_batch_url = gdata.contacts.service.DEFAULT_BATCH_URL + batch_result = self.gd_client.ExecuteBatch(batch_request, + default_batch_url) + + self.assertEquals(len(batch_result.entry), 1) + self.assertEquals(batch_result.entry[0].title.text, + random_contact_title) + self.assertEquals(batch_result.entry[0].batch_operation.type, + gdata.BATCH_INSERT) + self.assertEquals(batch_result.entry[0].batch_status.code, + '201') + expected_batch_url = re.compile('default').sub( + urllib.quote(self.gd_client.email), + gdata.contacts.service.DEFAULT_BATCH_URL) + self.failUnless(batch_result.GetBatchLink().href, + expected_batch_url) + + # Create a batch request to delete the newly created entry. 
+ batch_delete_request = gdata.contacts.ContactsFeed() + batch_delete_request.AddDelete(entry=batch_result.entry[0]) + + batch_delete_result = self.gd_client.ExecuteBatch( + batch_delete_request, + batch_result.GetBatchLink().href) + self.assertEquals(len(batch_delete_result.entry), 1) + self.assertEquals(batch_delete_result.entry[0].batch_operation.type, + gdata.BATCH_DELETE) + self.assertEquals(batch_result.entry[0].batch_status.code, + '201') + + +class ContactsQueryTest(unittest.TestCase): + + def testConvertToString(self): + query = gdata.contacts.service.ContactsQuery() + self.assertEquals(str(query), '/m8/feeds/contacts/default/full') + query.max_results = '10' + self.assertEquals(query.ToUri(), + '/m8/feeds/contacts/default/full?max-results=10') + + def testGroupQueryParameter(self): + query = gdata.contacts.service.ContactsQuery() + query.group = 'http://google.com/m8/feeds/groups/liz%40gmail.com/full/270f' + self.assertEquals(query.ToUri(), '/m8/feeds/contacts/default/full' + '?group=http%3A%2F%2Fgoogle.com%2Fm8%2Ffeeds%2Fgroups' + '%2Fliz%2540gmail.com%2Ffull%2F270f') + + +class ContactsGroupsTest(unittest.TestCase): + + def setUp(self): + self.gd_client = gdata.contacts.service.ContactsService() + self.gd_client.email = username + self.gd_client.password = password + self.gd_client.source = 'GoogleInc-ContactsPythonTest-1' + self.gd_client.ProgrammaticLogin() + + def testCreateUpdateDeleteGroup(self): + test_group = gdata.contacts.GroupEntry(title=atom.Title( + text='test group py')) + new_group = self.gd_client.CreateGroup(test_group) + self.assert_(isinstance(new_group, gdata.contacts.GroupEntry)) + self.assertEquals(new_group.title.text, 'test group py') + + # Change the group's title + new_group.title.text = 'new group name py' + updated_group = self.gd_client.UpdateGroup(new_group.GetEditLink().href, + new_group) + self.assertEquals(updated_group.title.text, new_group.title.text) + + # Remove the group + 
self.gd_client.DeleteGroup(updated_group.GetEditLink().href) + + +def DeleteTestContact(client): + # Get test contact + feed = client.GetContactsFeed(uri='/m8/feeds/contacts/%s/full' % username) + for entry in feed.entry: + if (entry.title.text == 'Elizabeth Bennet' and + entry.content.text == 'Test Notes' and + entry.email[0].address == 'liz@gmail.com'): + print 'Deleting test contact' + client.DeleteContact(entry.GetEditLink().href) + + +if __name__ == '__main__': + print ('Contacts Tests\nNOTE: Please run these tests only with a test ' + 'account. The tests may delete or update your data.') + username = raw_input('Please enter your username: ') + password = getpass.getpass() + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/contacts_test.py b/gdata.py-1.2.3/tests/gdata_tests/contacts_test.py new file mode 100755 index 0000000..98883d2 --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/contacts_test.py @@ -0,0 +1,183 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +__author__ = 'api.jscudder (Jeffrey Scudder)' + + +import unittest +from gdata import test_data +import atom +import gdata.contacts + +class ContactEntryTest(unittest.TestCase): + + def setUp(self): + self.entry = gdata.contacts.ContactEntryFromString(test_data.NEW_CONTACT) + + def testParsingTestEntry(self): + self.assertEquals(self.entry.title.text, 'Fitzgerald') + self.assertEquals(len(self.entry.email), 2) + for email in self.entry.email: + if email.rel == 'http://schemas.google.com/g/2005#work': + self.assertEquals(email.address, 'liz@gmail.com') + elif email.rel == 'http://schemas.google.com/g/2005#home': + self.assertEquals(email.address, 'liz@example.org') + self.assertEquals(len(self.entry.phone_number), 3) + self.assertEquals(len(self.entry.postal_address), 1) + self.assertEquals(self.entry.postal_address[0].primary, 'true') + self.assertEquals(self.entry.postal_address[0].text, + '1600 Amphitheatre Pkwy Mountain View') + self.assertEquals(len(self.entry.im), 1) + self.assertEquals(len(self.entry.group_membership_info), 1) + self.assertEquals(self.entry.group_membership_info[0].href, + 'http://google.com/m8/feeds/groups/liz%40gmail.com/base/270f') + self.assertEquals(self.entry.group_membership_info[0].deleted, 'false') + self.assertEquals(len(self.entry.extended_property), 2) + self.assertEquals(self.entry.extended_property[0].name, 'pet') + self.assertEquals(self.entry.extended_property[0].value, 'hamster') + self.assertEquals(self.entry.extended_property[1].name, 'cousine') + self.assertEquals( + self.entry.extended_property[1].GetXmlBlobExtensionElement().tag, + 'italian') + + def testToAndFromString(self): + copied_entry = gdata.contacts.ContactEntryFromString(str(self.entry)) + self.assertEquals(copied_entry.title.text, 'Fitzgerald') + self.assertEquals(len(copied_entry.email), 2) + for email in copied_entry.email: + if email.rel == 'http://schemas.google.com/g/2005#work': + self.assertEquals(email.address, 'liz@gmail.com') + elif email.rel == 
'http://schemas.google.com/g/2005#home': + self.assertEquals(email.address, 'liz@example.org') + self.assertEquals(len(copied_entry.phone_number), 3) + self.assertEquals(len(copied_entry.postal_address), 1) + self.assertEquals(copied_entry.postal_address[0].primary, 'true') + self.assertEquals(copied_entry.postal_address[0].text, + '1600 Amphitheatre Pkwy Mountain View') + self.assertEquals(len(copied_entry.im), 1) + self.assertEquals(len(copied_entry.group_membership_info), 1) + self.assertEquals(copied_entry.group_membership_info[0].href, + 'http://google.com/m8/feeds/groups/liz%40gmail.com/base/270f') + self.assertEquals(copied_entry.group_membership_info[0].deleted, 'false') + self.assertEquals(len(copied_entry.extended_property), 2) + self.assertEquals(copied_entry.extended_property[0].name, 'pet') + self.assertEquals(copied_entry.extended_property[0].value, 'hamster') + self.assertEquals(copied_entry.extended_property[1].name, 'cousine') + self.assertEquals( + copied_entry.extended_property[1].GetXmlBlobExtensionElement().tag, + 'italian') + + def testCreateContactFromScratch(self): + # Create a new entry + new_entry = gdata.contacts.ContactEntry() + new_entry.title = atom.Title(text='Elizabeth Bennet') + new_entry.content = atom.Content(text='Test Notes') + new_entry.email.append(gdata.contacts.Email( + rel='http://schemas.google.com/g/2005#work', + address='liz@gmail.com')) + new_entry.phone_number.append(gdata.contacts.PhoneNumber( + rel='http://schemas.google.com/g/2005#work', text='(206)555-1212')) + new_entry.organization = gdata.contacts.Organization( + org_name=gdata.contacts.OrgName(text='TestCo.')) + new_entry.extended_property.append(gdata.ExtendedProperty(name='test', + value='1234')) + sports_property = gdata.ExtendedProperty(name='sports') + sports_property.SetXmlBlob('') + new_entry.extended_property.append(sports_property) + + # Generate and parse the XML for the new entry. 
+ entry_copy = gdata.contacts.ContactEntryFromString(str(new_entry)) + self.assertEquals(entry_copy.title.text, new_entry.title.text) + self.assertEquals(entry_copy.content.text, 'Test Notes') + self.assertEquals(len(entry_copy.email), 1) + self.assertEquals(entry_copy.email[0].rel, new_entry.email[0].rel) + self.assertEquals(entry_copy.email[0].address, 'liz@gmail.com') + self.assertEquals(len(entry_copy.phone_number), 1) + self.assertEquals(entry_copy.phone_number[0].rel, + new_entry.phone_number[0].rel) + self.assertEquals(entry_copy.phone_number[0].text, '(206)555-1212') + self.assertEquals(entry_copy.organization.org_name.text, 'TestCo.') + self.assertEquals(len(entry_copy.extended_property), 2) + self.assertEquals(entry_copy.extended_property[0].name, 'test') + self.assertEquals(entry_copy.extended_property[0].value, '1234') + + +class ContactsFeedTest(unittest.TestCase): + + def setUp(self): + self.feed = gdata.contacts.ContactsFeedFromString(test_data.CONTACTS_FEED) + + def testParsingTestFeed(self): + self.assertEquals(self.feed.id.text, + 'http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base') + self.assertEquals(self.feed.title.text, 'Contacts') + self.assertEquals(self.feed.total_results.text, '1') + self.assertEquals(len(self.feed.entry), 1) + self.assert_(isinstance(self.feed.entry[0], gdata.contacts.ContactEntry)) + self.assertEquals(self.feed.entry[0].GetPhotoLink().href, + 'http://google.com/m8/feeds/photos/media/liz%40gmail.com/c9012de') + self.assertEquals(self.feed.entry[0].GetPhotoEditLink().href, + 'http://www.google.com/m8/feeds/photos/media/liz%40gmail.com/' + 'c9012de/photo4524') + + def testToAndFromString(self): + copied_feed = gdata.contacts.ContactsFeedFromString(str(self.feed)) + self.assertEquals(copied_feed.id.text, + 'http://www.google.com/m8/feeds/contacts/liz%40gmail.com/base') + self.assertEquals(copied_feed.title.text, 'Contacts') + self.assertEquals(copied_feed.total_results.text, '1') + 
self.assertEquals(len(copied_feed.entry), 1) + self.assert_(isinstance(copied_feed.entry[0], gdata.contacts.ContactEntry)) + + +class GroupsFeedTest(unittest.TestCase): + + def setUp(self): + self.feed = gdata.contacts.GroupsFeedFromString( + test_data.CONTACT_GROUPS_FEED) + + def testParsingGroupsFeed(self): + self.assertEquals(self.feed.id.text, 'jo@gmail.com') + self.assertEquals(self.feed.title.text, 'Jo\'s Contact Groups') + self.assertEquals(self.feed.total_results.text, '3') + self.assertEquals(len(self.feed.entry), 1) + self.assert_(isinstance(self.feed.entry[0], gdata.contacts.GroupEntry)) + + +class GroupEntryTest(unittest.TestCase): + + def setUp(self): + self.entry = gdata.contacts.GroupEntryFromString( + test_data.CONTACT_GROUP_ENTRY) + + def testParsingTestEntry(self): + self.assertEquals(self.entry.title.text, 'Salsa group') + self.assertEquals(len(self.entry.extended_property), 1) + self.assertEquals(self.entry.extended_property[0].name, + 'more info about the group') + self.assertEquals( + self.entry.extended_property[0].GetXmlBlobExtensionElement().namespace, + atom.ATOM_NAMESPACE) + self.assertEquals( + self.entry.extended_property[0].GetXmlBlobExtensionElement().tag, + 'info') + self.assertEquals( + self.entry.extended_property[0].GetXmlBlobExtensionElement().text, + 'Very nice people.') + +if __name__ == '__main__': + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/docs/__init__.py b/gdata.py-1.2.3/tests/gdata_tests/docs/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gdata.py-1.2.3/tests/gdata_tests/docs/service_test.py b/gdata.py-1.2.3/tests/gdata_tests/docs/service_test.py new file mode 100755 index 0000000..3636220 --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/docs/service_test.py @@ -0,0 +1,58 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__author__ = 'api.jfisher (Jeff Fisher)' + +import time +import getpass +import unittest +import StringIO +import gdata.docs.service + + +username = '' +password = '' + + +class DocumentListServiceTest(unittest.TestCase): + + def setUp(self): + + self.gd_client = gdata.docs.service.DocsService() + self.gd_client.email = username + self.gd_client.password = password + self.gd_client.source = 'Document List Client Unit Tests' + self.gd_client.ProgrammaticLogin() + + def testGetDocumentsListFeed(self): + feed = self.gd_client.GetDocumentListFeed() + self.assert_(isinstance(feed, gdata.docs.DocumentListFeed)) + + def testCreateAndDeleteSpreadsheet(self): + virtual_csv_file = StringIO.StringIO(',,,') + virtual_media_source = gdata.MediaSource(file_handle=virtual_csv_file, content_type='text/csv', content_length=3) + entry = self.gd_client.UploadSpreadsheet(virtual_media_source, 'test title') + self.assert_(entry.title.text == 'test title') + time.sleep(10) + self.gd_client.Delete(entry.GetEditLink().href) + + +if __name__ == '__main__': + print ('DocList API Tests\nNOTE: Please run these tests only with a test ' + 'account. The tests may delete or update your data.') + username = raw_input('Please enter your username: ') + password = getpass.getpass() + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/docs_test.py b/gdata.py-1.2.3/tests/gdata_tests/docs_test.py new file mode 100755 index 0000000..0d5defd --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/docs_test.py @@ -0,0 +1,76 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'api.jfisher (Jeff Fisher)' + + +import unittest +from gdata import test_data +import gdata.docs + +class DocumentListEntryTest(unittest.TestCase): + + def setUp(self): + self.dl_entry = gdata.docs.DocumentListEntryFromString( + test_data.DOCUMENT_LIST_ENTRY) + + def testToAndFromStringWithData(self): + + entry = gdata.docs.DocumentListEntryFromString(str(self.dl_entry)) + + self.assertEqual(entry.author[0].name.text, 'test.user') + self.assertEqual(entry.author[0].email.text, 'test.user@gmail.com') + self.assertEqual(entry.category[0].label, 'spreadsheet') + self.assertEqual(entry.id.text, + 'http://docs.google.com/feeds/documents/private/full/' +\ + 'spreadsheet%3Asupercalifragilisticexpealidocious') + self.assertEqual(entry.title.text,'Test Spreadsheet') + +class DocumentListFeedTest(unittest.TestCase): + + def setUp(self): + self.dl_feed = gdata.docs.DocumentListFeedFromString( + test_data.DOCUMENT_LIST_FEED) + + def testToAndFromString(self): + self.assert_(len(self.dl_feed.entry) == 2) + for an_entry in self.dl_feed.entry: + self.assert_(isinstance(an_entry, gdata.docs.DocumentListEntry)) + new_dl_feed = gdata.docs.DocumentListFeedFromString(str( + self.dl_feed)) + for an_entry in new_dl_feed.entry: + self.assert_(isinstance(an_entry, gdata.docs.DocumentListEntry)) + + def testConvertActualData(self): + for an_entry in self.dl_feed.entry: + self.assertEqual(an_entry.author[0].name.text, 
'test.user') + self.assertEqual(an_entry.author[0].email.text, 'test.user@gmail.com') + if(an_entry.category[0].label == 'spreadsheet'): + self.assertEqual(an_entry.title.text, 'Test Spreadsheet') + elif(an_entry.category[0].label == 'document'): + self.assertEqual(an_entry.title.text, 'Test Document') + + def testLinkFinderFindsHtmlLink(self): + for entry in self.dl_feed.entry: + # All Document List entries should have a self link + self.assert_(entry.GetSelfLink() is not None) + # All Document List entries should have an HTML link + self.assert_(entry.GetHtmlLink() is not None) + + +if __name__ == '__main__': + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/photos/__init__.py b/gdata.py-1.2.3/tests/gdata_tests/photos/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gdata.py-1.2.3/tests/gdata_tests/photos/service_test.py b/gdata.py-1.2.3/tests/gdata_tests/photos/service_test.py new file mode 100755 index 0000000..5da3ff4 --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/photos/service_test.py @@ -0,0 +1,82 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__author__ = 'api.jscudder (Jeffrey Scudder)' + +import getpass +import time +import unittest +import StringIO +import gdata.photos.service +import gdata.photos +import atom + + +username = '' +password = '' +test_image_location = '../../testimage.jpg' +test_image_name = 'testimage.jpg' + + +class PhotosServiceTest(unittest.TestCase): + + def setUp(self): + # Initialize the client and create a new album for testing. + self.client = gdata.photos.service.PhotosService() + self.client.email = username + self.client.password = password + self.client.source = 'Photos Client Unit Tests' + self.client.ProgrammaticLogin() + + # Give the album a unique title by appending the current time. + self.test_album = self.client.InsertAlbum( + 'Python library test' + str(time.time()), + 'A temporary test album.') + + def testUploadGetAndDeletePhoto(self): + image_entry = self.client.InsertPhotoSimple(self.test_album, + 'test', 'a pretty testing picture', test_image_location) + self.assert_(image_entry.title.text == 'test') + results_feed = self.client.SearchUserPhotos('test') + self.assert_(len(results_feed.entry) > 0) + self.client.Delete(image_entry) + + def testInsertPhotoUpdateBlobAndDelete(self): + new_entry = gdata.photos.PhotoEntry() + new_entry.title = atom.Title(text='a_test_image') + new_entry.summary = atom.Summary(text='Just a test.') + new_entry.category.append(atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/photos/2007#photo')) + entry = self.client.InsertPhoto(self.test_album, new_entry, + test_image_location, content_type='image/jpeg') + self.assert_(entry.id.text) + updated_entry = self.client.UpdatePhotoBlob(entry, test_image_location) + self.assert_(entry.GetEditLink().href != updated_entry.GetEditLink().href) + self.client.Delete(updated_entry) + + def tearDown(self): + # Delete the test album. 
+ test_album = self.client.GetEntry(self.test_album.GetSelfLink().href) + self.client.Delete(test_album) + + +if __name__ == '__main__': + print ('Google Photos test\nNOTE: Please run these tests only with a test ' + 'account. The tests may delete or update your data.') + username = raw_input('Please enter your username: ') + password = getpass.getpass() + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/photos_test.py b/gdata.py-1.2.3/tests/gdata_tests/photos_test.py new file mode 100755 index 0000000..f18f3f0 --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/photos_test.py @@ -0,0 +1,69 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +__author__ = 'api.jscudder (Jeffrey Scudder)' + + +import unittest +from gdata import test_data +import gdata.photos + + +class AlbumFeedTest(unittest.TestCase): + + def setUp(self): + self.album_feed = gdata.photos.AlbumFeedFromString(test_data.ALBUM_FEED) + + def testCorrectXmlParsing(self): + self.assert_(self.album_feed.id.text == 'http://picasaweb.google.com/data/feed/api/user/sample.user/albumid/1') + self.assert_(self.album_feed.gphoto_id.text == '1') + self.assert_(len(self.album_feed.entry) == 4) + for entry in self.album_feed.entry: + if entry.id.text == 'http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/photoid/2': + self.assert_(entry.summary.text == 'Blue') + + +class PhotoFeedTest(unittest.TestCase): + + def setUp(self): + self.feed = gdata.photos.PhotoFeedFromString(test_data.ALBUM_FEED) + + def testCorrectXmlParsing(self): + for entry in self.feed.entry: + if entry.id.text == 'http://picasaweb.google.com/data/entry/api/user/sample.user/albumid/1/photoid/2': + self.assert_(entry.gphoto_id.text == '2') + self.assert_(entry.albumid.text == '1') + self.assert_(entry.exif.flash.text == 'true') + self.assert_(entry.media.title.type == 'plain') + self.assert_(entry.media.title.text == 'Aqua Blue.jpg') + self.assert_(len(entry.media.thumbnail) == 3) + + + +class AnyFeedTest(unittest.TestCase): + + def setUp(self): + self.feed = gdata.photos.AnyFeedFromString(test_data.ALBUM_FEED) + + def testEntryTypeConversion(self): + for entry in self.feed.entry: + if entry.id.text == 'http://picasaweb.google.com/data/feed/api/user/sample.user/albumid/': + self.assert_(isinstance(entry, gdata.photos.PhotoEntry)) + + +if __name__ == '__main__': + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/service_test.py b/gdata.py-1.2.3/tests/gdata_tests/service_test.py new file mode 100755 index 0000000..8047542 --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/service_test.py @@ -0,0 +1,549 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 
Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__author__ = 'api.jscudder (Jeff Scudder)' + +import unittest +import getpass +try: + from xml.etree import ElementTree +except ImportError: + from elementtree import ElementTree +import gdata.service +import gdata +import gdata.auth +import atom +import atom.service +import atom.token_store +import gdata.base +import os.path +from gdata import test_data +import atom.mock_http + + +username = '' +password = '' +test_image_location = '../testimage.jpg' +test_image_name = 'testimage.jpg' + + +class GDataServiceMediaUnitTest(unittest.TestCase): + + def setUp(self): + self.gd_client = gdata.service.GDataService() + self.gd_client.email = username + self.gd_client.password = password + self.gd_client.service = 'lh2' + self.gd_client.source = 'GDataService Media "Unit" Tests' + try: + self.gd_client.ProgrammaticLogin() + except gdata.service.CaptchaRequired: + self.fail('Required Captcha') + except gdata.service.BadAuthentication: + self.fail('Bad Authentication') + except gdata.service.Error: + self.fail('Login Error') + + # create a test album + gd_entry = gdata.GDataEntry() + gd_entry.title = atom.Title(text='GData Test Album') + gd_entry.category.append(atom.Category( + scheme='http://schemas.google.com/g/2005#kind', + term='http://schemas.google.com/photos/2007#album')) + + self.album_entry = self.gd_client.Post(gd_entry, + 'http://picasaweb.google.com/data/feed/api/user/' + username) + + def tearDown(self): 
+ album_entry = self.gd_client.Get(self.album_entry.id.text) + self.gd_client.Delete(album_entry.GetEditLink().href) + + def testSourceGeneratesUserAgentHeader(self): + self.gd_client.source = 'GoogleInc-ServiceUnitTest-1' + self.assert_(self.gd_client.additional_headers['User-Agent'].startswith( + 'GoogleInc-ServiceUnitTest-1 GData-Python')) + + def testMedia1(self): + # Create media-only + ms = gdata.MediaSource() + ms.setFile(test_image_location, 'image/jpeg') + media_entry = self.gd_client.Post(None, + self.album_entry.GetFeedLink().href, media_source = ms) + self.assert_(media_entry is not None) + self.assert_(isinstance(media_entry, gdata.GDataEntry)) + self.assert_(media_entry.IsMedia()) + + # Update media & metadata + ms = gdata.MediaSource() + ms.setFile(test_image_location, 'image/jpeg') + media_entry.summary = atom.Summary(text='Test Image') + media_entry2 = self.gd_client.Put(media_entry, + media_entry.GetEditLink().href, media_source = ms) + self.assert_(media_entry2 is not None) + self.assert_(isinstance(media_entry2, gdata.GDataEntry)) + self.assert_(media_entry2.IsMedia()) + self.assert_(media_entry2.summary.text == 'Test Image') + + # Read media binary + imageSource = self.gd_client.GetMedia(media_entry2.GetMediaURL()) + self.assert_(isinstance(imageSource, gdata.MediaSource)) + self.assert_(imageSource.content_type == 'image/jpeg') + self.assert_(imageSource.content_length) + + imageData = imageSource.file_handle.read() + self.assert_(imageData) + + # Delete entry + response = self.gd_client.Delete(media_entry2.GetEditLink().href) + self.assert_(response) + + def testMedia2(self): + # Create media & metadata + ms = gdata.MediaSource() + ms.setFile(test_image_location, 'image/jpeg') + new_media_entry = gdata.GDataEntry() + new_media_entry.title = atom.Title(text='testimage1.jpg') + new_media_entry.summary = atom.Summary(text='Test Image') + new_media_entry.category.append(atom.Category(scheme = + 'http://schemas.google.com/g/2005#kind', term = + 
'http://schemas.google.com/photos/2007#photo')) + media_entry = self.gd_client.Post(new_media_entry, + self.album_entry.GetFeedLink().href, media_source = ms) + self.assert_(media_entry is not None) + self.assert_(isinstance(media_entry, gdata.GDataEntry)) + self.assert_(media_entry.IsMedia()) + self.assert_(media_entry.summary.text == 'Test Image') + + # Update media only + ms = gdata.MediaSource() + ms.setFile(test_image_location, 'image/jpeg') + media_entry = self.gd_client.Put(None, media_entry.GetEditMediaLink().href, + media_source = ms) + self.assert_(media_entry is not None) + self.assert_(isinstance(media_entry, gdata.GDataEntry)) + self.assert_(media_entry.IsMedia()) + + # Delete entry + response = self.gd_client.Delete(media_entry.GetEditLink().href) + self.assert_(response) + + def testMediaConstructorDefaults(self): + + ms = gdata.MediaSource() + ms.setFile(test_image_location, 'image/jpeg') + + self.assert_(ms is not None) + self.assert_(isinstance(ms, gdata.MediaSource)) + self.assertEquals(ms.file_name, test_image_name) + self.assertEquals(ms.content_type, 'image/jpeg') + + def testMediaConstructorWithFilePath(self): + + ms = gdata.MediaSource(file_path=test_image_location, + content_type='image/jpeg') + + self.assert_(ms is not None) + self.assert_(isinstance(ms, gdata.MediaSource)) + self.assertEquals(ms.file_name, test_image_name) + self.assertEquals(ms.content_type, 'image/jpeg') + + def testMediaConstructorWithFileHandle(self): + + fh = open(test_image_location, 'r') + len = os.path.getsize(test_image_location) + ms = gdata.MediaSource(fh, 'image/jpeg', len, file_name=test_image_location) + + self.assert_(ms is not None) + self.assert_(isinstance(ms, gdata.MediaSource)) + self.assertEquals(ms.file_name, test_image_location) + self.assertEquals(ms.content_type, 'image/jpeg') + + +class GDataServiceUnitTest(unittest.TestCase): + + def setUp(self): + self.gd_client = gdata.service.GDataService() + self.gd_client.email = username + 
self.gd_client.password = password + self.gd_client.service = 'gbase' + self.gd_client.source = 'GDataClient "Unit" Tests' + + def testProperties(self): + email_string = 'Test Email' + password_string = 'Passwd' + + self.gd_client.email = email_string + self.assertEquals(self.gd_client.email, email_string) + self.gd_client.password = password_string + self.assertEquals(self.gd_client.password, password_string) + + def testCorrectLogin(self): + try: + self.gd_client.ProgrammaticLogin() + self.assert_(isinstance( + self.gd_client.token_store.find_token( + 'http://base.google.com/base/feeds/'), + gdata.auth.ClientLoginToken)) + self.assert_(self.gd_client.captcha_token is None) + self.assert_(self.gd_client.captcha_url is None) + except gdata.service.CaptchaRequired: + self.fail('Required Captcha') + + def testDefaultHttpClient(self): + self.assert_(isinstance(self.gd_client.http_client, + atom.http.HttpClient)) + + + def testGet(self): + try: + self.gd_client.ProgrammaticLogin() + except gdata.service.CaptchaRequired: + self.fail('Required Captcha') + except gdata.service.BadAuthentication: + self.fail('Bad Authentication') + except gdata.service.Error: + self.fail('Login Error') + self.gd_client.additional_headers = {'X-Google-Key': + 'ABQIAAAAoLioN3buSs9KqIIq9V' + + 'mkFxT2yXp_ZAY8_ufC3CFXhHIE' + + '1NvwkxRK8C1Q8OWhsWA2AIKv-c' + + 'VKlVrNhQ'} + self.gd_client.server = 'base.google.com' + result = self.gd_client.Get('/base/feeds/snippets?bq=digital+camera') + self.assert_(result is not None) + self.assert_(isinstance(result, atom.Feed)) + + def testGetWithAuthentication(self): + try: + self.gd_client.ProgrammaticLogin() + except gdata.service.CaptchaRequired: + self.fail('Required Captcha') + except gdata.service.BadAuthentication: + self.fail('Bad Authentication') + except gdata.service.Error: + self.fail('Login Error') + self.gd_client.additional_headers = {'X-Google-Key': + 'ABQIAAAAoLioN3buSs9KqIIq9V' + + 'mkFxT2yXp_ZAY8_ufC3CFXhHIE' + + 
'1NvwkxRK8C1Q8OWhsWA2AIKv-c' + + 'VKlVrNhQ'} + self.gd_client.server = 'base.google.com' + result = self.gd_client.Get('/base/feeds/items?bq=digital+camera') + self.assert_(result is not None) + self.assert_(isinstance(result, atom.Feed)) + + def testGetEntry(self): + try: + self.gd_client.ProgrammaticLogin() + except gdata.service.CaptchaRequired: + self.fail('Required Captcha') + except gdata.service.BadAuthentication: + self.fail('Bad Authentication') + except gdata.service.Error: + self.fail('Login Error') + self.gd_client.server = 'base.google.com' + try: + result = self.gd_client.GetEntry('/base/feeds/items?bq=digital+camera') + self.fail( + 'Result from server in GetEntry should have raised an exception') + except gdata.service.UnexpectedReturnType: + pass + + def testGetFeed(self): + try: + self.gd_client.ProgrammaticLogin() + except gdata.service.CaptchaRequired: + self.fail('Required Captcha') + except gdata.service.BadAuthentication: + self.fail('Bad Authentication') + except gdata.service.Error: + self.fail('Login Error') + self.gd_client.server = 'base.google.com' + result = self.gd_client.GetFeed('/base/feeds/items?bq=digital+camera') + self.assert_(result is not None) + self.assert_(isinstance(result, atom.Feed)) + + def testGetWithResponseTransformer(self): + # Query Google Base and interpret the results as a GBaseSnippetFeed. 
+ feed = self.gd_client.Get( + 'http://www.google.com/base/feeds/snippets?bq=digital+camera', + converter=gdata.base.GBaseSnippetFeedFromString) + self.assertEquals(isinstance(feed, gdata.base.GBaseSnippetFeed), True) + + def testPostPutAndDelete(self): + try: + self.gd_client.ProgrammaticLogin() + except gdata.service.CaptchaRequired: + self.fail('Required Captcha') + except gdata.service.BadAuthentication: + self.fail('Bad Authentication') + except gdata.service.Error: + self.fail('Login Error') + self.gd_client.additional_headers = {'X-Google-Key': + 'ABQIAAAAoLioN3buSs9KqIIq9V' + + 'mkFxT2yXp_ZAY8_ufC3CFXhHIE' + + '1NvwkxRK8C1Q8OWhsWA2AIKv-c' + + 'VKlVrNhQ'} + self.gd_client.server = 'base.google.com' + + # Insert a new item + response = self.gd_client.Post(test_data.TEST_BASE_ENTRY, + '/base/feeds/items') + self.assert_(response is not None) + self.assert_(isinstance(response, atom.Entry)) + self.assert_(response.category[0].term == 'products') + + # Find the item id of the created item + item_id = response.id.text.lstrip( + 'http://www.google.com/base/feeds/items/') + self.assert_(item_id is not None) + + updated_xml = gdata.base.GBaseItemFromString(test_data.TEST_BASE_ENTRY) + # Change one of the labels in the item + updated_xml.label[2].text = 'beach ball' + # Update the item + response = self.gd_client.Put(updated_xml, + '/base/feeds/items/%s' % item_id) + self.assert_(response is not None) + new_base_item = gdata.base.GBaseItemFromString(str(response)) + self.assert_(isinstance(new_base_item, atom.Entry)) + + # Delete the item the test just created. 
+ response = self.gd_client.Delete('/base/feeds/items/%s' % item_id) + self.assert_(response) + + def testPostPutAndDeleteWithConverters(self): + try: + self.gd_client.ProgrammaticLogin() + except gdata.service.CaptchaRequired: + self.fail('Required Captcha') + except gdata.service.BadAuthentication: + self.fail('Bad Authentication') + except gdata.service.Error: + self.fail('Login Error') + self.gd_client.additional_headers = {'X-Google-Key': + 'ABQIAAAAoLioN3buSs9KqIIq9V' + + 'mkFxT2yXp_ZAY8_ufC3CFXhHIE' + + '1NvwkxRK8C1Q8OWhsWA2AIKv-c' + + 'VKlVrNhQ'} + self.gd_client.server = 'base.google.com' + + # Insert a new item + response = self.gd_client.Post(test_data.TEST_BASE_ENTRY, + '/base/feeds/items', converter=gdata.base.GBaseItemFromString) + self.assert_(response is not None) + self.assert_(isinstance(response, atom.Entry)) + self.assert_(isinstance(response, gdata.base.GBaseItem)) + self.assert_(response.category[0].term == 'products') + + updated_xml = gdata.base.GBaseItemFromString(test_data.TEST_BASE_ENTRY) + # Change one of the labels in the item + updated_xml.label[2].text = 'beach ball' + # Update the item + response = self.gd_client.Put(updated_xml, + response.id.text, + converter=gdata.base.GBaseItemFromString) + self.assertEquals(response is not None, True) + self.assertEquals(isinstance(response, gdata.base.GBaseItem), True) + + # Delete the item the test just created. + response = self.gd_client.Delete(response.id.text) + self.assert_(response) + + def testCaptchaUrlGeneration(self): + # Populate the mock server with a pairing for a ClientLogin request to a + # CAPTCHA challenge. 
+ mock_client = atom.mock_http.MockHttpClient() + captcha_response = atom.mock_http.MockResponse( + body="""Url=http://www.google.com/login/captcha +Error=CaptchaRequired +CaptchaToken=DQAAAGgAdkI1LK9 +CaptchaUrl=Captcha?ctoken=HiteT4b0Bk5Xg18_AcVoP6-yFkHPibe7O9EqxeiI7lUSN +""", status=403, reason='Access Forbidden') + mock_client.add_response(captcha_response, 'POST', + 'https://www.google.com/accounts/ClientLogin') + + # Set the exising client's handler so that it will make requests to the + # mock service instead of the real server. + self.gd_client.http_client = mock_client + + try: + self.gd_client.ProgrammaticLogin() + self.fail('Login attempt should have caused a CAPTCHA challenge.') + except gdata.service.CaptchaRequired, error: + self.assertEquals(self.gd_client.captcha_url, + ('https://www.google.com/accounts/Captcha?ctoken=HiteT4b0Bk5Xg18_' + 'AcVoP6-yFkHPibe7O9EqxeiI7lUSN')) + + +class QueryTest(unittest.TestCase): + + def setUp(self): + self.query = gdata.service.Query() + + def testQueryShouldBehaveLikeDict(self): + try: + self.query['zap'] + self.fail() + except KeyError: + pass + self.query['zap'] = 'x' + self.assert_(self.query['zap'] == 'x') + + def testContructorShouldRejectBadInputs(self): + test_q = gdata.service.Query(params=[1,2,3,4]) + self.assert_(len(test_q.keys()) == 0) + + def testTextQueryProperty(self): + self.assert_(self.query.text_query is None) + self.query['q'] = 'test1' + self.assert_(self.query.text_query == 'test1') + self.query.text_query = 'test2' + self.assert_(self.query.text_query == 'test2') + + def testOrderByQueryProperty(self): + self.assert_(self.query.orderby is None) + self.query['orderby'] = 'updated' + self.assert_(self.query.orderby == 'updated') + self.query.orderby = 'starttime' + self.assert_(self.query.orderby == 'starttime') + + def testQueryShouldProduceExampleUris(self): + self.query.feed = '/base/feeds/snippets' + self.query.text_query = 'This is a test' + self.assert_(self.query.ToUri() == 
'/base/feeds/snippets?q=This+is+a+test') + + def testCategoriesFormattedCorrectly(self): + self.query.feed = '/x' + self.query.categories.append('Fritz') + self.query.categories.append('Laurie') + self.assert_(self.query.ToUri() == '/x/-/Fritz/Laurie') + # The query's feed should not have been changed + self.assert_(self.query.feed == '/x') + self.assert_(self.query.ToUri() == '/x/-/Fritz/Laurie') + + def testCategoryQueriesShouldEscapeOrSymbols(self): + self.query.feed = '/x' + self.query.categories.append('Fritz|Laurie') + self.assert_(self.query.ToUri() == '/x/-/Fritz%7CLaurie') + + def testTypeCoercionOnIntParams(self): + self.query.feed = '/x' + self.query.max_results = 10 + self.query.start_index = 5 + self.assert_(isinstance(self.query.max_results, str)) + self.assert_(isinstance(self.query.start_index, str)) + self.assertEquals(self.query['max-results'], '10') + self.assertEquals(self.query['start-index'], '5') + + def testPassInCategoryListToConstructor(self): + query = gdata.service.Query(feed='/feed/sample', categories=['foo', 'bar', + 'eggs|spam']) + url = query.ToUri() + self.assert_(url.find('/foo') > -1) + self.assert_(url.find('/bar') > -1) + self.assert_(url.find('/eggs%7Cspam') > -1) + + +class GetNextPageInFeedTest(unittest.TestCase): + + def setUp(self): + self.gd_client = gdata.service.GDataService() + + def testGetNextPage(self): + feed = self.gd_client.Get( + 'http://www.google.com/base/feeds/snippets?max-results=2', + converter=gdata.base.GBaseSnippetFeedFromString) + self.assert_(len(feed.entry) > 0) + first_id = feed.entry[0].id.text + feed2 = self.gd_client.GetNext(feed) + self.assert_(len(feed2.entry) > 0) + next_id = feed2.entry[0].id.text + self.assert_(first_id != next_id) + self.assert_(feed2.__class__ == feed.__class__) + + +class ScopeLookupTest(unittest.TestCase): + + def testLookupScopes(self): + scopes = gdata.service.lookup_scopes('cl') + self.assertEquals(scopes, gdata.service.CLIENT_LOGIN_SCOPES['cl']) + scopes = 
gdata.service.lookup_scopes(None) + self.assert_(scopes is None) + scopes = gdata.service.lookup_scopes('UNKNOWN_SERVICE') + self.assert_(scopes is None) + + +class TokenLookupTest(unittest.TestCase): + + def setUp(self): + self.client = gdata.service.GDataService() + + def testSetAndGetClientLoginTokenWithNoService(self): + self.assert_(self.client.auth_token is None) + self.client.SetClientLoginToken('foo') + self.assert_(self.client.auth_token is None) + self.assert_(self.client.token_store.find_token( + atom.token_store.SCOPE_ALL) is not None) + self.assertEquals(self.client.GetClientLoginToken(), 'foo') + self.client.SetClientLoginToken('foo2') + self.assertEquals(self.client.GetClientLoginToken(), 'foo2') + + def testSetAndGetClientLoginTokenWithService(self): + self.client.service = 'cp' + self.client.SetClientLoginToken('bar') + self.assertEquals(self.client.GetClientLoginToken(), 'bar') + # Changing the service should cause the token to no longer be found. + self.client.service = 'gbase' + self.client.current_token = None + self.assert_(self.client.GetClientLoginToken() is None) + + def testSetAndGetClientLoginTokenWithScopes(self): + scopes = gdata.service.CLIENT_LOGIN_SCOPES['cl'][:] + scopes.extend(gdata.service.CLIENT_LOGIN_SCOPES['gbase']) + self.client.SetClientLoginToken('baz', scopes=scopes) + self.client.current_token = None + self.assert_(self.client.GetClientLoginToken() is None) + self.client.service = 'cl' + self.assertEquals(self.client.GetClientLoginToken(), 'baz') + self.client.service = 'gbase' + self.assertEquals(self.client.GetClientLoginToken(), 'baz') + self.client.service = 'wise' + self.assert_(self.client.GetClientLoginToken() is None) + + def testLookupUsingTokenStore(self): + scopes = gdata.service.CLIENT_LOGIN_SCOPES['cl'][:] + scopes.extend(gdata.service.CLIENT_LOGIN_SCOPES['gbase']) + self.client.SetClientLoginToken('baz', scopes=scopes) + token = self.client.token_store.find_token( + 'http://www.google.com/calendar/feeds/foo') 
+ self.assertEquals(token.get_token_string(), 'baz') + self.assertEquals(token.auth_header, '%s%s' % ( + gdata.auth.PROGRAMMATIC_AUTH_LABEL, 'baz')) + token = self.client.token_store.find_token( + 'http://www.google.com/calendar/') + self.assert_(isinstance(token, gdata.auth.ClientLoginToken) == False) + token = self.client.token_store.find_token( + 'http://www.google.com/base/feeds/snippets') + self.assertEquals(token.get_token_string(), 'baz') + + +if __name__ == '__main__': + print ('GData Service Media Unit Tests\nNOTE: Please run these tests only ' + 'with a test account. The tests may delete or update your data.') + username = raw_input('Please enter your username: ') + password = getpass.getpass() + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/spreadsheet/__init__.py b/gdata.py-1.2.3/tests/gdata_tests/spreadsheet/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gdata.py-1.2.3/tests/gdata_tests/spreadsheet/service_test.py b/gdata.py-1.2.3/tests/gdata_tests/spreadsheet/service_test.py new file mode 100755 index 0000000..a120d7f --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/spreadsheet/service_test.py @@ -0,0 +1,211 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__author__ = 'api.laurabeth@gmail.com (Laura Beth Lincoln)' + +import unittest +try: + from xml.etree import ElementTree +except ImportError: + from elementtree import ElementTree +import gdata.spreadsheet.service +import gdata.service +import atom.service +import gdata.spreadsheet +import atom +import getpass + + +username = '' +password = '' +ss_key = '' +ws_key = '' + + +class DocumentQueryTest(unittest.TestCase): + + def setUp(self): + self.query = gdata.spreadsheet.service.DocumentQuery() + + def testTitle(self): + self.query['title'] = 'my title' + self.assert_(self.query['title'] == 'my title') + self.assert_(self.query.ToUri() == '?title=my+title') + + def testTitleExact(self): + self.query['title-exact'] = 'true' + self.assert_(self.query['title-exact'] == 'true') + self.assert_(self.query.ToUri() == '?title-exact=true') + + +class CellQueryTest(unittest.TestCase): + + def setUp(self): + self.query = gdata.spreadsheet.service.CellQuery() + + def testMinRow(self): + self.query['min-row'] = '1' + self.assert_(self.query['min-row'] == '1') + self.assert_(self.query.ToUri() == '?min-row=1') + + def testMaxRow(self): + self.query['max-row'] = '100' + self.assert_(self.query['max-row'] == '100') + self.assert_(self.query.ToUri() == '?max-row=100') + + def testMinCol(self): + self.query['min-col'] = '2' + self.assert_(self.query['min-col'] == '2') + self.assert_(self.query.ToUri() == '?min-col=2') + + def testMaxCol(self): + self.query['max-col'] = '20' + self.assert_(self.query['max-col'] == '20') + self.assert_(self.query.ToUri() == '?max-col=20') + + def testRange(self): + self.query['range'] = 'A1:B4' + self.assert_(self.query['range'] == 'A1:B4') + self.assert_(self.query.ToUri() == '?range=A1%3AB4') + + def testReturnEmpty(self): + self.query['return-empty'] = 'false' + self.assert_(self.query['return-empty'] == 'false') + self.assert_(self.query.ToUri() == '?return-empty=false') + + +class ListQueryTest(unittest.TestCase): + + def setUp(self): + 
self.query = gdata.spreadsheet.service.ListQuery() + + def testSpreadsheetQuery(self): + self.query['sq'] = 'first=john&last=smith' + self.assert_(self.query['sq'] == 'first=john&last=smith') + self.assert_(self.query.ToUri() == '?sq=first%3Djohn%26last%3Dsmith') + + def testOrderByQuery(self): + self.query['orderby'] = 'column:first' + self.assert_(self.query['orderby'] == 'column:first') + self.assert_(self.query.ToUri() == '?orderby=column%3Afirst') + + def testReverseQuery(self): + self.query['reverse'] = 'true' + self.assert_(self.query['reverse'] == 'true') + self.assert_(self.query.ToUri() == '?reverse=true') + + +class SpreadsheetsServiceTest(unittest.TestCase): + + def setUp(self): + self.key = ss_key + self.worksheet = ws_key + self.gd_client = gdata.spreadsheet.service.SpreadsheetsService() + self.gd_client.email = username + self.gd_client.password = password + self.gd_client.source = 'SpreadsheetsClient "Unit" Tests' + self.gd_client.ProgrammaticLogin() + + def testGetSpreadsheetsFeed(self): + entry = self.gd_client.GetSpreadsheetsFeed(self.key) + self.assert_(isinstance(entry, gdata.spreadsheet.SpreadsheetsSpreadsheet)) + + def testGetWorksheetsFeed(self): + feed = self.gd_client.GetWorksheetsFeed(self.key) + self.assert_(isinstance(feed, gdata.spreadsheet.SpreadsheetsWorksheetsFeed)) + entry = self.gd_client.GetWorksheetsFeed(self.key, self.worksheet) + self.assert_(isinstance(entry, gdata.spreadsheet.SpreadsheetsWorksheet)) + + def testGetCellsFeed(self): + feed = self.gd_client.GetCellsFeed(self.key) + self.assert_(isinstance(feed, gdata.spreadsheet.SpreadsheetsCellsFeed)) + entry = self.gd_client.GetCellsFeed(self.key, cell='R5C1') + self.assert_(isinstance(entry, gdata.spreadsheet.SpreadsheetsCell)) + + def testGetListFeed(self): + feed = self.gd_client.GetListFeed(self.key) + self.assert_(isinstance(feed, gdata.spreadsheet.SpreadsheetsListFeed)) + entry = self.gd_client.GetListFeed(self.key, row_id='cokwr') + self.assert_(isinstance(entry, 
gdata.spreadsheet.SpreadsheetsList)) + + def testUpdateCell(self): + self.gd_client.UpdateCell(row='5', col='1', inputValue='', key=self.key) + self.gd_client.UpdateCell(row='5', col='1', inputValue='newer data', + key=self.key) + + def testBatchUpdateCell(self): + cell_feed = self.gd_client.GetCellsFeed(key=self.key) + edit_cell = cell_feed.entry[0] + old_cell_value = 'a1' + + # Create a batch request to change the contents of a cell. + batch_feed = gdata.spreadsheet.SpreadsheetsCellsFeed() + edit_cell.cell.inputValue = 'New Value' + batch_feed.AddUpdate(edit_cell) + result = self.gd_client.ExecuteBatch(batch_feed, + url=cell_feed.GetBatchLink().href) + self.assertEquals(len(result.entry), 1) + self.assertEquals(result.entry[0].cell.inputValue, 'New Value') + + # Make a second batch request to change the cell's value back. + edit_cell = result.entry[0] + edit_cell.cell.inputValue = old_cell_value + batch_feed = gdata.spreadsheet.SpreadsheetsCellsFeed() + batch_feed.AddUpdate(edit_cell) + restored = self.gd_client.ExecuteBatch(batch_feed, + url=cell_feed.GetBatchLink().href) + self.assertEquals(len(restored.entry), 1) + self.assertEquals(restored.entry[0].cell.inputValue, old_cell_value) + + def testInsertUpdateRow(self): + entry = self.gd_client.InsertRow({'a1':'new', 'b1':'row', 'c1':'was', + 'd1':'here'}, self.key) + entry = self.gd_client.UpdateRow(entry, {'a1':'newer', + 'b1':entry.custom['b1'].text, 'c1':entry.custom['c1'].text, + 'd1':entry.custom['d1'].text}) + self.gd_client.DeleteRow(entry) + + def testWorksheetCRUD(self): + # Add a new worksheet. + new_worksheet = self.gd_client.AddWorksheet('worksheet_title_test_12', '2', + 3, self.key) + self.assertEquals(new_worksheet.col_count.text, '3') + self.assertEquals(new_worksheet.row_count.text, '2') + self.assertEquals(new_worksheet.title.text, 'worksheet_title_test_12') + + # Change the dimensions and title of the new worksheet. 
+ new_worksheet.col_count.text = '1' + new_worksheet.title.text = 'edited worksheet test12' + edited_worksheet = self.gd_client.UpdateWorksheet(new_worksheet) + self.assertEquals(edited_worksheet.col_count.text, '1') + self.assertEquals(edited_worksheet.row_count.text, '2') + self.assertEquals(edited_worksheet.title.text, 'edited worksheet test12') + + # Delete the new worksheet. + result = self.gd_client.DeleteWorksheet(edited_worksheet) + self.assertEquals(result, True) + + + + +if __name__ == '__main__': + print ('Spreadsheet Tests\nNOTE: Please run these tests only with a test ' + 'account. The tests may delete or update your data.') + username = raw_input('Please enter your username: ') + password = getpass.getpass() + ss_key = raw_input('Please enter your spreadsheet key: ') + ws_key = raw_input('Please enter your worksheet key: ') + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/spreadsheet/text_db_test.py b/gdata.py-1.2.3/tests/gdata_tests/spreadsheet/text_db_test.py new file mode 100755 index 0000000..7b321fd --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/spreadsheet/text_db_test.py @@ -0,0 +1,209 @@ +#!/usr/bin/python +# +# Copyright Google 2007-2008, all rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import time +import unittest +import getpass +import gdata.spreadsheet.text_db +import gdata.spreadsheet.service + + +__author__ = 'api.jscudder (Jeffrey Scudder)' + + +username = '' +password = '' + + +class FactoryTest(unittest.TestCase): + + def setUp(self): + self.client = gdata.spreadsheet.text_db.DatabaseClient() + + def testBadCredentials(self): + try: + self.client.SetCredentials('foo', 'bar') + self.fail() + except gdata.spreadsheet.text_db.Error, e: + pass + + def testCreateGetAndDeleteDatabase(self): + db_title = 'google_spreadsheets_db unit test 1' + self.client.SetCredentials(username, password) + db = self.client.CreateDatabase(db_title) + # Test finding the database using the name + time.sleep(5) + db_list = self.client.GetDatabases(name=db_title) + self.assert_(len(db_list) >= 1) + if len(db_list) >= 1: + self.assert_(db_list[0].entry.title.text == db_title) + # Test finding the database using the spreadsheet key + db_list = self.client.GetDatabases(spreadsheet_key=db.spreadsheet_key) + self.assert_(len(db_list) == 1) + self.assert_(db_list[0].entry.title.text == db_title) + # Delete the test spreadsheet + time.sleep(10) + db.Delete() + + +class DatabaseTest(unittest.TestCase): + + def setUp(self): + client = gdata.spreadsheet.text_db.DatabaseClient(username, password) + self.db = client.CreateDatabase('google_spreadsheets_db unit test 2') + + def tearDown(self): + time.sleep(10) + self.db.Delete() + + def testCreateGetAndDeleteTable(self): + table = self.db.CreateTable('test1', ['1','2','3']) + # Try to get the new table using the worksheet id. 
+ table_list = self.db.GetTables(worksheet_id=table.worksheet_id) + self.assert_(len(table_list) == 1) + self.assert_(table_list[0].entry.title.text, 'test1') + # Try to get the table using the name + table_list = self.db.GetTables(name='test1') + self.assert_(len(table_list) == 1) + self.assert_(table_list[0].entry.title.text, 'test1') + # Delete the table + table.Delete() + + +class TableTest(unittest.TestCase): + + def setUp(self): + client = gdata.spreadsheet.text_db.DatabaseClient(username, password) + self.db = client.CreateDatabase('google_spreadsheets_db unit test 3') + self.table = self.db.CreateTable('test1', ['a','b','c_d','a', 'd:e']) + + def tearDown(self): + time.sleep(10) + self.db.Delete() + + def testCreateGetAndDeleteRecord(self): + new_record = self.table.AddRecord({'a':'test1', 'b':'test2', 'cd':'test3', 'a_2':'test4', 'de':'test5'}) + # Test getting record by line number. + record = self.table.GetRecord(row_number=1) + self.assert_(record is not None) + self.assert_(record.content['a'] == 'test1') + self.assert_(record.content['b'] == 'test2') + self.assert_(record.content['cd'] == 'test3') + self.assert_(record.content['a_2'] == 'test4') + # Test getting record using the id. + record_list = self.table.GetRecord(row_id=new_record.row_id) + self.assert_(record is not None) + # Delete the record. + time.sleep(10) + new_record.Delete() + + def testPushPullSyncing(self): + # Get two copies of the same row. 
+ first_copy = self.table.AddRecord({'a':'1', 'b':'2', 'cd':'3', 'a_2':'4', 'de':'5'}) + second_copy = self.table.GetRecord(first_copy.row_id) + + # Make changes in the first copy + first_copy.content['a'] = '7' + first_copy.content['b'] = '9' + + # Try to get the changes before they've been committed + second_copy.Pull() + self.assert_(second_copy.content['a'] == '1') + self.assert_(second_copy.content['b'] == '2') + + # Commit the changes, the content should now be different + first_copy.Push() + second_copy.Pull() + self.assert_(second_copy.content['a'] == '7') + self.assert_(second_copy.content['b'] == '9') + + # Make changes to the second copy, push, then try to push changes from + # the first copy. + first_copy.content['a'] = '10' + second_copy.content['a'] = '15' + first_copy.Push() + try: + second_copy.Push() + # The second update should raise and exception due to a 409 conflict. + self.fail() + except gdata.spreadsheet.service.RequestError: + pass + except Exception, error: + #TODO: Why won't the except RequestError catch this? 
+ pass + + def testFindRecords(self): + # Add lots of test records: + self.table.AddRecord({'a':'1', 'b':'2', 'cd':'3', 'a_2':'4', 'de':'5'}) + self.table.AddRecord({'a':'hi', 'b':'2', 'cd':'20', 'a_2':'4', 'de':'5'}) + self.table.AddRecord({'a':'2', 'b':'2', 'cd':'3'}) + self.table.AddRecord({'a':'2', 'b':'2', 'cd':'15', 'de':'7'}) + self.table.AddRecord({'a':'hi hi hi', 'b':'2', 'cd':'15', 'de':'7'}) + self.table.AddRecord({'a':'"5"', 'b':'5', 'cd':'15', 'de':'7'}) + self.table.AddRecord({'a':'5', 'b':'5', 'cd':'15', 'de':'7'}) + + time.sleep(10) + matches = self.table.FindRecords('a == 1') + self.assert_(len(matches) == 1) + self.assert_(matches[0].content['a'] == '1') + self.assert_(matches[0].content['b'] == '2') + + matches = self.table.FindRecords('a > 1 && cd < 20') + self.assert_(len(matches) == 4) + matches = self.table.FindRecords('cd < de') + self.assert_(len(matches) == 7) + matches = self.table.FindRecords('a == b') + self.assert_(len(matches) == 0) + matches = self.table.FindRecords('a == 5') + self.assert_(len(matches) == 1) + + def testIterateResultSet(self): + # Populate the table with test data. + self.table.AddRecord({'a':'1', 'b':'2', 'cd':'3', 'a_2':'4', 'de':'5'}) + self.table.AddRecord({'a':'hi', 'b':'2', 'cd':'20', 'a_2':'4', 'de':'5'}) + self.table.AddRecord({'a':'2', 'b':'2', 'cd':'3'}) + self.table.AddRecord({'a':'2', 'b':'2', 'cd':'15', 'de':'7'}) + self.table.AddRecord({'a':'hi hi hi', 'b':'2', 'cd':'15', 'de':'7'}) + self.table.AddRecord({'a':'"5"', 'b':'5', 'cd':'15', 'de':'7'}) + self.table.AddRecord({'a':'5', 'b':'5', 'cd':'15', 'de':'7'}) + + # Get the first two rows. + records = self.table.GetRecords(1, 2) + self.assert_(len(records) == 2) + self.assert_(records[0].content['a'] == '1') + self.assert_(records[1].content['a'] == 'hi') + + # Then get the next two rows. 
+ next_records = records.GetNext() + self.assert_(len(next_records) == 2) + self.assert_(next_records[0].content['a'] == '2') + self.assert_(next_records[0].content['cd'] == '3') + self.assert_(next_records[1].content['cd'] == '15') + self.assert_(next_records[1].content['de'] == '7') + + def testLookupFieldsOnPreexistingTable(self): + existing_table = self.db.GetTables(name='test1')[0] + existing_table.LookupFields() + self.assertEquals(existing_table.fields, ['a', 'b', 'cd', 'a_2', 'de']) + + +if __name__ == '__main__': + if not username: + username = raw_input('Spreadsheets API | Text DB Tests\n' + 'Please enter your username: ') + if not password: + password = getpass.getpass() + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/spreadsheet_test.py b/gdata.py-1.2.3/tests/gdata_tests/spreadsheet_test.py new file mode 100755 index 0000000..1cea48b --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/spreadsheet_test.py @@ -0,0 +1,406 @@ +#!/usr/bin/python +# +# Copyright (C) 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +__author__ = 'api.laurabeth@gmail.com (Laura Beth Lincoln)' + + +import unittest +try: + from xml.etree import ElementTree +except ImportError: + from elementtree import ElementTree +import gdata +import gdata.spreadsheet + +SPREADSHEETS_FEED = """ + http://spreadsheets.google.com/feeds/spreadsheets/private/full + 2006-11-17T18:23:45.173Z + Available Spreadsheets + + + + + Fitzwilliam Darcy + fitz@gmail.com + + 1 + 1 + 1 + + http://spreadsheets.google.com/feeds/spreadsheets/private/full/key + 2006-11-17T18:24:18.231Z + Groceries R Us + Groceries R Us + + + + + Fitzwilliam Darcy + fitz@gmail.com + + + +""" + +WORKSHEETS_FEED = """ + http://spreadsheets.google.com/feeds/worksheets/key/private/full + 2006-11-17T18:23:45.173Z + Groceries R Us + + + + + Fitzwilliam Darcy + fitz@gmail.com + + 1 + 1 + 1 + + http://spreadsheets.google.com/feeds/worksheets/key/private/full/od6 + 2006-11-17T18:23:45.173Z + Sheet1 + Sheet1 + + + + 100 + 20 + + +""" + +CELLS_FEED = """ + http://spreadsheets.google.com/feeds/cells/key/od6/private/full + 2006-11-17T18:27:32.543Z + Sheet1 + + + + + + Fitzwilliam Darcy + fitz@gmail.com + + 1 + 1 + 100 + 20 + + http://spreadsheets.google.com/feeds/cells/key/od6/private/full/R1C1 + 2006-11-17T18:27:32.543Z + + A1 + Name + + + Name + + + http://spreadsheets.google.com/feeds/cells/key/od6/private/full/R1C2 + 2006-11-17T18:27:32.543Z + + B1 + Hours + + + Hours + + +""" + +LIST_FEED = """ + http://spreadsheets.google.com/feeds/list/key/od6/private/full + 2006-11-17T18:23:45.173Z + Sheet1 + + + + + + Fitzwilliam Darcy + fitz@gmail.com + + 2 + 1 + 2 + + http://spreadsheets.google.com/feeds/list/key/od6/private/full/cokwr + 2006-11-17T18:23:45.173Z + + Bingley + Hours: 10, Items: 2, IPM: 0.0033 + + + Bingley + 10 + 2 + 0.0033 + + + http://spreadsheets.google.com/feeds/list/key/od6/private/full/cyevm + 2006-11-17T18:23:45.173Z + + Charlotte + Hours: 60, Items: 18000, IPM: 5 + + + Charlotte + 60 + 18000 + 5 + + + +""" +class 
ColCountTest(unittest.TestCase): + + def setUp(self): + self.col_count = gdata.spreadsheet.ColCount() + + def testToAndFromString(self): + self.col_count.text = '20' + self.assert_(self.col_count.text == '20') + new_col_count = gdata.spreadsheet.ColCountFromString(self.col_count.ToString()) + self.assert_(self.col_count.text == new_col_count.text) + + +class RowCountTest(unittest.TestCase): + + def setUp(self): + self.row_count = gdata.spreadsheet.RowCount() + + def testToAndFromString(self): + self.row_count.text = '100' + self.assert_(self.row_count.text == '100') + new_row_count = gdata.spreadsheet.RowCountFromString(self.row_count.ToString()) + self.assert_(self.row_count.text == new_row_count.text) + + +class CellTest(unittest.TestCase): + + def setUp(self): + self.cell = gdata.spreadsheet.Cell() + + def testToAndFromString(self): + self.cell.text = 'test cell' + self.assert_(self.cell.text == 'test cell') + self.cell.row = '1' + self.assert_(self.cell.row == '1') + self.cell.col = '2' + self.assert_(self.cell.col == '2') + self.cell.inputValue = 'test input value' + self.assert_(self.cell.inputValue == 'test input value') + self.cell.numericValue = 'test numeric value' + self.assert_(self.cell.numericValue == 'test numeric value') + new_cell = gdata.spreadsheet.CellFromString(self.cell.ToString()) + self.assert_(self.cell.text == new_cell.text) + self.assert_(self.cell.row == new_cell.row) + self.assert_(self.cell.col == new_cell.col) + self.assert_(self.cell.inputValue == new_cell.inputValue) + self.assert_(self.cell.numericValue == new_cell.numericValue) + + +class CustomTest(unittest.TestCase): + + def setUp(self): + self.custom = gdata.spreadsheet.Custom() + + def testToAndFromString(self): + self.custom.text = 'value' + self.custom.column = 'column_name' + self.assert_(self.custom.text == 'value') + self.assert_(self.custom.column == 'column_name') + new_custom = gdata.spreadsheet.CustomFromString(self.custom.ToString()) + self.assert_(self.custom.text 
== new_custom.text) + self.assert_(self.custom.column == new_custom.column) + + +class SpreadsheetsWorksheetTest(unittest.TestCase): + + def setUp(self): + self.worksheet = gdata.spreadsheet.SpreadsheetsWorksheet() + + def testToAndFromString(self): + self.worksheet.row_count = gdata.spreadsheet.RowCount(text='100') + self.assert_(self.worksheet.row_count.text == '100') + self.worksheet.col_count = gdata.spreadsheet.ColCount(text='20') + self.assert_(self.worksheet.col_count.text == '20') + new_worksheet = gdata.spreadsheet.SpreadsheetsWorksheetFromString( + self.worksheet.ToString()) + self.assert_(self.worksheet.row_count.text == new_worksheet.row_count.text) + self.assert_(self.worksheet.col_count.text == new_worksheet.col_count.text) + + +class SpreadsheetsCellTest(unittest.TestCase): + + def setUp(self): + self.entry = gdata.spreadsheet.SpreadsheetsCell() + + def testToAndFromString(self): + self.entry.cell = gdata.spreadsheet.Cell(text='my cell', row='1', col='2', + inputValue='my input value', numericValue='my numeric value') + self.assert_(self.entry.cell.text == 'my cell') + self.assert_(self.entry.cell.row == '1') + self.assert_(self.entry.cell.col == '2') + self.assert_(self.entry.cell.inputValue == 'my input value') + self.assert_(self.entry.cell.numericValue == 'my numeric value') + new_cell = gdata.spreadsheet.SpreadsheetsCellFromString(self.entry.ToString()) + self.assert_(self.entry.cell.text == new_cell.cell.text) + self.assert_(self.entry.cell.row == new_cell.cell.row) + self.assert_(self.entry.cell.col == new_cell.cell.col) + self.assert_(self.entry.cell.inputValue == new_cell.cell.inputValue) + self.assert_(self.entry.cell.numericValue == new_cell.cell.numericValue) + + +class SpreadsheetsListTest(unittest.TestCase): + + def setUp(self): + self.row = gdata.spreadsheet.SpreadsheetsList() + + def testToAndFromString(self): + self.row.custom['column_1'] = gdata.spreadsheet.Custom(column='column_1', + text='my first column') + 
self.row.custom['column_2'] = gdata.spreadsheet.Custom(column='column_2', + text='my second column') + self.assert_(self.row.custom['column_1'].column == 'column_1') + self.assert_(self.row.custom['column_1'].text == 'my first column') + self.assert_(self.row.custom['column_2'].column == 'column_2') + self.assert_(self.row.custom['column_2'].text == 'my second column') + new_row = gdata.spreadsheet.SpreadsheetsListFromString(self.row.ToString()) + self.assert_(self.row.custom['column_1'].column == new_row.custom['column_1'].column) + self.assert_(self.row.custom['column_1'].text == new_row.custom['column_1'].text) + self.assert_(self.row.custom['column_2'].column == new_row.custom['column_2'].column) + self.assert_(self.row.custom['column_2'].text == new_row.custom['column_2'].text) + +class SpreadsheetsSpreadsheetsFeedTest(unittest.TestCase): + + def setUp(self): + #self.item_feed = gdata.spreadsheet.SpreadsheetSpreadsheetsFeed() + self.feed = gdata.spreadsheet.SpreadsheetsSpreadsheetsFeedFromString( + SPREADSHEETS_FEED) + + def testToAndFromString(self): + self.assert_(len(self.feed.entry) == 1) + for an_entry in self.feed.entry: + self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsSpreadsheet)) + new_feed = gdata.spreadsheet.SpreadsheetsSpreadsheetsFeedFromString( + str(self.feed)) + for an_entry in new_feed.entry: + self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsSpreadsheet)) + +class SpreadsheetsWorksheetsFeedTest(unittest.TestCase): + + def setUp(self): + #self.item_feed = gdata.spreadsheet.SpreadsheetWorksheetsFeed() + self.feed = gdata.spreadsheet.SpreadsheetsWorksheetsFeedFromString( + WORKSHEETS_FEED) + + def testToAndFromString(self): + self.assert_(len(self.feed.entry) == 1) + for an_entry in self.feed.entry: + self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsWorksheet)) + new_feed = gdata.spreadsheet.SpreadsheetsWorksheetsFeedFromString( + str(self.feed)) + for an_entry in new_feed.entry: + 
self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsWorksheet)) + +class SpreadsheetsCellsFeedTest(unittest.TestCase): + + def setUp(self): + #self.item_feed = gdata.spreadsheet.SpreadsheetCellsFeed() + self.feed = gdata.spreadsheet.SpreadsheetsCellsFeedFromString( + CELLS_FEED) + + def testToAndFromString(self): + self.assert_(len(self.feed.entry) == 2) + for an_entry in self.feed.entry: + self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsCell)) + new_feed = gdata.spreadsheet.SpreadsheetsCellsFeedFromString(str(self.feed)) + self.assert_(isinstance(new_feed.row_count, + gdata.spreadsheet.RowCount)) + self.assert_(new_feed.row_count.text == '100') + self.assert_(isinstance(new_feed.col_count, + gdata.spreadsheet.ColCount)) + self.assert_(new_feed.col_count.text == '20') + for an_entry in new_feed.entry: + self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsCell)) + +class SpreadsheetsListFeedTest(unittest.TestCase): + + def setUp(self): + #self.item_feed = gdata.spreadsheet.SpreadsheetListFeed() + self.feed = gdata.spreadsheet.SpreadsheetsListFeedFromString( + LIST_FEED) + + def testToAndFromString(self): + self.assert_(len(self.feed.entry) == 2) + for an_entry in self.feed.entry: + self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsList)) + new_feed = gdata.spreadsheet.SpreadsheetsListFeedFromString(str(self.feed)) + for an_entry in new_feed.entry: + self.assert_(isinstance(an_entry, gdata.spreadsheet.SpreadsheetsList)) + +if __name__ == '__main__': + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/webmastertools_test.py b/gdata.py-1.2.3/tests/gdata_tests/webmastertools_test.py new file mode 100644 index 0000000..a691872 --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/webmastertools_test.py @@ -0,0 +1,505 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Yu-Jie Lin +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'livibetter (Yu-Jie Lin)' + + +import unittest +try: + from xml.etree import ElementTree +except ImportError: + from elementtree import ElementTree +import gdata +from gdata import test_data +import gdata.webmastertools as webmastertools + + +class IndexedTest(unittest.TestCase): + + def setUp(self): + self.indexed = webmastertools.Indexed() + + def testToAndFromString(self): + self.indexed.text = 'true' + self.assert_(self.indexed.text == 'true') + new_indexed = webmastertools.IndexedFromString(self.indexed.ToString()) + self.assert_(self.indexed.text == new_indexed.text) + + +class CrawledTest(unittest.TestCase): + + def setUp(self): + self.crawled = webmastertools.Crawled() + + def testToAndFromString(self): + self.crawled.text = 'true' + self.assert_(self.crawled.text == 'true') + new_crawled = webmastertools.CrawledFromString(self.crawled.ToString()) + self.assert_(self.crawled.text == new_crawled.text) + + +class GeoLocationTest(unittest.TestCase): + + def setUp(self): + self.geolocation = webmastertools.GeoLocation() + + def testToAndFromString(self): + self.geolocation.text = 'US' + self.assert_(self.geolocation.text == 'US') + new_geolocation = webmastertools.GeoLocationFromString( + self.geolocation.ToString()) + self.assert_(self.geolocation.text == new_geolocation.text) + + +class PreferredDomainTest(unittest.TestCase): + + def setUp(self): + self.preferred_domain = webmastertools.PreferredDomain() + + def testToAndFromString(self): + self.preferred_domain.text = 'none' + self.assert_(self.preferred_domain.text == 
'none') + new_preferred_domain = webmastertools.PreferredDomainFromString( + self.preferred_domain.ToString()) + self.assert_(self.preferred_domain.text == new_preferred_domain.text) + + +class CrawlRateTest(unittest.TestCase): + + def setUp(self): + self.crawl_rate = webmastertools.CrawlRate() + + def testToAndFromString(self): + self.crawl_rate.text = 'normal' + self.assert_(self.crawl_rate.text == 'normal') + new_crawl_rate = webmastertools.CrawlRateFromString( + self.crawl_rate.ToString()) + self.assert_(self.crawl_rate.text == new_crawl_rate.text) + + +class EnhancedImageSearchTest(unittest.TestCase): + + def setUp(self): + self.enhanced_image_search = webmastertools.EnhancedImageSearch() + + def testToAndFromString(self): + self.enhanced_image_search.text = 'true' + self.assert_(self.enhanced_image_search.text == 'true') + new_enhanced_image_search = webmastertools.EnhancedImageSearchFromString( + self.enhanced_image_search.ToString()) + self.assert_(self.enhanced_image_search.text == + new_enhanced_image_search.text) + + +class VerifiedTest(unittest.TestCase): + + def setUp(self): + self.verified = webmastertools.Verified() + + def testToAndFromString(self): + self.verified.text = 'true' + self.assert_(self.verified.text == 'true') + new_verified = webmastertools.VerifiedFromString(self.verified.ToString()) + self.assert_(self.verified.text == new_verified.text) + + +class VerificationMethodMetaTest(unittest.TestCase): + + def setUp(self): + self.meta = webmastertools.VerificationMethodMeta() + + def testToAndFromString(self): + self.meta.name = 'verify-vf1' + self.meta.content = 'a2Ai' + self.assert_(self.meta.name == 'verify-vf1') + self.assert_(self.meta.content == 'a2Ai') + new_meta = webmastertools.VerificationMethodMetaFromString( + self.meta.ToString()) + self.assert_(self.meta.name == new_meta.name) + self.assert_(self.meta.content == new_meta.content) + + +class VerificationMethodTest(unittest.TestCase): + + def setUp(self): + pass + + def 
testMetaTagToAndFromString(self): + self.method = webmastertools.VerificationMethod() + self.method.type = 'metatag' + self.method.in_use = 'false' + self.assert_(self.method.type == 'metatag') + self.assert_(self.method.in_use == 'false') + self.method.meta = webmastertools.VerificationMethodMeta(name='verify-vf1', + content='a2Ai') + self.assert_(self.method.meta.name == 'verify-vf1') + self.assert_(self.method.meta.content == 'a2Ai') + new_method = webmastertools.VerificationMethodFromString( + self.method.ToString()) + self.assert_(self.method.type == new_method.type) + self.assert_(self.method.in_use == new_method.in_use) + self.assert_(self.method.meta.name == new_method.meta.name) + self.assert_(self.method.meta.content == new_method.meta.content) + + def testHtmlPageToAndFromString(self): + self.method = webmastertools.VerificationMethod() + self.method.type = 'htmlpage' + self.method.in_use = 'false' + self.method.text = '456456-google.html' + self.assert_(self.method.type == 'htmlpage') + self.assert_(self.method.in_use == 'false') + self.assert_(self.method.text == '456456-google.html') + self.assert_(self.method.meta is None) + new_method = webmastertools.VerificationMethodFromString( + self.method.ToString()) + self.assert_(self.method.type == new_method.type) + self.assert_(self.method.in_use == new_method.in_use) + self.assert_(self.method.text == new_method.text) + self.assert_(self.method.meta is None) + + def testConvertActualData(self): + feed = webmastertools.SitesFeedFromString(test_data.SITES_FEED) + self.assert_(len(feed.entry[0].verification_method) == 2) + check = 0 + for method in feed.entry[0].verification_method: + self.assert_(isinstance(method, webmastertools.VerificationMethod)) + if method.type == 'metatag': + self.assert_(method.in_use == 'false') + self.assert_(method.text is None) + self.assert_(method.meta.name == 'verify-v1') + self.assert_(method.meta.content == 'a2Ai') + check = check | 1 + elif method.type == 'htmlpage': + 
self.assert_(method.in_use == 'false') + self.assert_(method.text == '456456-google.html') + check = check | 2 + else: + self.fail('Wrong Verification Method: %s' % method.type) + self.assert_(check == 2 ** 2 - 1, + 'Should only have two Verification Methods, metatag and htmlpage') + + +class MarkupLanguageTest(unittest.TestCase): + + def setUp(self): + self.markup_language = webmastertools.MarkupLanguage() + + def testToAndFromString(self): + self.markup_language.text = 'HTML' + self.assert_(self.markup_language.text == 'HTML') + new_markup_language = webmastertools.MarkupLanguageFromString( + self.markup_language.ToString()) + self.assert_(self.markup_language.text == new_markup_language.text) + + +class SitemapMobileTest(unittest.TestCase): + + def setUp(self): + self.sitemap_mobile = webmastertools.SitemapMobile() + + def testToAndFromString(self): + self.sitemap_mobile.markup_language.append(webmastertools.MarkupLanguage( + text = 'HTML')) + self.assert_(self.sitemap_mobile.text is None) + self.assert_(self.sitemap_mobile.markup_language[0].text == 'HTML') + new_sitemap_mobile = webmastertools.SitemapMobileFromString( + self.sitemap_mobile.ToString()) + self.assert_(new_sitemap_mobile.text is None) + self.assert_(self.sitemap_mobile.markup_language[0].text == + new_sitemap_mobile.markup_language[0].text) + + def testConvertActualData(self): + feed = webmastertools.SitemapsFeedFromString(test_data.SITEMAPS_FEED) + self.assert_(feed.sitemap_mobile.text.strip() == '') + self.assert_(len(feed.sitemap_mobile.markup_language) == 2) + check = 0 + for markup_language in feed.sitemap_mobile.markup_language: + self.assert_(isinstance(markup_language, webmastertools.MarkupLanguage)) + if markup_language.text == "HTML": + check = check | 1 + elif markup_language.text == "WAP": + check = check | 2 + else: + self.fail('Unexpected markup language: %s' % markup_language.text) + self.assert_(check == 2 ** 2 - 1, "Something is wrong with markup language") + + +class 
SitemapMobileMarkupLanguageTest(unittest.TestCase): + + def setUp(self): + self.sitemap_mobile_markup_language =\ + webmastertools.SitemapMobileMarkupLanguage() + + def testToAndFromString(self): + self.sitemap_mobile_markup_language.text = 'HTML' + self.assert_(self.sitemap_mobile_markup_language.text == 'HTML') + new_sitemap_mobile_markup_language =\ + webmastertools.SitemapMobileMarkupLanguageFromString( + self.sitemap_mobile_markup_language.ToString()) + self.assert_(self.sitemap_mobile_markup_language.text ==\ + new_sitemap_mobile_markup_language.text) + + +class PublicationLabelTest(unittest.TestCase): + + def setUp(self): + self.publication_label = webmastertools.PublicationLabel() + + def testToAndFromString(self): + self.publication_label.text = 'Value1' + self.assert_(self.publication_label.text == 'Value1') + new_publication_label = webmastertools.PublicationLabelFromString( + self.publication_label.ToString()) + self.assert_(self.publication_label.text == new_publication_label.text) + + +class SitemapNewsTest(unittest.TestCase): + + def setUp(self): + self.sitemap_news = webmastertools.SitemapNews() + + def testToAndFromString(self): + self.sitemap_news.publication_label.append(webmastertools.PublicationLabel( + text = 'Value1')) + self.assert_(self.sitemap_news.text is None) + self.assert_(self.sitemap_news.publication_label[0].text == 'Value1') + new_sitemap_news = webmastertools.SitemapNewsFromString( + self.sitemap_news.ToString()) + self.assert_(new_sitemap_news.text is None) + self.assert_(self.sitemap_news.publication_label[0].text == + new_sitemap_news.publication_label[0].text) + + def testConvertActualData(self): + feed = webmastertools.SitemapsFeedFromString(test_data.SITEMAPS_FEED) + self.assert_(len(feed.sitemap_news.publication_label) == 3) + check = 0 + for publication_label in feed.sitemap_news.publication_label: + if publication_label.text == "Value1": + check = check | 1 + elif publication_label.text == "Value2": + check = check | 2 + 
elif publication_label.text == "Value3": + check = check | 4 + else: + self.fail('Unexpected publication label: %s' % markup_language.text) + self.assert_(check == 2 ** 3 - 1, + 'Something is wrong with publication label') + + +class SitemapNewsPublicationLabelTest(unittest.TestCase): + + def setUp(self): + self.sitemap_news_publication_label =\ + webmastertools.SitemapNewsPublicationLabel() + + def testToAndFromString(self): + self.sitemap_news_publication_label.text = 'LabelValue' + self.assert_(self.sitemap_news_publication_label.text == 'LabelValue') + new_sitemap_news_publication_label =\ + webmastertools.SitemapNewsPublicationLabelFromString( + self.sitemap_news_publication_label.ToString()) + self.assert_(self.sitemap_news_publication_label.text ==\ + new_sitemap_news_publication_label.text) + + +class SitemapLastDownloadedTest(unittest.TestCase): + + def setUp(self): + self.sitemap_last_downloaded = webmastertools.SitemapLastDownloaded() + + def testToAndFromString(self): + self.sitemap_last_downloaded.text = '2006-11-18T19:27:32.543Z' + self.assert_(self.sitemap_last_downloaded.text ==\ + '2006-11-18T19:27:32.543Z') + new_sitemap_last_downloaded =\ + webmastertools.SitemapLastDownloadedFromString( + self.sitemap_last_downloaded.ToString()) + self.assert_(self.sitemap_last_downloaded.text ==\ + new_sitemap_last_downloaded.text) + + +class SitemapTypeTest(unittest.TestCase): + + def setUp(self): + self.sitemap_type = webmastertools.SitemapType() + + def testToAndFromString(self): + self.sitemap_type.text = 'WEB' + self.assert_(self.sitemap_type.text == 'WEB') + new_sitemap_type = webmastertools.SitemapTypeFromString( + self.sitemap_type.ToString()) + self.assert_(self.sitemap_type.text == new_sitemap_type.text) + + +class SitemapStatusTest(unittest.TestCase): + + def setUp(self): + self.sitemap_status = webmastertools.SitemapStatus() + + def testToAndFromString(self): + self.sitemap_status.text = 'Pending' + self.assert_(self.sitemap_status.text == 
'Pending') + new_sitemap_status = webmastertools.SitemapStatusFromString( + self.sitemap_status.ToString()) + self.assert_(self.sitemap_status.text == new_sitemap_status.text) + + +class SitemapUrlCountTest(unittest.TestCase): + + def setUp(self): + self.sitemap_url_count = webmastertools.SitemapUrlCount() + + def testToAndFromString(self): + self.sitemap_url_count.text = '0' + self.assert_(self.sitemap_url_count.text == '0') + new_sitemap_url_count = webmastertools.SitemapUrlCountFromString( + self.sitemap_url_count.ToString()) + self.assert_(self.sitemap_url_count.text == new_sitemap_url_count.text) + + +class SitesEntryTest(unittest.TestCase): + + def setUp(self): + pass + + def testToAndFromString(self): + entry = webmastertools.SitesEntry( + indexed=webmastertools.Indexed(text='true'), + crawled=webmastertools.Crawled(text='2008-09-14T08:59:28.000'), + geolocation=webmastertools.GeoLocation(text='US'), + preferred_domain=webmastertools.PreferredDomain(text='none'), + crawl_rate=webmastertools.CrawlRate(text='normal'), + enhanced_image_search=webmastertools.EnhancedImageSearch(text='true'), + verified=webmastertools.Verified(text='false'), + ) + self.assert_(entry.indexed.text == 'true') + self.assert_(entry.crawled.text == '2008-09-14T08:59:28.000') + self.assert_(entry.geolocation.text == 'US') + self.assert_(entry.preferred_domain.text == 'none') + self.assert_(entry.crawl_rate.text == 'normal') + self.assert_(entry.enhanced_image_search.text == 'true') + self.assert_(entry.verified.text == 'false') + new_entry = webmastertools.SitesEntryFromString(entry.ToString()) + self.assert_(new_entry.indexed.text == 'true') + self.assert_(new_entry.crawled.text == '2008-09-14T08:59:28.000') + self.assert_(new_entry.geolocation.text == 'US') + self.assert_(new_entry.preferred_domain.text == 'none') + self.assert_(new_entry.crawl_rate.text == 'normal') + self.assert_(new_entry.enhanced_image_search.text == 'true') + self.assert_(new_entry.verified.text == 'false') + + 
def testConvertActualData(self): + feed = webmastertools.SitesFeedFromString(test_data.SITES_FEED) + self.assert_(len(feed.entry) == 1) + entry = feed.entry[0] + self.assert_(isinstance(entry, webmastertools.SitesEntry)) + self.assert_(entry.indexed.text == 'true') + self.assert_(entry.crawled.text == '2008-09-14T08:59:28.000') + self.assert_(entry.geolocation.text == 'US') + self.assert_(entry.preferred_domain.text == 'none') + self.assert_(entry.crawl_rate.text == 'normal') + self.assert_(entry.enhanced_image_search.text == 'true') + self.assert_(entry.verified.text == 'false') + + +class SitesFeedTest(unittest.TestCase): + + def setUp(self): + self.feed = gdata.webmastertools.SitesFeedFromString(test_data.SITES_FEED) + + def testToAndFromString(self): + self.assert_(len(self.feed.entry) == 1) + for entry in self.feed.entry: + self.assert_(isinstance(entry, webmastertools.SitesEntry)) + new_feed = webmastertools.SitesFeedFromString(self.feed.ToString()) + self.assert_(len(new_feed.entry) == 1) + for entry in new_feed.entry: + self.assert_(isinstance(entry, webmastertools.SitesEntry)) + + +class SitemapsEntryTest(unittest.TestCase): + + def testRegularToAndFromString(self): + entry = webmastertools.SitemapsEntry( + sitemap_type=webmastertools.SitemapType(text='WEB'), + sitemap_status=webmastertools.SitemapStatus(text='Pending'), + sitemap_last_downloaded=webmastertools.SitemapLastDownloaded( + text='2006-11-18T19:27:32.543Z'), + sitemap_url_count=webmastertools.SitemapUrlCount(text='102'), + ) + self.assert_(entry.sitemap_type.text == 'WEB') + self.assert_(entry.sitemap_status.text == 'Pending') + self.assert_(entry.sitemap_last_downloaded.text ==\ + '2006-11-18T19:27:32.543Z') + self.assert_(entry.sitemap_url_count.text == '102') + new_entry = webmastertools.SitemapsEntryFromString(entry.ToString()) + self.assert_(new_entry.sitemap_type.text == 'WEB') + self.assert_(new_entry.sitemap_status.text == 'Pending') + self.assert_(new_entry.sitemap_last_downloaded.text 
==\ + '2006-11-18T19:27:32.543Z') + self.assert_(new_entry.sitemap_url_count.text == '102') + + def testConvertActualData(self): + feed = gdata.webmastertools.SitemapsFeedFromString(test_data.SITEMAPS_FEED) + self.assert_(len(feed.entry) == 3) + for entry in feed.entry: + self.assert_(entry, webmastertools.SitemapsEntry) + self.assert_(entry.sitemap_status, webmastertools.SitemapStatus) + self.assert_(entry.sitemap_last_downloaded, + webmastertools.SitemapLastDownloaded) + self.assert_(entry.sitemap_url_count, webmastertools.SitemapUrlCount) + self.assert_(entry.sitemap_status.text == 'StatusValue') + self.assert_(entry.sitemap_last_downloaded.text ==\ + '2006-11-18T19:27:32.543Z') + self.assert_(entry.sitemap_url_count.text == '102') + if entry.id.text == 'http://www.example.com/sitemap-index.xml': + self.assert_(entry.sitemap_type, webmastertools.SitemapType) + self.assert_(entry.sitemap_type.text == 'WEB') + self.assert_(entry.sitemap_mobile_markup_language is None) + self.assert_(entry.sitemap_news_publication_label is None) + elif entry.id.text == 'http://www.example.com/mobile/sitemap-index.xml': + self.assert_(entry.sitemap_mobile_markup_language, + webmastertools.SitemapMobileMarkupLanguage) + self.assert_(entry.sitemap_mobile_markup_language.text == 'HTML') + self.assert_(entry.sitemap_type is None) + self.assert_(entry.sitemap_news_publication_label is None) + elif entry.id.text == 'http://www.example.com/news/sitemap-index.xml': + self.assert_(entry.sitemap_news_publication_label, + webmastertools.SitemapNewsPublicationLabel) + self.assert_(entry.sitemap_news_publication_label.text == 'LabelValue') + self.assert_(entry.sitemap_type is None) + self.assert_(entry.sitemap_mobile_markup_language is None) + + +class SitemapsFeedTest(unittest.TestCase): + + def setUp(self): + self.feed = gdata.webmastertools.SitemapsFeedFromString( + test_data.SITEMAPS_FEED) + + def testToAndFromString(self): + self.assert_(len(self.feed.entry) == 3) + for entry in 
self.feed.entry: + self.assert_(isinstance(entry, webmastertools.SitemapsEntry)) + new_feed = webmastertools.SitemapsFeedFromString(self.feed.ToString()) + self.assert_(len(new_feed.entry) == 3) + for entry in new_feed.entry: + self.assert_(isinstance(entry, webmastertools.SitemapsEntry)) + + +if __name__ == '__main__': + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/youtube/__init__.py b/gdata.py-1.2.3/tests/gdata_tests/youtube/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gdata.py-1.2.3/tests/gdata_tests/youtube/service_test.py b/gdata.py-1.2.3/tests/gdata_tests/youtube/service_test.py new file mode 100644 index 0000000..a56b113 --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/youtube/service_test.py @@ -0,0 +1,576 @@ +#!/usr/bin/python +# +# Copyright (C) 2008 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +__author__ = 'api.jhartmann@gmail.com (Jochen Hartmann)' + +import getpass +import time +import StringIO +import random +import unittest +import atom +import gdata.youtube +import gdata.youtube.service + +YOUTUBE_TEST_CLIENT_ID = 'ytapi-pythonclientlibrary_servicetest' + + +class YouTubeServiceTest(unittest.TestCase): + + def setUp(self): + self.client = gdata.youtube.service.YouTubeService() + self.client.email = username + self.client.password = password + self.client.source = YOUTUBE_TEST_CLIENT_ID + self.client.developer_key = developer_key + self.client.client_id = YOUTUBE_TEST_CLIENT_ID + self.client.ProgrammaticLogin() + + def testRetrieveVideoFeed(self): + feed = self.client.GetYouTubeVideoFeed( + 'http://gdata.youtube.com/feeds/api/standardfeeds/recently_featured'); + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed)) + self.assert_(len(feed.entry) > 0) + for entry in feed.entry: + self.assert_(entry.title.text != '') + + def testRetrieveTopRatedVideoFeed(self): + feed = self.client.GetTopRatedVideoFeed() + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed)) + self.assert_(len(feed.entry) > 10) + + def testRetrieveMostViewedVideoFeed(self): + feed = self.client.GetMostViewedVideoFeed() + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed)) + self.assert_(len(feed.entry) > 10) + + def testRetrieveRecentlyFeaturedVideoFeed(self): + feed = self.client.GetRecentlyFeaturedVideoFeed() + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed)) + self.assert_(len(feed.entry) > 10) + + def testRetrieveWatchOnMobileVideoFeed(self): + feed = self.client.GetWatchOnMobileVideoFeed() + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed)) + self.assert_(len(feed.entry) > 10) + + def testRetrieveTopFavoritesVideoFeed(self): + feed = self.client.GetTopFavoritesVideoFeed() + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed)) + self.assert_(len(feed.entry) > 10) + + def testRetrieveMostRecentVideoFeed(self): + 
feed = self.client.GetMostRecentVideoFeed() + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed)) + self.assert_(len(feed.entry) > 10) + + def testRetrieveMostDiscussedVideoFeed(self): + feed = self.client.GetMostDiscussedVideoFeed() + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed)) + self.assert_(len(feed.entry) > 10) + + def testRetrieveMostLinkedVideoFeed(self): + feed = self.client.GetMostLinkedVideoFeed() + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed)) + self.assert_(len(feed.entry) > 10) + + def testRetrieveMostRespondedVideoFeed(self): + feed = self.client.GetMostRespondedVideoFeed() + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed)) + self.assert_(len(feed.entry) > 10) + + def testRetrieveVideoEntryByUri(self): + entry = self.client.GetYouTubeVideoEntry( + 'http://gdata.youtube.com/feeds/videos/Ncakifd_16k') + self.assert_(isinstance(entry, gdata.youtube.YouTubeVideoEntry)) + self.assert_(entry.title.text != '') + + def testRetrieveVideoEntryByVideoId(self): + entry = self.client.GetYouTubeVideoEntry(video_id='Ncakifd_16k') + self.assert_(isinstance(entry, gdata.youtube.YouTubeVideoEntry)) + self.assert_(entry.title.text != '') + + def testRetrieveUserVideosbyUri(self): + feed = self.client.GetYouTubeUserFeed( + 'http://gdata.youtube.com/feeds/users/gdpython/uploads') + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed)) + self.assert_(len(feed.entry) > 0) + + def testRetrieveUserVideosbyUsername(self): + feed = self.client.GetYouTubeUserFeed(username='gdpython') + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed)) + self.assert_(len(feed.entry) > 0) + + def testSearchWithVideoQuery(self): + query = gdata.youtube.service.YouTubeVideoQuery() + query.vq = 'google' + query.max_results = 8 + feed = self.client.YouTubeQuery(query) + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed)) + self.assertEquals(len(feed.entry), 8) + + def 
testDirectVideoUploadStatusUpdateAndDeletion(self): + self.assertEquals(self.client.developer_key, developer_key) + self.assertEquals(self.client.client_id, YOUTUBE_TEST_CLIENT_ID) + self.assertEquals(self.client.additional_headers['X-GData-Key'], + 'key=' + developer_key) + self.assertEquals(self.client.additional_headers['X-Gdata-Client'], + YOUTUBE_TEST_CLIENT_ID) + + test_video_title = 'my cool video ' + str(random.randint(1000,5000)) + test_video_description = 'description ' + str(random.randint(1000,5000)) + + my_media_group = gdata.media.Group( + title = gdata.media.Title(text=test_video_title), + description = gdata.media.Description(description_type='plain', + text=test_video_description), + keywords = gdata.media.Keywords(text='video, foo'), + category = gdata.media.Category( + text='Autos', + scheme='http://gdata.youtube.com/schemas/2007/categories.cat', + label='Autos'), + player=None + ) + self.assert_(isinstance(my_media_group, gdata.media.Group)) + + # Set Geo location to 37,-122 lat, long + where = gdata.geo.Where() + where.set_location((37.0,-122.0)) + + video_entry = gdata.youtube.YouTubeVideoEntry(media=my_media_group, + geo=where) + + self.assert_(isinstance(video_entry, gdata.youtube.YouTubeVideoEntry)) + + new_entry = self.client.InsertVideoEntry(video_entry, video_file_location) + self.assert_(isinstance(new_entry, gdata.youtube.YouTubeVideoEntry)) + self.assertEquals(new_entry.title.text, test_video_title) + self.assertEquals(new_entry.media.description.text, test_video_description) + self.assert_(new_entry.id.text) + + # check upload status also + upload_status = self.client.CheckUploadStatus(new_entry) + self.assert_(upload_status[0] != '') + + # test updating entry meta-data + new_video_description = 'description ' + str(random.randint(1000,5000)) + new_entry.media.description.text = new_video_description + + updated_entry = self.client.UpdateVideoEntry(new_entry) + + self.assert_(isinstance(updated_entry, 
gdata.youtube.YouTubeVideoEntry)) + self.assertEquals(updated_entry.media.description.text, + new_video_description) + + # sleep for 10 seconds + time.sleep(10) + + # test to delete the entry + value = self.client.DeleteVideoEntry(updated_entry) + + if not value: + # sleep more and try again + time.sleep(20) + # test to delete the entry + value = self.client.DeleteVideoEntry(updated_entry) + + self.assert_(value == True) + + def testDirectVideoUploadWithDeveloperTags(self): + self.assertEquals(self.client.developer_key, developer_key) + self.assertEquals(self.client.client_id, YOUTUBE_TEST_CLIENT_ID) + self.assertEquals(self.client.additional_headers['X-GData-Key'], + 'key=' + developer_key) + self.assertEquals(self.client.additional_headers['X-Gdata-Client'], + YOUTUBE_TEST_CLIENT_ID) + + test_video_title = 'my cool video ' + str(random.randint(1000,5000)) + test_video_description = 'description ' + str(random.randint(1000,5000)) + + test_developer_tag_01 = 'tag' + str(random.randint(1000,5000)) + test_developer_tag_02 = 'tag' + str(random.randint(1000,5000)) + test_developer_tag_03 = 'tag' + str(random.randint(1000,5000)) + + my_media_group = gdata.media.Group( + title = gdata.media.Title(text=test_video_title), + description = gdata.media.Description(description_type='plain', + text=test_video_description), + keywords = gdata.media.Keywords(text='video, foo'), + category = [gdata.media.Category( + text='Autos', + scheme='http://gdata.youtube.com/schemas/2007/categories.cat', + label='Autos')], + player=None + ) + + self.assert_(isinstance(my_media_group, gdata.media.Group)) + + video_entry = gdata.youtube.YouTubeVideoEntry(media=my_media_group) + original_developer_tags = [test_developer_tag_01, test_developer_tag_02, + test_developer_tag_03] + + dev_tags = video_entry.AddDeveloperTags(original_developer_tags) + + for dev_tag in dev_tags: + self.assert_(dev_tag.text in original_developer_tags) + + self.assert_(isinstance(video_entry, 
gdata.youtube.YouTubeVideoEntry)) + + new_entry = self.client.InsertVideoEntry(video_entry, video_file_location) + + self.assert_(isinstance(new_entry, gdata.youtube.YouTubeVideoEntry)) + self.assertEquals(new_entry.title.text, test_video_title) + self.assertEquals(new_entry.media.description.text, test_video_description) + self.assert_(new_entry.id.text) + + developer_tags_from_new_entry = new_entry.GetDeveloperTags() + for dev_tag in developer_tags_from_new_entry: + self.assert_(dev_tag.text in original_developer_tags) + + self.assertEquals(len(developer_tags_from_new_entry), + len(original_developer_tags)) + + # sleep for 10 seconds + time.sleep(10) + + # test to delete the entry + value = self.client.DeleteVideoEntry(new_entry) + + if not value: + # sleep more and try again + time.sleep(20) + # test to delete the entry + value = self.client.DeleteVideoEntry(new_entry) + + self.assert_(value == True) + + def testBrowserBasedVideoUpload(self): + self.assertEquals(self.client.developer_key, developer_key) + self.assertEquals(self.client.client_id, YOUTUBE_TEST_CLIENT_ID) + self.assertEquals(self.client.additional_headers['X-GData-Key'], + 'key=' + developer_key) + self.assertEquals(self.client.additional_headers['X-Gdata-Client'], + YOUTUBE_TEST_CLIENT_ID) + test_video_title = 'my cool video ' + str(random.randint(1000,5000)) + test_video_description = 'description ' + str(random.randint(1000,5000)) + + my_media_group = gdata.media.Group( + title = gdata.media.Title(text=test_video_title), + description = gdata.media.Description(description_type='plain', + text=test_video_description), + keywords = gdata.media.Keywords(text='video, foo'), + category = gdata.media.Category( + text='Autos', + scheme='http://gdata.youtube.com/schemas/2007/categories.cat', + label='Autos'), + player=None + ) + self.assert_(isinstance(my_media_group, gdata.media.Group)) + + video_entry = gdata.youtube.YouTubeVideoEntry(media=my_media_group) + self.assert_(isinstance(video_entry, 
gdata.youtube.YouTubeVideoEntry)) + + response = self.client.GetFormUploadToken(video_entry) + self.assert_(response[0].startswith( + 'http://uploads.gdata.youtube.com/action/FormDataUpload/')) + self.assert_(len(response[0]) > 55) + self.assert_(len(response[1]) > 100) + + def testRetrieveRelatedVideoFeedByUri(self): + feed = self.client.GetYouTubeRelatedVideoFeed( + 'http://gdata.youtube.com/feeds/videos/Ncakifd_16k/related') + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed)) + self.assert_(len(feed.entry) > 0) + + def testRetrieveRelatedVideoFeedById(self): + feed = self.client.GetYouTubeRelatedVideoFeed(video_id = 'Ncakifd_16k') + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed)) + self.assert_(len(feed.entry) > 0) + + def testRetrieveResponseVideoFeedByUri(self): + feed = self.client.GetYouTubeVideoResponseFeed( + 'http://gdata.youtube.com/feeds/videos/Ncakifd_16k/responses') + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoResponseFeed)) + self.assert_(len(feed.entry) > 0) + + def testRetrieveResponseVideoFeedById(self): + feed = self.client.GetYouTubeVideoResponseFeed(video_id='Ncakifd_16k') + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoResponseFeed)) + self.assert_(len(feed.entry) > 0) + + def testRetrieveVideoCommentFeedByUri(self): + feed = self.client.GetYouTubeVideoCommentFeed( + 'http://gdata.youtube.com/feeds/api/videos/Ncakifd_16k/comments') + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoCommentFeed)) + self.assert_(len(feed.entry) > 0) + + def testRetrieveVideoCommentFeedByVideoId(self): + feed = self.client.GetYouTubeVideoCommentFeed(video_id='Ncakifd_16k') + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoCommentFeed)) + self.assert_(len(feed.entry) > 0) + + def testAddComment(self): + video_id = '9g6buYJTt_g' + video_entry = self.client.GetYouTubeVideoEntry(video_id=video_id) + random_comment_text = 'test_comment_' + str(random.randint(1000,50000)) + 
self.client.AddComment(comment_text=random_comment_text, + video_entry=video_entry) + comment_feed = self.client.GetYouTubeVideoCommentFeed(video_id=video_id) + comment_found = False + for item in comment_feed.entry: + if (item.content.text == random_comment_text): + comment_found = True + self.assertEquals(comment_found, True) + + def testAddRating(self): + video_id_to_rate = 'Ncakifd_16k' + video_entry = self.client.GetYouTubeVideoEntry(video_id=video_id_to_rate) + response = self.client.AddRating(3, video_entry) + self.assert_(isinstance(response, gdata.GDataEntry)) + + def testRetrievePlaylistFeedByUri(self): + feed = self.client.GetYouTubePlaylistFeed( + 'http://gdata.youtube.com/feeds/users/gdpython/playlists') + self.assert_(isinstance(feed, gdata.youtube.YouTubePlaylistFeed)) + self.assert_(len(feed.entry) > 0) + + def testRetrievePlaylistListFeedByUsername(self): + feed = self.client.GetYouTubePlaylistFeed(username='gdpython') + self.assert_(isinstance(feed, gdata.youtube.YouTubePlaylistFeed)) + self.assert_(len(feed.entry) > 0) + + def testRetrievePlaylistVideoFeed(self): + feed = self.client.GetYouTubePlaylistVideoFeed( + 'http://gdata.youtube.com/feeds/api/playlists/BCB3BB96DF51B505') + self.assert_(isinstance(feed, gdata.youtube.YouTubePlaylistVideoFeed)) + self.assert_(len(feed.entry) > 0) + self.assert_(isinstance(feed.entry[0], + gdata.youtube.YouTubePlaylistVideoEntry)) + + def testAddUpdateAndDeletePlaylist(self): + test_playlist_title = 'my test playlist ' + str(random.randint(1000,3000)) + test_playlist_description = 'test playlist ' + response = self.client.AddPlaylist(test_playlist_title, + test_playlist_description) + self.assert_(isinstance(response, gdata.youtube.YouTubePlaylistEntry)) + + new_playlist_title = 'my updated playlist ' + str(random.randint(1000,4000)) + new_playlist_description = 'my updated playlist ' + playlist_entry_id = response.id.text.split('/')[-1] + + updated_playlist = self.client.UpdatePlaylist(playlist_entry_id, + 
new_playlist_title, + new_playlist_description) + + playlist_feed = self.client.GetYouTubePlaylistFeed() + + update_successful = False + + for playlist_entry in playlist_feed.entry: + if playlist_entry.title.text == new_playlist_title: + update_successful = True + break + + self.assertEquals(update_successful, True) + + # wait + time.sleep(10) + # delete it + playlist_uri = updated_playlist.id.text + response = self.client.DeletePlaylist(playlist_uri) + self.assertEquals(response, True) + + def testAddUpdateAndDeletePrivatePlaylist(self): + test_playlist_title = 'my test playlist ' + str(random.randint(1000,3000)) + test_playlist_description = 'test playlist ' + response = self.client.AddPlaylist(test_playlist_title, + test_playlist_description, + playlist_private=True) + self.assert_(isinstance(response, gdata.youtube.YouTubePlaylistEntry)) + + new_playlist_title = 'my updated playlist ' + str(random.randint(1000,4000)) + new_playlist_description = 'my updated playlist ' + playlist_entry_id = response.id.text.split('/')[-1] + + updated_playlist = self.client.UpdatePlaylist(playlist_entry_id, + new_playlist_title, + new_playlist_description, + playlist_private=True) + + playlist_feed = self.client.GetYouTubePlaylistFeed() + + update_successful = False + playlist_still_private = False + for playlist_entry in playlist_feed.entry: + if playlist_entry.title.text == new_playlist_title: + update_successful = True + if playlist_entry.private is not None: + playlist_still_private = True + + self.assertEquals(update_successful, True) + self.assertEquals(playlist_still_private, True) + + # wait + time.sleep(10) + # delete it + playlist_uri = updated_playlist.id.text + response = self.client.DeletePlaylist(playlist_uri) + self.assertEquals(response, True) + + def testAddEditAndDeleteVideoFromPlaylist(self): + test_playlist_title = 'my test playlist ' + str(random.randint(1000,3000)) + test_playlist_description = 'test playlist ' + response = 
self.client.AddPlaylist(test_playlist_title, + test_playlist_description) + self.assert_(isinstance(response, gdata.youtube.YouTubePlaylistEntry)) + + custom_video_title = 'my test video on my test playlist' + custom_video_description = 'this is a test video on my test playlist' + video_id = 'Ncakifd_16k' + playlist_uri = response.feed_link[0].href + time.sleep(10) + response = self.client.AddPlaylistVideoEntryToPlaylist( + playlist_uri, video_id, custom_video_title, custom_video_description) + + self.assert_(isinstance(response, gdata.youtube.YouTubePlaylistVideoEntry)) + + playlist_entry_id = response.id.text.split('/')[-1] + playlist_uri = response.id.text.split(playlist_entry_id)[0][:-1] + new_video_title = 'video number ' + str(random.randint(1000,3000)) + new_video_description = 'test video' + time.sleep(10) + response = self.client.UpdatePlaylistVideoEntryMetaData( + playlist_uri, + playlist_entry_id, + new_video_title, + new_video_description, + 1) + self.assert_(isinstance(response, gdata.youtube.YouTubePlaylistVideoEntry)) + time.sleep(10) + + playlist_entry_id = response.id.text.split('/')[-1] + # remove video from playlist + response = self.client.DeletePlaylistVideoEntry(playlist_uri, + playlist_entry_id) + self.assertEquals(response, True) + + time.sleep(10) + # delete the playlist + response = self.client.DeletePlaylist(playlist_uri) + self.assertEquals(response, True) + + def testRetrieveSubscriptionFeedByUri(self): + feed = self.client.GetYouTubeSubscriptionFeed( + 'http://gdata.youtube.com/feeds/users/gdpython/subscriptions') + self.assert_(isinstance(feed, gdata.youtube.YouTubeSubscriptionFeed)) + self.assert_(len(feed.entry) == 3) + + subscription_to_channel_found = False + subscription_to_favorites_found = False + subscription_to_query_found = False + all_types_found = False + + for entry in feed.entry: + self.assert_(isinstance(entry, gdata.youtube.YouTubeSubscriptionEntry)) + subscription_type = entry.GetSubscriptionType() + if 
subscription_type == 'channel': + subscription_to_channel_found = True + elif subscription_type == 'favorites': + subscription_to_favorites_found = True + elif subscription_type == 'query': + subscription_to_query_found = True + + if (subscription_to_channel_found and subscription_to_favorites_found and + subscription_to_query_found): + all_types_found = True + + self.assertEquals(all_types_found, True) + + def testRetrieveSubscriptionFeedByUsername(self): + feed = self.client.GetYouTubeSubscriptionFeed(username='gdpython') + self.assert_(isinstance(feed, gdata.youtube.YouTubeSubscriptionFeed)) + self.assert_(len(feed.entry) == 3) + + subscription_to_channel_found = False + subscription_to_favorites_found = False + subscription_to_query_found = False + all_types_found = False + + for entry in feed.entry: + self.assert_(isinstance(entry, gdata.youtube.YouTubeSubscriptionEntry)) + subscription_type = entry.GetSubscriptionType() + if subscription_type == 'channel': + subscription_to_channel_found = True + elif subscription_type == 'favorites': + subscription_to_favorites_found = True + elif subscription_type == 'query': + subscription_to_query_found = True + + if (subscription_to_channel_found and subscription_to_favorites_found and + subscription_to_query_found): + all_types_found = True + self.assertEquals(all_types_found, True) + + def testRetrieveUserProfileByUri(self): + user = self.client.GetYouTubeUserEntry( + 'http://gdata.youtube.com/feeds/users/gdpython') + self.assert_(isinstance(user, gdata.youtube.YouTubeUserEntry)) + self.assertEquals(user.location.text, 'US') + + def testRetrieveUserProfileByUsername(self): + user = self.client.GetYouTubeUserEntry(username='gdpython') + self.assert_(isinstance(user, gdata.youtube.YouTubeUserEntry)) + self.assertEquals(user.location.text, 'US') + + def testRetrieveUserFavoritesFeed(self): + feed = self.client.GetUserFavoritesFeed(username='gdpython') + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed)) + 
self.assert_(len(feed.entry) > 0) + + def testRetrieveDefaultUserFavoritesFeed(self): + feed = self.client.GetUserFavoritesFeed() + self.assert_(isinstance(feed, gdata.youtube.YouTubeVideoFeed)) + self.assert_(len(feed.entry) > 0) + + def testAddAndDeleteVideoFromFavorites(self): + video_id = 'Ncakifd_16k' + video_entry = self.client.GetYouTubeVideoEntry(video_id=video_id) + response = self.client.AddVideoEntryToFavorites(video_entry) + self.assert_(isinstance(response, gdata.GDataEntry)) + time.sleep(10) + response = self.client.DeleteVideoEntryFromFavorites(video_id) + self.assertEquals(response, True) + + def testRetrieveContactFeedByUri(self): + feed = self.client.GetYouTubeContactFeed( + 'http://gdata.youtube.com/feeds/users/gdpython/contacts') + self.assert_(isinstance(feed, gdata.youtube.YouTubeContactFeed)) + self.assertEquals(len(feed.entry), 1) + + def testRetrieveContactFeedByUsername(self): + feed = self.client.GetYouTubeContactFeed(username='gdpython') + self.assert_(isinstance(feed, gdata.youtube.YouTubeContactFeed)) + self.assertEquals(len(feed.entry), 1) + +if __name__ == '__main__': + print ('NOTE: Please run these tests only with a test account. ' + 'The tests may delete or update your data.') + username = raw_input('Please enter your username: ') + password = getpass.getpass() + developer_key = raw_input('Please enter your developer key: ') + video_file_location = raw_input( + 'Please enter the absolute path to a video file: ') + unittest.main() diff --git a/gdata.py-1.2.3/tests/gdata_tests/youtube_test.py b/gdata.py-1.2.3/tests/gdata_tests/youtube_test.py new file mode 100755 index 0000000..09e654b --- /dev/null +++ b/gdata.py-1.2.3/tests/gdata_tests/youtube_test.py @@ -0,0 +1,561 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'api.jhartmann@gmail.com (Jochen Hartmann)' + +import unittest +from gdata import test_data +import gdata.youtube +import gdata.youtube.service +import atom + +YOUTUBE_TEMPLATE = '{http://gdata.youtube.com/schemas/2007}%s' +YT_FORMAT = YOUTUBE_TEMPLATE % ('format') + + +class VideoEntryTest(unittest.TestCase): + + def setUp(self): + self.video_feed = gdata.youtube.YouTubeVideoFeedFromString( + test_data.YOUTUBE_VIDEO_FEED) + + def testCorrectXmlParsing(self): + self.assertEquals(self.video_feed.id.text, + 'http://gdata.youtube.com/feeds/api/standardfeeds/top_rated') + self.assertEquals(len(self.video_feed.entry), 2) + for entry in self.video_feed.entry: + if (entry.id.text == + 'http://gdata.youtube.com/feeds/api/videos/C71ypXYGho8'): + + self.assertEquals(entry.published.text, '2008-03-20T10:17:27.000-07:00') + self.assertEquals(entry.updated.text, '2008-05-14T04:26:37.000-07:00') + self.assertEquals(entry.category[0].scheme, + 'http://gdata.youtube.com/schemas/2007/keywords.cat') + self.assertEquals(entry.category[0].term, 'karyn') + self.assertEquals(entry.category[1].scheme, + 'http://gdata.youtube.com/schemas/2007/keywords.cat') + self.assertEquals(entry.category[1].term, 'garcia') + self.assertEquals(entry.category[2].scheme, + 'http://gdata.youtube.com/schemas/2007/keywords.cat') + self.assertEquals(entry.category[2].term, 'me') + self.assertEquals(entry.category[3].scheme, + 'http://schemas.google.com/g/2005#kind') + self.assertEquals(entry.category[3].term, + 'http://gdata.youtube.com/schemas/2007#video') + 
self.assertEquals(entry.title.text, + 'Me odeio por te amar - KARYN GARCIA') + self.assertEquals(entry.content.text, 'http://www.karyngarcia.com.br') + self.assertEquals(entry.link[0].rel, 'alternate') + self.assertEquals(entry.link[0].href, + 'http://www.youtube.com/watch?v=C71ypXYGho8') + self.assertEquals(entry.link[1].rel, + 'http://gdata.youtube.com/schemas/2007#video.related') + self.assertEquals(entry.link[1].href, + 'http://gdata.youtube.com/feeds/api/videos/C71ypXYGho8/related') + self.assertEquals(entry.link[2].rel, 'self') + self.assertEquals(entry.link[2].href, + ('http://gdata.youtube.com/feeds/api/standardfeeds' + '/top_rated/C71ypXYGho8')) + self.assertEquals(entry.author[0].name.text, 'TvKarynGarcia') + self.assertEquals(entry.author[0].uri.text, + 'http://gdata.youtube.com/feeds/api/users/tvkaryngarcia') + + self.assertEquals(entry.media.title.text, + 'Me odeio por te amar - KARYN GARCIA') + self.assertEquals(entry.media.description.text, + 'http://www.karyngarcia.com.br') + self.assertEquals(entry.media.keywords.text, + 'amar, boyfriend, garcia, karyn, me, odeio, por, te') + self.assertEquals(entry.media.duration.seconds, '203') + self.assertEquals(entry.media.category[0].label, 'Music') + self.assertEquals(entry.media.category[0].scheme, + 'http://gdata.youtube.com/schemas/2007/categories.cat') + self.assertEquals(entry.media.category[0].text, 'Music') + self.assertEquals(entry.media.category[1].label, 'test111') + self.assertEquals(entry.media.category[1].scheme, + 'http://gdata.youtube.com/schemas/2007/developertags.cat') + self.assertEquals(entry.media.category[1].text, 'test111') + self.assertEquals(entry.media.category[2].label, 'test222') + self.assertEquals(entry.media.category[2].scheme, + 'http://gdata.youtube.com/schemas/2007/developertags.cat') + self.assertEquals(entry.media.category[2].text, 'test222') + self.assertEquals(entry.media.content[0].url, + 'http://www.youtube.com/v/C71ypXYGho8') + 
self.assertEquals(entry.media.content[0].type, + 'application/x-shockwave-flash') + self.assertEquals(entry.media.content[0].medium, 'video') + self.assertEquals( + entry.media.content[0].extension_attributes['isDefault'], 'true') + self.assertEquals( + entry.media.content[0].extension_attributes['expression'], 'full') + self.assertEquals( + entry.media.content[0].extension_attributes['duration'], '203') + self.assertEquals( + entry.media.content[0].extension_attributes[YT_FORMAT], '5') + self.assertEquals(entry.media.content[1].url, + ('rtsp://rtsp2.youtube.com/ChoLENy73wIaEQmPhgZ2pXK9CxMYDSANFEgGDA' + '==/0/0/0/video.3gp')) + self.assertEquals(entry.media.content[1].type, 'video/3gpp') + self.assertEquals(entry.media.content[1].medium, 'video') + self.assertEquals( + entry.media.content[1].extension_attributes['expression'], 'full') + self.assertEquals( + entry.media.content[1].extension_attributes['duration'], '203') + self.assertEquals( + entry.media.content[1].extension_attributes[YT_FORMAT], '1') + self.assertEquals(entry.media.content[2].url, + ('rtsp://rtsp2.youtube.com/ChoLENy73wIaEQmPhgZ2pXK9CxMYESARFEgGDA==' + '/0/0/0/video.3gp')) + self.assertEquals(entry.media.content[2].type, 'video/3gpp') + self.assertEquals(entry.media.content[2].medium, 'video') + self.assertEquals( + entry.media.content[2].extension_attributes['expression'], 'full') + self.assertEquals( + entry.media.content[2].extension_attributes['duration'], '203') + self.assertEquals( + entry.media.content[2].extension_attributes[YT_FORMAT], '6') + self.assertEquals(entry.media.player.url, + 'http://www.youtube.com/watch?v=C71ypXYGho8') + self.assertEquals(entry.media.thumbnail[0].url, + 'http://img.youtube.com/vi/C71ypXYGho8/2.jpg') + self.assertEquals(entry.media.thumbnail[0].height, '97') + self.assertEquals(entry.media.thumbnail[0].width, '130') + self.assertEquals(entry.media.thumbnail[0].extension_attributes['time'], + '00:01:41.500') + self.assertEquals(entry.media.thumbnail[1].url, + 
'http://img.youtube.com/vi/C71ypXYGho8/1.jpg') + self.assertEquals(entry.media.thumbnail[1].height, '97') + self.assertEquals(entry.media.thumbnail[1].width, '130') + self.assertEquals(entry.media.thumbnail[1].extension_attributes['time'], + '00:00:50.750') + self.assertEquals(entry.media.thumbnail[2].url, + 'http://img.youtube.com/vi/C71ypXYGho8/3.jpg') + self.assertEquals(entry.media.thumbnail[2].height, '97') + self.assertEquals(entry.media.thumbnail[2].width, '130') + self.assertEquals(entry.media.thumbnail[2].extension_attributes['time'], + '00:02:32.250') + self.assertEquals(entry.media.thumbnail[3].url, + 'http://img.youtube.com/vi/C71ypXYGho8/0.jpg') + self.assertEquals(entry.media.thumbnail[3].height, '240') + self.assertEquals(entry.media.thumbnail[3].width, '320') + self.assertEquals(entry.media.thumbnail[3].extension_attributes['time'], + '00:01:41.500') + + self.assertEquals(entry.statistics.view_count, '138864') + self.assertEquals(entry.statistics.favorite_count, '2474') + self.assertEquals(entry.rating.min, '1') + self.assertEquals(entry.rating.max, '5') + self.assertEquals(entry.rating.num_raters, '4626') + self.assertEquals(entry.rating.average, '4.95') + self.assertEquals(entry.comments.feed_link[0].href, + ('http://gdata.youtube.com/feeds/api/videos/' + 'C71ypXYGho8/comments')) + self.assertEquals(entry.comments.feed_link[0].count_hint, '27') + + self.assertEquals(entry.GetSwfUrl(), + 'http://www.youtube.com/v/C71ypXYGho8') + self.assertEquals(entry.GetYouTubeCategoryAsString(), 'Music') + + +class VideoEntryPrivateTest(unittest.TestCase): + + def setUp(self): + self.entry = gdata.youtube.YouTubeVideoEntryFromString( + test_data.YOUTUBE_ENTRY_PRIVATE) + + def testCorrectXmlParsing(self): + self.assert_(isinstance(self.entry, + gdata.youtube.YouTubeVideoEntry)) + self.assert_(self.entry.media.private) + + +class VideoFeedTest(unittest.TestCase): + + def setUp(self): + self.feed = gdata.youtube.YouTubeVideoFeedFromString( + 
test_data.YOUTUBE_VIDEO_FEED) + + def testCorrectXmlParsing(self): + self.assertEquals(self.feed.id.text, + 'http://gdata.youtube.com/feeds/api/standardfeeds/top_rated') + self.assertEquals(self.feed.generator.text, 'YouTube data API') + self.assertEquals(self.feed.generator.uri, 'http://gdata.youtube.com/') + self.assertEquals(len(self.feed.author), 1) + self.assertEquals(self.feed.author[0].name.text, 'YouTube') + self.assertEquals(len(self.feed.category), 1) + self.assertEquals(self.feed.category[0].scheme, + 'http://schemas.google.com/g/2005#kind') + self.assertEquals(self.feed.category[0].term, + 'http://gdata.youtube.com/schemas/2007#video') + self.assertEquals(self.feed.items_per_page.text, '25') + self.assertEquals(len(self.feed.link), 4) + self.assertEquals(self.feed.link[0].href, + 'http://www.youtube.com/browse?s=tr') + self.assertEquals(self.feed.link[0].rel, 'alternate') + self.assertEquals(self.feed.link[1].href, + 'http://gdata.youtube.com/feeds/api/standardfeeds/top_rated') + self.assertEquals(self.feed.link[1].rel, + 'http://schemas.google.com/g/2005#feed') + self.assertEquals(self.feed.link[2].href, + ('http://gdata.youtube.com/feeds/api/standardfeeds/top_rated?' + 'start-index=1&max-results=25')) + self.assertEquals(self.feed.link[2].rel, 'self') + self.assertEquals(self.feed.link[3].href, + ('http://gdata.youtube.com/feeds/api/standardfeeds/top_rated?' 
+ 'start-index=26&max-results=25')) + self.assertEquals(self.feed.link[3].rel, 'next') + self.assertEquals(self.feed.start_index.text, '1') + self.assertEquals(self.feed.title.text, 'Top Rated') + self.assertEquals(self.feed.total_results.text, '100') + self.assertEquals(self.feed.updated.text, '2008-05-14T02:24:07.000-07:00') + self.assertEquals(len(self.feed.entry), 2) + + +class YouTubePlaylistFeedTest(unittest.TestCase): + + def setUp(self): + self.feed = gdata.youtube.YouTubePlaylistFeedFromString( + test_data.YOUTUBE_PLAYLIST_FEED) + + def testCorrectXmlParsing(self): + self.assertEquals(len(self.feed.entry), 1) + self.assertEquals( + self.feed.category[0].scheme, 'http://schemas.google.com/g/2005#kind') + self.assertEquals(self.feed.category[0].term, + 'http://gdata.youtube.com/schemas/2007#playlistLink') + + +class YouTubePlaylistEntryTest(unittest.TestCase): + + def setUp(self): + self.feed = gdata.youtube.YouTubePlaylistFeedFromString( + test_data.YOUTUBE_PLAYLIST_FEED) + + def testCorrectXmlParsing(self): + for entry in self.feed.entry: + self.assertEquals(entry.category[0].scheme, + 'http://schemas.google.com/g/2005#kind') + self.assertEquals(entry.category[0].term, + 'http://gdata.youtube.com/schemas/2007#playlistLink') + self.assertEquals(entry.description.text, + 'My new playlist Description') + self.assertEquals(entry.feed_link[0].href, + 'http://gdata.youtube.com/feeds/playlists/8BCDD04DE8F771B2') + self.assertEquals(entry.feed_link[0].rel, + 'http://gdata.youtube.com/schemas/2007#playlist') + + +class YouTubePlaylistVideoFeedTest(unittest.TestCase): + + def setUp(self): + self.feed = gdata.youtube.YouTubePlaylistVideoFeedFromString( + test_data.YOUTUBE_PLAYLIST_VIDEO_FEED) + + def testCorrectXmlParsing(self): + self.assertEquals(len(self.feed.entry), 1) + self.assertEquals(self.feed.category[0].scheme, + 'http://schemas.google.com/g/2005#kind') + self.assertEquals(self.feed.category[0].term, + 'http://gdata.youtube.com/schemas/2007#playlist') + 
self.assertEquals(self.feed.category[1].scheme, + 'http://gdata.youtube.com/schemas/2007/tags.cat') + self.assertEquals(self.feed.category[1].term, 'videos') + self.assertEquals(self.feed.category[2].scheme, + 'http://gdata.youtube.com/schemas/2007/tags.cat') + self.assertEquals(self.feed.category[2].term, 'python') + + +class YouTubePlaylistVideoEntryTest(unittest.TestCase): + + def setUp(self): + self.feed = gdata.youtube.YouTubePlaylistVideoFeedFromString( + test_data.YOUTUBE_PLAYLIST_VIDEO_FEED) + + def testCorrectXmlParsing(self): + self.assertEquals(len(self.feed.entry), 1) + for entry in self.feed.entry: + self.assertEquals(entry.position.text, '1') + + +class YouTubeVideoCommentFeedTest(unittest.TestCase): + + def setUp(self): + self.feed = gdata.youtube.YouTubeVideoCommentFeedFromString( + test_data.YOUTUBE_COMMENT_FEED) + + def testCorrectXmlParsing(self): + self.assertEquals(len(self.feed.category), 1) + self.assertEquals(self.feed.category[0].scheme, + 'http://schemas.google.com/g/2005#kind') + self.assertEquals(self.feed.category[0].term, + 'http://gdata.youtube.com/schemas/2007#comment') + self.assertEquals(len(self.feed.link), 4) + self.assertEquals(self.feed.link[0].rel, 'related') + self.assertEquals(self.feed.link[0].href, + 'http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU') + self.assertEquals(self.feed.link[1].rel, 'alternate') + self.assertEquals(self.feed.link[1].href, + 'http://www.youtube.com/watch?v=2Idhz9ef5oU') + self.assertEquals(self.feed.link[2].rel, + 'http://schemas.google.com/g/2005#feed') + self.assertEquals(self.feed.link[2].href, + 'http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments') + self.assertEquals(self.feed.link[3].rel, 'self') + self.assertEquals(self.feed.link[3].href, + ('http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU/comments?' 
+ 'start-index=1&max-results=25')) + self.assertEquals(len(self.feed.entry), 3) + + +class YouTubeVideoCommentEntryTest(unittest.TestCase): + + def setUp(self): + self.feed = gdata.youtube.YouTubeVideoCommentFeedFromString( + test_data.YOUTUBE_COMMENT_FEED) + + def testCorrectXmlParsing(self): + self.assertEquals(len(self.feed.entry), 3) + self.assert_(isinstance(self.feed.entry[0], + gdata.youtube.YouTubeVideoCommentEntry)) + + for entry in self.feed.entry: + if (entry.id.text == + ('http://gdata.youtube.com/feeds/videos/' + '2Idhz9ef5oU/comments/91F809A3DE2EB81B')): + self.assertEquals(entry.category[0].scheme, + 'http://schemas.google.com/g/2005#kind') + self.assertEquals(entry.category[0].term, + 'http://gdata.youtube.com/schemas/2007#comment') + self.assertEquals(entry.link[0].href, + 'http://gdata.youtube.com/feeds/videos/2Idhz9ef5oU') + self.assertEquals(entry.link[0].rel, 'related') + self.assertEquals(entry.content.text, 'test66') + + +class YouTubeVideoSubscriptionFeedTest(unittest.TestCase): + + def setUp(self): + self.feed = gdata.youtube.YouTubeSubscriptionFeedFromString( + test_data.YOUTUBE_SUBSCRIPTION_FEED) + + def testCorrectXmlParsing(self): + + self.assertEquals(len(self.feed.category), 1) + self.assertEquals(self.feed.category[0].scheme, + 'http://schemas.google.com/g/2005#kind') + self.assertEquals(self.feed.category[0].term, + 'http://gdata.youtube.com/schemas/2007#subscription') + self.assertEquals(len(self.feed.link), 4) + self.assertEquals(self.feed.link[0].rel, 'related') + self.assertEquals(self.feed.link[0].href, + 'http://gdata.youtube.com/feeds/users/andyland74') + self.assertEquals(self.feed.link[1].rel, 'alternate') + self.assertEquals(self.feed.link[1].href, + 'http://www.youtube.com/profile_subscriptions?user=andyland74') + self.assertEquals(self.feed.link[2].rel, + 'http://schemas.google.com/g/2005#feed') + self.assertEquals(self.feed.link[2].href, + 'http://gdata.youtube.com/feeds/users/andyland74/subscriptions') + 
self.assertEquals(self.feed.link[3].rel, 'self') + self.assertEquals(self.feed.link[3].href, + ('http://gdata.youtube.com/feeds/users/andyland74/subscriptions?' + 'start-index=1&max-results=25')) + self.assertEquals(len(self.feed.entry), 1) + + +class YouTubeVideoSubscriptionEntryTest(unittest.TestCase): + + def setUp(self): + self.feed = gdata.youtube.YouTubeSubscriptionFeedFromString( + test_data.YOUTUBE_SUBSCRIPTION_FEED) + + def testCorrectXmlParsing(self): + for entry in self.feed.entry: + self.assertEquals(len(entry.category), 2) + self.assertEquals(entry.category[0].scheme, + 'http://gdata.youtube.com/schemas/2007/subscriptiontypes.cat') + self.assertEquals(entry.category[0].term, 'channel') + self.assertEquals(entry.category[1].scheme, + 'http://schemas.google.com/g/2005#kind') + self.assertEquals(entry.category[1].term, + 'http://gdata.youtube.com/schemas/2007#subscription') + self.assertEquals(len(entry.link), 3) + self.assertEquals(entry.link[0].href, + 'http://gdata.youtube.com/feeds/users/andyland74') + self.assertEquals(entry.link[0].rel, 'related') + self.assertEquals(entry.link[1].href, + 'http://www.youtube.com/profile_videos?user=NBC') + self.assertEquals(entry.link[1].rel, 'alternate') + self.assertEquals(entry.link[2].href, + ('http://gdata.youtube.com/feeds/users/andyland74/subscriptions/' + 'd411759045e2ad8c')) + self.assertEquals(entry.link[2].rel, 'self') + self.assertEquals(len(entry.feed_link), 1) + self.assertEquals(entry.feed_link[0].href, + 'http://gdata.youtube.com/feeds/api/users/nbc/uploads') + self.assertEquals(entry.feed_link[0].rel, + 'http://gdata.youtube.com/schemas/2007#user.uploads') + self.assertEquals(entry.username.text, 'NBC') + + +class YouTubeVideoResponseFeedTest(unittest.TestCase): + + def setUp(self): + self.feed = gdata.youtube.YouTubeVideoFeedFromString( + test_data.YOUTUBE_VIDEO_RESPONSE_FEED) + + def testCorrectXmlParsing(self): + self.assertEquals(len(self.feed.category), 1) + 
self.assertEquals(self.feed.category[0].scheme, + 'http://schemas.google.com/g/2005#kind') + self.assertEquals(self.feed.category[0].term, + 'http://gdata.youtube.com/schemas/2007#video') + self.assertEquals(len(self.feed.link), 4) + self.assertEquals(self.feed.link[0].href, + 'http://gdata.youtube.com/feeds/videos/2c3q9K4cHzY') + self.assertEquals(self.feed.link[0].rel, 'related') + self.assertEquals(self.feed.link[1].href, + 'http://www.youtube.com/video_response_view_all?v=2c3q9K4cHzY') + self.assertEquals(self.feed.link[1].rel, 'alternate') + self.assertEquals(self.feed.link[2].href, + 'http://gdata.youtube.com/feeds/videos/2c3q9K4cHzY/responses') + self.assertEquals(self.feed.link[2].rel, + 'http://schemas.google.com/g/2005#feed') + self.assertEquals(self.feed.link[3].href, + ('http://gdata.youtube.com/feeds/videos/2c3q9K4cHzY/responses?' + 'start-index=1&max-results=25')) + self.assertEquals(self.feed.link[3].rel, 'self') + self.assertEquals(len(self.feed.entry), 1) + + +class YouTubeVideoResponseEntryTest(unittest.TestCase): + + def setUp(self): + self.feed = gdata.youtube.YouTubeVideoFeedFromString( + test_data.YOUTUBE_VIDEO_RESPONSE_FEED) + + def testCorrectXmlParsing(self): + for entry in self.feed.entry: + self.assert_(isinstance(entry, gdata.youtube.YouTubeVideoEntry)) + + +class YouTubeContactFeed(unittest.TestCase): + + def setUp(self): + self.feed = gdata.youtube.YouTubeContactFeedFromString( + test_data.YOUTUBE_CONTACTS_FEED) + + def testCorrectXmlParsing(self): + self.assertEquals(len(self.feed.entry), 2) + self.assertEquals(self.feed.category[0].scheme, + 'http://schemas.google.com/g/2005#kind') + self.assertEquals(self.feed.category[0].term, + 'http://gdata.youtube.com/schemas/2007#friend') + + +class YouTubeContactEntry(unittest.TestCase): + + def setUp(self): + self.feed= gdata.youtube.YouTubeContactFeedFromString( + test_data.YOUTUBE_CONTACTS_FEED) + + def testCorrectXmlParsing(self): + for entry in self.feed.entry: + if (entry.id.text == 
('http://gdata.youtube.com/feeds/users/' + 'apitestjhartmann/contacts/testjfisher')): + self.assertEquals(entry.username.text, 'testjfisher') + self.assertEquals(entry.status.text, 'pending') + + +class YouTubeUserEntry(unittest.TestCase): + + def setUp(self): + self.feed = gdata.youtube.YouTubeUserEntryFromString( + test_data.YOUTUBE_PROFILE) + + def testCorrectXmlParsing(self): + self.assertEquals(self.feed.author[0].name.text, 'andyland74') + self.assertEquals(self.feed.books.text, 'Catch-22') + self.assertEquals(self.feed.category[0].scheme, + 'http://gdata.youtube.com/schemas/2007/channeltypes.cat') + self.assertEquals(self.feed.category[0].term, 'Standard') + self.assertEquals(self.feed.category[1].scheme, + 'http://schemas.google.com/g/2005#kind') + self.assertEquals(self.feed.category[1].term, + 'http://gdata.youtube.com/schemas/2007#userProfile') + self.assertEquals(self.feed.company.text, 'Google') + self.assertEquals(self.feed.gender.text, 'm') + self.assertEquals(self.feed.hobbies.text, 'Testing YouTube APIs') + self.assertEquals(self.feed.hometown.text, 'Somewhere') + self.assertEquals(len(self.feed.feed_link), 6) + self.assertEquals(self.feed.feed_link[0].count_hint, '4') + self.assertEquals(self.feed.feed_link[0].href, + 'http://gdata.youtube.com/feeds/users/andyland74/favorites') + self.assertEquals(self.feed.feed_link[0].rel, + 'http://gdata.youtube.com/schemas/2007#user.favorites') + self.assertEquals(self.feed.feed_link[1].count_hint, '1') + self.assertEquals(self.feed.feed_link[1].href, + 'http://gdata.youtube.com/feeds/users/andyland74/contacts') + self.assertEquals(self.feed.feed_link[1].rel, + 'http://gdata.youtube.com/schemas/2007#user.contacts') + self.assertEquals(self.feed.feed_link[2].count_hint, '0') + self.assertEquals(self.feed.feed_link[2].href, + 'http://gdata.youtube.com/feeds/users/andyland74/inbox') + self.assertEquals(self.feed.feed_link[2].rel, + 'http://gdata.youtube.com/schemas/2007#user.inbox') + 
self.assertEquals(self.feed.feed_link[3].count_hint, None) + self.assertEquals(self.feed.feed_link[3].href, + 'http://gdata.youtube.com/feeds/users/andyland74/playlists') + self.assertEquals(self.feed.feed_link[3].rel, + 'http://gdata.youtube.com/schemas/2007#user.playlists') + self.assertEquals(self.feed.feed_link[4].count_hint, '4') + self.assertEquals(self.feed.feed_link[4].href, + 'http://gdata.youtube.com/feeds/users/andyland74/subscriptions') + self.assertEquals(self.feed.feed_link[4].rel, + 'http://gdata.youtube.com/schemas/2007#user.subscriptions') + self.assertEquals(self.feed.feed_link[5].count_hint, '1') + self.assertEquals(self.feed.feed_link[5].href, + 'http://gdata.youtube.com/feeds/users/andyland74/uploads') + self.assertEquals(self.feed.feed_link[5].rel, + 'http://gdata.youtube.com/schemas/2007#user.uploads') + self.assertEquals(self.feed.first_name.text, 'andy') + self.assertEquals(self.feed.last_name.text, 'example') + self.assertEquals(self.feed.link[0].href, + 'http://www.youtube.com/profile?user=andyland74') + self.assertEquals(self.feed.link[0].rel, 'alternate') + self.assertEquals(self.feed.link[1].href, + 'http://gdata.youtube.com/feeds/users/andyland74') + self.assertEquals(self.feed.link[1].rel, 'self') + self.assertEquals(self.feed.location.text, 'US') + self.assertEquals(self.feed.movies.text, 'Aqua Teen Hungerforce') + self.assertEquals(self.feed.music.text, 'Elliott Smith') + self.assertEquals(self.feed.occupation.text, 'Technical Writer') + self.assertEquals(self.feed.published.text, '2006-10-16T00:09:45.000-07:00') + self.assertEquals(self.feed.school.text, 'University of North Carolina') + self.assertEquals(self.feed.statistics.last_web_access, + '2008-02-25T16:03:38.000-08:00') + self.assertEquals(self.feed.statistics.subscriber_count, '1') + self.assertEquals(self.feed.statistics.video_watch_count, '21') + self.assertEquals(self.feed.statistics.view_count, '9') + self.assertEquals(self.feed.thumbnail.url, + 
'http://i.ytimg.com/vi/YFbSxcdOL-w/default.jpg') + self.assertEquals(self.feed.title.text, 'andyland74 Channel') + self.assertEquals(self.feed.updated.text, '2008-02-26T11:48:21.000-08:00') + self.assertEquals(self.feed.username.text, 'andyland74') + +if __name__ == '__main__': + unittest.main() diff --git a/gdata.py-1.2.3/tests/module_test_runner.py b/gdata.py-1.2.3/tests/module_test_runner.py new file mode 100755 index 0000000..881a70d --- /dev/null +++ b/gdata.py-1.2.3/tests/module_test_runner.py @@ -0,0 +1,57 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__author__ = 'api.jscudder@gmail.com (Jeff Scudder)' + + +import unittest + +class ModuleTestRunner(object): + + def __init__(self, module_list=None, module_settings=None): + """Constructor for a runner to run tests in the modules listed. + + Args: + module_list: list (optional) The modules whose test cases will be run. + module_settings: dict (optional) A dictionary of module level varables + which should be set in the modules if they are present. An + example is the username and password which is a module variable + in most service_test modules. + """ + self.modules = module_list or [] + self.settings = module_settings or {} + + def RunAllTests(self): + """Executes all tests in this objects modules list. + + It also sets any module variables which match the settings keys to the + corresponding values in the settings member. 
+ """ + runner = unittest.TextTestRunner() + for module in self.modules: + # Set any module variables according to the contents in the settings + for setting, value in self.settings.iteritems(): + try: + setattr(module, setting, value) + except AttributeError: + # This module did not have a variable for the current setting, so + # we skip it and try the next setting. + pass + # We have set all of the applicable settings for the module, now + # run the tests. + print '\nRunning all tests in module', module.__name__ + runner.run(unittest.defaultTestLoader.loadTestsFromModule(module)) diff --git a/gdata.py-1.2.3/tests/run_all_tests.py b/gdata.py-1.2.3/tests/run_all_tests.py new file mode 100755 index 0000000..c6c3f62 --- /dev/null +++ b/gdata.py-1.2.3/tests/run_all_tests.py @@ -0,0 +1,32 @@ +#!/usr/bin/python +# +# Copyright (C) 2006 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +__author__ = 'api.jscudder@gmail.com (Jeff Scudder)' + + +import sys +import unittest +import getopt +import getpass +import module_test_runner +import run_data_tests +import run_service_tests + + +if __name__ == '__main__': + run_data_tests.RunAllTests() + run_service_tests.GetValuesForTestSettingsAndRunAllTests() diff --git a/gdata.py-1.2.3/tests/run_data_tests.py b/gdata.py-1.2.3/tests/run_data_tests.py new file mode 100755 index 0000000..a597285 --- /dev/null +++ b/gdata.py-1.2.3/tests/run_data_tests.py @@ -0,0 +1,50 @@ +#!/usr/bin/python + +import sys +import unittest +import module_test_runner +import getopt +import getpass +# Modules whose tests we will run. +import gdata_test +import atom_test +import atom_tests.http_interface_test +import atom_tests.mock_http_test +import atom_tests.token_store_test +import atom_tests.url_test +import atom_tests.core_test +import gdata_tests.apps_test +import gdata_tests.auth_test +import gdata_tests.base_test +import gdata_tests.blogger_test +import gdata_tests.calendar_test +import gdata_tests.client_test +import gdata_tests.codesearch_test +import gdata_tests.contacts_test +import gdata_tests.docs_test +import gdata_tests.photos_test +import gdata_tests.spreadsheet_test +import gdata_tests.youtube_test +import gdata_tests.webmastertools_test + + +def RunAllTests(): + test_runner = module_test_runner.ModuleTestRunner() + test_runner.modules = [gdata_test, atom_test, atom_tests.url_test, + atom_tests.http_interface_test, + atom_tests.mock_http_test, + atom_tests.core_test, + atom_tests.token_store_test, + gdata_tests.client_test, + gdata_tests.apps_test, gdata_tests.auth_test, + gdata_tests.base_test, + gdata_tests.calendar_test, gdata_tests.docs_test, + gdata_tests.spreadsheet_test, + gdata_tests.photos_test, gdata_tests.codesearch_test, + gdata_tests.contacts_test, + gdata_tests.youtube_test, gdata_tests.blogger_test, + gdata_tests.webmastertools_test] + test_runner.RunAllTests() + +if __name__ == '__main__': + 
RunAllTests() diff --git a/gdata.py-1.2.3/tests/run_service_tests.py b/gdata.py-1.2.3/tests/run_service_tests.py new file mode 100755 index 0000000..938b286 --- /dev/null +++ b/gdata.py-1.2.3/tests/run_service_tests.py @@ -0,0 +1,106 @@ +#!/usr/bin/python + +import sys +import unittest +import module_test_runner +import getopt +import getpass + +# Modules whose tests we will run. +import atom_tests.service_test +import gdata_tests.service_test +import gdata_tests.client_online_test +import gdata_tests.apps.service_test +import gdata_tests.base.service_test +import gdata_tests.calendar.service_test +import gdata_tests.docs.service_test +import gdata_tests.spreadsheet.service_test +import gdata_tests.spreadsheet.text_db_test +import gdata_tests.photos.service_test +import gdata_tests.contacts.service_test +import gdata_tests.blogger.service_test +import gdata_tests.youtube.service_test + + +def RunAllTests(username, password, spreadsheet_key, worksheet_key, + apps_username, apps_password, apps_domain): + test_runner = module_test_runner.ModuleTestRunner() + test_runner.modules = [atom_tests.service_test, + gdata_tests.service_test, + gdata_tests.client_online_test, + gdata_tests.apps.service_test, + gdata_tests.base.service_test, + gdata_tests.calendar.service_test, + gdata_tests.docs.service_test, + gdata_tests.spreadsheet.service_test, + gdata_tests.spreadsheet.text_db_test, + gdata_tests.contacts.service_test, + gdata_tests.photos.service_test] + test_runner.settings = {'username':username, 'password':password, + 'test_image_location':'testimage.jpg', + 'ss_key':spreadsheet_key, + 'ws_key':worksheet_key, + 'apps_username':apps_username, + 'apps_password':apps_password, + 'apps_domain':apps_domain} + test_runner.RunAllTests() + +def GetValuesForTestSettingsAndRunAllTests(): + username = '' + password = '' + spreadsheet_key = '' + worksheet_key = '' + apps_domain = '' + apps_username = '' + apps_password = '' + + print ('NOTE: Please run these tests only with a test 
account. ' + 'The tests may delete or update your data.') + try: + opts, args = getopt.getopt(sys.argv[1:], '', ['username=', 'password=', + 'ss_key=', 'ws_key=', + 'apps_username=', + 'apps_password=', + 'apps_domain=']) + for o, a in opts: + if o == '--username': + username = a + elif o == '--password': + password = a + elif o == '--ss_key': + spreadsheet_key = a + elif o == '--ws_key': + worksheet_key = a + elif o == '--apps_username': + apps_username = a + elif o == '--apps_password': + apps_password = a + elif o == '--apps_domain': + apps_domain = a + except getopt.GetoptError: + pass + + if username == '' and password == '': + print ('Missing --user and --pw command line arguments, ' + 'prompting for credentials.') + if username == '': + username = raw_input('Please enter your username: ') + if password == '': + password = getpass.getpass() + if spreadsheet_key == '': + spreadsheet_key = raw_input( + 'Please enter the key for the test spreadsheet: ') + if worksheet_key == '': + worksheet_key = raw_input( + 'Please enter the id for the worksheet to be edited: ') + if apps_username == '': + apps_username = raw_input('Please enter your Google Apps admin username: ') + if apps_password == '': + apps_password = getpass.getpass() + if apps_domain == '': + apps_domain = raw_input('Please enter your Google Apps domain: ') + RunAllTests(username, password, spreadsheet_key, worksheet_key, + apps_username, apps_password, apps_domain) + +if __name__ == '__main__': + GetValuesForTestSettingsAndRunAllTests() diff --git a/gdata.py-1.2.3/tests/testimage.jpg b/gdata.py-1.2.3/tests/testimage.jpg new file mode 100755 index 0000000..165db62 Binary files /dev/null and b/gdata.py-1.2.3/tests/testimage.jpg differ diff --git a/home/__init__.pyc b/home/__init__.pyc new file mode 100644 index 0000000..e0c24c2 Binary files /dev/null and b/home/__init__.pyc differ diff --git a/home/views.py b/home/views.py index 832412b..a0b6b07 100644 --- a/home/views.py +++ b/home/views.py @@ -4,5 
+4,4 @@ @render_to('home/index.html') def index(request): notifications = Notification.objects.order_by('-updated_at')[:10] - return { 'notifications': notifications } diff --git a/home/views.pyc b/home/views.pyc new file mode 100644 index 0000000..3dc5132 Binary files /dev/null and b/home/views.pyc differ diff --git a/recommendation/__init__.py b/recommendation/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/recommendation/models.py b/recommendation/models.py new file mode 100644 index 0000000..71a8362 --- /dev/null +++ b/recommendation/models.py @@ -0,0 +1,3 @@ +from django.db import models + +# Create your models here. diff --git a/recommendation/templates/recommentation/index.html b/recommendation/templates/recommentation/index.html new file mode 100644 index 0000000..6292e81 --- /dev/null +++ b/recommendation/templates/recommentation/index.html @@ -0,0 +1,23 @@ +{% extends 'layout.html' %} + +{% block body_class %}recommendation{% endblock %} +{% block current_tab %}recommendation{% endblock %} + +{% block main_content %} +

    成员推荐书籍

    +
    + {% for book in reco %} +
    + + +
    作者: + {% for author in book.author %} + {{ author.name.text }}
    + {% endfor %} +
    +
    + {% endfor %} + +
    + +{% endblock main_content %} diff --git a/recommendation/templates/recommentation/index.html~ b/recommendation/templates/recommentation/index.html~ new file mode 100644 index 0000000..dfa9f85 --- /dev/null +++ b/recommendation/templates/recommentation/index.html~ @@ -0,0 +1,19 @@ +{% extends 'layout.html' %} + +{% block main_content %} +

    实验室推荐书籍

    +
    + {% for book in reco %} +
    + + +
    作者: + {% for author in book.author %} + {{ author.name.text }}
    + {% endfor %} +
    +
    + {% endfor %} + +
    +{% endblock main_content %} diff --git a/recommendation/tests.py b/recommendation/tests.py new file mode 100644 index 0000000..501deb7 --- /dev/null +++ b/recommendation/tests.py @@ -0,0 +1,16 @@ +""" +This file demonstrates writing tests using the unittest module. These will pass +when you run "manage.py test". + +Replace this with more appropriate tests for your application. +""" + +from django.test import TestCase + + +class SimpleTest(TestCase): + def test_basic_addition(self): + """ + Tests that 1 + 1 always equals 2. + """ + self.assertEqual(1 + 1, 2) diff --git a/recommendation/urls.py b/recommendation/urls.py new file mode 100644 index 0000000..5668312 --- /dev/null +++ b/recommendation/urls.py @@ -0,0 +1,6 @@ +from django.conf.urls.defaults import patterns, url + +urlpatterns = patterns('recommendation.views', + url(r'^$', 'index', name='recommendation_index'), + ) + diff --git a/recommendation/views.py b/recommendation/views.py new file mode 100644 index 0000000..0536a7a --- /dev/null +++ b/recommendation/views.py @@ -0,0 +1,42 @@ +# Create your views here. 
+from annoying.decorators import render_to +from django.template import Context +from contacts.models import Contact +try: + from douban.service import DoubanService + from douban.client import OAuthClient +except ImportError: + print 'please install douban-python' + sys.exit(0) + +HOST = 'http://www.douban.com' +API_KEY = "0ff1b8ce70b305ab2fd52a6b52191101" +SECRET = "47c2cb706c4ec51d" +reco_books = [] +reco_names = [] +service = DoubanService(API_KEY, SECRET) + +@render_to('recommentation/index.html') +def index(request): + get_reco() + return {'reco': reco_books}; + +def get_reco(): + member = Contact.objects.all() + for tmp in member: + feed_read = service.GetMyCollection('/people/%s/collection' % tmp.douban_id, 'book', 'IST','read') + get_book(feed_read) + feed_reading = service.GetMyCollection('/people/%s/collection' % tmp.douban_id, 'book', 'IST','reading') + get_book(feed_reading) + feed_wish = service.GetMyCollection('/people/%s/collection' % tmp.douban_id, 'book', 'IST','wish') + get_book(feed_wish) + +def get_book(feed): + for entry in feed.entry: + book_href = entry.link[1].href + book_href = book_href[21:] + bookfeed = service.GetBook(book_href) + book_name = bookfeed.title.text + if book_name not in reco_names: + reco_names.append(book_name) + reco_books.append(bookfeed) diff --git a/sass/.sass-cache/cc53b18d721900d70de8ee135769f49539be8f71/_utilities.scssc b/sass/.sass-cache/cc53b18d721900d70de8ee135769f49539be8f71/_utilities.scssc new file mode 100644 index 0000000..6bb0696 Binary files /dev/null and b/sass/.sass-cache/cc53b18d721900d70de8ee135769f49539be8f71/_utilities.scssc differ diff --git a/sass/.sass-cache/d0f0ca52c7a35286d306c6661da4caafb579a682/_reset.scssc b/sass/.sass-cache/d0f0ca52c7a35286d306c6661da4caafb579a682/_reset.scssc new file mode 100644 index 0000000..f996e7c Binary files /dev/null and b/sass/.sass-cache/d0f0ca52c7a35286d306c6661da4caafb579a682/_reset.scssc differ diff --git a/sass/assets/css/ie.css b/sass/assets/css/ie.css new 
file mode 100644 index 0000000..5cd5b6c --- /dev/null +++ b/sass/assets/css/ie.css @@ -0,0 +1,5 @@ +/* Welcome to Compass. Use this file to write IE specific override styles. + * Import this file using the following HTML or equivalent: + * */ diff --git a/sass/assets/css/print.css b/sass/assets/css/print.css new file mode 100644 index 0000000..b0e9e45 --- /dev/null +++ b/sass/assets/css/print.css @@ -0,0 +1,3 @@ +/* Welcome to Compass. Use this file to define print styles. + * Import this file using the following HTML or equivalent: + * */ diff --git a/sass/assets/css/screen.css b/sass/assets/css/screen.css new file mode 100644 index 0000000..51a69f2 --- /dev/null +++ b/sass/assets/css/screen.css @@ -0,0 +1,63 @@ +/* Welcome to Compass. + * In this file you should write your main styles. (or centralize your imports) + * Import this file using the following HTML or equivalent: + * */ +/* line 14, ../../../../../../../../usr/share/compass/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ +html, body, div, span, applet, object, iframe, +h1, h2, h3, h4, h5, h6, p, blockquote, pre, +a, abbr, acronym, address, big, cite, code, +del, dfn, em, font, img, ins, kbd, q, s, samp, +small, strike, strong, sub, sup, tt, var, +dl, dt, dd, ol, ul, li, +fieldset, form, label, legend, +table, caption, tbody, tfoot, thead, tr, th, td { + margin: 0; + padding: 0; + border: 0; + outline: 0; + font-weight: inherit; + font-style: inherit; + font-size: 100%; + font-family: inherit; + vertical-align: baseline; +} + +/* line 17, ../../../../../../../../usr/share/compass/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ +body { + line-height: 1; + color: black; + background: white; +} + +/* line 19, ../../../../../../../../usr/share/compass/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ +ol, ul { + list-style: none; +} + +/* line 21, ../../../../../../../../usr/share/compass/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ +table { + 
border-collapse: separate; + border-spacing: 0; + vertical-align: middle; +} + +/* line 23, ../../../../../../../../usr/share/compass/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ +caption, th, td { + text-align: left; + font-weight: normal; + vertical-align: middle; +} + +/* line 25, ../../../../../../../../usr/share/compass/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ +q, blockquote { + quotes: "" ""; +} +/* line 96, ../../../../../../../../usr/share/compass/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ +q:before, q:after, blockquote:before, blockquote:after { + content: ""; +} + +/* line 27, ../../../../../../../../usr/share/compass/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ +a img { + border: none; +} diff --git a/sass/screen.sass b/sass/screen.sass index 2895995..dbbb2de 100644 --- a/sass/screen.sass +++ b/sass/screen.sass @@ -82,7 +82,7 @@ thead th h2 color: #e40 - +single-text-shadow(#999, 1px, 1px, 0px) + +text-shadow(#999, 1px, 1px, 0px) #page +container @@ -93,7 +93,7 @@ h2 #logo h1 +column(14, true) - +single-text-shadow(#aaa, 0, 0, 5px) + +text-shadow(#aaa, 0, 0, 5px) #navigation +column(14, true) diff --git a/settings.py b/settings.py index 4c6ba21..5861a39 100644 --- a/settings.py +++ b/settings.py @@ -11,10 +11,10 @@ DATABASES = { 'default': { - 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'. - 'NAME': relative_to_project_root('dev.db'), - 'USER': '', # Not used with sqlite3. - 'PASSWORD': '', # Not used with sqlite3. + 'ENGINE': 'mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'. + 'NAME': 'istweb_db', + 'USER': 'istweb_proj', # Not used with sqlite3. + 'PASSWORD': '123456', # Not used with sqlite3. 'HOST': '', # Set to empty string for localhost. Not used with sqlite3. 'PORT': '', # Set to empty string for default. Not used with sqlite3. 
} @@ -85,6 +85,7 @@ 'users', 'contacts', 'notification', + 'recommendation', ) ROOT_URLCONF = 'istweb.urls' diff --git a/static/css/ie.css b/static/css/ie.css index e3e39d9..5cd5b6c 100644 --- a/static/css/ie.css +++ b/static/css/ie.css @@ -1,100 +1,5 @@ -/* line 34, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_ie.scss */ -body { - text-align: center; -} -/* line 48, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_ie.scss */ -* html body legend { - margin: 0px -8px 16px 0; - padding: 0; -} -/* line 52, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_ie.scss */ -html > body p code { - *white-space: normal; -} - -/* line 67, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_ie.scss */ -.container { - text-align: left; -} - -/* line 69, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_ie.scss */ -sup { - vertical-align: text-top; -} - -/* line 71, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_ie.scss */ -sub { - vertical-align: text-bottom; -} - -/* line 73, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_ie.scss */ -hr { - margin: -8px auto 11px; -} - -/* line 75, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_ie.scss */ -img { - -ms-interpolation-mode: bicubic; -} - -/* line 77, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_ie.scss */ -fieldset { - padding-top: 0; -} - -/* line 79, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_ie.scss */ -textarea { - overflow: auto; -} - -/* line 82, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_ie.scss */ -input.text { - margin: 0.5em 0; - background-color: white; - border: 1px solid #bbbbbb; -} -/* line 86, 
../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_ie.scss */ -input.text:focus { - border: 1px solid #666666; -} -/* line 88, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_ie.scss */ -input.title { - margin: 0.5em 0; - background-color: white; - border: 1px solid #bbbbbb; -} -/* line 92, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_ie.scss */ -input.title:focus { - border: 1px solid #666666; -} -/* line 94, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_ie.scss */ -input.checkbox { - position: relative; - top: 0.25em; -} -/* line 97, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_ie.scss */ -input.radio { - position: relative; - top: 0.25em; -} -/* line 100, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_ie.scss */ -input.button { - position: relative; - top: 0.25em; -} - -/* line 103, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_ie.scss */ -textarea { - margin: 0.5em 0; -} - -/* line 105, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_ie.scss */ -select { - margin: 0.5em 0; -} - -/* line 107, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_ie.scss */ -button { - position: relative; - top: 0.25em; -} +/* Welcome to Compass. Use this file to write IE specific override styles. + * Import this file using the following HTML or equivalent: + * */ diff --git a/static/css/print.css b/static/css/print.css new file mode 100644 index 0000000..b0e9e45 --- /dev/null +++ b/static/css/print.css @@ -0,0 +1,3 @@ +/* Welcome to Compass. Use this file to define print styles. 
+ * Import this file using the following HTML or equivalent: + * */ diff --git a/static/css/screen.css b/static/css/screen.css index 4f3b12a..51a69f2 100644 --- a/static/css/screen.css +++ b/static/css/screen.css @@ -1,9 +1,20 @@ -@charset "UTF-8"; -/* line 4, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/reset/_utilities.scss */ -html, body { +/* Welcome to Compass. + * In this file you should write your main styles. (or centralize your imports) + * Import this file using the following HTML or equivalent: + * */ +/* line 14, ../../../../../../../../usr/share/compass/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ +html, body, div, span, applet, object, iframe, +h1, h2, h3, h4, h5, h6, p, blockquote, pre, +a, abbr, acronym, address, big, cite, code, +del, dfn, em, font, img, ins, kbd, q, s, samp, +small, strike, strong, sub, sup, tt, var, +dl, dt, dd, ol, ul, li, +fieldset, form, label, legend, +table, caption, tbody, tfoot, thead, tr, th, td { margin: 0; padding: 0; border: 0; + outline: 0; font-weight: inherit; font-style: inherit; font-size: 100%; @@ -11,752 +22,42 @@ html, body { vertical-align: baseline; } -/* line 6, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/reset/_utilities.scss */ -html { - font-size: 100.01%; -} - -/* line 16, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/reset/_utilities.scss */ -div, span, object, iframe, h1, h2, h3, h4, h5, h6, p, -pre, a, abbr, acronym, address, code, del, dfn, em, img, -dl, dt, dd, ol, ul, li, fieldset, form, label, legend, caption, tbody, tfoot, thead, tr { - margin: 0; - padding: 0; - border: 0; - font-weight: inherit; - font-style: inherit; - font-size: 100%; - font-family: inherit; - vertical-align: baseline; +/* line 17, ../../../../../../../../usr/share/compass/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ +body { + line-height: 1; + color: black; + background: white; } -/* 
line 18, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/reset/_utilities.scss */ -blockquote, q { - margin: 0; - padding: 0; - border: 0; - font-weight: inherit; - font-style: inherit; - font-size: 100%; - font-family: inherit; - vertical-align: baseline; - quotes: "" ""; -} -/* line 45, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/reset/_utilities.scss */ -blockquote:before, blockquote:after, q:before, q:after { - content: ""; +/* line 19, ../../../../../../../../usr/share/compass/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ +ol, ul { + list-style: none; } -/* line 20, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/reset/_utilities.scss */ -th, td, caption { - margin: 0; - padding: 0; - border: 0; - font-weight: inherit; - font-style: inherit; - font-size: 100%; - font-family: inherit; - vertical-align: baseline; - text-align: left; - font-weight: normal; - vertical-align: middle; -} - -/* line 22, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/reset/_utilities.scss */ +/* line 21, ../../../../../../../../usr/share/compass/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ table { - margin: 0; - padding: 0; - border: 0; - font-weight: inherit; - font-style: inherit; - font-size: 100%; - font-family: inherit; - vertical-align: baseline; border-collapse: separate; border-spacing: 0; vertical-align: middle; } -/* line 24, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/reset/_utilities.scss */ -a img { - border: none; -} - -/* line 39, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -body { - line-height: 1.5; - font-family: Helvetica, Arial, "Microsoft Yahei", "WenQuanYi Micro Hei", STXihei, SimHei, sans-serif; - color: #333333; - font-size: 75%; -} - -/* line 65, 
../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -h1, h2, h3, h4, h5, h6 { +/* line 23, ../../../../../../../../usr/share/compass/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ +caption, th, td { + text-align: left; font-weight: normal; - color: #222222; -} -/* line 66, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -h1 img, h2 img, h3 img, h4 img, h5 img, h6 img { - margin: 0; -} - -/* line 67, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -h1 { - font-size: 3em; - line-height: 1; - margin-bottom: 0.50em; -} - -/* line 68, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -h2 { - font-size: 2em; - margin-bottom: 0.75em; -} - -/* line 69, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -h3 { - font-size: 1.5em; - line-height: 1; - margin-bottom: 1.00em; -} - -/* line 70, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -h4 { - font-size: 1.2em; - line-height: 1.25; - margin-bottom: 1.25em; -} - -/* line 71, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -h5 { - font-size: 1em; - font-weight: bold; - margin-bottom: 1.50em; -} - -/* line 72, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -h6 { - font-size: 1em; - font-weight: bold; -} - -/* line 73, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -p { - margin: 0 0 1.5em; -} -/* line 74, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -p img.left { - display: inline; - float: left; - margin: 1.5em 1.5em 1.5em 0; - padding: 0; -} -/* line 75, 
../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -p img.right { - display: inline; - float: right; - margin: 1.5em 0 1.5em 1.5em; - padding: 0; -} - -/* line 77, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -a { - text-decoration: underline; - color: #000099; -} -/* line 18, ../../../../../../usr/share/compass/frameworks/compass/stylesheets/compass/utilities/links/_link-colors.scss */ -a:visited { - color: #000066; -} -/* line 21, ../../../../../../usr/share/compass/frameworks/compass/stylesheets/compass/utilities/links/_link-colors.scss */ -a:focus { - color: black; -} -/* line 24, ../../../../../../usr/share/compass/frameworks/compass/stylesheets/compass/utilities/links/_link-colors.scss */ -a:hover { - color: black; -} -/* line 27, ../../../../../../usr/share/compass/frameworks/compass/stylesheets/compass/utilities/links/_link-colors.scss */ -a:active { - color: #cc0099; -} - -/* line 78, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -blockquote { - margin: 1.5em; - color: #666666; - font-style: italic; -} - -/* line 79, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -strong { - font-weight: bold; -} - -/* line 80, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -em { - font-style: italic; -} - -/* line 81, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -dfn { - font-style: italic; - font-weight: bold; -} - -/* line 82, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -sup, sub { - line-height: 0; -} - -/* line 83, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -abbr, acronym { - border-bottom: 1px dotted #666666; -} - -/* line 84, 
../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -address { - margin: 0 0 1.5em; - font-style: italic; -} - -/* line 85, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -del { - color: #666666; -} - -/* line 86, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -pre { - margin: 1.5em 0; - white-space: pre; -} - -/* line 87, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -pre, code, tt { - font: 1em "andale mono", "lucida console", monospace; - line-height: 1.5; -} - -/* line 88, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -li ul, li ol { - margin: 0; -} - -/* line 89, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -ul, ol { - margin: 0 1.5em 1.5em 0; - padding-left: 3.333em; -} - -/* line 90, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -ul { - list-style-type: disc; -} - -/* line 91, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -ol { - list-style-type: decimal; -} - -/* line 92, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -dl { - margin: 0 0 1.5em 0; -} -/* line 93, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -dl dt { - font-weight: bold; -} - -/* line 94, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -dd { - margin-left: 1.5em; -} - -/* line 95, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -table { - margin-bottom: 1.4em; - width: 100%; -} - -/* line 96, 
../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -th { - font-weight: bold; -} - -/* line 97, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -thead th { - background: #c3d9ff; -} - -/* line 98, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -th, td, caption { - padding: 4px 10px 4px 5px; -} - -/* line 99, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -tr.even td { - background: #e5ecf9; -} - -/* line 100, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -tfoot { - font-style: italic; -} - -/* line 101, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -caption { - background: #eeeeee; -} - -/* line 102, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -.quiet { - color: #666666; -} - -/* line 103, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_typography.scss */ -.loud { - color: #111111; -} - -/* line 4, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_interaction.scss */ -.error { - padding: 0.8em; - margin-bottom: 1em; - border: 2px solid #dddddd; - background: #fbe3e4; - color: #8a1f11; - border-color: #fbc2c4; -} -/* line 29, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_interaction.scss */ -.error a { - color: #8a1f11; -} - -/* line 6, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_interaction.scss */ -.notice { - padding: 0.8em; - margin-bottom: 1em; - border: 2px solid #dddddd; - background: #fff6bf; - color: #514721; - border-color: #ffd324; -} -/* line 37, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_interaction.scss */ -.notice a { - color: 
#514721; -} - -/* line 8, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_interaction.scss */ -.success { - padding: 0.8em; - margin-bottom: 1em; - border: 2px solid #dddddd; - background: #e6efc2; - color: #264409; - border-color: #c6d880; -} -/* line 45, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_interaction.scss */ -.success a { - color: #264409; -} - -/* line 10, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_interaction.scss */ -.hide { - display: none; -} - -/* line 12, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_interaction.scss */ -.highlight { - background: yellow; -} - -/* line 14, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_interaction.scss */ -.added { - background: #006600; - color: white; -} - -/* line 16, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_interaction.scss */ -.removed { - background: #990000; - color: white; -} - -/* line 9, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_utilities.scss */ -.clear { - clear: both; -} - -/* line 12, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_utilities.scss */ -.nowrap { - white-space: nowrap; -} - -/* line 16, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_utilities.scss */ -.clearfix { - overflow: hidden; - *zoom: 1; -} - -/* line 18, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_utilities.scss */ -.small { - font-size: 0.8em; - margin-bottom: 1.875em; - line-height: 1.875em; -} - -/* line 22, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_utilities.scss */ -.large { - font-size: 1.2em; - line-height: 2.5em; - margin-bottom: 1.25em; -} - -/* line 26, 
../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_utilities.scss */ -.first { - margin-left: 0; - padding-left: 0; -} - -/* line 29, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_utilities.scss */ -.last { - margin-right: 0; - padding-right: 0; -} - -/* line 32, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_utilities.scss */ -.top { - margin-top: 0; - padding-top: 0; -} - -/* line 35, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_utilities.scss */ -.bottom { - margin-bottom: 0; - padding-bottom: 0; -} - -/* line 17, ../sass/screen.sass */ -body { - background: url(../images/afterdark.png); -} - -/* line 20, ../sass/screen.sass */ -a { - text-decoration: none; + vertical-align: middle; } -/* line 24, ../sass/screen.sass */ -dl dt { - display: inline; - float: left; - margin-right: 20px; - width: 50px; - width: 160px; - font-size: 16px; - font-weight: bold; - text-align: justify; - clear: left; -} -/* line 138, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_grid.scss */ -* html dl dt { - overflow-x: hidden; -} -/* line 32, ../sass/screen.sass */ -dl dd { - display: inline; - float: left; - margin-right: 0; - width: 330px; - font-size: 14px; - margin: 2px 0 0 0; - width: 280px; -} -/* line 138, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_grid.scss */ -* html dl dd { - overflow-x: hidden; +/* line 25, ../../../../../../../../usr/share/compass/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ +q, blockquote { + quotes: "" ""; } - -/* line 38, ../sass/screen.sass */ -ul, ol, p { - margin: 0; - padding: 0; +/* line 96, ../../../../../../../../usr/share/compass/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ +q:before, q:after, blockquote:before, blockquote:after { + content: ""; } -/* line 42, ../sass/screen.sass */ -.button-like, form 
input[type=submit], body.contacts-me #actions a { - padding: 8px 14px; - -moz-border-radius: 5px; - -webkit-border-radius: 5px; - -o-border-radius: 5px; - -ms-border-radius: 5px; - -khtml-border-radius: 5px; - border-radius: 5px; +/* line 27, ../../../../../../../../usr/share/compass/frameworks/compass/stylesheets/compass/reset/_utilities.scss */ +a img { border: none; - background-color: #2459a0; - color: white; -} -/* line 49, ../sass/screen.sass */ -.button-like:hover, form input[type=submit]:hover, body.contacts-me #actions a:hover { - cursor: pointer; - background-color: #cccccc; - color: #333333; -} - -/* line 55, ../sass/screen.sass */ -form label, form input[type=text], form input[type=password], form input[type=submit] { - display: block; - font-size: 1.2em; -} -/* line 58, ../sass/screen.sass */ -form input[type=text], form input[type=password] { - width: 240px; - padding: 7px; - border: 1px solid #cccccc; - outline: none; -} -/* line 64, ../sass/screen.sass */ -form input[type=text]:focus, form input[type=password]:focus { - border: 1px solid #ee4400; -} -/* line 66, ../sass/screen.sass */ -form input[type=submit] { - margin-top: 10px; -} -/* line 69, ../sass/screen.sass */ -form .errorlist { - margin-bottom: 10px; - list-style-type: none; - color: red; -} - -/* line 74, ../sass/screen.sass */ -thead th { - background-color: #7ca7e1; - color: white; -} -/* line 78, ../sass/screen.sass */ -thead th:first-child { - -moz-border-radius-topleft: 5px; - -webkit-border-top-left-radius: 5px; - -o-border-top-left-radius: 5px; - -ms-border-top-left-radius: 5px; - -khtml-border-top-left-radius: 5px; - border-top-left-radius: 5px; - -moz-border-radius-bottomleft: 5px; - -webkit-border-bottom-left-radius: 5px; - -o-border-bottom-left-radius: 5px; - -ms-border-bottom-left-radius: 5px; - -khtml-border-bottom-left-radius: 5px; - border-bottom-left-radius: 5px; -} -/* line 80, ../sass/screen.sass */ -thead th:last-child { - -moz-border-radius-topright: 5px; - 
-webkit-border-top-right-radius: 5px; - -o-border-top-right-radius: 5px; - -ms-border-top-right-radius: 5px; - -khtml-border-top-right-radius: 5px; - border-top-right-radius: 5px; - -moz-border-radius-bottomright: 5px; - -webkit-border-bottom-right-radius: 5px; - -o-border-bottom-right-radius: 5px; - -ms-border-bottom-right-radius: 5px; - -khtml-border-bottom-right-radius: 5px; - border-bottom-right-radius: 5px; -} - -/* line 83, ../sass/screen.sass */ -h2 { - color: #ee4400; - text-shadow: #999999 1px 1px 0px; -} - -/* line 87, ../sass/screen.sass */ -#page { - width: 960px; - margin: 0 auto; - overflow: hidden; - *zoom: 1; - margin-top: 20px; - padding: 30px 40px; - background-color: white; - -moz-border-radius: 10px; - -webkit-border-radius: 10px; - -o-border-radius: 10px; - -ms-border-radius: 10px; - -khtml-border-radius: 10px; - border-radius: 10px; -} - -/* line 94, ../sass/screen.sass */ -#logo h1 { - display: inline; - float: left; - margin-right: 0; - width: 960px; - text-shadow: #aaaaaa 0 0 5px; -} -/* line 138, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_grid.scss */ -* html #logo h1 { - overflow-x: hidden; -} - -/* line 98, ../sass/screen.sass */ -#navigation { - display: inline; - float: left; - margin-right: 0; - width: 960px; - margin: 15px 0 30px 0; - list-style-type: none; -} -/* line 138, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_grid.scss */ -* html #navigation { - overflow-x: hidden; -} -/* line 103, ../sass/screen.sass */ -#navigation li { - float: left; - width: 180px; - margin-right: 2px; -} -/* line 108, ../sass/screen.sass */ -#navigation li a { - display: block; - padding: 5px; - font-size: 16px; - line-height: 1.8; - text-align: center; - background-color: #2459a0; - color: white; -} -/* line 117, ../sass/screen.sass */ -#navigation li a:hover { - background-color: #cccccc; - color: #333333; -} -/* line 121, ../sass/screen.sass */ -#navigation li:first-child a { - 
-moz-border-radius-topleft: 10px; - -webkit-border-top-left-radius: 10px; - -o-border-top-left-radius: 10px; - -ms-border-top-left-radius: 10px; - -khtml-border-top-left-radius: 10px; - border-top-left-radius: 10px; - -moz-border-radius-bottomleft: 10px; - -webkit-border-bottom-left-radius: 10px; - -o-border-bottom-left-radius: 10px; - -ms-border-bottom-left-radius: 10px; - -khtml-border-bottom-left-radius: 10px; - border-bottom-left-radius: 10px; -} -/* line 123, ../sass/screen.sass */ -#navigation li:last-child a { - -moz-border-radius-topright: 10px; - -webkit-border-top-right-radius: 10px; - -o-border-top-right-radius: 10px; - -ms-border-top-right-radius: 10px; - -khtml-border-top-right-radius: 10px; - border-top-right-radius: 10px; - -moz-border-radius-bottomright: 10px; - -webkit-border-bottom-right-radius: 10px; - -o-border-bottom-right-radius: 10px; - -ms-border-bottom-right-radius: 10px; - -khtml-border-bottom-right-radius: 10px; - border-bottom-right-radius: 10px; -} - -/* line 126, ../sass/screen.sass */ -#flash-messages { - clear: both; -} - -/* line 129, ../sass/screen.sass */ -#main { - display: inline; - float: left; - margin-right: 0; - width: 960px; - margin-bottom: 20px; -} -/* line 138, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_grid.scss */ -* html #main { - overflow-x: hidden; -} - -/* line 133, ../sass/screen.sass */ -#main-content { - display: inline; - float: left; - margin-right: 20px; - width: 666px; - padding-right: 8px; - padding-right: 8px; - margin-right: 10px; - border-right: 2px solid #dddddd; - border-right: 2px dotted #dddddd; -} -/* line 138, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_grid.scss */ -* html #main-content { - overflow-x: hidden; -} - -/* line 139, ../sass/screen.sass */ -#main-sidebar { - padding-left: 8px; - display: inline; - float: left; - margin-right: 0; - width: 246px; -} -/* line 138, 
../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_grid.scss */ -* html #main-sidebar { - overflow-x: hidden; -} - -/* line 143, ../sass/screen.sass */ -#footer { - display: inline; - float: left; - margin-right: 0; - width: 960px; - padding-top: 10px; - text-align: center; - color: #999999; - border-top: 2px dotted #dddddd; -} -/* line 138, ../../../../../../usr/share/compass/frameworks/blueprint/stylesheets/blueprint/_grid.scss */ -* html #footer { - overflow-x: hidden; -} - -/* line 150, ../sass/screen.sass */ -#contacts-list { - list-style-type: none; - font-size: 16px; -} - -/* line 156, ../sass/screen.sass */ -body.homepage .notification h3 { - margin-bottom: 8px; -} -/* line 158, ../sass/screen.sass */ -body.homepage .notification .date { - color: #999999; - font-size: 0.9em; -} -/* line 161, ../sass/screen.sass */ -body.homepage .notification .content { - margin-top: 10px; - font-size: 1.1em; -} - -/* line 166, ../sass/screen.sass */ -body.contacts-me #actions { - margin-top: 15px; -} -/* line 168, ../sass/screen.sass */ -body.contacts-me #actions a { - margin-right: 15px; } diff --git a/templates/layout.html b/templates/layout.html index 9a624ec..2622c89 100644 --- a/templates/layout.html +++ b/templates/layout.html @@ -21,7 +21,7 @@ {% block header %} {% endblock header %} @@ -30,6 +30,7 @@
  • 首页
  • 通讯录
  • 个人资料
  • +
  • 推荐书籍
  • 关于
  • {% endblock %} diff --git a/uploads/contacts/admin/index.html b/uploads/contacts/admin/index.html new file mode 100755 index 0000000..af2e48c --- /dev/null +++ b/uploads/contacts/admin/index.html @@ -0,0 +1 @@ +This is a test for *ⵎ兿!@#$���೿῿ diff --git a/uploads/contacts/admin/index_1.html b/uploads/contacts/admin/index_1.html new file mode 100755 index 0000000..af2e48c --- /dev/null +++ b/uploads/contacts/admin/index_1.html @@ -0,0 +1 @@ +This is a test for *ⵎ兿!@#$���೿῿ diff --git a/uploads/contacts/cosmoslx/index.html b/uploads/contacts/cosmoslx/index.html new file mode 100755 index 0000000..af2e48c --- /dev/null +++ b/uploads/contacts/cosmoslx/index.html @@ -0,0 +1 @@ +This is a test for *ⵎ兿!@#$���೿῿ diff --git a/uploads/contacts/xqunix/index.html b/uploads/contacts/xqunix/index.html new file mode 100755 index 0000000..af2e48c --- /dev/null +++ b/uploads/contacts/xqunix/index.html @@ -0,0 +1 @@ +This is a test for *ⵎ兿!@#$���೿῿ diff --git a/urls.py b/urls.py index 4ab2158..9027fa2 100644 --- a/urls.py +++ b/urls.py @@ -10,6 +10,7 @@ url(r'^login/$', 'django.contrib.auth.views.login', { 'template_name': 'users/login.html' }), url(r'^logout/$', 'django.contrib.auth.views.logout_then_login'), + url(r'^recommendation/', include('istweb.recommendation.urls')), url(r'^admin/auth/user/bulkadd/$', 'users.views.bulkadd', name='users_bulkadd'), url(r'^admin/', include(admin.site.urls)), diff --git a/users/Guardfile b/users/Guardfile new file mode 100644 index 0000000..4d63ab7 --- /dev/null +++ b/users/Guardfile @@ -0,0 +1,8 @@ +guard 'compass', :configuration_file => 'sass/config.rb' do + watch(%r{sass/.+\.sass}) +end + +guard 'livereload' do + watch(%r{.+\.html}) + watch(%r{assets/css/.+\.css}) +end diff --git a/users/README.txt b/users/README.txt new file mode 100644 index 0000000..9ec6046 --- /dev/null +++ b/users/README.txt @@ -0,0 +1,14 @@ +# 部署方法 + +1. pip install -r requirements.txt 安装依赖的包。 +2. 配置数据库(步骤略) +3. python manage.py syncdb +4. 
python manage.py migrate +5. python manage.py collectstatic +6. 配置 nginx + gunicorn (步骤略) + +## 补充说明 + +1. 本项目的 css 文件全部由 sass 源文件编译生成,如需修改,建议修改 sass 文件后重新编译。 +2. Guardfile 文件主要用于 guard-livereload(开发期间使用),部署时不需要用到这个文件。 +3. 请保证当前目录以及 uploads/ 目录可写,因为文件上传时会上传到 uploads/。 diff --git a/users/assets/images/afterdark.png b/users/assets/images/afterdark.png new file mode 100644 index 0000000..7468936 Binary files /dev/null and b/users/assets/images/afterdark.png differ diff --git a/users/compilecss b/users/compilecss new file mode 100644 index 0000000..d9f2ff8 --- /dev/null +++ b/users/compilecss @@ -0,0 +1,2 @@ +#! /bin/bash +compass compile -c sass/config.rb diff --git a/users/contacts/__init__.py b/users/contacts/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/contacts/__init__.pyc b/users/contacts/__init__.pyc new file mode 100644 index 0000000..bf5519c Binary files /dev/null and b/users/contacts/__init__.pyc differ diff --git a/users/contacts/extractor.py b/users/contacts/extractor.py new file mode 100644 index 0000000..d787f3d --- /dev/null +++ b/users/contacts/extractor.py @@ -0,0 +1,74 @@ +# encoding: utf-8 +# source: http://github.com/dziegler/excerpt_extractor/tree/master + +from BeautifulSoup import BeautifulSoup, Comment, SoupStrainer +import re + +CONTACT_INFO_PATTERNS = { + 'fullname': ( + re.compile(u'姓名[::]?\s*(.*)'), + re.compile(u'全名[::]?\s*(.*)'), + re.compile(r'fullname[:]?\s*(.*)', re.IGNORECASE) + ), + + 'email': ( + re.compile(r'([\w\-\.]+@\w[\w\-]+\.+[\w\-]+)'), + ), + + 'phone': ( + re.compile(u'电话[::]?\s*(\d+)'), + re.compile(u'手机[::]?\s*(\d+)'), + re.compile(r'phone[::]?\s*(\d+)', re.IGNORECASE), + re.compile(r'mobile[::]?\s*(\d+)', re.IGNORECASE), + ), + + 'qq': ( + re.compile(r'qq[::]?\s*(\d+)', re.IGNORECASE), + ), +} + + +def cleanSoup(soup): + # get rid of javascript, noscript and css + [[tree.extract() for tree in soup(elem)] for elem in ('script','noscript','style')] + # get rid of doctype + subtree = 
soup.findAll(text=re.compile("DOCTYPE")) + [tree.extract() for tree in subtree] + # get rid of comments + comments = soup.findAll(text=lambda text:isinstance(text,Comment)) + [comment.extract() for comment in comments] + return soup + + +def removeHeaders(soup): + [[tree.extract() for tree in soup(elem)] for elem in ('h1','h2','h3','h4','h5','h6')] + return soup + + +def html2text(content): + soup = removeHeaders(cleanSoup(BeautifulSoup(content, parseOnlyThese=SoupStrainer('body')))) + text = ''.join(soup.findAll(text=True)) + return text + + +def extract_contact_info_from_html(html): + text = html2text(html) + contact = {} + + for field, patterns in CONTACT_INFO_PATTERNS.iteritems(): + for pattern in patterns: + results = pattern.findall(text) + + if len(results) > 0: + contact[field] = results[0] + break + + return contact + + +if __name__ == '__main__': + html = open('./test.html').read() + c = extract_contact_info_from_html(html) + + for key, value in c.iteritems(): + print('%s: %s' % (key, value)) diff --git a/users/contacts/extractor.pyc b/users/contacts/extractor.pyc new file mode 100644 index 0000000..88919ef Binary files /dev/null and b/users/contacts/extractor.pyc differ diff --git a/users/contacts/migrations/0001_initial.py b/users/contacts/migrations/0001_initial.py new file mode 100644 index 0000000..e8a99ba --- /dev/null +++ b/users/contacts/migrations/0001_initial.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +import datetime +from south.db import db +from south.v2 import SchemaMigration +from django.db import models + + +class Migration(SchemaMigration): + + def forwards(self, orm): + # Adding model 'Contact' + db.create_table('contacts_contact', ( + ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), + ('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)), + ('html', self.gf('django.db.models.fields.files.FileField')(max_length=100)), + )) + db.send_create_signal('contacts', 
['Contact']) + + def backwards(self, orm): + # Deleting model 'Contact' + db.delete_table('contacts_contact') + + models = { + 'auth.group': { + 'Meta': {'object_name': 'Group'}, + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), + 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) + }, + 'auth.permission': { + 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, + 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) + }, + 'auth.user': { + 'Meta': {'object_name': 'User'}, + 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), + 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), + 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), + 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), + 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), + 'last_name': 
('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), + 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), + 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), + 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) + }, + 'contacts.contact': { + 'Meta': {'object_name': 'Contact'}, + 'html': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) + }, + 'contenttypes.contenttype': { + 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, + 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) + } + } + + complete_apps = ['contacts'] \ No newline at end of file diff --git a/users/contacts/migrations/0001_initial.pyc b/users/contacts/migrations/0001_initial.pyc new file mode 100644 index 0000000..af5bf80 Binary files /dev/null and b/users/contacts/migrations/0001_initial.pyc differ diff --git a/users/contacts/migrations/0002_auto__add_field_contact_fullname__add_field_contact_email__add_field_c.py b/users/contacts/migrations/0002_auto__add_field_contact_fullname__add_field_contact_email__add_field_c.py new file mode 100644 index 0000000..b8508b3 --- /dev/null +++ b/users/contacts/migrations/0002_auto__add_field_contact_fullname__add_field_contact_email__add_field_c.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +import datetime +from 
south.db import db +from south.v2 import SchemaMigration +from django.db import models + + +class Migration(SchemaMigration): + + def forwards(self, orm): + # Adding field 'Contact.fullname' + db.add_column('contacts_contact', 'fullname', + self.gf('django.db.models.fields.CharField')(default='', max_length=255), + keep_default=False) + + # Adding field 'Contact.email' + db.add_column('contacts_contact', 'email', + self.gf('django.db.models.fields.EmailField')(default='', max_length=75), + keep_default=False) + + # Adding field 'Contact.phone' + db.add_column('contacts_contact', 'phone', + self.gf('django.db.models.fields.CharField')(default='', max_length=15), + keep_default=False) + + # Adding field 'Contact.qq' + db.add_column('contacts_contact', 'qq', + self.gf('django.db.models.fields.CharField')(default='', max_length=15), + keep_default=False) + + def backwards(self, orm): + # Deleting field 'Contact.fullname' + db.delete_column('contacts_contact', 'fullname') + + # Deleting field 'Contact.email' + db.delete_column('contacts_contact', 'email') + + # Deleting field 'Contact.phone' + db.delete_column('contacts_contact', 'phone') + + # Deleting field 'Contact.qq' + db.delete_column('contacts_contact', 'qq') + + models = { + 'auth.group': { + 'Meta': {'object_name': 'Group'}, + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), + 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) + }, + 'auth.permission': { + 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, + 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': 
"orm['contenttypes.ContentType']"}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) + }, + 'auth.user': { + 'Meta': {'object_name': 'User'}, + 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), + 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), + 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), + 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), + 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), + 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), + 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), + 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), + 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), + 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) + }, + 'contacts.contact': { + 'Meta': {'object_name': 'Contact'}, + 'email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75'}), + 'fullname': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}), + 'html': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'phone': ('django.db.models.fields.CharField', [], 
{'default': "''", 'max_length': '15'}), + 'qq': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '15'}), + 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) + }, + 'contenttypes.contenttype': { + 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, + 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), + 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) + } + } + + complete_apps = ['contacts'] \ No newline at end of file diff --git a/users/contacts/migrations/0002_auto__add_field_contact_fullname__add_field_contact_email__add_field_c.pyc b/users/contacts/migrations/0002_auto__add_field_contact_fullname__add_field_contact_email__add_field_c.pyc new file mode 100644 index 0000000..3903b7a Binary files /dev/null and b/users/contacts/migrations/0002_auto__add_field_contact_fullname__add_field_contact_email__add_field_c.pyc differ diff --git a/users/contacts/migrations/__init__.py b/users/contacts/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/contacts/migrations/__init__.pyc b/users/contacts/migrations/__init__.pyc new file mode 100644 index 0000000..c3885a9 Binary files /dev/null and b/users/contacts/migrations/__init__.pyc differ diff --git a/users/contacts/models.py b/users/contacts/models.py new file mode 100644 index 0000000..f350d0b --- /dev/null +++ b/users/contacts/models.py @@ -0,0 +1,42 @@ +# encoding: utf-8 + +import os +from django.db import models +from django import forms +from django.contrib.auth.models import User +from django.conf import settings + +def handle_uploading(instance, filename): + username = instance.user.username + return 
os.path.join(settings.MEDIA_ROOT, 'contacts', username, 'index.html') + + +class ContactManager(models.Manager): + def by_user(self, user): + queryset = self.filter(user=user)[:1] + + if len(queryset) > 0: + return queryset[0] + else: + return None + + +class Contact(models.Model): + user = models.OneToOneField(User) + html = models.FileField(upload_to=handle_uploading) + fullname = models.CharField(max_length=255, default='', verbose_name=u'全名') + email = models.EmailField(default='', verbose_name=u'Email') + phone = models.CharField(max_length=15, default='', verbose_name=u'电话号码') + qq = models.CharField(max_length=15, default='', verbose_name=u'QQ 号码') + + objects = ContactManager() + + +class ContactUploadForm(forms.Form): + html = forms.FileField(label=u'个人页面 HTML 文件') + + +class ContactEditForm(forms.ModelForm): + class Meta: + model = Contact + fields = ('fullname', 'email', 'phone', 'qq') diff --git a/users/contacts/models.pyc b/users/contacts/models.pyc new file mode 100644 index 0000000..82e2075 Binary files /dev/null and b/users/contacts/models.pyc differ diff --git a/users/contacts/templates/contacts/edit.html b/users/contacts/templates/contacts/edit.html new file mode 100644 index 0000000..da0c0e3 --- /dev/null +++ b/users/contacts/templates/contacts/edit.html @@ -0,0 +1,11 @@ +{% extends 'layout.html' %} + +{% block page_title %}更新个人资料{% endblock %} +{% block current_tab %}contacts{% endblock %} + +{% block main %} +
    + {% include 'form.html' %} + +
    +{% endblock main %} diff --git a/users/contacts/templates/contacts/index.html b/users/contacts/templates/contacts/index.html new file mode 100644 index 0000000..e2b8c1c --- /dev/null +++ b/users/contacts/templates/contacts/index.html @@ -0,0 +1,31 @@ +{% extends 'layout.html' %} + +{% block page_title %}实验室通讯录{% endblock %} +{% block body_class %}contacts{% endblock %} +{% block current_tab %}contacts{% endblock %} + +{% block main %} + + + + + + + + + + + {% for contact in contacts %} + + + + + + + + {% endfor %} + +
    姓名Email电话号码QQ
    {{ contact.fullname|default:"暂无" }}{{ contact.email|default:"暂无" }}{{ contact.phone|default:"暂无" }}{{ contact.qq|default:"暂无" }} + 查看 +
    +{% endblock main %} diff --git a/users/contacts/templates/contacts/me.html b/users/contacts/templates/contacts/me.html new file mode 100644 index 0000000..c638e5b --- /dev/null +++ b/users/contacts/templates/contacts/me.html @@ -0,0 +1,26 @@ +{% extends 'layout.html' %} + +{% block page_title %}个人资料{% endblock %} +{% block body_class %}contacts-me{% endblock %} +{% block current_tab %}profile{% endblock %} + +{% block main %} +

    个人资料

    +
    +
    姓名
    +
    {{ contact.fullname|default:"暂无" }}
    +
    Email
    +
    {{ contact.email|default:"暂无" }}
    +
    电话号码
    +
    {{ contact.phone|default:"暂无" }}
    +
    QQ
    +
    {{ contact.qq|default:"暂无" }}
    +
    + +
    + +

    + 上传个人页面 + 编辑个人资料 +

    +{% endblock main %} diff --git a/users/contacts/templates/contacts/upload.html b/users/contacts/templates/contacts/upload.html new file mode 100644 index 0000000..f6faa54 --- /dev/null +++ b/users/contacts/templates/contacts/upload.html @@ -0,0 +1,11 @@ +{% extends 'layout.html' %} + +{% block page_title %}上传个人资料{% endblock %} +{% block current_tab %}contacts{% endblock %} + +{% block main %} +
    + {% include 'form.html' %} + +
    +{% endblock main %} diff --git a/users/contacts/test.html b/users/contacts/test.html new file mode 100644 index 0000000..d085e5c --- /dev/null +++ b/users/contacts/test.html @@ -0,0 +1,13 @@ + + + + + + + +

    全名: 彭灵波

    +

    电话: 150182234234

    +

    Email: asiasda@gmail.com

    +

    QQ: 32423423423

    + + diff --git a/users/contacts/urls.py b/users/contacts/urls.py new file mode 100644 index 0000000..bc1cfc8 --- /dev/null +++ b/users/contacts/urls.py @@ -0,0 +1,10 @@ +from django.conf.urls.defaults import patterns, url + +urlpatterns = patterns('contacts.views', + url(r'^$', 'index', name='contacts_index'), + url(r'^upload/$', 'upload', name='contacts_upload'), + url(r'^edit/$', 'edit', name='contacts_edit'), + url(r'^me/$', 'me', name='contacts_me'), + url(r'^(?P<username>[A-Za-z0-9\-_]+)/$', 'show', name='contacts_show'), +) + diff --git a/users/contacts/urls.pyc b/users/contacts/urls.pyc new file mode 100644 index 0000000..9068c64 Binary files /dev/null and b/users/contacts/urls.pyc differ diff --git a/users/contacts/views.py b/users/contacts/views.py new file mode 100644 index 0000000..64a1b51 --- /dev/null +++ b/users/contacts/views.py @@ -0,0 +1,92 @@ +# encoding: utf-8 + +from django.http import HttpResponse, HttpResponseRedirect +from django.core.urlresolvers import reverse +from django.shortcuts import get_object_or_404 +from django.contrib.auth.models import User +from django.contrib import messages +from annoying.decorators import render_to +from models import Contact, ContactUploadForm, ContactEditForm +from extractor import extract_contact_info_from_html + + +def contacts_list(request): + contacts = Contact.objects.filter(fullname__isnull=False) + return { 'contacts_list': contacts } + + +@render_to('contacts/index.html') +def index(request): + # TODO: pagination + contacts = Contact.objects.all() + + return { 'contacts': contacts } + + +@render_to('contacts/upload.html') +def upload(request): + if request.method == 'GET': + form = ContactUploadForm() + elif request.method == 'POST': + form = ContactUploadForm(request.POST, request.FILES) + + if form.is_valid(): + user = request.user + contact = Contact.objects.by_user(user) or Contact(user=user) + contact.html = request.FILES['html'] + contact.save() + + request.session['contact_info'] = 
extract_contact_info_from_html(contact.html) + + messages.success(request, u'个人页面已成功上传') + return HttpResponseRedirect(reverse('contacts_edit')) + + return { 'form': form } + + +@render_to('contacts/edit.html') +def edit(request): + user = request.user + contact = Contact.objects.by_user(user) + + if not contact: + messages.error(request, u'请先上传个人页面') + return HttpResponseRedirect(reverse('contacts_upload')) + + if request.method == 'GET': + if request.session.get('contact_info'): + contact_info = request.session.pop('contact_info') + contact.fullname = contact_info.get('fullname') + contact.email = contact_info.get('email') + contact.phone = contact_info.get('phone') + contact.qq = contact_info.get('qq') + + form = ContactEditForm(instance=contact) + elif request.method == 'POST': + form = ContactEditForm(request.POST, instance=contact) + + if form.is_valid(): + form.save() + messages.success(request, u'个人资料已更新') + return HttpResponseRedirect(reverse('root')) + + return { 'form': form } + + +@render_to('contacts/me.html') +def me(request): + return { 'contact': Contact.objects.by_user(request.user) } + +def show(request, username): + user = get_object_or_404(User, username=username) + contact = Contact.objects.by_user(user) + + if not contact: + if user == request.user: + messages.info(request, u'你还没有上传自己的个人页面') + return HttpResponseRedirect(reverse('contacts_upload')) + else: + messages.error(request, u'该用户还没有上传自己的个人页面') + return HttpResponseRedirect(reverse('root')) + + return HttpResponse(contact.html.chunks()) diff --git a/users/contacts/views.pyc b/users/contacts/views.pyc new file mode 100644 index 0000000..320cb4e Binary files /dev/null and b/users/contacts/views.pyc differ diff --git a/users/home/__init__.py b/users/home/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/home/__init__.pyc b/users/home/__init__.pyc new file mode 100644 index 0000000..e0c24c2 Binary files /dev/null and b/users/home/__init__.pyc differ diff --git 
a/users/home/templates/home/hosts b/users/home/templates/home/hosts new file mode 100644 index 0000000..cb09676 --- /dev/null +++ b/users/home/templates/home/hosts @@ -0,0 +1,5181 @@ +# 本版本 hosts 发布地址: +# https://docs.google.com/View?id=dfkdmxnt_61d9ck9ffq +# https://ipv6-hosts.googlecode.com/hg/hosts +# Google Code 项目页: +# http://code.google.com/p/ipv6-hosts/ + +##Google.com Google.com +2404:6800:8005::69 google.com +2404:6800:8005::54 accounts.google.com #帐户 +2404:6800:8005::54 accounts.l.google.com +2404:6800:8005::c1 m.google.com #移动版 +2404:6800:8005::c1 m.google.com.hk +2404:6800:8005::c1 mobile.l.google.com +2001:4860:4860::8888 google-public-dns-a.google.com #8.8.8.8 +2001:4860:4860::8844 google-public-dns-b.google.com #8.8.4.4 +2404:6800:8005::65 services.google.com #服务申请 +2404:6800:8005::8b goto.google.com #跳转 +2404:6800:8005::d2 jmt0.google.com +2404:6800:8005::d2 wire.l.google.com +#2404:6800:8005::63 ipv6.google.com +#2404:6800:8005::69 ipv6.google.com +#2404:6800:8005::63 ipv6.google.com.hk +#2404:6800:8005::69 ipv6.google.com.hk +#2404:6800:8005::63 ipv6.google.co.jp +#2404:6800:8005::69 ipv6.google.co.jp +#2404:6800:8005::63 ipv6.l.google.com + +##Google.com.hk 谷歌香港 +2404:6800:8005::2e www.google.com.hk +2404:6800:8005::2e picasaweb.google.com.hk +2404:6800:8005::2e desktop.google.com.hk +2404:6800:8005::2e id.google.com.hk +2404:6800:8005::93 google.com.hk +2404:6800:8005::67 www.googlechinawebmaster.com + +##Google.cn 谷歌中国(启用此地址无法正常使用谷歌音乐) +#2404:6800:8005::a0 www.google.cn +2404:6800:8005::a0 g.cn +2404:6800:8005::a0 google.cn #跳转 +2404:6800:8005::a0 news.google.cn #新闻(跳转) +2404:6800:8005::a0 video.google.cn #视频(跳转) +#2404:6800:8005::6a ipv6.google.cn +#2404:6800:8005::6a ipv6cn.l.google.com +2404:6800:8005::84 music.googleusercontent.cn +2404:6800:8005::a0 m.google.cn +2404:6800:8005::a0 www.265.com #上网导航 +2404:6800:8005::a0 265.com + +##Google.com.tw Google 台湾 +2404:6800:8005::2f www.google.com.tw +2404:6800:8005::2f picasaweb.google.com.tw + 
+##Google.co.jp Google 日本 +2a00:1450:8006::30 www.google.co.jp + +##其他 +2404:6800:8005::31 www.google.ae #阿联酋 +2404:6800:8005::29 www.google.com.ar #阿根廷 +2404:6800:8005::40 www.google.at #奥地利 +2404:6800:8005::21 www.google.com.au #澳大利亚 +2404:6800:8005::42 www.google.be #比利时 +2404:6800:8005::4c www.google.com.br #巴西 +2404:6800:8005::3a www.google.ca #加拿大 +2404:6800:8005::3c www.google.ch #瑞士 +#2404:6800:8005::2c www.google.cn #中国(已死) +2404:6800:8005::48 www.google.fi #芬兰 +2404:6800:8005::36 www.google.fr #法国 +2404:6800:8005::34 www.google.de #德国 +2404:6800:8005::33 www.google.com.gr #希腊 +2404:6800:8005::35 www.google.co.il #以色列 +2404:6800:8005::49 www.google.co.in #印度 +2404:6800:8005::38 www.google.it #意大利 +2404:6800:8005::30 www.google.co.jp #日本 +2404:6800:8005::44 www.google.co.kr #南韩 +2404:6800:8005::2b www.google.lt #立陶宛 +2404:6800:8005::39 www.google.lv #拉托维亚 +2404:6800:8005::4b www.google.com.ly #利比亚 +2404:6800:8005::24 www.google.com.my #马来西亚 +2404:6800:8005::3e www.google.nl #荷兰 +2404:6800:8005::2a www.google.co.nz #纽西兰 +2404:6800:8005::25 www.google.com.pe #秘鲁 +2404:6800:8005::28 www.google.com.ph #菲律宾 +2404:6800:8005::23 www.google.com.pk #巴基斯坦 +2404:6800:8005::3b www.google.pl #波兰 +2404:6800:8005::4a www.google.pt #葡萄牙 +2404:6800:8005::3d www.google.ro #罗马尼亚 +2404:6800:8005::3f www.google.com.ru #俄罗斯 +2404:6800:8005::2d www.google.com.sg #新加坡 +2404:6800:8005::20 www.google.com.tr #土耳其 +2404:6800:8005::45 www.google.com.ua #乌克兰 +2404:6800:8005::32 www.google.co.uk #英国 +2404:6800:8005::27 www.google.co.ve #委内瑞拉 +2404:6800:8005::22 www.google.com.vn #越南 +2404:6800:8005::26 www.google.co.za #南非 + +#Web 网页 +2404:6800:8005::68 www.google.com #主/登录域名,GTalk 客户端用户需注释此条,参见 Issue 11 +2404:6800:8005::68 encrypted.google.com +2404:6800:8005::68 www.l.google.com +2404:6800:8005::62 www0.l.google.com +2404:6800:8005::62 www1.l.google.com +2404:6800:8005::62 www2.l.google.com +2404:6800:8005::62 www3.l.google.com +2404:6800:8005::76 www4.l.google.com +2404:6800:8005::62 
suggestqueries.google.com #搜索建议 +2404:6800:8005::62 suggestqueries.l.google.com #搜索建议 +2404:6800:8005::64 gxc.google.com #gwt +2404:6800:8005::64 gxc.google.com.hk +2404:6800:8005::64 investor.google.com +2404:6800:8005::64 clients1.google.com +2404:6800:8005::64 clients2.google.com +2404:6800:8005::64 clients3.google.com +2404:6800:8005::64 clients4.google.com +2404:6800:8005::64 clients5.google.com +2404:6800:8005::64 clients6.google.com +2404:6800:8005::64 clients1.google.com.hk # .com.hk 搜索建议 +2404:6800:8005::64 clients1.google.com.tw +2404:6800:8005::64 linkhelp.clients.google.com +2404:6800:8005::64 clients.l.google.com +2404:6800:8005::65 encrypted-tbn0.google.com +2404:6800:8005::71 encrypted-tbn1.google.com +2404:6800:8005::8b encrypted-tbn2.google.com +2404:6800:8005::65 encrypted-tbn3.google.com +2404:6800:8005::65 encrypted-tbn.l.google.com + +#Google Apps 企业应用套件 +2404:6800:8005::62 apps.google.com +2404:6800:8005::79 ghs.google.com +2404:6800:8005::79 ghs46.google.com #GHS 双栈入口! 
+2404:6800:8005::79 ghs.l.google.com +2404:6800:8005::79 ghs46.l.google.com +#2404:6800:8005::79 ghs.google.com #可以添加你 GApps 域名的博客地址 / GSites 地址到这里 + +#Mashups/App Engine GAE +2404:6800:8005::67 googlemashups.com #Google Mashup Editor +2404:6800:8005::68 www.googlemashups.com +2404:6800:8005::62 googlemashups.l.google.com +2404:6800:8005::63 *.googlemashups.com +2404:6800:8005::64 appengine.google.com +2404:6800:8002::8d appspot.l.google.com +2404:6800:8002::8d productideas.appspot.com #Google 汇问 +2404:6800:8002::8d storegadgetwizard.appspot.com #Google Checkout Store Gadget +2404:6800:8002::8d moderator.appspot.com #Google Moderator +2404:6800:8002::8d gv-gadget.appspot.com #YouTube Gadget +2404:6800:8002::8d haiticrisis.appspot.com #Google Person Finder: Haiti Earthquake +2404:6800:8002::8d i18napis.appspot.com +2404:6800:8002::8d googcloudlabs.appspot.com +2404:6800:8002::8d googlesharedspaces.appspot.com +2404:6800:8002::8d project-slingshot-gp.appspot.com +2404:6800:8002::8d r3269-dot-latest-dot-project-slingshot-gp.appspot.com +2404:6800:8002::8d newsfeed-dot-latest-dot-rovio-ad-engine.appspot.com +2404:6800:8002::8d evolutionofweb.appspot.com +2404:6800:8001::8d yt-debates.appspot.com +2404:6800:8002::8d *.appspot.com #可以添加自己的 gae 应用 + +#Answers/Guru/WenDa 问答社区(国际版已停止运营) +2404:6800:8005::66 answers.google.com +#2404:6800:8005::62 guru.google.com +2404:6800:8005::62 guru.google.co.th #泰国 +2404:6800:8005::66 wenda.google.com.hk + +#Blog 博客搜索 +2404:6800:8005::68 blogsearch.google.com +2404:6800:8005::68 blogsearch.google.com.hk +2404:6800:8005::68 blogsearch.google.com.tw +2404:6800:8005::a0 blogsearch.google.cn + +#Blogger 博客服务 +2404:6800:8005::bf www.blogger.com +2404:6800:8005::bf blogger.com +2404:6800:8005::bf buttons.blogger.com +2404:6800:8005::bf beta.blogger.com +2404:6800:8005::bf draft.blogger.com #Blogger 测试区 +2404:6800:8005::bf status.blogger.com #Blogger 状态 +2404:6800:8005::bf help.blogger.com #支持中心 +2404:6800:8005::bf buzz.blogger.com #Blogger 
Buzz博客(英文) +2404:6800:8005::bf photos1.blogger.com +2404:6800:8005::65 bp0.blogger.com +2404:6800:8005::65 bp1.blogger.com +2404:6800:8005::65 bp2.blogger.com +2404:6800:8005::65 bp3.blogger.com +2404:6800:8005::65 bloggerphotos.l.google.com +2404:6800:8005::bf blogger.google.com +2404:6800:8005::bf www2.blogger.com +2404:6800:8005::bf blogger.l.google.com +2404:6800:8005::bf www.blogblog.com +2404:6800:8005::bf www1.blogblog.com +2404:6800:8005::bf www2.blogblog.com +2404:6800:8005::84 img.blogblog.com +2404:6800:8005::bf img1.blogblog.com +2404:6800:8005::bf img2.blogblog.com +2404:6800:8005::79 www.textcube.com + +#Blogspot 博客服务 +2404:6800:8005::62 www.blogspot.com +#2404:6800:8005::62 blogger.l.google.com +2404:6800:8005::bf blogspot.com +2404:6800:8005::62 blogsofnote.blogspot.com #留言博客(英文版本) +2404:6800:8005::62 knownissues.blogspot.com #已知问题 +2404:6800:8005::62 1.bp.blogspot.com +2404:6800:8005::62 2.bp.blogspot.com +2404:6800:8005::62 3.bp.blogspot.com +2404:6800:8005::62 4.bp.blogspot.com +2404:6800:8005::62 bloggertemplatespreview.blogspot.com #模板编辑器的实时预览功能 +#Google 官方博客群 +2404:6800:8005::84 adwordsapi.blogspot.com +2404:6800:8005::84 adsense-zhs.blogspot.com +2404:6800:8005::84 android-developers.blogspot.com +2404:6800:8005::84 apacdeveloper.blogspot.com #Google Asia Pacific Developer Blog +2404:6800:8005::84 booksearch.blogspot.com #Inside Google Books +2404:6800:8005::84 chrome.blogspot.com +2404:6800:8005::84 doubleclickpublishersapi.blogspot.com +2404:6800:8005::84 emeadev.blogspot.com #Google Europe, Middle East & Africa Developer Blog +2404:6800:8005::84 gearsblog.blogspot.com +2404:6800:8005::84 google-code-featured.blogspot.com #Featured Projects on Google Code +2404:6800:8005::84 google-entertainment-it.blogspot.com +2404:6800:8005::84 google-opensource.blogspot.com +2404:6800:8005::84 googleajaxsearchapi.blogspot.com +2404:6800:8005::84 googleappengine.blogspot.com +2404:6800:8005::84 googleappsdeveloper.blogspot.com +2404:6800:8005::84 
googleblog.blogspot.com #Official Google Blog +2404:6800:8005::84 googlecheckout.blogspot.com +2404:6800:8005::84 googlecheckoutapi.blogspot.com +2404:6800:8005::84 googlechinablog.blogspot.com +2404:6800:8005::84 googlechromereleases.blogspot.com #Google Chrome Releases +2404:6800:8005::84 googlecode.blogspot.com +2404:6800:8005::84 googlecustomsearch.blogspot.com #Google Custom Search Blog +2404:6800:8005::84 googleenterprise.blogspot.com +2404:6800:8005::84 googlegeodevelopers.blogspot.com #Google Geo Developers Blog +2404:6800:8005::84 googlemashupeditor.blogspot.com +2404:6800:8005::84 googlemobile.blogspot.com +2404:6800:8005::84 googleplusplatform.blogspot.com +2404:6800:8005::84 googleresearch.blogspot.com +2404:6800:8005::84 googletalk.blogspot.com +2404:6800:8005::84 googlewebmaster-cn.blogspot.com +2404:6800:8005::84 googlewebmastercentral.blogspot.com +2404:6800:8005::84 googlewebtoolkit.blogspot.com +2404:6800:8005::84 golangblog.blogspot.com +2404:6800:8005::84 gmailblog.blogspot.com +2404:6800:8005::84 igoogledeveloper.blogspot.com #iGoogle Developer Blog +2404:6800:8005::84 webmproject.blogspot.com +2404:6800:8005::84 youtube-global.blogspot.com #YouTube Blog +2404:6800:8005::84 *.blogspot.com #可以添加你自己的博客地址到这里 +2404:6800:8005::84 blogspot.l.google.com + +#Books 图书 +2404:6800:8005::66 books.google.com +2404:6800:8005::66 books.google.com.hk +2404:6800:8005::a0 books.google.cn +2404:6800:8005::66 bks0.books.google.com +2404:6800:8005::66 bks1.books.google.com +2404:6800:8005::66 bks2.books.google.com +2404:6800:8005::66 bks3.books.google.com +2404:6800:8005::66 bks4.books.google.com +2404:6800:8005::66 bks5.books.google.com +2404:6800:8005::66 bks6.books.google.com +2404:6800:8005::66 bks7.books.google.com +2404:6800:8005::66 bks8.books.google.com +2404:6800:8005::66 bks9.books.google.com +2404:6800:8005::66 bks10.books.google.com +2404:6800:8005::66 bks0.books.google.com.hk +2404:6800:8005::66 bks1.books.google.com.hk +2404:6800:8005::66 
bks2.books.google.com.hk +2404:6800:8005::66 bks3.books.google.com.hk +2404:6800:8005::66 bks4.books.google.com.hk +2404:6800:8005::66 bks5.books.google.com.hk +2404:6800:8005::66 bks6.books.google.com.hk +2404:6800:8005::66 bks7.books.google.com.hk +2404:6800:8005::66 bks8.books.google.com.hk +2404:6800:8005::66 bks9.books.google.com.hk +2404:6800:8005::66 bks10.books.google.com.hk#Buzz +2404:6800:8005::62 buzz.google.com + +#Calendar 日历 +2404:6800:8005::64 calendar.google.com + +#Checkout 买家 +2404:6800:8005::73 checkout.google.com +#2404:6800:8005::73 checkout.l.google.com +2404:6800:8005::8b wallet.google.com + +#Chrome 谷歌浏览器 +2404:6800:8005::64 chrome.google.com +2404:6800:8005::65 browsersync.google.com +2404:6800:8005::65 browsersync.l.google.com +2404:6800:8005::63 toolbarqueries.google.com #PageRank 查询(工具栏显示) +2404:6800:8005::63 toolbarqueries.google.com.hk +2404:6800:8005::63 toolbarqueries.clients.google.com +2404:6800:8005::63 toolbarqueries.l.google.com +2404:6800:8005::79 chrome.angrybirds.com + +#Chromium Chromium 项目 +2404:6800:8005::65 chromium.org #跳转 +2404:6800:8005::79 www.chromium.org +2404:6800:8005::79 dev.chromium.org +2404:6800:8005::79 blog.chromium.org +#2404:6800:8005::62 build.chromium.org +2404:6800:8005::79 codereview.chromium.org + +#Chromium OS Chromium 操作系统 +2404:6800:8005::79 goto.ext.google.com +2404:6800:8002::8d welcome-cros.appspot.com #原 Chromium 主菜单 + +#Code 代码 +2404:6800:8005::64 code.google.com +2404:6800:8005::64 code.l.google.com +2404:6800:8005::52 *.googlecode.com +2404:6800:8005::52 googlecode.l.google.com +2404:6800:8005::52 chromium.googlecode.com +2404:6800:8005::52 closure-library.googlecode.com +2404:6800:8005::52 earth-api-samples.googlecode.com +2404:6800:8005::52 gmaps-samples-flash.googlecode.com +2404:6800:8005::52 googleappengine.googlecode.com +2404:6800:8005::52 google-code-feed-gadget.googlecode.com +2404:6800:8005::52 www.googlesource.com +2404:6800:8005::52 android.googlesource.com + +#Desktop 桌面 
+2404:6800:8005::62 desktop.google.com +2404:6800:8005::62 desktop.l.google.com + +#Directory 网页目录 +2404:6800:8005::8a directory.google.com +2404:6800:8005::8a dir.google.com #Google网页目录 + +#Docs 文档 +2404:6800:8005::71 docs.google.com +2404:6800:8005::64 docs0.google.com +2404:6800:8005::64 docs1.google.com +2404:6800:8005::64 docs2.google.com +2404:6800:8005::64 docs3.google.com +2404:6800:8005::64 docs4.google.com +2404:6800:8005::64 docs5.google.com +2404:6800:8005::64 docs6.google.com +2404:6800:8005::64 docs7.google.com +2404:6800:8005::64 docs8.google.com +2404:6800:8005::64 docs9.google.com +2404:6800:8005::bd 0.docs.google.com +2404:6800:8005::bd 1.docs.google.com +2404:6800:8005::bd 2.docs.google.com +2404:6800:8005::bd 3.docs.google.com +2404:6800:8005::bd 4.docs.google.com +2404:6800:8005::bd 5.docs.google.com +2404:6800:8005::bd 6.docs.google.com +2404:6800:8005::bd 7.docs.google.com +2404:6800:8005::bd 8.docs.google.com +2404:6800:8005::bd 9.docs.google.com +2404:6800:8005::bd 10.docs.google.com +2404:6800:8005::bd 11.docs.google.com +2404:6800:8005::bd 12.docs.google.com +2404:6800:8005::bd 13.docs.google.com +2404:6800:8005::bd 14.docs.google.com +2404:6800:8005::bd 15.docs.google.com +2404:6800:8005::bd 16.docs.google.com +2404:6800:8005::bd *.docs.google.com #按需要同时打开的文档个数自行添加 +2404:6800:8005::bd browserchannel-docs.l.google.com +2404:6800:8005::62 spreadsheet.google.com +2404:6800:8005::62 spreadsheets.google.com +2404:6800:8005::62 spreadsheets0.google.com +2404:6800:8005::62 spreadsheets1.google.com +2404:6800:8005::62 spreadsheets2.google.com +2404:6800:8005::62 spreadsheets3.google.com +2404:6800:8005::62 spreadsheets4.google.com +2404:6800:8005::62 spreadsheets5.google.com +2404:6800:8005::62 spreadsheets6.google.com +2404:6800:8005::62 spreadsheets7.google.com +2404:6800:8005::62 spreadsheets8.google.com +2404:6800:8005::62 spreadsheets9.google.com +2404:6800:8005::62 spreadsheets.l.google.com +2404:6800:8005::62 
spreadsheets-china.l.google.com +2404:6800:8005::62 writely.google.com +2404:6800:8005::62 writely.l.google.com +2404:6800:8005::62 writely-com.l.google.com +2404:6800:8005::62 writely-china.l.google.com + +#Download 下载 +2404:6800:8005::5b dl.google.com +2404:6800:8005::5d dl.l.google.com +2404:6800:8005::88 dl-ssl.google.com + +#Google Earth 地球 +2404:6800:8005::65 earth.google.com +2404:6800:8005::8b auth.keyhole.com +2404:6800:8005::8b geoauth.google.com + +#Finance 财经 +2404:6800:8005::62 finance.google.com + +#Fusion RSS 聚合指南 +2404:6800:8005::62 fusion.google.com + +#Gmail 邮箱 +2404:6800:8005::11 mail.google.com +2404:6800:8005::11 googlemail.l.google.com +2404:6800:8005::bd chatenabled.mail.google.com #Gmail 中 Gtalk 聊天服务 +2404:6800:8005::bd filetransferenabled.mail.google.com #可能会造成 GTalk 客户端异常 +2404:6800:8005::bd b.googlemail.l.google.com +#2404:6800:8005::62 talk.gmail.com #Gmail中Gtalk聊天服务 +2404:6800:8005::64 gmail.google.com +2404:6800:8005::11 www.gmail.com +2404:6800:8005::11 gmail.com +2404:6800:8005::11 m.gmail.com +2404:6800:8005::11 m.googlemail.com +2001:4860:8004::6d pop.gmail.com #Foxmail 等客户端不支持 ipv6 连接,参阅 Issue 33 +2001:4860:8004::6d gmail-pop.l.google.com +2001:4860:8004::10 pop.googlemail.com +2001:4860:8004::10 googlemail-pop.l.google.com +2001:4860:8004::6d smtp.gmail.com #Foxmail 暂不支持 +2001:4860:8004::6d gmail-smtp-msa.l.google.com +2404:6800:8005::62 smtp1.google.com +#2404:6800:8005::62 smtp2.google.com +2404:6800:8005::62 smtp3.google.com +2404:6800:8005::62 smtp-out.google.com +2404:6800:8005::62 smtp-out3.google.com +2001:4860:8004::6d gmail-smtp.l.google.com +2404:6800:8005::62 gmail-smtp-in.l.google.com +2404:6800:8005::62 gmr-smtp-in.l.google.com +2001:4860:8004::10 smtp.googlemail.com +2001:4860:8004::10 googlemail-smtp.l.google.com +2001:4860:8005::6c imap.gmail.com #Foxmail 暂不支持 +2001:4860:8005::6c imap.google.com +2001:4860:8005::6c gmail-imap.l.google.com +2001:4860:8004::10 imap.googlemail.com +2001:4860:8004::10 
googlemail-imap.l.google.com + +#Group 论坛 +2404:6800:8005::8b groups.google.com +2401:3800:c001::68 groups.google.cn +2404:6800:8005::8b groups.google.com.hk +2404:6800:8005::8b groups-beta.google.com +2404:6800:8005::8b groups.l.google.com +2404:6800:8005::89 blob-s-docs.googlegroups.com +2404:6800:8005::89 *.googlegroups.com + +#Health 健康 +2404:6800:8005::66 health.google.com + +#Images 图片 +2404:6800:8005::68 images.google.com +2404:6800:8005::68 images.google.com.hk +2404:6800:8005::68 images.l.google.com +2404:6800:8005::62 tbn0.google.com +2404:6800:8005::62 tbn1.google.com +2404:6800:8005::62 tbn2.google.com +2404:6800:8005::62 tbn3.google.com +2404:6800:8005::62 tbn4.google.com +2404:6800:8005::62 tbn5.google.com +2404:6800:8005::62 tbn6.google.com + +#Knol 在线百科全书 +2404:6800:8005::65 knol.google.com + +#Labs 实验室 +2404:6800:8005::65 labs.google.com +2404:6800:8005::8a labs.google.cn +2404:6800:8002::8d www.googlelabs.com +2404:6800:8002::8d browsersize.googlelabs.com #Browser Size +2404:6800:8002::8d bodybrowser.googlelabs.com #Google Body +2404:6800:8002::8d citytours.googlelabs.com #City Tours +2404:6800:8002::8d correlate.googlelabs.com +2404:6800:8002::8d datawiki.googlelabs.com #Google DataWiki +2404:6800:8002::8d earthengine.googlelabs.com #Earth Engine +2404:6800:8002::8d fastflip.googlelabs.com #Fast Flip +2404:6800:8002::8d followfinder.googlelabs.com #Follow Finder +2404:6800:8002::8d guru.googlelabs.com +2404:6800:8002::8d image-swirl.googlelabs.com #Image Swirl +2404:6800:8002::8d juliamap.googlelabs.com #Julia Map +2404:6800:8002::8d listen.googlelabs.com #Google Listen +2404:6800:8002::8d livingstories.googlelabs.com #Living Stories +2404:6800:8002::8d newstimeline.googlelabs.com #Google News Timeline +2404:6800:8002::8d ngrams.googlelabs.com +2404:6800:8002::8d pagespeed.googlelabs.com +2404:6800:8002::8d relatedlinks.googlelabs.com #Related Links +2404:6800:8002::8d scribe.googlelabs.com +2404:6800:8002::8d scriptconv.googlelabs.com #Script 
Converter +2404:6800:8002::8d similar-images.googlelabs.com #Similar Images +2404:6800:8002::8d storegadget.googlelabs.com #Google Checkout Store Gadget +2404:6800:8002::8d swiffy.googlelabs.com #Google Swiffy +2404:6800:8002::8d tables.googlelabs.com #Fusion Tables + +#Google Mars 火星地图 +2404:6800:8005::65 mars.google.com + +#Maps 地图 +2404:6800:8005::64 maps.google.com +2404:6800:8005::64 maps.google.com.hk +2404:6800:8005::64 maps.google.com.tw +2404:6800:8005::64 local.google.com +2404:6800:8005::64 ditu.google.com +2404:6800:8005::64 maps-api-ssl.google.com +2404:6800:8005::64 map.google.com +2404:6800:8005::64 maps.l.google.com +2404:6800:8005::5d kh.google.com +2404:6800:8005::5d kh.l.google.com +2404:6800:8005::65 khmdb.google.com +2404:6800:8005::66 khm.google.com +2404:6800:8005::66 khm.l.google.com +2404:6800:8005::66 khm0.google.com #Satellite View +2404:6800:8005::66 khm1.google.com #Satellite View +2404:6800:8005::66 khm2.google.com #Satellite View +2404:6800:8005::66 khm3.google.com #Satellite View +2404:6800:8005::65 cbk0.google.com #Street View +2404:6800:8005::65 cbk1.google.com #Street View +2404:6800:8005::65 cbk2.google.com #Street View +2404:6800:8005::65 cbk3.google.com #Street View +2404:6800:8005::8a khms.google.com #加密版 +2404:6800:8005::8a khms0.google.com +2404:6800:8005::8a khms1.google.com +2404:6800:8005::8a khms2.google.com +2404:6800:8005::8a khms3.google.com +2404:6800:8005::66 cbks0.google.com +2404:6800:8005::66 cbks1.google.com +2404:6800:8005::66 cbks2.google.com +2404:6800:8005::66 cbks3.google.com +2404:6800:8005::8a khms.l.google.com +2404:6800:8005::76 mw1.google.com #small +2404:6800:8005::76 mw2.google.com #medium +2404:6800:8005::71 mt.l.google.com +2404:6800:8005::71 mt.google.com +2404:6800:8005::71 mt0.google.com +2404:6800:8005::71 mt1.google.com +2404:6800:8005::71 mt2.google.com +2404:6800:8005::71 mt3.google.com +2404:6800:8005::62 gg.google.com +2404:6800:8005::8a gg.google.cn +2404:6800:8005::62 csi.l.google.com 
+2404:6800:8005::62 id.google.com +2404:6800:8005::62 id.google.cn +2404:6800:8005::62 id.l.google.com +2404:6800:8005::a0 ditu.google.cn +2404:6800:8005::a0 mt.google.cn +2404:6800:8005::a0 mt0.google.cn +2404:6800:8005::a0 mt1.google.cn +2404:6800:8005::a0 mt2.google.cn +2404:6800:8005::a0 mt3.google.cn +2404:6800:8005::71 mts.google.com #加密版 +2404:6800:8005::71 mts0.google.com +2404:6800:8005::71 mts1.google.com +2404:6800:8005::71 mts2.google.com +2404:6800:8005::71 mts3.google.com +2404:6800:8005::71 mts.l.google.com +2404:6800:8005::67 maps.gstatic.cn +2404:6800:8005::8b mobilemaps.clients.google.com + +#Music Beta by Google 云音乐服务 +2404:6800:8005::65 music.google.com +2404:6800:8005::74 t.doc-0-0-sj.sj.googleusercontent.com #个人音乐库 +2404:6800:8005::74 music-streaming.l.google.com +2404:6800:8005::84 music.googleusercontent.com #音乐播放器 专辑封面 等 +2404:6800:8005::74 uploadsj.clients.google.com #客户端上传 +2404:6800:8005::74 large-uploads.l.google.com + +#News 资讯 +2404:6800:8005::64 news.google.com +2404:6800:8005::64 news.google.com.hk +2404:6800:8005::64 news.google.com.tw +2404:6800:8005::64 news.l.google.com +2404:6800:8005::62 nt0.ggpht.com +2404:6800:8005::62 nt1.ggpht.com +2404:6800:8005::62 nt2.ggpht.com +2404:6800:8005::62 nt3.ggpht.com +2404:6800:8005::62 nt4.ggpht.com +2404:6800:8005::62 nt5.ggpht.com + +#Orkut 网络社区 +2404:6800:8005::56 www.orkut.com +2404:6800:8005::56 orkut-fe.l.google.com +2404:6800:8005::56 www.orkut.gmodules.com +2404:6800:8005::56 *.a.orkut.gmodules.com +2404:6800:8005::56 orkut-opensocial.l.google.com +2404:6800:8005::57 img1.orkut.com +2404:6800:8005::57 img2.orkut.com +2404:6800:8005::57 img3.orkut.com +2404:6800:8005::57 img4.orkut.com +2404:6800:8005::57 img5.orkut.com +2404:6800:8005::57 img6.orkut.com +2404:6800:8005::57 img7.orkut.com +2404:6800:8005::57 img8.orkut.com +2404:6800:8005::57 img9.orkut.com +#2404:6800:8005::57 orkut-images.l.google.com +2404:6800:8005::55 static1.orkut.com +2404:6800:8005::55 static2.orkut.com 
+2404:6800:8005::55 static3.orkut.com +2404:6800:8005::55 static4.orkut.com +#2404:6800:8005::55 orkut-static.l.google.com +2404:6800:8005::55 promote.orkut.com +#2404:6800:8005::55 orkut-promote.l.google.com +2404:6800:8005::69 help.orkut.com +2404:6800:8005::62 officialorkutblog.blogspot.com +2404:6800:8005::79 blog.orkut.com +2404:6800:8005::79 en.blog.orkut.com + +#Pack 软件精选 +2404:6800:8005::68 pack.google.com +2607:f8b0:4004:6::15 cache.pack.google.com +2607:f8b0:4004:6::15 cache.l.google.com +2404:6800:8005::6a pack.google.cn + +#Photo/Picasa 照片/网络相册 +2404:6800:8005::5d photos.google.com +#2404:6800:8005::5d picasaweb.l.google.com +2404:6800:8005::63 picasa.google.com +2404:6800:8005::be picasaweb.google.com +2404:6800:8005::84 lh3.ggpht.com +2404:6800:8005::84 lh4.ggpht.com +2404:6800:8005::84 lh5.ggpht.com +2404:6800:8005::84 lh6.ggpht.com +2404:6800:8005::84 photos-ugc.l.google.com +2404:6800:8005::66 lh2.google.com +2404:6800:8005::66 lh3.google.com +2404:6800:8005::66 lh4.google.com +2404:6800:8005::66 lh5.google.com +2404:6800:8005::66 lh6.google.com +2404:6800:8005::66 lh2.l.google.com + +#Profiles 个人主页 +2404:6800:8005::71 profiles.google.com +2404:6800:8005::65 plusone.google.com +2404:6800:8005::8a plus.google.com +2404:6800:8005::8a plus.google.com.hk # URL 重定向 +2404:6800:8005::8a plus.url.google.com +2404:6800:8005::8a plus.url.google.com.hk #实际上不存在的域名 +2404:6800:8005::64 developers.google.com + +#Reader 阅读器 +2404:6800:8005::68 reader.google.com + +#SafeBrowing 安全检测服务 +2404:6800:8005::62 sb.google.com #安全浏览检测 API +2404:6800:8005::88 sb.l.google.com +2404:6800:8005::5d sb-ssl.google.com +2404:6800:8005::88 sb-ssl.google.com +2404:6800:8005::88 sb-ssl.l.google.com +2404:6800:8005::62 safebrowsing.clients.google.com #安全浏览警告页面 +2404:6800:8005::62 safebrowsing-cache.google.com #安全浏览警告数据(分块加载) +2404:6800:8005::65 safebrowsing.cache.l.google.com + +#Sandbox 服务沙盒 +2404:6800:8005::51 sandbox.google.com + +#Scholar 学术搜索 +2404:6800:8005::62 scholar.google.com 
+2404:6800:8005::62 scholar.google.com.hk +2404:6800:8005::62 scholar.google.com.tw +2404:6800:8005::62 scholar.l.google.com +2404:6800:8005::a0 scholar.google.cn + +#Sites 协作平台 +2404:6800:8005::65 sites.google.com +2404:6800:8005::62 gsamplemaps.googlepages.com + +#SketchUp 3D 建模工具 +2404:6800:8005::62 sketchup.google.com +#2404:6800:8005::62 sketchup.l.google.com + +#Google Store Google 商店(里面有很多谷歌自己的东西出售,像谷歌T恤、茶杯、小玩意等等) +#2404:6800:8005::62 www.googlestore.com + +#Google Talk/Chat 聊天 +#2404:6800:8005::7d talk.google.com +#2404:6800:8005::7d talk.l.google.com +#2404:6800:8005::62 default.talk.google.com +2404:6800:8005::62 talkgadget.google.com +#2404:6800:8005::62 rtmp0.google.com +#2404:6800:8005::62 users.talk.google.com + +#Tools 工具 +2404:6800:8005::66 tools.google.com +2404:6800:8005::66 toolbar.google.com +2404:6800:8005::66 toolbar.google.com.hk +2404:6800:8005::66 tools.l.google.com + +#Translate 翻译 +2404:6800:8005::62 translate.google.com +2404:6800:8005::62 translate.google.com.hk +2404:6800:8005::62 translate.google.com.tw +2404:6800:8005::a0 translate.google.cn + +#Trends 趋势 +2404:6800:8005::63 trends.google.com + +#Video 视频 +2404:6800:8005::62 video.google.com +2404:6800:8005::62 video.google.com.hk +2404:6800:8005::62 video.google.com.tw +#2404:6800:8005::62 video.l.google.com +2404:6800:8005::62 0.gvt0.com +2404:6800:8005::62 1.gvt0.com +2404:6800:8005::62 2.gvt0.com +2404:6800:8005::62 3.gvt0.com +2404:6800:8005::62 4.gvt0.com +2404:6800:8005::62 5.gvt0.com +2404:6800:8005::62 video-stats.video.google.com +2404:6800:8005::74 upload.video.google.com +2404:6800:8005::74 sslvideo-upload.l.google.com +2404:6800:8005::62 vp.video.google.com +2404:6800:8005::62 vp.video.l.google.com +2404:6800:8005::62 qwqy.vp.video.l.google.com +2404:6800:8005::62 nz.vp.video.l.google.com +2404:6800:8005::62 nztdug.vp.video.l.google.com +2404:6800:8005::62 pr.vp.video.l.google.com +2404:6800:8005::62 ug.vp.video.l.google.com +2404:6800:8005::62 vp01.video.l.google.com 
+2404:6800:8005::62 vp02.video.l.google.com +2404:6800:8005::62 vp03.video.l.google.com +2404:6800:8005::62 vp04.video.l.google.com +2404:6800:8005::62 vp05.video.l.google.com +2404:6800:8005::62 vp06.video.l.google.com +2404:6800:8005::62 vp07.video.l.google.com +2404:6800:8005::62 vp08.video.l.google.com +2404:6800:8005::62 vp09.video.l.google.com +2404:6800:8005::62 vp10.video.l.google.com +2404:6800:8005::62 vp11.video.l.google.com +2404:6800:8005::62 vp12.video.l.google.com +2404:6800:8005::62 vp13.video.l.google.com +2404:6800:8005::62 vp14.video.l.google.com +2404:6800:8005::62 vp15.video.l.google.com +2404:6800:8005::62 vp16.video.l.google.com +2404:6800:8005::62 vp17.video.l.google.com +2404:6800:8005::62 vp18.video.l.google.com +2404:6800:8005::62 vp19.video.l.google.com +2404:6800:8005::62 vp20.video.l.google.com +2404:6800:4005:4::6 v1.lscache1.googlevideo.com +2404:6800:4005::6 v1.lscache2.googlevideo.com +2404:6800:4005:1::6 v1.lscache3.googlevideo.com +2404:6800:4005:5::6 v1.lscache4.googlevideo.com +2404:6800:4005:2::6 v1.lscache5.googlevideo.com +2404:6800:4005:2::6 v1.lscache6.googlevideo.com +2404:6800:4005:7::6 v1.lscache7.googlevideo.com +2404:6800:4005:3::6 v1.lscache8.googlevideo.com +2404:6800:4005::a v2.lscache1.googlevideo.com +2404:6800:4005::a v2.lscache2.googlevideo.com +2404:6800:4005:5::a v2.lscache3.googlevideo.com +2404:6800:4005:5::a v2.lscache4.googlevideo.com +2404:6800:4005:2::a v2.lscache5.googlevideo.com +2404:6800:4005:6::a v2.lscache6.googlevideo.com +2404:6800:4005:3::a v2.lscache7.googlevideo.com +2404:6800:4005:3::a v2.lscache8.googlevideo.com +2404:6800:4005::e v3.lscache1.googlevideo.com +2404:6800:4005::e v3.lscache2.googlevideo.com +2404:6800:4005:1::e v3.lscache3.googlevideo.com +2404:6800:4005:5::e v3.lscache4.googlevideo.com +2404:6800:4005:2::e v3.lscache5.googlevideo.com +2404:6800:4005:2::e v3.lscache6.googlevideo.com +2404:6800:4005:7::e v3.lscache7.googlevideo.com +2404:6800:4005:7::e 
v3.lscache8.googlevideo.com +2404:6800:4005:4::12 v4.lscache1.googlevideo.com +2404:6800:4005::12 v4.lscache2.googlevideo.com +2404:6800:4005:5::12 v4.lscache3.googlevideo.com +2404:6800:4005:1::12 v4.lscache4.googlevideo.com +2404:6800:4005:6::12 v4.lscache5.googlevideo.com +2404:6800:4005:2::12 v4.lscache6.googlevideo.com +2404:6800:4005:7::12 v4.lscache7.googlevideo.com +2404:6800:4005:7::12 v4.lscache8.googlevideo.com +2404:6800:4005:4::16 v5.lscache1.googlevideo.com +2404:6800:4005:4::16 v5.lscache2.googlevideo.com +2404:6800:4005:1::16 v5.lscache3.googlevideo.com +2404:6800:4005:5::16 v5.lscache4.googlevideo.com +2404:6800:4005:2::16 v5.lscache5.googlevideo.com +2404:6800:4005:6::16 v5.lscache6.googlevideo.com +2404:6800:4005:7::16 v5.lscache7.googlevideo.com +2404:6800:4005:7::16 v5.lscache8.googlevideo.com +2404:6800:4005::7 v6.lscache1.googlevideo.com +2404:6800:4005:4::7 v6.lscache2.googlevideo.com +2404:6800:4005:5::7 v6.lscache3.googlevideo.com +2404:6800:4005:5::7 v6.lscache4.googlevideo.com +2404:6800:4005:6::7 v6.lscache5.googlevideo.com +2404:6800:4005:6::7 v6.lscache6.googlevideo.com +2404:6800:4005:7::7 v6.lscache7.googlevideo.com +2404:6800:4005:3::7 v6.lscache8.googlevideo.com +2404:6800:4005:4::b v7.lscache1.googlevideo.com +2404:6800:4005:4::b v7.lscache2.googlevideo.com +2404:6800:4005:5::b v7.lscache3.googlevideo.com +2404:6800:4005:1::b v7.lscache4.googlevideo.com +2404:6800:4005:6::b v7.lscache5.googlevideo.com +2404:6800:4005:6::b v7.lscache6.googlevideo.com +2404:6800:4005:7::b v7.lscache7.googlevideo.com +2404:6800:4005:7::b v7.lscache8.googlevideo.com +2404:6800:4005:4::f v8.lscache1.googlevideo.com +2404:6800:4005:4::f v8.lscache2.googlevideo.com +2404:6800:4005:5::f v8.lscache3.googlevideo.com +2404:6800:4005:1::f v8.lscache4.googlevideo.com +2404:6800:4005:2::f v8.lscache5.googlevideo.com +2404:6800:4005:6::f v8.lscache6.googlevideo.com +2404:6800:4005:7::f v8.lscache7.googlevideo.com +2404:6800:4005:3::f v8.lscache8.googlevideo.com 
+2404:6800:4005:4::13 v9.lscache1.googlevideo.com +2404:6800:4005:4::13 v9.lscache2.googlevideo.com +2404:6800:4005:1::13 v9.lscache3.googlevideo.com +2404:6800:4005:1::13 v9.lscache4.googlevideo.com +2404:6800:4005:6::13 v9.lscache5.googlevideo.com +2404:6800:4005:2::13 v9.lscache6.googlevideo.com +2404:6800:4005:3::13 v9.lscache7.googlevideo.com +2404:6800:4005:7::13 v9.lscache8.googlevideo.com +2404:6800:4005:4::17 v10.lscache1.googlevideo.com +2404:6800:4005:4::17 v10.lscache2.googlevideo.com +2404:6800:4005:1::17 v10.lscache3.googlevideo.com +2404:6800:4005:5::17 v10.lscache4.googlevideo.com +2404:6800:4005:6::17 v10.lscache5.googlevideo.com +2404:6800:4005:2::17 v10.lscache6.googlevideo.com +2404:6800:4005:7::17 v10.lscache7.googlevideo.com +2404:6800:4005:3::17 v10.lscache8.googlevideo.com +2404:6800:4005:4::8 v11.lscache1.googlevideo.com +2404:6800:4005:4::8 v11.lscache2.googlevideo.com +2404:6800:4005:1::8 v11.lscache3.googlevideo.com +2404:6800:4005:5::8 v11.lscache4.googlevideo.com +2404:6800:4005:6::8 v11.lscache5.googlevideo.com +2404:6800:4005:6::8 v11.lscache6.googlevideo.com +2404:6800:4005:3::8 v11.lscache7.googlevideo.com +2404:6800:4005:3::8 v11.lscache8.googlevideo.com +2404:6800:4005:4::c v12.lscache1.googlevideo.com +2404:6800:4005:4::c v12.lscache2.googlevideo.com +2404:6800:4005:5::c v12.lscache3.googlevideo.com +2404:6800:4005:1::c v12.lscache4.googlevideo.com +2404:6800:4005:6::c v12.lscache5.googlevideo.com +2404:6800:4005:2::c v12.lscache6.googlevideo.com +2404:6800:4005:7::c v12.lscache7.googlevideo.com +2404:6800:4005:3::c v12.lscache8.googlevideo.com +2404:6800:4005::10 v13.lscache1.googlevideo.com +2404:6800:4005:4::10 v13.lscache2.googlevideo.com +2404:6800:4005:5::10 v13.lscache3.googlevideo.com +2404:6800:4005:5::10 v13.lscache4.googlevideo.com +2404:6800:4005:6::10 v13.lscache5.googlevideo.com +2404:6800:4005:6::10 v13.lscache6.googlevideo.com +2404:6800:4005:7::10 v13.lscache7.googlevideo.com +2404:6800:4005:7::10 
v13.lscache8.googlevideo.com +2404:6800:4005::14 v14.lscache1.googlevideo.com +2404:6800:4005:4::14 v14.lscache2.googlevideo.com +2404:6800:4005:5::14 v14.lscache3.googlevideo.com +2404:6800:4005:1::14 v14.lscache4.googlevideo.com +2404:6800:4005:2::14 v14.lscache5.googlevideo.com +2404:6800:4005:6::14 v14.lscache6.googlevideo.com +2404:6800:4005:3::14 v14.lscache7.googlevideo.com +2404:6800:4005:7::14 v14.lscache8.googlevideo.com +2404:6800:4005:4::18 v15.lscache1.googlevideo.com +2404:6800:4005:4::18 v15.lscache2.googlevideo.com +2404:6800:4005:5::18 v15.lscache3.googlevideo.com +2404:6800:4005:1::18 v15.lscache4.googlevideo.com +2404:6800:4005:6::18 v15.lscache5.googlevideo.com +2404:6800:4005:6::18 v15.lscache6.googlevideo.com +2404:6800:4005:7::18 v15.lscache7.googlevideo.com +2404:6800:4005:3::18 v15.lscache8.googlevideo.com +2404:6800:4005::9 v16.lscache1.googlevideo.com +2404:6800:4005::9 v16.lscache2.googlevideo.com +2404:6800:4005:5::9 v16.lscache3.googlevideo.com +2404:6800:4005:5::9 v16.lscache4.googlevideo.com +2404:6800:4005:6::9 v16.lscache5.googlevideo.com +2404:6800:4005:2::9 v16.lscache6.googlevideo.com +2404:6800:4005:3::9 v16.lscache7.googlevideo.com +2404:6800:4005:3::9 v16.lscache8.googlevideo.com +2404:6800:4005:4::d v17.lscache1.googlevideo.com +2404:6800:4005:4::d v17.lscache2.googlevideo.com +2404:6800:4005:1::d v17.lscache3.googlevideo.com +2404:6800:4005:5::d v17.lscache4.googlevideo.com +2404:6800:4005:2::d v17.lscache5.googlevideo.com +2404:6800:4005:6::d v17.lscache6.googlevideo.com +2404:6800:4005:7::d v17.lscache7.googlevideo.com +2404:6800:4005:3::d v17.lscache8.googlevideo.com +2404:6800:4005::11 v18.lscache1.googlevideo.com +2404:6800:4005::11 v18.lscache2.googlevideo.com +2404:6800:4005:1::11 v18.lscache3.googlevideo.com +2404:6800:4005:5::11 v18.lscache4.googlevideo.com +2404:6800:4005:2::11 v18.lscache5.googlevideo.com +2404:6800:4005:2::11 v18.lscache6.googlevideo.com +2404:6800:4005:3::11 v18.lscache7.googlevideo.com 
+2404:6800:4005:7::11 v18.lscache8.googlevideo.com +2404:6800:4005:4::15 v19.lscache1.googlevideo.com +2404:6800:4005:4::15 v19.lscache2.googlevideo.com +2404:6800:4005:1::15 v19.lscache3.googlevideo.com +2404:6800:4005:5::15 v19.lscache4.googlevideo.com +2404:6800:4005:6::15 v19.lscache5.googlevideo.com +2404:6800:4005:6::15 v19.lscache6.googlevideo.com +2404:6800:4005:3::15 v19.lscache7.googlevideo.com +2404:6800:4005:7::15 v19.lscache8.googlevideo.com +2404:6800:4005::19 v20.lscache1.googlevideo.com +2404:6800:4005::19 v20.lscache2.googlevideo.com +2404:6800:4005:1::19 v20.lscache3.googlevideo.com +2404:6800:4005:5::19 v20.lscache4.googlevideo.com +2404:6800:4005:6::19 v20.lscache5.googlevideo.com +2404:6800:4005:2::19 v20.lscache6.googlevideo.com +2404:6800:4005:3::19 v20.lscache7.googlevideo.com +2404:6800:4005:3::19 v20.lscache8.googlevideo.com +2404:6800:4005:4::6 v21.lscache1.googlevideo.com +2404:6800:4005::6 v21.lscache2.googlevideo.com +2404:6800:4005:5::6 v21.lscache3.googlevideo.com +2404:6800:4005:1::6 v21.lscache4.googlevideo.com +2404:6800:4005:6::6 v21.lscache5.googlevideo.com +2404:6800:4005:6::6 v21.lscache6.googlevideo.com +2404:6800:4005:3::6 v21.lscache7.googlevideo.com +2404:6800:4005:3::6 v21.lscache8.googlevideo.com +2404:6800:4005:4::a v22.lscache1.googlevideo.com +2404:6800:4005:4::a v22.lscache2.googlevideo.com +2404:6800:4005:5::a v22.lscache3.googlevideo.com +2404:6800:4005:1::a v22.lscache4.googlevideo.com +2404:6800:4005:6::a v22.lscache5.googlevideo.com +2404:6800:4005:6::a v22.lscache6.googlevideo.com +2404:6800:4005:7::a v22.lscache7.googlevideo.com +2404:6800:4005:3::a v22.lscache8.googlevideo.com +2404:6800:4005::e v23.lscache1.googlevideo.com +2404:6800:4005:4::e v23.lscache2.googlevideo.com +2404:6800:4005:1::e v23.lscache3.googlevideo.com +2404:6800:4005:5::e v23.lscache4.googlevideo.com +2404:6800:4005:2::e v23.lscache5.googlevideo.com +2404:6800:4005:6::e v23.lscache6.googlevideo.com +2404:6800:4005:7::e 
v23.lscache7.googlevideo.com +2404:6800:4005:3::e v23.lscache8.googlevideo.com +2404:6800:4005::12 v24.lscache1.googlevideo.com +2404:6800:4005::12 v24.lscache2.googlevideo.com +2404:6800:4005:1::12 v24.lscache3.googlevideo.com +2404:6800:4005:5::12 v24.lscache4.googlevideo.com +2404:6800:4005:6::12 v24.lscache5.googlevideo.com +2404:6800:4005:6::12 v24.lscache6.googlevideo.com +2404:6800:4005:7::12 v24.lscache7.googlevideo.com +2404:6800:4005:3::12 v24.lscache8.googlevideo.com +2404:6800:4005:4::6 v1.cache1.googlevideo.com +2404:6800:4005::6 v1.cache2.googlevideo.com +2404:6800:4005:1::6 v1.cache3.googlevideo.com +2404:6800:4005:5::6 v1.cache4.googlevideo.com +2404:6800:4005:2::6 v1.cache5.googlevideo.com +2404:6800:4005:2::6 v1.cache6.googlevideo.com +2404:6800:4005:7::6 v1.cache7.googlevideo.com +2404:6800:4005:3::6 v1.cache8.googlevideo.com +2404:6800:4005::a v2.cache1.googlevideo.com +2404:6800:4005::a v2.cache2.googlevideo.com +2404:6800:4005:5::a v2.cache3.googlevideo.com +2404:6800:4005:5::a v2.cache4.googlevideo.com +2404:6800:4005:2::a v2.cache5.googlevideo.com +2404:6800:4005:6::a v2.cache6.googlevideo.com +2404:6800:4005:3::a v2.cache7.googlevideo.com +2404:6800:4005:3::a v2.cache8.googlevideo.com +2404:6800:4005::e v3.cache1.googlevideo.com +2404:6800:4005::e v3.cache2.googlevideo.com +2404:6800:4005:1::e v3.cache3.googlevideo.com +2404:6800:4005:5::e v3.cache4.googlevideo.com +2404:6800:4005:2::e v3.cache5.googlevideo.com +2404:6800:4005:2::e v3.cache6.googlevideo.com +2404:6800:4005:7::e v3.cache7.googlevideo.com +2404:6800:4005:7::e v3.cache8.googlevideo.com +2404:6800:4005:4::12 v4.cache1.googlevideo.com +2404:6800:4005::12 v4.cache2.googlevideo.com +2404:6800:4005:5::12 v4.cache3.googlevideo.com +2404:6800:4005:1::12 v4.cache4.googlevideo.com +2404:6800:4005:6::12 v4.cache5.googlevideo.com +2404:6800:4005:2::12 v4.cache6.googlevideo.com +2404:6800:4005:7::12 v4.cache7.googlevideo.com +2404:6800:4005:7::12 v4.cache8.googlevideo.com 
+2404:6800:4005:4::16 v5.cache1.googlevideo.com +2404:6800:4005:4::16 v5.cache2.googlevideo.com +2404:6800:4005:1::16 v5.cache3.googlevideo.com +2404:6800:4005:5::16 v5.cache4.googlevideo.com +2404:6800:4005:2::16 v5.cache5.googlevideo.com +2404:6800:4005:6::16 v5.cache6.googlevideo.com +2404:6800:4005:7::16 v5.cache7.googlevideo.com +2404:6800:4005:7::16 v5.cache8.googlevideo.com +2404:6800:4005::7 v6.cache1.googlevideo.com +2404:6800:4005:4::7 v6.cache2.googlevideo.com +2404:6800:4005:5::7 v6.cache3.googlevideo.com +2404:6800:4005:5::7 v6.cache4.googlevideo.com +2404:6800:4005:6::7 v6.cache5.googlevideo.com +2404:6800:4005:6::7 v6.cache6.googlevideo.com +2404:6800:4005:7::7 v6.cache7.googlevideo.com +2404:6800:4005:3::7 v6.cache8.googlevideo.com +2404:6800:4005:4::b v7.cache1.googlevideo.com +2404:6800:4005:4::b v7.cache2.googlevideo.com +2404:6800:4005:5::b v7.cache3.googlevideo.com +2404:6800:4005:1::b v7.cache4.googlevideo.com +2404:6800:4005:6::b v7.cache5.googlevideo.com +2404:6800:4005:6::b v7.cache6.googlevideo.com +2404:6800:4005:7::b v7.cache7.googlevideo.com +2404:6800:4005:7::b v7.cache8.googlevideo.com +2404:6800:4005:4::f v8.cache1.googlevideo.com +2404:6800:4005:4::f v8.cache2.googlevideo.com +2404:6800:4005:5::f v8.cache3.googlevideo.com +2404:6800:4005:1::f v8.cache4.googlevideo.com +2404:6800:4005:2::f v8.cache5.googlevideo.com +2404:6800:4005:6::f v8.cache6.googlevideo.com +2404:6800:4005:7::f v8.cache7.googlevideo.com +2404:6800:4005:3::f v8.cache8.googlevideo.com +2404:6800:4005:4::13 v9.cache1.googlevideo.com +2404:6800:4005:4::13 v9.cache2.googlevideo.com +2404:6800:4005:1::13 v9.cache3.googlevideo.com +2404:6800:4005:1::13 v9.cache4.googlevideo.com +2404:6800:4005:6::13 v9.cache5.googlevideo.com +2404:6800:4005:2::13 v9.cache6.googlevideo.com +2404:6800:4005:3::13 v9.cache7.googlevideo.com +2404:6800:4005:7::13 v9.cache8.googlevideo.com +2404:6800:4005:4::17 v10.cache1.googlevideo.com +2404:6800:4005:4::17 v10.cache2.googlevideo.com 
+2404:6800:4005:1::17 v10.cache3.googlevideo.com +2404:6800:4005:5::17 v10.cache4.googlevideo.com +2404:6800:4005:6::17 v10.cache5.googlevideo.com +2404:6800:4005:2::17 v10.cache6.googlevideo.com +2404:6800:4005:7::17 v10.cache7.googlevideo.com +2404:6800:4005:3::17 v10.cache8.googlevideo.com +2404:6800:4005:4::8 v11.cache1.googlevideo.com +2404:6800:4005:4::8 v11.cache2.googlevideo.com +2404:6800:4005:1::8 v11.cache3.googlevideo.com +2404:6800:4005:5::8 v11.cache4.googlevideo.com +2404:6800:4005:6::8 v11.cache5.googlevideo.com +2404:6800:4005:6::8 v11.cache6.googlevideo.com +2404:6800:4005:3::8 v11.cache7.googlevideo.com +2404:6800:4005:3::8 v11.cache8.googlevideo.com +2404:6800:4005:4::c v12.cache1.googlevideo.com +2404:6800:4005:4::c v12.cache2.googlevideo.com +2404:6800:4005:5::c v12.cache3.googlevideo.com +2404:6800:4005:1::c v12.cache4.googlevideo.com +2404:6800:4005:6::c v12.cache5.googlevideo.com +2404:6800:4005:2::c v12.cache6.googlevideo.com +2404:6800:4005:7::c v12.cache7.googlevideo.com +2404:6800:4005:3::c v12.cache8.googlevideo.com +2404:6800:4005::10 v13.cache1.googlevideo.com +2404:6800:4005:4::10 v13.cache2.googlevideo.com +2404:6800:4005:5::10 v13.cache3.googlevideo.com +2404:6800:4005:5::10 v13.cache4.googlevideo.com +2404:6800:4005:6::10 v13.cache5.googlevideo.com +2404:6800:4005:6::10 v13.cache6.googlevideo.com +2404:6800:4005:7::10 v13.cache7.googlevideo.com +2404:6800:4005:7::10 v13.cache8.googlevideo.com +2404:6800:4005::14 v14.cache1.googlevideo.com +2404:6800:4005:4::14 v14.cache2.googlevideo.com +2404:6800:4005:5::14 v14.cache3.googlevideo.com +2404:6800:4005:1::14 v14.cache4.googlevideo.com +2404:6800:4005:2::14 v14.cache5.googlevideo.com +2404:6800:4005:6::14 v14.cache6.googlevideo.com +2404:6800:4005:3::14 v14.cache7.googlevideo.com +2404:6800:4005:7::14 v14.cache8.googlevideo.com +2404:6800:4005:4::18 v15.cache1.googlevideo.com +2404:6800:4005:4::18 v15.cache2.googlevideo.com +2404:6800:4005:5::18 v15.cache3.googlevideo.com 
+2404:6800:4005:1::18 v15.cache4.googlevideo.com +2404:6800:4005:6::18 v15.cache5.googlevideo.com +2404:6800:4005:6::18 v15.cache6.googlevideo.com +2404:6800:4005:7::18 v15.cache7.googlevideo.com +2404:6800:4005:3::18 v15.cache8.googlevideo.com +2404:6800:4005::9 v16.cache1.googlevideo.com +2404:6800:4005::9 v16.cache2.googlevideo.com +2404:6800:4005:5::9 v16.cache3.googlevideo.com +2404:6800:4005:5::9 v16.cache4.googlevideo.com +2404:6800:4005:6::9 v16.cache5.googlevideo.com +2404:6800:4005:2::9 v16.cache6.googlevideo.com +2404:6800:4005:3::9 v16.cache7.googlevideo.com +2404:6800:4005:3::9 v16.cache8.googlevideo.com +2404:6800:4005:4::d v17.cache1.googlevideo.com +2404:6800:4005:4::d v17.cache2.googlevideo.com +2404:6800:4005:1::d v17.cache3.googlevideo.com +2404:6800:4005:5::d v17.cache4.googlevideo.com +2404:6800:4005:2::d v17.cache5.googlevideo.com +2404:6800:4005:6::d v17.cache6.googlevideo.com +2404:6800:4005:7::d v17.cache7.googlevideo.com +2404:6800:4005:3::d v17.cache8.googlevideo.com +2404:6800:4005::11 v18.cache1.googlevideo.com +2404:6800:4005::11 v18.cache2.googlevideo.com +2404:6800:4005:1::11 v18.cache3.googlevideo.com +2404:6800:4005:5::11 v18.cache4.googlevideo.com +2404:6800:4005:2::11 v18.cache5.googlevideo.com +2404:6800:4005:2::11 v18.cache6.googlevideo.com +2404:6800:4005:3::11 v18.cache7.googlevideo.com +2404:6800:4005:7::11 v18.cache8.googlevideo.com +2404:6800:4005:4::15 v19.cache1.googlevideo.com +2404:6800:4005:4::15 v19.cache2.googlevideo.com +2404:6800:4005:1::15 v19.cache3.googlevideo.com +2404:6800:4005:5::15 v19.cache4.googlevideo.com +2404:6800:4005:6::15 v19.cache5.googlevideo.com +2404:6800:4005:6::15 v19.cache6.googlevideo.com +2404:6800:4005:3::15 v19.cache7.googlevideo.com +2404:6800:4005:7::15 v19.cache8.googlevideo.com +2404:6800:4005::19 v20.cache1.googlevideo.com +2404:6800:4005::19 v20.cache2.googlevideo.com +2404:6800:4005:1::19 v20.cache3.googlevideo.com +2404:6800:4005:5::19 v20.cache4.googlevideo.com 
+2404:6800:4005:6::19 v20.cache5.googlevideo.com +2404:6800:4005:2::19 v20.cache6.googlevideo.com +2404:6800:4005:3::19 v20.cache7.googlevideo.com +2404:6800:4005:3::19 v20.cache8.googlevideo.com +2404:6800:4005:4::6 v21.cache1.googlevideo.com +2404:6800:4005::6 v21.cache2.googlevideo.com +2404:6800:4005:5::6 v21.cache3.googlevideo.com +2404:6800:4005:1::6 v21.cache4.googlevideo.com +2404:6800:4005:6::6 v21.cache5.googlevideo.com +2404:6800:4005:6::6 v21.cache6.googlevideo.com +2404:6800:4005:3::6 v21.cache7.googlevideo.com +2404:6800:4005:3::6 v21.cache8.googlevideo.com +2404:6800:4005:4::a v22.cache1.googlevideo.com +2404:6800:4005:4::a v22.cache2.googlevideo.com +2404:6800:4005:5::a v22.cache3.googlevideo.com +2404:6800:4005:1::a v22.cache4.googlevideo.com +2404:6800:4005:6::a v22.cache5.googlevideo.com +2404:6800:4005:6::a v22.cache6.googlevideo.com +2404:6800:4005:7::a v22.cache7.googlevideo.com +2404:6800:4005:3::a v22.cache8.googlevideo.com +2404:6800:4005::e v23.cache1.googlevideo.com +2404:6800:4005:4::e v23.cache2.googlevideo.com +2404:6800:4005:1::e v23.cache3.googlevideo.com +2404:6800:4005:5::e v23.cache4.googlevideo.com +2404:6800:4005:2::e v23.cache5.googlevideo.com +2404:6800:4005:6::e v23.cache6.googlevideo.com +2404:6800:4005:7::e v23.cache7.googlevideo.com +2404:6800:4005:3::e v23.cache8.googlevideo.com +2404:6800:4005::12 v24.cache1.googlevideo.com +2404:6800:4005::12 v24.cache2.googlevideo.com +2404:6800:4005:1::12 v24.cache3.googlevideo.com +2404:6800:4005:5::12 v24.cache4.googlevideo.com +2404:6800:4005:6::12 v24.cache5.googlevideo.com +2404:6800:4005:6::12 v24.cache6.googlevideo.com +2404:6800:4005:7::12 v24.cache7.googlevideo.com +2404:6800:4005:3::12 v24.cache8.googlevideo.com +2404:6800:4005:4::6 v1.nonxt1.googlevideo.com +2404:6800:4005::6 v1.nonxt2.googlevideo.com +2404:6800:4005:5::6 v1.nonxt3.googlevideo.com +2404:6800:4005:5::6 v1.nonxt4.googlevideo.com +2404:6800:4005:6::6 v1.nonxt5.googlevideo.com +2404:6800:4005:6::6 
v1.nonxt6.googlevideo.com +2404:6800:4005:7::6 v1.nonxt7.googlevideo.com +2404:6800:4005:7::6 v1.nonxt8.googlevideo.com +2404:6800:4005::a v2.nonxt1.googlevideo.com +2404:6800:4005:4::a v2.nonxt2.googlevideo.com +2404:6800:4005:1::a v2.nonxt3.googlevideo.com +2404:6800:4005:1::a v2.nonxt4.googlevideo.com +2404:6800:4005:6::a v2.nonxt5.googlevideo.com +2404:6800:4005:2::a v2.nonxt6.googlevideo.com +2404:6800:4005:3::a v2.nonxt7.googlevideo.com +2404:6800:4005:3::a v2.nonxt8.googlevideo.com +2404:6800:4005::e v3.nonxt1.googlevideo.com +2404:6800:4005:4::e v3.nonxt2.googlevideo.com +2404:6800:4005:5::e v3.nonxt3.googlevideo.com +2404:6800:4005:1::e v3.nonxt4.googlevideo.com +2404:6800:4005:6::e v3.nonxt5.googlevideo.com +2404:6800:4005:6::e v3.nonxt6.googlevideo.com +2404:6800:4005:3::e v3.nonxt7.googlevideo.com +2404:6800:4005:7::e v3.nonxt8.googlevideo.com +2404:6800:4005:4::12 v4.nonxt1.googlevideo.com +2404:6800:4005::12 v4.nonxt2.googlevideo.com +2404:6800:4005:5::12 v4.nonxt3.googlevideo.com +2404:6800:4005:1::12 v4.nonxt4.googlevideo.com +2404:6800:4005:2::12 v4.nonxt5.googlevideo.com +2404:6800:4005:6::12 v4.nonxt6.googlevideo.com +2404:6800:4005:7::12 v4.nonxt7.googlevideo.com +2404:6800:4005:7::12 v4.nonxt8.googlevideo.com +2404:6800:4005::16 v5.nonxt1.googlevideo.com +2404:6800:4005:4::16 v5.nonxt2.googlevideo.com +2404:6800:4005:5::16 v5.nonxt3.googlevideo.com +2404:6800:4005:1::16 v5.nonxt4.googlevideo.com +2404:6800:4005:6::16 v5.nonxt5.googlevideo.com +2404:6800:4005:2::16 v5.nonxt6.googlevideo.com +2404:6800:4005:3::16 v5.nonxt7.googlevideo.com +2404:6800:4005:3::16 v5.nonxt8.googlevideo.com +2404:6800:4005::7 v6.nonxt1.googlevideo.com +2404:6800:4005::7 v6.nonxt2.googlevideo.com +2404:6800:4005:1::7 v6.nonxt3.googlevideo.com +2404:6800:4005:1::7 v6.nonxt4.googlevideo.com +2404:6800:4005:2::7 v6.nonxt5.googlevideo.com +2404:6800:4005:2::7 v6.nonxt6.googlevideo.com +2404:6800:4005:3::7 v6.nonxt7.googlevideo.com +2404:6800:4005:7::7 
v6.nonxt8.googlevideo.com +2404:6800:4005:4::b v7.nonxt1.googlevideo.com +2404:6800:4005::b v7.nonxt2.googlevideo.com +2404:6800:4005:5::b v7.nonxt3.googlevideo.com +2404:6800:4005:5::b v7.nonxt4.googlevideo.com +2404:6800:4005:2::b v7.nonxt5.googlevideo.com +2404:6800:4005:6::b v7.nonxt6.googlevideo.com +2404:6800:4005:7::b v7.nonxt7.googlevideo.com +2404:6800:4005:3::b v7.nonxt8.googlevideo.com +2404:6800:4005::f v8.nonxt1.googlevideo.com +2404:6800:4005:4::f v8.nonxt2.googlevideo.com +2404:6800:4005:1::f v8.nonxt3.googlevideo.com +2404:6800:4005:5::f v8.nonxt4.googlevideo.com +2404:6800:4005:2::f v8.nonxt5.googlevideo.com +2404:6800:4005:2::f v8.nonxt6.googlevideo.com +2404:6800:4005:7::f v8.nonxt7.googlevideo.com +2404:6800:4005:3::f v8.nonxt8.googlevideo.com +2404:6800:4005::13 v9.nonxt1.googlevideo.com +2404:6800:4005:4::13 v9.nonxt2.googlevideo.com +2404:6800:4005:1::13 v9.nonxt3.googlevideo.com +2404:6800:4005:5::13 v9.nonxt4.googlevideo.com +2404:6800:4005:2::13 v9.nonxt5.googlevideo.com +2404:6800:4005:6::13 v9.nonxt6.googlevideo.com +2404:6800:4005:7::13 v9.nonxt7.googlevideo.com +2404:6800:4005:3::13 v9.nonxt8.googlevideo.com +2404:6800:4005:4::17 v10.nonxt1.googlevideo.com +2404:6800:4005:4::17 v10.nonxt2.googlevideo.com +2404:6800:4005:1::17 v10.nonxt3.googlevideo.com +2404:6800:4005:5::17 v10.nonxt4.googlevideo.com +2404:6800:4005:6::17 v10.nonxt5.googlevideo.com +2404:6800:4005:2::17 v10.nonxt6.googlevideo.com +2404:6800:4005:7::17 v10.nonxt7.googlevideo.com +2404:6800:4005:3::17 v10.nonxt8.googlevideo.com +2404:6800:4005:4::8 v11.nonxt1.googlevideo.com +2404:6800:4005::8 v11.nonxt2.googlevideo.com +2404:6800:4005:5::8 v11.nonxt3.googlevideo.com +2404:6800:4005:5::8 v11.nonxt4.googlevideo.com +2404:6800:4005:2::8 v11.nonxt5.googlevideo.com +2404:6800:4005:2::8 v11.nonxt6.googlevideo.com +2404:6800:4005:3::8 v11.nonxt7.googlevideo.com +2404:6800:4005:3::8 v11.nonxt8.googlevideo.com +2404:6800:4005:4::c v12.nonxt1.googlevideo.com +2404:6800:4005:4::c 
v12.nonxt2.googlevideo.com +2404:6800:4005:5::c v12.nonxt3.googlevideo.com +2404:6800:4005:1::c v12.nonxt4.googlevideo.com +2404:6800:4005:6::c v12.nonxt5.googlevideo.com +2404:6800:4005:6::c v12.nonxt6.googlevideo.com +2404:6800:4005:7::c v12.nonxt7.googlevideo.com +2404:6800:4005:3::c v12.nonxt8.googlevideo.com +2404:6800:4005::10 v13.nonxt1.googlevideo.com +2404:6800:4005::10 v13.nonxt2.googlevideo.com +2404:6800:4005:1::10 v13.nonxt3.googlevideo.com +2404:6800:4005:5::10 v13.nonxt4.googlevideo.com +2404:6800:4005:2::10 v13.nonxt5.googlevideo.com +2404:6800:4005:6::10 v13.nonxt6.googlevideo.com +2404:6800:4005:3::10 v13.nonxt7.googlevideo.com +2404:6800:4005:3::10 v13.nonxt8.googlevideo.com +2404:6800:4005::14 v14.nonxt1.googlevideo.com +2404:6800:4005:4::14 v14.nonxt2.googlevideo.com +2404:6800:4005:5::14 v14.nonxt3.googlevideo.com +2404:6800:4005:1::14 v14.nonxt4.googlevideo.com +2404:6800:4005:2::14 v14.nonxt5.googlevideo.com +2404:6800:4005:2::14 v14.nonxt6.googlevideo.com +2404:6800:4005:7::14 v14.nonxt7.googlevideo.com +2404:6800:4005:3::14 v14.nonxt8.googlevideo.com +2404:6800:4005::18 v15.nonxt1.googlevideo.com +2404:6800:4005::18 v15.nonxt2.googlevideo.com +2404:6800:4005:1::18 v15.nonxt3.googlevideo.com +2404:6800:4005:1::18 v15.nonxt4.googlevideo.com +2404:6800:4005:2::18 v15.nonxt5.googlevideo.com +2404:6800:4005:2::18 v15.nonxt6.googlevideo.com +2404:6800:4005:3::18 v15.nonxt7.googlevideo.com +2404:6800:4005:7::18 v15.nonxt8.googlevideo.com +2404:6800:4005:4::9 v16.nonxt1.googlevideo.com +2404:6800:4005:4::9 v16.nonxt2.googlevideo.com +2404:6800:4005:5::9 v16.nonxt3.googlevideo.com +2404:6800:4005:1::9 v16.nonxt4.googlevideo.com +2404:6800:4005:2::9 v16.nonxt5.googlevideo.com +2404:6800:4005:6::9 v16.nonxt6.googlevideo.com +2404:6800:4005:3::9 v16.nonxt7.googlevideo.com +2404:6800:4005:3::9 v16.nonxt8.googlevideo.com +2404:6800:4005:4::d v17.nonxt1.googlevideo.com +2404:6800:4005:4::d v17.nonxt2.googlevideo.com +2404:6800:4005:1::d 
v17.nonxt3.googlevideo.com +2404:6800:4005:5::d v17.nonxt4.googlevideo.com +2404:6800:4005:6::d v17.nonxt5.googlevideo.com +2404:6800:4005:6::d v17.nonxt6.googlevideo.com +2404:6800:4005:7::d v17.nonxt7.googlevideo.com +2404:6800:4005:7::d v17.nonxt8.googlevideo.com +2404:6800:4005:4::11 v18.nonxt1.googlevideo.com +2404:6800:4005::11 v18.nonxt2.googlevideo.com +2404:6800:4005:1::11 v18.nonxt3.googlevideo.com +2404:6800:4005:1::11 v18.nonxt4.googlevideo.com +2404:6800:4005:6::11 v18.nonxt5.googlevideo.com +2404:6800:4005:2::11 v18.nonxt6.googlevideo.com +2404:6800:4005:7::11 v18.nonxt7.googlevideo.com +2404:6800:4005:3::11 v18.nonxt8.googlevideo.com +2404:6800:4005::15 v19.nonxt1.googlevideo.com +2404:6800:4005::15 v19.nonxt2.googlevideo.com +2404:6800:4005:5::15 v19.nonxt3.googlevideo.com +2404:6800:4005:5::15 v19.nonxt4.googlevideo.com +2404:6800:4005:6::15 v19.nonxt5.googlevideo.com +2404:6800:4005:2::15 v19.nonxt6.googlevideo.com +2404:6800:4005:7::15 v19.nonxt7.googlevideo.com +2404:6800:4005:7::15 v19.nonxt8.googlevideo.com +2404:6800:4005:4::19 v20.nonxt1.googlevideo.com +2404:6800:4005::19 v20.nonxt2.googlevideo.com +2404:6800:4005:1::19 v20.nonxt3.googlevideo.com +2404:6800:4005:1::19 v20.nonxt4.googlevideo.com +2404:6800:4005:6::19 v20.nonxt5.googlevideo.com +2404:6800:4005:6::19 v20.nonxt6.googlevideo.com +2404:6800:4005:7::19 v20.nonxt7.googlevideo.com +2404:6800:4005:7::19 v20.nonxt8.googlevideo.com +2404:6800:4005::6 v21.nonxt1.googlevideo.com +2404:6800:4005:4::6 v21.nonxt2.googlevideo.com +2404:6800:4005:5::6 v21.nonxt3.googlevideo.com +2404:6800:4005:1::6 v21.nonxt4.googlevideo.com +2404:6800:4005:6::6 v21.nonxt5.googlevideo.com +2404:6800:4005:2::6 v21.nonxt6.googlevideo.com +2404:6800:4005:3::6 v21.nonxt7.googlevideo.com +2404:6800:4005:3::6 v21.nonxt8.googlevideo.com +2404:6800:4005:4::a v22.nonxt1.googlevideo.com +2404:6800:4005::a v22.nonxt2.googlevideo.com +2404:6800:4005:1::a v22.nonxt3.googlevideo.com +2404:6800:4005:5::a 
v22.nonxt4.googlevideo.com +2404:6800:4005:6::a v22.nonxt5.googlevideo.com +2404:6800:4005:2::a v22.nonxt6.googlevideo.com +2404:6800:4005:7::a v22.nonxt7.googlevideo.com +2404:6800:4005:7::a v22.nonxt8.googlevideo.com +2404:6800:4005:4::e v23.nonxt1.googlevideo.com +2404:6800:4005::e v23.nonxt2.googlevideo.com +2404:6800:4005:1::e v23.nonxt3.googlevideo.com +2404:6800:4005:1::e v23.nonxt4.googlevideo.com +2404:6800:4005:2::e v23.nonxt5.googlevideo.com +2404:6800:4005:6::e v23.nonxt6.googlevideo.com +2404:6800:4005:3::e v23.nonxt7.googlevideo.com +2404:6800:4005:7::e v23.nonxt8.googlevideo.com +2404:6800:4005::12 v24.nonxt1.googlevideo.com +2404:6800:4005:4::12 v24.nonxt2.googlevideo.com +2404:6800:4005:1::12 v24.nonxt3.googlevideo.com +2404:6800:4005:5::12 v24.nonxt4.googlevideo.com +2404:6800:4005:2::12 v24.nonxt5.googlevideo.com +2404:6800:4005:6::12 v24.nonxt6.googlevideo.com +2404:6800:4005:7::12 v24.nonxt7.googlevideo.com +2404:6800:4005:7::12 v24.nonxt8.googlevideo.com +2404:6800:4007::6 tc.v1.cache1.googlevideo.com +2404:6800:4007::6 tc.v1.cache2.googlevideo.com +2404:6800:4007::6 tc.v1.cache3.googlevideo.com +2404:6800:4007::6 tc.v1.cache4.googlevideo.com +2404:6800:4007::6 tc.v1.cache5.googlevideo.com +2404:6800:4007::6 tc.v1.cache6.googlevideo.com +2404:6800:4007::6 tc.v1.cache7.googlevideo.com +2404:6800:4007::6 tc.v1.cache8.googlevideo.com +2404:6800:4007::a tc.v2.cache1.googlevideo.com +2404:6800:4007::a tc.v2.cache2.googlevideo.com +2404:6800:4007::a tc.v2.cache3.googlevideo.com +2404:6800:4007::a tc.v2.cache4.googlevideo.com +2404:6800:4007::a tc.v2.cache5.googlevideo.com +2404:6800:4007::a tc.v2.cache6.googlevideo.com +2404:6800:4007::a tc.v2.cache7.googlevideo.com +2404:6800:4007::a tc.v2.cache8.googlevideo.com +2404:6800:4007::e tc.v3.cache1.googlevideo.com +2404:6800:4007::e tc.v3.cache2.googlevideo.com +2404:6800:4007::e tc.v3.cache3.googlevideo.com +2404:6800:4007::e tc.v3.cache4.googlevideo.com +2404:6800:4007::e tc.v3.cache5.googlevideo.com 
+2404:6800:4007::e tc.v3.cache6.googlevideo.com +2404:6800:4007::e tc.v3.cache7.googlevideo.com +2404:6800:4007::e tc.v3.cache8.googlevideo.com +2404:6800:4007::12 tc.v4.cache1.googlevideo.com +2404:6800:4007::12 tc.v4.cache2.googlevideo.com +2404:6800:4007::12 tc.v4.cache3.googlevideo.com +2404:6800:4007::12 tc.v4.cache4.googlevideo.com +2404:6800:4007::12 tc.v4.cache5.googlevideo.com +2404:6800:4007::12 tc.v4.cache6.googlevideo.com +2404:6800:4007::12 tc.v4.cache7.googlevideo.com +2404:6800:4007::12 tc.v4.cache8.googlevideo.com +2404:6800:4007::16 tc.v5.cache1.googlevideo.com +2404:6800:4007::16 tc.v5.cache2.googlevideo.com +2404:6800:4007::16 tc.v5.cache3.googlevideo.com +2404:6800:4007::16 tc.v5.cache4.googlevideo.com +2404:6800:4007::16 tc.v5.cache5.googlevideo.com +2404:6800:4007::16 tc.v5.cache6.googlevideo.com +2404:6800:4007::16 tc.v5.cache7.googlevideo.com +2404:6800:4007::16 tc.v5.cache8.googlevideo.com +2404:6800:4007::7 tc.v6.cache1.googlevideo.com +2404:6800:4007::7 tc.v6.cache2.googlevideo.com +2404:6800:4007::7 tc.v6.cache3.googlevideo.com +2404:6800:4007::7 tc.v6.cache4.googlevideo.com +2404:6800:4007::7 tc.v6.cache5.googlevideo.com +2404:6800:4007::7 tc.v6.cache6.googlevideo.com +2404:6800:4007::7 tc.v6.cache7.googlevideo.com +2404:6800:4007::7 tc.v6.cache8.googlevideo.com +2404:6800:4007::b tc.v7.cache1.googlevideo.com +2404:6800:4007::b tc.v7.cache2.googlevideo.com +2404:6800:4007::b tc.v7.cache3.googlevideo.com +2404:6800:4007::b tc.v7.cache4.googlevideo.com +2404:6800:4007::b tc.v7.cache5.googlevideo.com +2404:6800:4007::b tc.v7.cache6.googlevideo.com +2404:6800:4007::b tc.v7.cache7.googlevideo.com +2404:6800:4007::b tc.v7.cache8.googlevideo.com +2404:6800:4007::f tc.v8.cache1.googlevideo.com +2404:6800:4007::f tc.v8.cache2.googlevideo.com +2404:6800:4007::f tc.v8.cache3.googlevideo.com +2404:6800:4007::f tc.v8.cache4.googlevideo.com +2404:6800:4007::f tc.v8.cache5.googlevideo.com +2404:6800:4007::f tc.v8.cache6.googlevideo.com 
+2404:6800:4007::f tc.v8.cache7.googlevideo.com +2404:6800:4007::f tc.v8.cache8.googlevideo.com +2404:6800:4007::13 tc.v9.cache1.googlevideo.com +2404:6800:4007::13 tc.v9.cache2.googlevideo.com +2404:6800:4007::13 tc.v9.cache3.googlevideo.com +2404:6800:4007::13 tc.v9.cache4.googlevideo.com +2404:6800:4007::13 tc.v9.cache5.googlevideo.com +2404:6800:4007::13 tc.v9.cache6.googlevideo.com +2404:6800:4007::13 tc.v9.cache7.googlevideo.com +2404:6800:4007::13 tc.v9.cache8.googlevideo.com +2404:6800:4007::17 tc.v10.cache1.googlevideo.com +2404:6800:4007::17 tc.v10.cache2.googlevideo.com +2404:6800:4007::17 tc.v10.cache3.googlevideo.com +2404:6800:4007::17 tc.v10.cache4.googlevideo.com +2404:6800:4007::17 tc.v10.cache5.googlevideo.com +2404:6800:4007::17 tc.v10.cache6.googlevideo.com +2404:6800:4007::17 tc.v10.cache7.googlevideo.com +2404:6800:4007::17 tc.v10.cache8.googlevideo.com +2404:6800:4007::8 tc.v11.cache1.googlevideo.com +2404:6800:4007::8 tc.v11.cache2.googlevideo.com +2404:6800:4007::8 tc.v11.cache3.googlevideo.com +2404:6800:4007::8 tc.v11.cache4.googlevideo.com +2404:6800:4007::8 tc.v11.cache5.googlevideo.com +2404:6800:4007::8 tc.v11.cache6.googlevideo.com +2404:6800:4007::8 tc.v11.cache7.googlevideo.com +2404:6800:4007::8 tc.v11.cache8.googlevideo.com +2404:6800:4007::c tc.v12.cache1.googlevideo.com +2404:6800:4007::c tc.v12.cache2.googlevideo.com +2404:6800:4007::c tc.v12.cache3.googlevideo.com +2404:6800:4007::c tc.v12.cache4.googlevideo.com +2404:6800:4007::c tc.v12.cache5.googlevideo.com +2404:6800:4007::c tc.v12.cache6.googlevideo.com +2404:6800:4007::c tc.v12.cache7.googlevideo.com +2404:6800:4007::c tc.v12.cache8.googlevideo.com +2404:6800:4007::10 tc.v13.cache1.googlevideo.com +2404:6800:4007::10 tc.v13.cache2.googlevideo.com +2404:6800:4007::10 tc.v13.cache3.googlevideo.com +2404:6800:4007::10 tc.v13.cache4.googlevideo.com +2404:6800:4007::10 tc.v13.cache5.googlevideo.com +2404:6800:4007::10 tc.v13.cache6.googlevideo.com +2404:6800:4007::10 
tc.v13.cache7.googlevideo.com +2404:6800:4007::10 tc.v13.cache8.googlevideo.com +2404:6800:4007::14 tc.v14.cache1.googlevideo.com +2404:6800:4007::14 tc.v14.cache2.googlevideo.com +2404:6800:4007::14 tc.v14.cache3.googlevideo.com +2404:6800:4007::14 tc.v14.cache4.googlevideo.com +2404:6800:4007::14 tc.v14.cache5.googlevideo.com +2404:6800:4007::14 tc.v14.cache6.googlevideo.com +2404:6800:4007::14 tc.v14.cache7.googlevideo.com +2404:6800:4007::14 tc.v14.cache8.googlevideo.com +2404:6800:4007::18 tc.v15.cache1.googlevideo.com +2404:6800:4007::18 tc.v15.cache2.googlevideo.com +2404:6800:4007::18 tc.v15.cache3.googlevideo.com +2404:6800:4007::18 tc.v15.cache4.googlevideo.com +2404:6800:4007::18 tc.v15.cache5.googlevideo.com +2404:6800:4007::18 tc.v15.cache6.googlevideo.com +2404:6800:4007::18 tc.v15.cache7.googlevideo.com +2404:6800:4007::18 tc.v15.cache8.googlevideo.com +2404:6800:4007::9 tc.v16.cache1.googlevideo.com +2404:6800:4007::9 tc.v16.cache2.googlevideo.com +2404:6800:4007::9 tc.v16.cache3.googlevideo.com +2404:6800:4007::9 tc.v16.cache4.googlevideo.com +2404:6800:4007::9 tc.v16.cache5.googlevideo.com +2404:6800:4007::9 tc.v16.cache6.googlevideo.com +2404:6800:4007::9 tc.v16.cache7.googlevideo.com +2404:6800:4007::9 tc.v16.cache8.googlevideo.com +2404:6800:4007::d tc.v17.cache1.googlevideo.com +2404:6800:4007::d tc.v17.cache2.googlevideo.com +2404:6800:4007::d tc.v17.cache3.googlevideo.com +2404:6800:4007::d tc.v17.cache4.googlevideo.com +2404:6800:4007::d tc.v17.cache5.googlevideo.com +2404:6800:4007::d tc.v17.cache6.googlevideo.com +2404:6800:4007::d tc.v17.cache7.googlevideo.com +2404:6800:4007::d tc.v17.cache8.googlevideo.com +2404:6800:4007::11 tc.v18.cache1.googlevideo.com +2404:6800:4007::11 tc.v18.cache2.googlevideo.com +2404:6800:4007::11 tc.v18.cache3.googlevideo.com +2404:6800:4007::11 tc.v18.cache4.googlevideo.com +2404:6800:4007::11 tc.v18.cache5.googlevideo.com +2404:6800:4007::11 tc.v18.cache6.googlevideo.com +2404:6800:4007::11 
tc.v18.cache7.googlevideo.com +2404:6800:4007::11 tc.v18.cache8.googlevideo.com +2404:6800:4007::15 tc.v19.cache1.googlevideo.com +2404:6800:4007::15 tc.v19.cache2.googlevideo.com +2404:6800:4007::15 tc.v19.cache3.googlevideo.com +2404:6800:4007::15 tc.v19.cache4.googlevideo.com +2404:6800:4007::15 tc.v19.cache5.googlevideo.com +2404:6800:4007::15 tc.v19.cache6.googlevideo.com +2404:6800:4007::15 tc.v19.cache7.googlevideo.com +2404:6800:4007::15 tc.v19.cache8.googlevideo.com +2404:6800:4007::19 tc.v20.cache1.googlevideo.com +2404:6800:4007::19 tc.v20.cache2.googlevideo.com +2404:6800:4007::19 tc.v20.cache3.googlevideo.com +2404:6800:4007::19 tc.v20.cache4.googlevideo.com +2404:6800:4007::19 tc.v20.cache5.googlevideo.com +2404:6800:4007::19 tc.v20.cache6.googlevideo.com +2404:6800:4007::19 tc.v20.cache7.googlevideo.com +2404:6800:4007::19 tc.v20.cache8.googlevideo.com +2404:6800:4007::6 tc.v21.cache1.googlevideo.com +2404:6800:4007::6 tc.v21.cache2.googlevideo.com +2404:6800:4007::6 tc.v21.cache3.googlevideo.com +2404:6800:4007::6 tc.v21.cache4.googlevideo.com +2404:6800:4007::6 tc.v21.cache5.googlevideo.com +2404:6800:4007::6 tc.v21.cache6.googlevideo.com +2404:6800:4007::6 tc.v21.cache7.googlevideo.com +2404:6800:4007::6 tc.v21.cache8.googlevideo.com +2404:6800:4007::a tc.v22.cache1.googlevideo.com +2404:6800:4007::a tc.v22.cache2.googlevideo.com +2404:6800:4007::a tc.v22.cache3.googlevideo.com +2404:6800:4007::a tc.v22.cache4.googlevideo.com +2404:6800:4007::a tc.v22.cache5.googlevideo.com +2404:6800:4007::a tc.v22.cache6.googlevideo.com +2404:6800:4007::a tc.v22.cache7.googlevideo.com +2404:6800:4007::a tc.v22.cache8.googlevideo.com +2404:6800:4007::e tc.v23.cache1.googlevideo.com +2404:6800:4007::e tc.v23.cache2.googlevideo.com +2404:6800:4007::e tc.v23.cache3.googlevideo.com +2404:6800:4007::e tc.v23.cache4.googlevideo.com +2404:6800:4007::e tc.v23.cache5.googlevideo.com +2404:6800:4007::e tc.v23.cache6.googlevideo.com +2404:6800:4007::e 
tc.v23.cache7.googlevideo.com +2404:6800:4007::e tc.v23.cache8.googlevideo.com +2404:6800:4007::12 tc.v24.cache1.googlevideo.com +2404:6800:4007::12 tc.v24.cache2.googlevideo.com +2404:6800:4007::12 tc.v24.cache3.googlevideo.com +2404:6800:4007::12 tc.v24.cache4.googlevideo.com +2404:6800:4007::12 tc.v24.cache5.googlevideo.com +2404:6800:4007::12 tc.v24.cache6.googlevideo.com +2404:6800:4007::12 tc.v24.cache7.googlevideo.com +2404:6800:4007::12 tc.v24.cache8.googlevideo.com +2404:6800:4008:2::6 r1.tpe05s03.googlevideo.com +2404:6800:4008:3::6 r1.tpe05s04.googlevideo.com +2404:6800:4008:2::7 r2.tpe05s03.googlevideo.com +2404:6800:4008:3::7 r2.tpe05s04.googlevideo.com +2404:6800:4008:2::8 r3.tpe05s03.googlevideo.com +2404:6800:4008:3::8 r3.tpe05s04.googlevideo.com +2404:6800:4008:2::9 r4.tpe05s03.googlevideo.com +2404:6800:4008:3::9 r4.tpe05s04.googlevideo.com +2404:6800:4008:2::a r5.tpe05s03.googlevideo.com +2404:6800:4008:3::a r5.tpe05s04.googlevideo.com +2404:6800:4008:2::b r6.tpe05s03.googlevideo.com +2404:6800:4008:3::b r6.tpe05s04.googlevideo.com +2404:6800:4008:2::c r7.tpe05s03.googlevideo.com +2404:6800:4008:3::c r7.tpe05s04.googlevideo.com +2404:6800:4008:2::d r8.tpe05s03.googlevideo.com +2404:6800:4008:3::d r8.tpe05s04.googlevideo.com +2404:6800:4008:2::e r9.tpe05s03.googlevideo.com +2404:6800:4008:3::e r9.tpe05s04.googlevideo.com +2404:6800:4008:2::f r10.tpe05s03.googlevideo.com +2404:6800:4008:3::f r10.tpe05s04.googlevideo.com +2404:6800:4008:2::10 r11.tpe05s03.googlevideo.com +2404:6800:4008:3::10 r11.tpe05s04.googlevideo.com +2404:6800:4008:2::11 r12.tpe05s03.googlevideo.com +2404:6800:4008:3::11 r12.tpe05s04.googlevideo.com +2404:6800:4008:2::12 r13.tpe05s03.googlevideo.com +2404:6800:4008:3::12 r13.tpe05s04.googlevideo.com +2404:6800:4008:2::13 r14.tpe05s03.googlevideo.com +2404:6800:4008:3::13 r14.tpe05s04.googlevideo.com +2404:6800:4008:2::14 r15.tpe05s03.googlevideo.com +2404:6800:4008:3::14 r15.tpe05s04.googlevideo.com +2404:6800:4008:2::15 
r16.tpe05s03.googlevideo.com +2404:6800:4008:3::15 r16.tpe05s04.googlevideo.com +2404:6800:4008:2::16 r17.tpe05s03.googlevideo.com +2404:6800:4008:3::16 r17.tpe05s04.googlevideo.com +2404:6800:4008:2::17 r18.tpe05s03.googlevideo.com +2404:6800:4008:3::17 r18.tpe05s04.googlevideo.com +2404:6800:4008:2::18 r19.tpe05s03.googlevideo.com +2404:6800:4008:3::18 r19.tpe05s04.googlevideo.com +2404:6800:4008:2::19 r20.tpe05s03.googlevideo.com +2404:6800:4008:3::19 r20.tpe05s04.googlevideo.com +2404:6800:4005:4::6 r1.hkg03s05.googlevideo.com +2404:6800:4005:5::6 r1.hkg03s06.googlevideo.com +2404:6800:4005:6::6 r1.hkg03s07.googlevideo.com +2404:6800:4005:7::6 r1.hkg03s08.googlevideo.com +2404:6800:4005:4::7 r2.hkg03s05.googlevideo.com +2404:6800:4005:5::7 r2.hkg03s06.googlevideo.com +2404:6800:4005:6::7 r2.hkg03s07.googlevideo.com +2404:6800:4005:7::7 r2.hkg03s08.googlevideo.com +2404:6800:4005:4::8 r3.hkg03s05.googlevideo.com +2404:6800:4005:5::8 r3.hkg03s06.googlevideo.com +2404:6800:4005:6::8 r3.hkg03s07.googlevideo.com +2404:6800:4005:7::8 r3.hkg03s08.googlevideo.com +2404:6800:4005:4::9 r4.hkg03s05.googlevideo.com +2404:6800:4005:5::9 r4.hkg03s06.googlevideo.com +2404:6800:4005:6::9 r4.hkg03s07.googlevideo.com +2404:6800:4005:7::9 r4.hkg03s08.googlevideo.com +2404:6800:4005:4::a r5.hkg03s05.googlevideo.com +2404:6800:4005:5::a r5.hkg03s06.googlevideo.com +2404:6800:4005:6::a r5.hkg03s07.googlevideo.com +2404:6800:4005:7::a r5.hkg03s08.googlevideo.com +2404:6800:4005:4::b r6.hkg03s05.googlevideo.com +2404:6800:4005:5::b r6.hkg03s06.googlevideo.com +2404:6800:4005:6::b r6.hkg03s07.googlevideo.com +2404:6800:4005:7::b r6.hkg03s08.googlevideo.com +2404:6800:4005:4::c r7.hkg03s05.googlevideo.com +2404:6800:4005:5::c r7.hkg03s06.googlevideo.com +2404:6800:4005:6::c r7.hkg03s07.googlevideo.com +2404:6800:4005:7::c r7.hkg03s08.googlevideo.com +2404:6800:4005:4::d r8.hkg03s05.googlevideo.com +2404:6800:4005:5::d r8.hkg03s06.googlevideo.com +2404:6800:4005:6::d 
r8.hkg03s07.googlevideo.com +2404:6800:4005:7::d r8.hkg03s08.googlevideo.com +2404:6800:4005:4::e r9.hkg03s05.googlevideo.com +2404:6800:4005:5::e r9.hkg03s06.googlevideo.com +2404:6800:4005:6::e r9.hkg03s07.googlevideo.com +2404:6800:4005:7::e r9.hkg03s08.googlevideo.com +2404:6800:4005:4::f r10.hkg03s05.googlevideo.com +2404:6800:4005:5::f r10.hkg03s06.googlevideo.com +2404:6800:4005:6::f r10.hkg03s07.googlevideo.com +2404:6800:4005:7::f r10.hkg03s08.googlevideo.com +2404:6800:4005:4::10 r11.hkg03s05.googlevideo.com +2404:6800:4005:5::10 r11.hkg03s06.googlevideo.com +2404:6800:4005:6::10 r11.hkg03s07.googlevideo.com +2404:6800:4005:7::10 r11.hkg03s08.googlevideo.com +2404:6800:4005:4::11 r12.hkg03s05.googlevideo.com +2404:6800:4005:5::11 r12.hkg03s06.googlevideo.com +2404:6800:4005:6::11 r12.hkg03s07.googlevideo.com +2404:6800:4005:7::11 r12.hkg03s08.googlevideo.com +2404:6800:4005:4::12 r13.hkg03s05.googlevideo.com +2404:6800:4005:5::12 r13.hkg03s06.googlevideo.com +2404:6800:4005:6::12 r13.hkg03s07.googlevideo.com +2404:6800:4005:7::12 r13.hkg03s08.googlevideo.com +2404:6800:4005:4::13 r14.hkg03s05.googlevideo.com +2404:6800:4005:5::13 r14.hkg03s06.googlevideo.com +2404:6800:4005:6::13 r14.hkg03s07.googlevideo.com +2404:6800:4005:7::13 r14.hkg03s08.googlevideo.com +2404:6800:4005:4::14 r15.hkg03s05.googlevideo.com +2404:6800:4005:5::14 r15.hkg03s06.googlevideo.com +2404:6800:4005:6::14 r15.hkg03s07.googlevideo.com +2404:6800:4005:7::14 r15.hkg03s08.googlevideo.com +2404:6800:4005:4::15 r16.hkg03s05.googlevideo.com +2404:6800:4005:5::15 r16.hkg03s06.googlevideo.com +2404:6800:4005:6::15 r16.hkg03s07.googlevideo.com +2404:6800:4005:7::15 r16.hkg03s08.googlevideo.com +2404:6800:4005:4::16 r17.hkg03s05.googlevideo.com +2404:6800:4005:5::16 r17.hkg03s06.googlevideo.com +2404:6800:4005:6::16 r17.hkg03s07.googlevideo.com +2404:6800:4005:7::16 r17.hkg03s08.googlevideo.com +2404:6800:4005:4::17 r18.hkg03s05.googlevideo.com +2404:6800:4005:5::17 
r18.hkg03s06.googlevideo.com +2404:6800:4005:6::17 r18.hkg03s07.googlevideo.com +2404:6800:4005:7::17 r18.hkg03s08.googlevideo.com +2404:6800:4005:4::18 r19.hkg03s05.googlevideo.com +2404:6800:4005:5::18 r19.hkg03s06.googlevideo.com +2404:6800:4005:6::18 r19.hkg03s07.googlevideo.com +2404:6800:4005:7::18 r19.hkg03s08.googlevideo.com +2404:6800:4005:4::19 r20.hkg03s05.googlevideo.com +2404:6800:4005:5::19 r20.hkg03s06.googlevideo.com +2404:6800:4005:6::19 r20.hkg03s07.googlevideo.com +2404:6800:4005:7::19 r20.hkg03s08.googlevideo.com +2404:6800:4005::6 r1.hkg05s01.googlevideo.com +2404:6800:4005:1::6 r1.hkg05s02.googlevideo.com +2404:6800:4005:2::6 r1.hkg05s03.googlevideo.com +2404:6800:4005:3::6 r1.hkg05s04.googlevideo.com +2404:6800:4005::7 r2.hkg05s01.googlevideo.com +2404:6800:4005:1::7 r2.hkg05s02.googlevideo.com +2404:6800:4005:2::7 r2.hkg05s03.googlevideo.com +2404:6800:4005:3::7 r2.hkg05s04.googlevideo.com +2404:6800:4005::8 r3.hkg05s01.googlevideo.com +2404:6800:4005:1::8 r3.hkg05s02.googlevideo.com +2404:6800:4005:2::8 r3.hkg05s03.googlevideo.com +2404:6800:4005:3::8 r3.hkg05s04.googlevideo.com +2404:6800:4005::9 r4.hkg05s01.googlevideo.com +2404:6800:4005:1::9 r4.hkg05s02.googlevideo.com +2404:6800:4005:2::9 r4.hkg05s03.googlevideo.com +2404:6800:4005:3::9 r4.hkg05s04.googlevideo.com +2404:6800:4005::a r5.hkg05s01.googlevideo.com +2404:6800:4005:1::a r5.hkg05s02.googlevideo.com +2404:6800:4005:2::a r5.hkg05s03.googlevideo.com +2404:6800:4005:3::a r5.hkg05s04.googlevideo.com +2404:6800:4005::b r6.hkg05s01.googlevideo.com +2404:6800:4005:1::b r6.hkg05s02.googlevideo.com +2404:6800:4005:2::b r6.hkg05s03.googlevideo.com +2404:6800:4005:3::b r6.hkg05s04.googlevideo.com +2404:6800:4005::c r7.hkg05s01.googlevideo.com +2404:6800:4005:1::c r7.hkg05s02.googlevideo.com +2404:6800:4005:2::c r7.hkg05s03.googlevideo.com +2404:6800:4005:3::c r7.hkg05s04.googlevideo.com +2404:6800:4005::d r8.hkg05s01.googlevideo.com +2404:6800:4005:1::d r8.hkg05s02.googlevideo.com 
+2404:6800:4005:2::d r8.hkg05s03.googlevideo.com +2404:6800:4005:3::d r8.hkg05s04.googlevideo.com +2404:6800:4005::e r9.hkg05s01.googlevideo.com +2404:6800:4005:1::e r9.hkg05s02.googlevideo.com +2404:6800:4005:2::e r9.hkg05s03.googlevideo.com +2404:6800:4005:3::e r9.hkg05s04.googlevideo.com +2404:6800:4005::f r10.hkg05s01.googlevideo.com +2404:6800:4005:1::f r10.hkg05s02.googlevideo.com +2404:6800:4005:2::f r10.hkg05s03.googlevideo.com +2404:6800:4005:3::f r10.hkg05s04.googlevideo.com +2404:6800:4005::10 r11.hkg05s01.googlevideo.com +2404:6800:4005:1::10 r11.hkg05s02.googlevideo.com +2404:6800:4005:2::10 r11.hkg05s03.googlevideo.com +2404:6800:4005:3::10 r11.hkg05s04.googlevideo.com +2404:6800:4005::11 r12.hkg05s01.googlevideo.com +2404:6800:4005:1::11 r12.hkg05s02.googlevideo.com +2404:6800:4005:2::11 r12.hkg05s03.googlevideo.com +2404:6800:4005:3::11 r12.hkg05s04.googlevideo.com +2404:6800:4005::12 r13.hkg05s01.googlevideo.com +2404:6800:4005:1::12 r13.hkg05s02.googlevideo.com +2404:6800:4005:2::12 r13.hkg05s03.googlevideo.com +2404:6800:4005:3::12 r13.hkg05s04.googlevideo.com +2404:6800:4005::13 r14.hkg05s01.googlevideo.com +2404:6800:4005:1::13 r14.hkg05s02.googlevideo.com +2404:6800:4005:2::13 r14.hkg05s03.googlevideo.com +2404:6800:4005:3::13 r14.hkg05s04.googlevideo.com +2404:6800:4005::14 r15.hkg05s01.googlevideo.com +2404:6800:4005:1::14 r15.hkg05s02.googlevideo.com +2404:6800:4005:2::14 r15.hkg05s03.googlevideo.com +2404:6800:4005:3::14 r15.hkg05s04.googlevideo.com +2404:6800:4005::15 r16.hkg05s01.googlevideo.com +2404:6800:4005:1::15 r16.hkg05s02.googlevideo.com +2404:6800:4005:2::15 r16.hkg05s03.googlevideo.com +2404:6800:4005:3::15 r16.hkg05s04.googlevideo.com +2404:6800:4005::16 r17.hkg05s01.googlevideo.com +2404:6800:4005:1::16 r17.hkg05s02.googlevideo.com +2404:6800:4005:2::16 r17.hkg05s03.googlevideo.com +2404:6800:4005:3::16 r17.hkg05s04.googlevideo.com +2404:6800:4005::17 r18.hkg05s01.googlevideo.com +2404:6800:4005:1::17 
r18.hkg05s02.googlevideo.com +2404:6800:4005:2::17 r18.hkg05s03.googlevideo.com +2404:6800:4005:3::17 r18.hkg05s04.googlevideo.com +2404:6800:4005::18 r19.hkg05s01.googlevideo.com +2404:6800:4005:1::18 r19.hkg05s02.googlevideo.com +2404:6800:4005:2::18 r19.hkg05s03.googlevideo.com +2404:6800:4005:3::18 r19.hkg05s04.googlevideo.com +2404:6800:4005::19 r20.hkg05s01.googlevideo.com +2404:6800:4005:1::19 r20.hkg05s02.googlevideo.com +2404:6800:4005:2::19 r20.hkg05s03.googlevideo.com +2404:6800:4005:3::19 r20.hkg05s04.googlevideo.com +2404:6800:4007::6 r1.maa03s08.googlevideo.com +2404:6800:4007:1::6 r1.maa03s09.googlevideo.com +2404:6800:4007::7 r2.maa03s08.googlevideo.com +2404:6800:4007:1::7 r2.maa03s09.googlevideo.com +2404:6800:4007::8 r3.maa03s08.googlevideo.com +2404:6800:4007:1::8 r3.maa03s09.googlevideo.com +2404:6800:4007::9 r4.maa03s08.googlevideo.com +2404:6800:4007:1::9 r4.maa03s09.googlevideo.com +2404:6800:4007::a r5.maa03s08.googlevideo.com +2404:6800:4007:1::a r5.maa03s09.googlevideo.com +2404:6800:4007::b r6.maa03s08.googlevideo.com +2404:6800:4007:1::b r6.maa03s09.googlevideo.com +2404:6800:4007::c r7.maa03s08.googlevideo.com +2404:6800:4007:1::c r7.maa03s09.googlevideo.com +2404:6800:4007::d r8.maa03s08.googlevideo.com +2404:6800:4007:1::d r8.maa03s09.googlevideo.com +2404:6800:4007::e r9.maa03s08.googlevideo.com +2404:6800:4007:1::e r9.maa03s09.googlevideo.com +2404:6800:4007::f r10.maa03s08.googlevideo.com +2404:6800:4007:1::f r10.maa03s09.googlevideo.com +2404:6800:4007::10 r11.maa03s08.googlevideo.com +2404:6800:4007:1::10 r11.maa03s09.googlevideo.com +2404:6800:4007::11 r12.maa03s08.googlevideo.com +2404:6800:4007:1::11 r12.maa03s09.googlevideo.com +2404:6800:4007::12 r13.maa03s08.googlevideo.com +2404:6800:4007:1::12 r13.maa03s09.googlevideo.com +2404:6800:4007::13 r14.maa03s08.googlevideo.com +2404:6800:4007:1::13 r14.maa03s09.googlevideo.com +2404:6800:4007::14 r15.maa03s08.googlevideo.com +2404:6800:4007:1::14 r15.maa03s09.googlevideo.com 
+2404:6800:4007::15 r16.maa03s08.googlevideo.com +2404:6800:4007:1::15 r16.maa03s09.googlevideo.com +2404:6800:4007::16 r17.maa03s08.googlevideo.com +2404:6800:4007:1::16 r17.maa03s09.googlevideo.com +2404:6800:4007::17 r18.maa03s08.googlevideo.com +2404:6800:4007:1::17 r18.maa03s09.googlevideo.com +2404:6800:4007::18 r19.maa03s08.googlevideo.com +2404:6800:4007:1::18 r19.maa03s09.googlevideo.com +2404:6800:4007::19 r20.maa03s08.googlevideo.com +2404:6800:4007:1::19 r20.maa03s09.googlevideo.com +2404:6800:8005::a0 www.gvt0.cn +2404:6800:8005::a0 0.gvt0.cn +2404:6800:8005::a0 1.gvt0.cn +2404:6800:8005::a0 2.gvt0.cn +2404:6800:8005::a0 3.gvt0.cn + +#Wave 波浪 +2404:6800:8005::76 wave.google.com +2404:6800:8005::76 wave0.google.com +2404:6800:8005::76 wave1.google.com +2404:6800:8005::62 googlewave.com + +#WiFi +2404:6800:8005::7b wifi.google.com +2404:6800:8005::62 wifi.l.google.com + +#iGoogle Modules iGoogle 小工具 +2404:6800:8005::62 gmodules.com +2404:6800:8005::62 www.gmodules.com +2404:6800:8005::62 www.ig.gmodules.com +2404:6800:8005::62 ig.gmodules.com +2404:6800:8005::62 ads.gmodules.com +2404:6800:8005::62 p.gmodules.com +2404:6800:8005::62 1.ig.gmodules.com +2404:6800:8005::62 2.ig.gmodules.com +2404:6800:8005::62 3.ig.gmodules.com +2404:6800:8005::62 4.ig.gmodules.com +2404:6800:8005::62 5.ig.gmodules.com +2404:6800:8005::62 6.ig.gmodules.com +2404:6800:8005::62 maps.gmodules.com +2404:6800:8005::62 img0.gmodules.com +2404:6800:8005::62 img1.gmodules.com +2404:6800:8005::62 img2.gmodules.com +2404:6800:8005::62 img3.gmodules.com +2404:6800:8005::62 skins.gmodules.com +2404:6800:8005::76 friendconnect.gmodules.com +2404:6800:8005::76 0.blogger.gmodules.com +2404:6800:8005::76 gadgets.l.google.com +2404:6800:8005::63 partnerpage.google.com +2404:6800:8005::84 *.ig.ig.gmodules.com + +#GStatic Google 静态文件存储 +2404:6800:8005::62 www.gstatic.com +2404:6800:8005::62 csi.gstatic.com +2404:6800:8005::62 maps.gstatic.com +2404:6800:8005::78 ssl.gstatic.com 
+2404:6800:8005::62 t0.gstatic.com +2404:6800:8005::62 t1.gstatic.com +2404:6800:8005::62 t2.gstatic.com +2404:6800:8005::62 t3.gstatic.com +2404:6800:8005::93 g0.gstatic.com +2404:6800:8005::69 g1.gstatic.com +2404:6800:8005::6a g2.gstatic.com +2404:6800:8005::93 g3.gstatic.com +2404:6800:8005::66 mt0.gstatic.com +2404:6800:8005::66 mt1.gstatic.com +2404:6800:8005::8b mt2.gstatic.com +2404:6800:8005::66 mt3.gstatic.com +2404:6800:8005::8b mt4.gstatic.com +2404:6800:8005::66 mt5.gstatic.com +2404:6800:8005::8b mt6.gstatic.com +2404:6800:8005::66 mt7.gstatic.com +2404:6800:8005::67 www.gstatic.cn + +#Google APIs 开发接口服务 +2404:6800:8005::65 apis.google.com +2404:6800:8005::62 chart.apis.google.com +2404:6800:8005::5f www.googleapis.com +2404:6800:8005::5f *.googleapis.com +2404:6800:8005::5f googleapis.l.google.com +2404:6800:8005::5f ajax.googleapis.com +2404:6800:8005::5f chart.googleapis.com +2404:6800:8005::5f fonts.googleapis.com +2404:6800:8005::5f maps.googleapis.com +2404:6800:8005::5f translate.googleapis.com +2404:6800:8005::5f googleapis-ajax.google.com +2404:6800:8005::5f googleapis-ajax.l.google.com + +#BigCache (Google Storage) +2404:6800:4005::6 v1.cache1.c.bigcache.googleapis.com +2404:6800:4005:4::6 v1.cache2.c.bigcache.googleapis.com +2404:6800:4005:5::6 v1.cache3.c.bigcache.googleapis.com +2404:6800:4005:1::6 v1.cache4.c.bigcache.googleapis.com +2404:6800:4005:6::6 v1.cache5.c.bigcache.googleapis.com +2404:6800:4005:2::6 v1.cache6.c.bigcache.googleapis.com +2404:6800:4005:3::6 v1.cache7.c.bigcache.googleapis.com +2404:6800:4005:7::6 v1.cache8.c.bigcache.googleapis.com +2404:6800:4005:4::a v2.cache1.c.bigcache.googleapis.com +2404:6800:4005:4::a v2.cache2.c.bigcache.googleapis.com +2404:6800:4005:1::a v2.cache3.c.bigcache.googleapis.com +2404:6800:4005:5::a v2.cache4.c.bigcache.googleapis.com +2404:6800:4005:2::a v2.cache5.c.bigcache.googleapis.com +2404:6800:4005:6::a v2.cache6.c.bigcache.googleapis.com +2404:6800:4005:3::a 
v2.cache7.c.bigcache.googleapis.com +2404:6800:4005:3::a v2.cache8.c.bigcache.googleapis.com +2404:6800:4005:4::e v3.cache1.c.bigcache.googleapis.com +2404:6800:4005::e v3.cache2.c.bigcache.googleapis.com +2404:6800:4005:5::e v3.cache3.c.bigcache.googleapis.com +2404:6800:4005:5::e v3.cache4.c.bigcache.googleapis.com +2404:6800:4005:6::e v3.cache5.c.bigcache.googleapis.com +2404:6800:4005:2::e v3.cache6.c.bigcache.googleapis.com +2404:6800:4005:3::e v3.cache7.c.bigcache.googleapis.com +2404:6800:4005:7::e v3.cache8.c.bigcache.googleapis.com +2404:6800:4005:4::12 v4.cache1.c.bigcache.googleapis.com +2404:6800:4005:4::12 v4.cache2.c.bigcache.googleapis.com +2404:6800:4005:5::12 v4.cache3.c.bigcache.googleapis.com +2404:6800:4005:1::12 v4.cache4.c.bigcache.googleapis.com +2404:6800:4005:6::12 v4.cache5.c.bigcache.googleapis.com +2404:6800:4005:6::12 v4.cache6.c.bigcache.googleapis.com +2404:6800:4005:3::12 v4.cache7.c.bigcache.googleapis.com +2404:6800:4005:7::12 v4.cache8.c.bigcache.googleapis.com +2404:6800:4005::16 v5.cache1.c.bigcache.googleapis.com +2404:6800:4005:4::16 v5.cache2.c.bigcache.googleapis.com +2404:6800:4005:1::16 v5.cache3.c.bigcache.googleapis.com +2404:6800:4005:1::16 v5.cache4.c.bigcache.googleapis.com +2404:6800:4005:2::16 v5.cache5.c.bigcache.googleapis.com +2404:6800:4005:2::16 v5.cache6.c.bigcache.googleapis.com +2404:6800:4005:3::16 v5.cache7.c.bigcache.googleapis.com +2404:6800:4005:3::16 v5.cache8.c.bigcache.googleapis.com +2404:6800:4005:4::7 v6.cache1.c.bigcache.googleapis.com +2404:6800:4005:4::7 v6.cache2.c.bigcache.googleapis.com +2404:6800:4005:5::7 v6.cache3.c.bigcache.googleapis.com +2404:6800:4005:1::7 v6.cache4.c.bigcache.googleapis.com +2404:6800:4005:2::7 v6.cache5.c.bigcache.googleapis.com +2404:6800:4005:6::7 v6.cache6.c.bigcache.googleapis.com +2404:6800:4005:7::7 v6.cache7.c.bigcache.googleapis.com +2404:6800:4005:3::7 v6.cache8.c.bigcache.googleapis.com +2404:6800:4005:4::b v7.cache1.c.bigcache.googleapis.com 
+2404:6800:4005:4::b v7.cache2.c.bigcache.googleapis.com +2404:6800:4005:1::b v7.cache3.c.bigcache.googleapis.com +2404:6800:4005:5::b v7.cache4.c.bigcache.googleapis.com +2404:6800:4005:6::b v7.cache5.c.bigcache.googleapis.com +2404:6800:4005:2::b v7.cache6.c.bigcache.googleapis.com +2404:6800:4005:7::b v7.cache7.c.bigcache.googleapis.com +2404:6800:4005:7::b v7.cache8.c.bigcache.googleapis.com +2404:6800:4005::f v8.cache1.c.bigcache.googleapis.com +2404:6800:4005:4::f v8.cache2.c.bigcache.googleapis.com +2404:6800:4005:5::f v8.cache3.c.bigcache.googleapis.com +2404:6800:4005:1::f v8.cache4.c.bigcache.googleapis.com +2404:6800:4005:6::f v8.cache5.c.bigcache.googleapis.com +2404:6800:4005:6::f v8.cache6.c.bigcache.googleapis.com +2404:6800:4005:7::f v8.cache7.c.bigcache.googleapis.com +2404:6800:4005:3::f v8.cache8.c.bigcache.googleapis.com +2404:6800:4005::13 v9.cache1.c.bigcache.googleapis.com +2404:6800:4005::13 v9.cache2.c.bigcache.googleapis.com +2404:6800:4005:5::13 v9.cache3.c.bigcache.googleapis.com +2404:6800:4005:1::13 v9.cache4.c.bigcache.googleapis.com +2404:6800:4005:6::13 v9.cache5.c.bigcache.googleapis.com +2404:6800:4005:6::13 v9.cache6.c.bigcache.googleapis.com +2404:6800:4005:3::13 v9.cache7.c.bigcache.googleapis.com +2404:6800:4005:3::13 v9.cache8.c.bigcache.googleapis.com +2404:6800:4005::17 v10.cache1.c.bigcache.googleapis.com +2404:6800:4005:4::17 v10.cache2.c.bigcache.googleapis.com +2404:6800:4005:1::17 v10.cache3.c.bigcache.googleapis.com +2404:6800:4005:1::17 v10.cache4.c.bigcache.googleapis.com +2404:6800:4005:6::17 v10.cache5.c.bigcache.googleapis.com +2404:6800:4005:2::17 v10.cache6.c.bigcache.googleapis.com +2404:6800:4005:7::17 v10.cache7.c.bigcache.googleapis.com +2404:6800:4005:7::17 v10.cache8.c.bigcache.googleapis.com +2404:6800:4005:4::8 v11.cache1.c.bigcache.googleapis.com +2404:6800:4005:4::8 v11.cache2.c.bigcache.googleapis.com +2404:6800:4005:5::8 v11.cache3.c.bigcache.googleapis.com +2404:6800:4005:5::8 
v11.cache4.c.bigcache.googleapis.com +2404:6800:4005:2::8 v11.cache5.c.bigcache.googleapis.com +2404:6800:4005:2::8 v11.cache6.c.bigcache.googleapis.com +2404:6800:4005:7::8 v11.cache7.c.bigcache.googleapis.com +2404:6800:4005:7::8 v11.cache8.c.bigcache.googleapis.com +2404:6800:4005:4::c v12.cache1.c.bigcache.googleapis.com +2404:6800:4005:4::c v12.cache2.c.bigcache.googleapis.com +2404:6800:4005:5::c v12.cache3.c.bigcache.googleapis.com +2404:6800:4005:1::c v12.cache4.c.bigcache.googleapis.com +2404:6800:4005:6::c v12.cache5.c.bigcache.googleapis.com +2404:6800:4005:6::c v12.cache6.c.bigcache.googleapis.com +2404:6800:4005:7::c v12.cache7.c.bigcache.googleapis.com +2404:6800:4005:7::c v12.cache8.c.bigcache.googleapis.com +2404:6800:4005::10 v13.cache1.c.bigcache.googleapis.com +2404:6800:4005:4::10 v13.cache2.c.bigcache.googleapis.com +2404:6800:4005:1::10 v13.cache3.c.bigcache.googleapis.com +2404:6800:4005:5::10 v13.cache4.c.bigcache.googleapis.com +2404:6800:4005:6::10 v13.cache5.c.bigcache.googleapis.com +2404:6800:4005:6::10 v13.cache6.c.bigcache.googleapis.com +2404:6800:4005:7::10 v13.cache7.c.bigcache.googleapis.com +2404:6800:4005:3::10 v13.cache8.c.bigcache.googleapis.com +2404:6800:4005:4::14 v14.cache1.c.bigcache.googleapis.com +2404:6800:4005:4::14 v14.cache2.c.bigcache.googleapis.com +2404:6800:4005:5::14 v14.cache3.c.bigcache.googleapis.com +2404:6800:4005:1::14 v14.cache4.c.bigcache.googleapis.com +2404:6800:4005:2::14 v14.cache5.c.bigcache.googleapis.com +2404:6800:4005:2::14 v14.cache6.c.bigcache.googleapis.com +2404:6800:4005:7::14 v14.cache7.c.bigcache.googleapis.com +2404:6800:4005:7::14 v14.cache8.c.bigcache.googleapis.com +2404:6800:4005::18 v15.cache1.c.bigcache.googleapis.com +2404:6800:4005:4::18 v15.cache2.c.bigcache.googleapis.com +2404:6800:4005:1::18 v15.cache3.c.bigcache.googleapis.com +2404:6800:4005:1::18 v15.cache4.c.bigcache.googleapis.com +2404:6800:4005:6::18 v15.cache5.c.bigcache.googleapis.com +2404:6800:4005:2::18 
v15.cache6.c.bigcache.googleapis.com +2404:6800:4005:3::18 v15.cache7.c.bigcache.googleapis.com +2404:6800:4005:7::18 v15.cache8.c.bigcache.googleapis.com +2404:6800:4005:4::9 v16.cache1.c.bigcache.googleapis.com +2404:6800:4005:4::9 v16.cache2.c.bigcache.googleapis.com +2404:6800:4005:5::9 v16.cache3.c.bigcache.googleapis.com +2404:6800:4005:5::9 v16.cache4.c.bigcache.googleapis.com +2404:6800:4005:6::9 v16.cache5.c.bigcache.googleapis.com +2404:6800:4005:2::9 v16.cache6.c.bigcache.googleapis.com +2404:6800:4005:7::9 v16.cache7.c.bigcache.googleapis.com +2404:6800:4005:3::9 v16.cache8.c.bigcache.googleapis.com +2404:6800:4005:4::d v17.cache1.c.bigcache.googleapis.com +2404:6800:4005:4::d v17.cache2.c.bigcache.googleapis.com +2404:6800:4005:5::d v17.cache3.c.bigcache.googleapis.com +2404:6800:4005:1::d v17.cache4.c.bigcache.googleapis.com +2404:6800:4005:2::d v17.cache5.c.bigcache.googleapis.com +2404:6800:4005:6::d v17.cache6.c.bigcache.googleapis.com +2404:6800:4005:7::d v17.cache7.c.bigcache.googleapis.com +2404:6800:4005:3::d v17.cache8.c.bigcache.googleapis.com +2404:6800:4005:4::11 v18.cache1.c.bigcache.googleapis.com +2404:6800:4005::11 v18.cache2.c.bigcache.googleapis.com +2404:6800:4005:1::11 v18.cache3.c.bigcache.googleapis.com +2404:6800:4005:1::11 v18.cache4.c.bigcache.googleapis.com +2404:6800:4005:2::11 v18.cache5.c.bigcache.googleapis.com +2404:6800:4005:6::11 v18.cache6.c.bigcache.googleapis.com +2404:6800:4005:3::11 v18.cache7.c.bigcache.googleapis.com +2404:6800:4005:7::11 v18.cache8.c.bigcache.googleapis.com +2404:6800:4005:4::15 v19.cache1.c.bigcache.googleapis.com +2404:6800:4005:4::15 v19.cache2.c.bigcache.googleapis.com +2404:6800:4005:5::15 v19.cache3.c.bigcache.googleapis.com +2404:6800:4005:1::15 v19.cache4.c.bigcache.googleapis.com +2404:6800:4005:6::15 v19.cache5.c.bigcache.googleapis.com +2404:6800:4005:6::15 v19.cache6.c.bigcache.googleapis.com +2404:6800:4005:7::15 v19.cache7.c.bigcache.googleapis.com +2404:6800:4005:3::15 
v19.cache8.c.bigcache.googleapis.com +2404:6800:4005::19 v20.cache1.c.bigcache.googleapis.com +2404:6800:4005:4::19 v20.cache2.c.bigcache.googleapis.com +2404:6800:4005:5::19 v20.cache3.c.bigcache.googleapis.com +2404:6800:4005:5::19 v20.cache4.c.bigcache.googleapis.com +2404:6800:4005:6::19 v20.cache5.c.bigcache.googleapis.com +2404:6800:4005:2::19 v20.cache6.c.bigcache.googleapis.com +2404:6800:4005:3::19 v20.cache7.c.bigcache.googleapis.com +2404:6800:4005:3::19 v20.cache8.c.bigcache.googleapis.com +2404:6800:4005:4::6 v21.cache1.c.bigcache.googleapis.com +2404:6800:4005::6 v21.cache2.c.bigcache.googleapis.com +2404:6800:4005:1::6 v21.cache3.c.bigcache.googleapis.com +2404:6800:4005:1::6 v21.cache4.c.bigcache.googleapis.com +2404:6800:4005:2::6 v21.cache5.c.bigcache.googleapis.com +2404:6800:4005:6::6 v21.cache6.c.bigcache.googleapis.com +2404:6800:4005:7::6 v21.cache7.c.bigcache.googleapis.com +2404:6800:4005:7::6 v21.cache8.c.bigcache.googleapis.com +2404:6800:4005:4::a v22.cache1.c.bigcache.googleapis.com +2404:6800:4005:4::a v22.cache2.c.bigcache.googleapis.com +2404:6800:4005:5::a v22.cache3.c.bigcache.googleapis.com +2404:6800:4005:5::a v22.cache4.c.bigcache.googleapis.com +2404:6800:4005:6::a v22.cache5.c.bigcache.googleapis.com +2404:6800:4005:2::a v22.cache6.c.bigcache.googleapis.com +2404:6800:4005:7::a v22.cache7.c.bigcache.googleapis.com +2404:6800:4005:3::a v22.cache8.c.bigcache.googleapis.com +2404:6800:4005:4::e v23.cache1.c.bigcache.googleapis.com +2404:6800:4005:4::e v23.cache2.c.bigcache.googleapis.com +2404:6800:4005:5::e v23.cache3.c.bigcache.googleapis.com +2404:6800:4005:1::e v23.cache4.c.bigcache.googleapis.com +2404:6800:4005:2::e v23.cache5.c.bigcache.googleapis.com +2404:6800:4005:2::e v23.cache6.c.bigcache.googleapis.com +2404:6800:4005:3::e v23.cache7.c.bigcache.googleapis.com +2404:6800:4005:7::e v23.cache8.c.bigcache.googleapis.com +2404:6800:4005:4::12 v24.cache1.c.bigcache.googleapis.com +2404:6800:4005:4::12 
v24.cache2.c.bigcache.googleapis.com +2404:6800:4005:5::12 v24.cache3.c.bigcache.googleapis.com +2404:6800:4005:1::12 v24.cache4.c.bigcache.googleapis.com +2404:6800:4005:2::12 v24.cache5.c.bigcache.googleapis.com +2404:6800:4005:2::12 v24.cache6.c.bigcache.googleapis.com +2404:6800:4005:3::12 v24.cache7.c.bigcache.googleapis.com +2404:6800:4005:3::12 v24.cache8.c.bigcache.googleapis.com +2404:6800:4005::6 v1.lscache1.c.bigcache.googleapis.com +2404:6800:4005:4::6 v1.lscache2.c.bigcache.googleapis.com +2404:6800:4005:5::6 v1.lscache3.c.bigcache.googleapis.com +2404:6800:4005:1::6 v1.lscache4.c.bigcache.googleapis.com +2404:6800:4005:6::6 v1.lscache5.c.bigcache.googleapis.com +2404:6800:4005:2::6 v1.lscache6.c.bigcache.googleapis.com +2404:6800:4005:3::6 v1.lscache7.c.bigcache.googleapis.com +2404:6800:4005:7::6 v1.lscache8.c.bigcache.googleapis.com +2404:6800:4005:4::a v2.lscache1.c.bigcache.googleapis.com +2404:6800:4005:4::a v2.lscache2.c.bigcache.googleapis.com +2404:6800:4005:1::a v2.lscache3.c.bigcache.googleapis.com +2404:6800:4005:5::a v2.lscache4.c.bigcache.googleapis.com +2404:6800:4005:2::a v2.lscache5.c.bigcache.googleapis.com +2404:6800:4005:6::a v2.lscache6.c.bigcache.googleapis.com +2404:6800:4005:3::a v2.lscache7.c.bigcache.googleapis.com +2404:6800:4005:3::a v2.lscache8.c.bigcache.googleapis.com +2404:6800:4005:4::e v3.lscache1.c.bigcache.googleapis.com +2404:6800:4005::e v3.lscache2.c.bigcache.googleapis.com +2404:6800:4005:5::e v3.lscache3.c.bigcache.googleapis.com +2404:6800:4005:5::e v3.lscache4.c.bigcache.googleapis.com +2404:6800:4005:6::e v3.lscache5.c.bigcache.googleapis.com +2404:6800:4005:2::e v3.lscache6.c.bigcache.googleapis.com +2404:6800:4005:3::e v3.lscache7.c.bigcache.googleapis.com +2404:6800:4005:7::e v3.lscache8.c.bigcache.googleapis.com +2404:6800:4005:4::12 v4.lscache1.c.bigcache.googleapis.com +2404:6800:4005:4::12 v4.lscache2.c.bigcache.googleapis.com +2404:6800:4005:5::12 v4.lscache3.c.bigcache.googleapis.com 
+2404:6800:4005:1::12 v4.lscache4.c.bigcache.googleapis.com +2404:6800:4005:6::12 v4.lscache5.c.bigcache.googleapis.com +2404:6800:4005:6::12 v4.lscache6.c.bigcache.googleapis.com +2404:6800:4005:3::12 v4.lscache7.c.bigcache.googleapis.com +2404:6800:4005:7::12 v4.lscache8.c.bigcache.googleapis.com +2404:6800:4005::16 v5.lscache1.c.bigcache.googleapis.com +2404:6800:4005:4::16 v5.lscache2.c.bigcache.googleapis.com +2404:6800:4005:1::16 v5.lscache3.c.bigcache.googleapis.com +2404:6800:4005:1::16 v5.lscache4.c.bigcache.googleapis.com +2404:6800:4005:2::16 v5.lscache5.c.bigcache.googleapis.com +2404:6800:4005:2::16 v5.lscache6.c.bigcache.googleapis.com +2404:6800:4005:3::16 v5.lscache7.c.bigcache.googleapis.com +2404:6800:4005:3::16 v5.lscache8.c.bigcache.googleapis.com +2404:6800:4005:4::7 v6.lscache1.c.bigcache.googleapis.com +2404:6800:4005:4::7 v6.lscache2.c.bigcache.googleapis.com +2404:6800:4005:5::7 v6.lscache3.c.bigcache.googleapis.com +2404:6800:4005:1::7 v6.lscache4.c.bigcache.googleapis.com +2404:6800:4005:2::7 v6.lscache5.c.bigcache.googleapis.com +2404:6800:4005:6::7 v6.lscache6.c.bigcache.googleapis.com +2404:6800:4005:7::7 v6.lscache7.c.bigcache.googleapis.com +2404:6800:4005:3::7 v6.lscache8.c.bigcache.googleapis.com +2404:6800:4005:4::b v7.lscache1.c.bigcache.googleapis.com +2404:6800:4005:4::b v7.lscache2.c.bigcache.googleapis.com +2404:6800:4005:1::b v7.lscache3.c.bigcache.googleapis.com +2404:6800:4005:5::b v7.lscache4.c.bigcache.googleapis.com +2404:6800:4005:6::b v7.lscache5.c.bigcache.googleapis.com +2404:6800:4005:2::b v7.lscache6.c.bigcache.googleapis.com +2404:6800:4005:7::b v7.lscache7.c.bigcache.googleapis.com +2404:6800:4005:7::b v7.lscache8.c.bigcache.googleapis.com +2404:6800:4005::f v8.lscache1.c.bigcache.googleapis.com +2404:6800:4005:4::f v8.lscache2.c.bigcache.googleapis.com +2404:6800:4005:5::f v8.lscache3.c.bigcache.googleapis.com +2404:6800:4005:1::f v8.lscache4.c.bigcache.googleapis.com +2404:6800:4005:6::f 
v8.lscache5.c.bigcache.googleapis.com +2404:6800:4005:6::f v8.lscache6.c.bigcache.googleapis.com +2404:6800:4005:7::f v8.lscache7.c.bigcache.googleapis.com +2404:6800:4005:3::f v8.lscache8.c.bigcache.googleapis.com +2404:6800:4005::13 v9.lscache1.c.bigcache.googleapis.com +2404:6800:4005::13 v9.lscache2.c.bigcache.googleapis.com +2404:6800:4005:5::13 v9.lscache3.c.bigcache.googleapis.com +2404:6800:4005:1::13 v9.lscache4.c.bigcache.googleapis.com +2404:6800:4005:6::13 v9.lscache5.c.bigcache.googleapis.com +2404:6800:4005:6::13 v9.lscache6.c.bigcache.googleapis.com +2404:6800:4005:3::13 v9.lscache7.c.bigcache.googleapis.com +2404:6800:4005:3::13 v9.lscache8.c.bigcache.googleapis.com +2404:6800:4005::17 v10.lscache1.c.bigcache.googleapis.com +2404:6800:4005:4::17 v10.lscache2.c.bigcache.googleapis.com +2404:6800:4005:1::17 v10.lscache3.c.bigcache.googleapis.com +2404:6800:4005:1::17 v10.lscache4.c.bigcache.googleapis.com +2404:6800:4005:6::17 v10.lscache5.c.bigcache.googleapis.com +2404:6800:4005:2::17 v10.lscache6.c.bigcache.googleapis.com +2404:6800:4005:7::17 v10.lscache7.c.bigcache.googleapis.com +2404:6800:4005:7::17 v10.lscache8.c.bigcache.googleapis.com +2404:6800:4005:4::8 v11.lscache1.c.bigcache.googleapis.com +2404:6800:4005:4::8 v11.lscache2.c.bigcache.googleapis.com +2404:6800:4005:5::8 v11.lscache3.c.bigcache.googleapis.com +2404:6800:4005:5::8 v11.lscache4.c.bigcache.googleapis.com +2404:6800:4005:2::8 v11.lscache5.c.bigcache.googleapis.com +2404:6800:4005:2::8 v11.lscache6.c.bigcache.googleapis.com +2404:6800:4005:7::8 v11.lscache7.c.bigcache.googleapis.com +2404:6800:4005:7::8 v11.lscache8.c.bigcache.googleapis.com +2404:6800:4005:4::c v12.lscache1.c.bigcache.googleapis.com +2404:6800:4005:4::c v12.lscache2.c.bigcache.googleapis.com +2404:6800:4005:5::c v12.lscache3.c.bigcache.googleapis.com +2404:6800:4005:1::c v12.lscache4.c.bigcache.googleapis.com +2404:6800:4005:6::c v12.lscache5.c.bigcache.googleapis.com +2404:6800:4005:6::c 
v12.lscache6.c.bigcache.googleapis.com +2404:6800:4005:7::c v12.lscache7.c.bigcache.googleapis.com +2404:6800:4005:7::c v12.lscache8.c.bigcache.googleapis.com +2404:6800:4005::10 v13.lscache1.c.bigcache.googleapis.com +2404:6800:4005:4::10 v13.lscache2.c.bigcache.googleapis.com +2404:6800:4005:1::10 v13.lscache3.c.bigcache.googleapis.com +2404:6800:4005:5::10 v13.lscache4.c.bigcache.googleapis.com +2404:6800:4005:6::10 v13.lscache5.c.bigcache.googleapis.com +2404:6800:4005:6::10 v13.lscache6.c.bigcache.googleapis.com +2404:6800:4005:7::10 v13.lscache7.c.bigcache.googleapis.com +2404:6800:4005:3::10 v13.lscache8.c.bigcache.googleapis.com +2404:6800:4005:4::14 v14.lscache1.c.bigcache.googleapis.com +2404:6800:4005:4::14 v14.lscache2.c.bigcache.googleapis.com +2404:6800:4005:5::14 v14.lscache3.c.bigcache.googleapis.com +2404:6800:4005:1::14 v14.lscache4.c.bigcache.googleapis.com +2404:6800:4005:2::14 v14.lscache5.c.bigcache.googleapis.com +2404:6800:4005:2::14 v14.lscache6.c.bigcache.googleapis.com +2404:6800:4005:7::14 v14.lscache7.c.bigcache.googleapis.com +2404:6800:4005:7::14 v14.lscache8.c.bigcache.googleapis.com +2404:6800:4005::18 v15.lscache1.c.bigcache.googleapis.com +2404:6800:4005:4::18 v15.lscache2.c.bigcache.googleapis.com +2404:6800:4005:1::18 v15.lscache3.c.bigcache.googleapis.com +2404:6800:4005:1::18 v15.lscache4.c.bigcache.googleapis.com +2404:6800:4005:6::18 v15.lscache5.c.bigcache.googleapis.com +2404:6800:4005:2::18 v15.lscache6.c.bigcache.googleapis.com +2404:6800:4005:3::18 v15.lscache7.c.bigcache.googleapis.com +2404:6800:4005:7::18 v15.lscache8.c.bigcache.googleapis.com +2404:6800:4005:4::9 v16.lscache1.c.bigcache.googleapis.com +2404:6800:4005:4::9 v16.lscache2.c.bigcache.googleapis.com +2404:6800:4005:5::9 v16.lscache3.c.bigcache.googleapis.com +2404:6800:4005:5::9 v16.lscache4.c.bigcache.googleapis.com +2404:6800:4005:6::9 v16.lscache5.c.bigcache.googleapis.com +2404:6800:4005:2::9 v16.lscache6.c.bigcache.googleapis.com +2404:6800:4005:7::9 
v16.lscache7.c.bigcache.googleapis.com +2404:6800:4005:3::9 v16.lscache8.c.bigcache.googleapis.com +2404:6800:4005:4::d v17.lscache1.c.bigcache.googleapis.com +2404:6800:4005:4::d v17.lscache2.c.bigcache.googleapis.com +2404:6800:4005:5::d v17.lscache3.c.bigcache.googleapis.com +2404:6800:4005:1::d v17.lscache4.c.bigcache.googleapis.com +2404:6800:4005:2::d v17.lscache5.c.bigcache.googleapis.com +2404:6800:4005:6::d v17.lscache6.c.bigcache.googleapis.com +2404:6800:4005:7::d v17.lscache7.c.bigcache.googleapis.com +2404:6800:4005:3::d v17.lscache8.c.bigcache.googleapis.com +2404:6800:4005:4::11 v18.lscache1.c.bigcache.googleapis.com +2404:6800:4005::11 v18.lscache2.c.bigcache.googleapis.com +2404:6800:4005:1::11 v18.lscache3.c.bigcache.googleapis.com +2404:6800:4005:1::11 v18.lscache4.c.bigcache.googleapis.com +2404:6800:4005:2::11 v18.lscache5.c.bigcache.googleapis.com +2404:6800:4005:6::11 v18.lscache6.c.bigcache.googleapis.com +2404:6800:4005:3::11 v18.lscache7.c.bigcache.googleapis.com +2404:6800:4005:7::11 v18.lscache8.c.bigcache.googleapis.com +2404:6800:4005:4::15 v19.lscache1.c.bigcache.googleapis.com +2404:6800:4005:4::15 v19.lscache2.c.bigcache.googleapis.com +2404:6800:4005:5::15 v19.lscache3.c.bigcache.googleapis.com +2404:6800:4005:1::15 v19.lscache4.c.bigcache.googleapis.com +2404:6800:4005:6::15 v19.lscache5.c.bigcache.googleapis.com +2404:6800:4005:6::15 v19.lscache6.c.bigcache.googleapis.com +2404:6800:4005:7::15 v19.lscache7.c.bigcache.googleapis.com +2404:6800:4005:3::15 v19.lscache8.c.bigcache.googleapis.com +2404:6800:4005::19 v20.lscache1.c.bigcache.googleapis.com +2404:6800:4005:4::19 v20.lscache2.c.bigcache.googleapis.com +2404:6800:4005:5::19 v20.lscache3.c.bigcache.googleapis.com +2404:6800:4005:5::19 v20.lscache4.c.bigcache.googleapis.com +2404:6800:4005:6::19 v20.lscache5.c.bigcache.googleapis.com +2404:6800:4005:2::19 v20.lscache6.c.bigcache.googleapis.com +2404:6800:4005:3::19 v20.lscache7.c.bigcache.googleapis.com +2404:6800:4005:3::19 
v20.lscache8.c.bigcache.googleapis.com +2404:6800:4005:4::6 v21.lscache1.c.bigcache.googleapis.com +2404:6800:4005::6 v21.lscache2.c.bigcache.googleapis.com +2404:6800:4005:1::6 v21.lscache3.c.bigcache.googleapis.com +2404:6800:4005:1::6 v21.lscache4.c.bigcache.googleapis.com +2404:6800:4005:2::6 v21.lscache5.c.bigcache.googleapis.com +2404:6800:4005:6::6 v21.lscache6.c.bigcache.googleapis.com +2404:6800:4005:7::6 v21.lscache7.c.bigcache.googleapis.com +2404:6800:4005:7::6 v21.lscache8.c.bigcache.googleapis.com +2404:6800:4005:4::a v22.lscache1.c.bigcache.googleapis.com +2404:6800:4005:4::a v22.lscache2.c.bigcache.googleapis.com +2404:6800:4005:5::a v22.lscache3.c.bigcache.googleapis.com +2404:6800:4005:5::a v22.lscache4.c.bigcache.googleapis.com +2404:6800:4005:6::a v22.lscache5.c.bigcache.googleapis.com +2404:6800:4005:2::a v22.lscache6.c.bigcache.googleapis.com +2404:6800:4005:7::a v22.lscache7.c.bigcache.googleapis.com +2404:6800:4005:3::a v22.lscache8.c.bigcache.googleapis.com +2404:6800:4005:4::e v23.lscache1.c.bigcache.googleapis.com +2404:6800:4005:4::e v23.lscache2.c.bigcache.googleapis.com +2404:6800:4005:5::e v23.lscache3.c.bigcache.googleapis.com +2404:6800:4005:1::e v23.lscache4.c.bigcache.googleapis.com +2404:6800:4005:2::e v23.lscache5.c.bigcache.googleapis.com +2404:6800:4005:2::e v23.lscache6.c.bigcache.googleapis.com +2404:6800:4005:3::e v23.lscache7.c.bigcache.googleapis.com +2404:6800:4005:7::e v23.lscache8.c.bigcache.googleapis.com +2404:6800:4005:4::12 v24.lscache1.c.bigcache.googleapis.com +2404:6800:4005:4::12 v24.lscache2.c.bigcache.googleapis.com +2404:6800:4005:5::12 v24.lscache3.c.bigcache.googleapis.com +2404:6800:4005:1::12 v24.lscache4.c.bigcache.googleapis.com +2404:6800:4005:2::12 v24.lscache5.c.bigcache.googleapis.com +2404:6800:4005:2::12 v24.lscache6.c.bigcache.googleapis.com +2404:6800:4005:3::12 v24.lscache7.c.bigcache.googleapis.com +2404:6800:4005:3::12 v24.lscache8.c.bigcache.googleapis.com + +#Google Hosted 托管服务 
+2404:6800:8005::84 commondatastorage.googleapis.com +2404:6800:8005::84 googlehosted.l.googleusercontent.com +2404:6800:8005::84 bucket.commondatastorage.googleapis.com +2404:6800:8005::80 c.commondatastorage.googleapis.com +2404:6800:8005::84 chromium-browser-symbols.commondatastorage.googleapis.com +2404:6800:8005::80 storage.l.googleusercontent.com +2404:6800:8005::84 www.googlehosted.com +2404:6800:8005::84 googlehosted.l.google.com +2404:6800:8005::62 base.googlehosted.com +2404:6800:8005::62 base0.googlehosted.com +2404:6800:8005::62 base1.googlehosted.com +2404:6800:8005::62 base2.googlehosted.com +2404:6800:8005::62 base3.googlehosted.com +2404:6800:8005::62 base4.googlehosted.com +2404:6800:8005::62 base5.googlehosted.com + +#GoogleUserContent 用户自定义的Google服务 +2404:6800:8005::84 www.googleusercontent.com +2404:6800:8005::84 androidmarket.googleusercontent.com +2404:6800:8005::84 blogger.googleusercontent.com +2404:6800:8005::84 clients1.googleusercontent.com +2404:6800:8005::84 clients2.googleusercontent.com +2404:6800:8005::84 doc-00-1k-docs.googleusercontent.com #建议通过 pac 实现 +2404:6800:8005::84 doc-00-1o-docs.googleusercontent.com +2404:6800:8005::84 doc-04-1o-docs.googleusercontent.com +2404:6800:8005::84 doc-08-1o-docs.googleusercontent.com +2404:6800:8005::84 doc-0c-1o-docs.googleusercontent.com +2404:6800:8005::84 doc-0g-1o-docs.googleusercontent.com +2404:6800:8005::84 doc-0k-1o-docs.googleusercontent.com +2404:6800:8005::84 doc-0o-1o-docs.googleusercontent.com +2404:6800:8005::84 doc-0s-1o-docs.googleusercontent.com +2404:6800:8005::84 doc-10-1o-docs.googleusercontent.com +2404:6800:8005::84 doc-14-1o-docs.googleusercontent.com +2404:6800:8005::84 doc-0k-38-docs.googleusercontent.com +2404:6800:8005::84 doc-00-6c-docs.googleusercontent.com +2404:6800:8005::84 doc-04-6c-docs.googleusercontent.com +2404:6800:8005::84 doc-08-6c-docs.googleusercontent.com +2404:6800:8005::84 doc-0c-6c-docs.googleusercontent.com +2404:6800:8005::84 
doc-0g-6c-docs.googleusercontent.com +2404:6800:8005::84 doc-0k-6c-docs.googleusercontent.com +2404:6800:8005::84 doc-0o-6c-docs.googleusercontent.com +2404:6800:8005::84 doc-0s-6c-docs.googleusercontent.com +2404:6800:8005::84 doc-10-6c-docs.googleusercontent.com +2404:6800:8005::84 doc-14-6c-docs.googleusercontent.com +2404:6800:8005::84 doc-0o-74-docs.googleusercontent.com +2404:6800:8005::84 doc-14-74-docs.googleusercontent.com +2404:6800:8005::84 doc-00-7o-docs.googleusercontent.com +2404:6800:8005::84 doc-04-7o-docs.googleusercontent.com +2404:6800:8005::84 doc-08-7o-docs.googleusercontent.com +2404:6800:8005::84 doc-0c-7o-docs.googleusercontent.com +2404:6800:8005::84 doc-0g-7o-docs.googleusercontent.com +2404:6800:8005::84 doc-0k-7o-docs.googleusercontent.com +2404:6800:8005::84 doc-0o-7o-docs.googleusercontent.com +2404:6800:8005::84 doc-0s-7o-docs.googleusercontent.com +2404:6800:8005::84 doc-10-7o-docs.googleusercontent.com +2404:6800:8005::84 doc-14-7o-docs.googleusercontent.com +2404:6800:8005::84 doc-00-8k-docs.googleusercontent.com +2404:6800:8005::84 doc-04-8k-docs.googleusercontent.com +2404:6800:8005::84 doc-08-8k-docs.googleusercontent.com +2404:6800:8005::84 doc-0c-8k-docs.googleusercontent.com +2404:6800:8005::84 doc-0g-8k-docs.googleusercontent.com +2404:6800:8005::84 doc-0k-8k-docs.googleusercontent.com +2404:6800:8005::84 doc-0o-8k-docs.googleusercontent.com +2404:6800:8005::84 doc-0s-8k-docs.googleusercontent.com +2404:6800:8005::84 doc-10-8k-docs.googleusercontent.com +2404:6800:8005::84 doc-14-8k-docs.googleusercontent.com +2404:6800:8005::84 doc-00-9k-docs.googleusercontent.com +2404:6800:8005::84 doc-04-9k-docs.googleusercontent.com +2404:6800:8005::84 doc-08-9k-docs.googleusercontent.com +2404:6800:8005::84 doc-0c-9k-docs.googleusercontent.com +2404:6800:8005::84 doc-0g-9k-docs.googleusercontent.com +2404:6800:8005::84 doc-0k-9k-docs.googleusercontent.com +2404:6800:8005::84 doc-0o-9k-docs.googleusercontent.com +2404:6800:8005::84 
doc-0s-9k-docs.googleusercontent.com +2404:6800:8005::84 doc-10-9k-docs.googleusercontent.com +2404:6800:8005::84 doc-14-9k-docs.googleusercontent.com +2404:6800:8005::84 doc-00-as-docs.googleusercontent.com +2404:6800:8005::84 doc-04-as-docs.googleusercontent.com +2404:6800:8005::84 doc-08-as-docs.googleusercontent.com +2404:6800:8005::84 doc-0c-as-docs.googleusercontent.com +2404:6800:8005::84 doc-0g-as-docs.googleusercontent.com +2404:6800:8005::84 doc-0k-as-docs.googleusercontent.com +2404:6800:8005::84 doc-0o-as-docs.googleusercontent.com +2404:6800:8005::84 doc-0s-as-docs.googleusercontent.com +2404:6800:8005::84 doc-10-as-docs.googleusercontent.com +2404:6800:8005::84 doc-14-as-docs.googleusercontent.com +2404:6800:8005::84 doc-00-2g-3dwarehouse.googleusercontent.com +2404:6800:8005::84 doc-04-2g-3dwarehouse.googleusercontent.com +2404:6800:8005::84 doc-08-2g-3dwarehouse.googleusercontent.com +2404:6800:8005::84 doc-0c-2g-3dwarehouse.googleusercontent.com +2404:6800:8005::84 doc-0g-2g-3dwarehouse.googleusercontent.com +2404:6800:8005::84 doc-0k-2g-3dwarehouse.googleusercontent.com +2404:6800:8005::84 doc-0o-2g-3dwarehouse.googleusercontent.com +2404:6800:8005::84 doc-0s-2g-3dwarehouse.googleusercontent.com +2404:6800:8005::84 doc-10-2g-3dwarehouse.googleusercontent.com +2404:6800:8005::84 doc-14-2g-3dwarehouse.googleusercontent.com +2404:6800:8005::84 feedback.googleusercontent.com +2404:6800:8005::84 mail-attachment.googleusercontent.com +2404:6800:8005::84 lh0.googleusercontent.com +2404:6800:8005::84 lh1.googleusercontent.com +2404:6800:8005::84 lh2.googleusercontent.com +2404:6800:8005::84 lh3.googleusercontent.com +2404:6800:8005::84 lh4.googleusercontent.com +2404:6800:8005::84 lh5.googleusercontent.com +2404:6800:8005::84 lh6.googleusercontent.com +2404:6800:8005::84 music-onebox.googleusercontent.com +2404:6800:8005::84 oauth.googleusercontent.com +2404:6800:8005::84 reader.googleusercontent.com +2404:6800:8005::84 static.googleusercontent.com 
+2404:6800:8005::84 s2.googleusercontent.com +2404:6800:8005::84 themes.googleusercontent.com #开放字体库 +2404:6800:8005::84 translate.googleusercontent.com +2404:6800:8005::84 wave.googleusercontent.com +2404:6800:8005::84 webcache.googleusercontent.com #网页快照 +2404:6800:8005::84 ytimg.googleusercontent.com +2404:6800:8005::84 code-opensocial.googleusercontent.com +2404:6800:8005::84 spreadsheets-opensocial.googleusercontent.com +2404:6800:8005::84 www-opensocial.googleusercontent.com +2404:6800:8005::84 images-docs-opensocial.googleusercontent.com +2404:6800:8005::84 www-gm-opensocial.googleusercontent.com #Gmail +2404:6800:8005::84 4lam9a1ki27mb9p1h5q3furvvf58ss02-a-gm-opensocial.googleusercontent.com #YouTube in Gmail +2404:6800:8005::84 nl3atn8402ncgahndqn8rb6qi2ld2g5a-a-gm-opensocial.googleusercontent.com +2404:6800:8005::84 www-opensocial-sandbox.googleusercontent.com +2404:6800:8005::84 www-fc-opensocial.googleusercontent.com +2404:6800:8005::84 *-a-fc-opensocial.googleusercontent.com +2404:6800:8005::84 8fkcem1ves287v3g5lu9gep1j91p3kk1-a-fc-opensocial.googleusercontent.com #有的博客主页的 OpenID 登陆 +2404:6800:8005::84 www-focus-opensocial.googleusercontent.com +2404:6800:8005::84 images0-focus-opensocial.googleusercontent.com +2404:6800:8005::84 images1-focus-opensocial.googleusercontent.com +2404:6800:8005::84 images2-focus-opensocial.googleusercontent.com +2404:6800:8005::84 images3-focus-opensocial.googleusercontent.com +2404:6800:8005::84 images4-focus-opensocial.googleusercontent.com +2404:6800:8005::84 images5-focus-opensocial.googleusercontent.com +2404:6800:8005::84 images6-focus-opensocial.googleusercontent.com +2404:6800:8005::84 images7-focus-opensocial.googleusercontent.com +2404:6800:8005::84 images8-focus-opensocial.googleusercontent.com +2404:6800:8005::84 images9-focus-opensocial.googleusercontent.com +2404:6800:8005::84 images*-focus-opensocial.googleusercontent.com #建议通过 pac 实现 +2404:6800:8005::84 www-focus-opensocial.googleusercontent.com 
+2404:6800:8005::84 0-focus-opensocial.googleusercontent.com +2404:6800:8005::84 1-focus-opensocial.googleusercontent.com +2404:6800:8005::84 2-focus-opensocial.googleusercontent.com +2404:6800:8005::84 3-focus-opensocial.googleusercontent.com +2404:6800:8005::84 www-fusiontables-opensocial.googleusercontent.com +2404:6800:8005::84 www-kix-opensocial.googleusercontent.com +2404:6800:8005::84 images-lso-opensocial.googleusercontent.com +2404:6800:8005::84 www-onepick-opensocial.googleusercontent.com +2404:6800:8005::84 images-onepick-opensocial.googleusercontent.com +2404:6800:8005::84 www-open-opensocial.googleusercontent.com +2404:6800:8005::84 0-open-opensocial.googleusercontent.com +2404:6800:8005::84 1-open-opensocial.googleusercontent.com +2404:6800:8005::84 2-open-opensocial.googleusercontent.com +2404:6800:8005::84 3-open-opensocial.googleusercontent.com +2404:6800:8005::84 www-oz-opensocial.googleusercontent.com +2404:6800:8005::84 images-oz-opensocial.googleusercontent.com +2404:6800:8005::84 4fjvqid3r3oq66t548clrdj52df15coc-a-oz-opensocial.googleusercontent.com +2404:6800:8005::84 53rd6p0catml6vat6qra84rs0del836d-a-oz-opensocial.googleusercontent.com +2404:6800:8005::84 59cbv4l9s05pbaks9v77vc3mengeqors-a-oz-opensocial.googleusercontent.com +2404:6800:8005::84 8kubpeu8314p2efdd7jlv09an9i2ljdo-a-oz-opensocial.googleusercontent.com +2404:6800:8005::84 adstvca8k2ooaknjjmv89j22n9t676ve-a-oz-opensocial.googleusercontent.com +2404:6800:8005::84 bt26mravu2qpe56n8gnmjnpv2inl84bf-a-oz-opensocial.googleusercontent.com +2404:6800:8005::84 debh8vg7vd93bco3prdajidmm7dhql3f-a-oz-opensocial.googleusercontent.com +2404:6800:8005::84 hsco54a20sh11q9jkmb51ad2n3hmkmrg-a-oz-opensocial.googleusercontent.com +2404:6800:8005::84 i8brh95qor6r54nkl52hidj2ggcs4jgm-a-oz-opensocial.googleusercontent.com +2404:6800:8005::84 k6v18tjr24doa89b55o3na41kn4v73eb-a-oz-opensocial.googleusercontent.com +2404:6800:8005::84 ob7f2qc0i50kbjnc81vkhgmb5hsv7a8l-a-oz-opensocial.googleusercontent.com 
+2404:6800:8005::84 qhie5b8u979rnch1q0hqbrmbkn9estf7-a-oz-opensocial.googleusercontent.com +2404:6800:8005::84 r70rmsn4s0rhk6cehcbbcbfbs31pu0va-a-oz-opensocial.googleusercontent.com +2404:6800:8005::84 rbjhe237k979f79d87gmenp3gejfonu9-a-oz-opensocial.googleusercontent.com +2404:6800:8005::84 u807isd5egseeabjccgcns005p2miucq-a-oz-opensocial.googleusercontent.com +2404:6800:8005::84 upt14k1i2veesusrda9nfotcrbp9d7p5-a-oz-opensocial.googleusercontent.com +2404:6800:8005::84 *-a-oz-opensocial.googleusercontent.com #建议通过 pac 实现 +2404:6800:8005::84 images-pos-opensocial.googleusercontent.com +2404:6800:8005::84 www-trixcopysheet-opensocial.googleusercontent.com +2404:6800:8005::84 www-wave-opensocial.googleusercontent.com +2404:6800:8005::84 wave-opensocial.googleusercontent.com +2404:6800:8005::84 0-wave-opensocial.googleusercontent.com +2404:6800:8005::84 1-wave-opensocial.googleusercontent.com +2404:6800:8005::84 2-wave-opensocial.googleusercontent.com +2404:6800:8005::84 3-wave-opensocial.googleusercontent.com +2404:6800:8005::84 4-wave-opensocial.googleusercontent.com +2404:6800:8005::84 5-wave-opensocial.googleusercontent.com +2404:6800:8005::84 6-wave-opensocial.googleusercontent.com +2404:6800:8005::84 7-wave-opensocial.googleusercontent.com +2404:6800:8005::84 8-wave-opensocial.googleusercontent.com +2404:6800:8005::84 9-wave-opensocial.googleusercontent.com +2404:6800:8005::84 10-wave-opensocial.googleusercontent.com +2404:6800:8005::84 11-wave-opensocial.googleusercontent.com +2404:6800:8005::84 12-wave-opensocial.googleusercontent.com +2404:6800:8005::84 13-wave-opensocial.googleusercontent.com +2404:6800:8005::84 14-wave-opensocial.googleusercontent.com +2404:6800:8005::84 15-wave-opensocial.googleusercontent.com +2404:6800:8005::84 16-wave-opensocial.googleusercontent.com +2404:6800:8005::84 17-wave-opensocial.googleusercontent.com +2404:6800:8005::84 18-wave-opensocial.googleusercontent.com +2404:6800:8005::84 19-wave-opensocial.googleusercontent.com 
+2404:6800:8005::84 20-wave-opensocial.googleusercontent.com +2404:6800:8005::84 21-wave-opensocial.googleusercontent.com +2404:6800:8005::84 22-wave-opensocial.googleusercontent.com +2404:6800:8005::84 23-wave-opensocial.googleusercontent.com +2404:6800:8005::84 24-wave-opensocial.googleusercontent.com +2404:6800:8005::84 25-wave-opensocial.googleusercontent.com +2404:6800:8005::84 26-wave-opensocial.googleusercontent.com +2404:6800:8005::84 27-wave-opensocial.googleusercontent.com +2404:6800:8005::84 28-wave-opensocial.googleusercontent.com +2404:6800:8005::84 29-wave-opensocial.googleusercontent.com +2404:6800:8005::84 30-wave-opensocial.googleusercontent.com +2404:6800:8005::84 31-wave-opensocial.googleusercontent.com +2404:6800:8005::84 32-wave-opensocial.googleusercontent.com +2404:6800:8005::84 33-wave-opensocial.googleusercontent.com +2404:6800:8005::84 34-wave-opensocial.googleusercontent.com +2404:6800:8005::84 35-wave-opensocial.googleusercontent.com +2404:6800:8005::84 36-wave-opensocial.googleusercontent.com +2404:6800:8005::84 37-wave-opensocial.googleusercontent.com +2404:6800:8005::84 38-wave-opensocial.googleusercontent.com +2404:6800:8005::84 39-wave-opensocial.googleusercontent.com +2404:6800:8005::84 40-wave-opensocial.googleusercontent.com +2404:6800:8005::84 *-wave-opensocial.googleusercontent.com + +##Google其他服务 + +#YouTube +# 如遇连接错误或过慢,请尝试全文替换数据中心前缀: +# 页面分发 2a00:1450:8005:: 2001:4860:8005:: 2404:6800:8001:: 2404:6800:8005:: +# 数据分发 2404:6800:4008:2:: 2404:6800:4008:3:: 2a00:1450:4009:15:: 2a00:1450:4008:: +# 2001:4860:4001:f:: 2001:4860:4007:: 2001:4860:4001:10:: 2001:4860:4006:803:: +2404:6800:8005::be youtube.com +2404:6800:8005::5b www.youtube.com +2404:6800:8005::5b au.youtube.com +2404:6800:8005::5b ca.youtube.com +2404:6800:8005::5b de.youtube.com +2404:6800:8005::5b fr.youtube.com +2404:6800:8005::5b jp.youtube.com +2404:6800:8005::5b ru.youtube.com +2404:6800:8005::5b uk.youtube.com +2404:6800:8005::5b tw.youtube.com 
+2404:6800:8005::64 ads.youtube.com +2404:6800:8005::5b www.youtube-nocookie.com +2404:6800:8005::5b youtube-ui.l.google.com +2404:6800:8005::66 m.youtube.com +2404:6800:8005::88 youtu.be +2404:6800:8005::76 gdata.youtube.com +2404:6800:8005::76 stage.gdata.youtube.com +2404:6800:8005::8a s.youtube.com +2404:6800:8005::8a s2.youtube.com +2404:6800:8005::8a accounts.youtube.com +2404:6800:8005::8a video-stats.l.google.com +2404:6800:8005::8b img.youtube.com +2404:6800:8005::8b ytimg.l.google.com +2404:6800:8005::8b i.ytimg.com +2404:6800:8005::8b i1.ytimg.com +2404:6800:8005::8b i2.ytimg.com +2404:6800:8005::8b i3.ytimg.com +2404:6800:8005::8b i4.ytimg.com +2404:6800:8005::8b s.ytimg.com +2404:6800:8005::8b ytstatic.l.google.com +2404:6800:8005::69 help.youtube.com +2404:6800:8005::64 upload.youtube.com +2404:6800:8005::64 insight.youtube.com +2404:6800:8005::79 apiblog.youtube.com +2404:6800:8005::64 clients1.youtube.com +2404:6800:4008:2::19 static.cache.l.google.com +2404:6800:4005::6 v1.lscache1.c.youtube.com +2404:6800:4005:4::6 v1.lscache2.c.youtube.com +2404:6800:4005:5::6 v1.lscache3.c.youtube.com +2404:6800:4005:1::6 v1.lscache4.c.youtube.com +2404:6800:4005:2::6 v1.lscache5.c.youtube.com +2404:6800:4005:6::6 v1.lscache6.c.youtube.com +2404:6800:4005:7::6 v1.lscache7.c.youtube.com +2404:6800:4005:7::6 v1.lscache8.c.youtube.com +2404:6800:4005:4::a v2.lscache1.c.youtube.com +2404:6800:4005::a v2.lscache2.c.youtube.com +2404:6800:4005:5::a v2.lscache3.c.youtube.com +2404:6800:4005:5::a v2.lscache4.c.youtube.com +2404:6800:4005:6::a v2.lscache5.c.youtube.com +2404:6800:4005:6::a v2.lscache6.c.youtube.com +2404:6800:4005:7::a v2.lscache7.c.youtube.com +2404:6800:4005:7::a v2.lscache8.c.youtube.com +2404:6800:4005::e v3.lscache1.c.youtube.com +2404:6800:4005:4::e v3.lscache2.c.youtube.com +2404:6800:4005:1::e v3.lscache3.c.youtube.com +2404:6800:4005:5::e v3.lscache4.c.youtube.com +2404:6800:4005:2::e v3.lscache5.c.youtube.com +2404:6800:4005:6::e 
v3.lscache6.c.youtube.com +2404:6800:4005:3::e v3.lscache7.c.youtube.com +2404:6800:4005:3::e v3.lscache8.c.youtube.com +2404:6800:4005:4::12 v4.lscache1.c.youtube.com +2404:6800:4005:4::12 v4.lscache2.c.youtube.com +2404:6800:4005:1::12 v4.lscache3.c.youtube.com +2404:6800:4005:1::12 v4.lscache4.c.youtube.com +2404:6800:4005:6::12 v4.lscache5.c.youtube.com +2404:6800:4005:2::12 v4.lscache6.c.youtube.com +2404:6800:4005:7::12 v4.lscache7.c.youtube.com +2404:6800:4005:7::12 v4.lscache8.c.youtube.com +2404:6800:4005::16 v5.lscache1.c.youtube.com +2404:6800:4005:4::16 v5.lscache2.c.youtube.com +2404:6800:4005:5::16 v5.lscache3.c.youtube.com +2404:6800:4005:1::16 v5.lscache4.c.youtube.com +2404:6800:4005:6::16 v5.lscache5.c.youtube.com +2404:6800:4005:6::16 v5.lscache6.c.youtube.com +2404:6800:4005:3::16 v5.lscache7.c.youtube.com +2404:6800:4005:3::16 v5.lscache8.c.youtube.com +2404:6800:4005::7 v6.lscache1.c.youtube.com +2404:6800:4005:4::7 v6.lscache2.c.youtube.com +2404:6800:4005:1::7 v6.lscache3.c.youtube.com +2404:6800:4005:1::7 v6.lscache4.c.youtube.com +2404:6800:4005:6::7 v6.lscache5.c.youtube.com +2404:6800:4005:2::7 v6.lscache6.c.youtube.com +2404:6800:4005:3::7 v6.lscache7.c.youtube.com +2404:6800:4005:7::7 v6.lscache8.c.youtube.com +2404:6800:4005:4::b v7.lscache1.c.youtube.com +2404:6800:4005::b v7.lscache2.c.youtube.com +2404:6800:4005:1::b v7.lscache3.c.youtube.com +2404:6800:4005:1::b v7.lscache4.c.youtube.com +2404:6800:4005:2::b v7.lscache5.c.youtube.com +2404:6800:4005:2::b v7.lscache6.c.youtube.com +2404:6800:4005:7::b v7.lscache7.c.youtube.com +2404:6800:4005:7::b v7.lscache8.c.youtube.com +2404:6800:4005:4::f v8.lscache1.c.youtube.com +2404:6800:4005::f v8.lscache2.c.youtube.com +2404:6800:4005:1::f v8.lscache3.c.youtube.com +2404:6800:4005:5::f v8.lscache4.c.youtube.com +2404:6800:4005:2::f v8.lscache5.c.youtube.com +2404:6800:4005:2::f v8.lscache6.c.youtube.com +2404:6800:4005:7::f v8.lscache7.c.youtube.com +2404:6800:4005:3::f 
v8.lscache8.c.youtube.com +2404:6800:4005::13 v9.lscache1.c.youtube.com +2404:6800:4005:4::13 v9.lscache2.c.youtube.com +2404:6800:4005:5::13 v9.lscache3.c.youtube.com +2404:6800:4005:1::13 v9.lscache4.c.youtube.com +2404:6800:4005:2::13 v9.lscache5.c.youtube.com +2404:6800:4005:6::13 v9.lscache6.c.youtube.com +2404:6800:4005:3::13 v9.lscache7.c.youtube.com +2404:6800:4005:7::13 v9.lscache8.c.youtube.com +2404:6800:4005:4::17 v10.lscache1.c.youtube.com +2404:6800:4005::17 v10.lscache2.c.youtube.com +2404:6800:4005:5::17 v10.lscache3.c.youtube.com +2404:6800:4005:5::17 v10.lscache4.c.youtube.com +2404:6800:4005:2::17 v10.lscache5.c.youtube.com +2404:6800:4005:6::17 v10.lscache6.c.youtube.com +2404:6800:4005:7::17 v10.lscache7.c.youtube.com +2404:6800:4005:3::17 v10.lscache8.c.youtube.com +2404:6800:4005::8 v11.lscache1.c.youtube.com +2404:6800:4005::8 v11.lscache2.c.youtube.com +2404:6800:4005:1::8 v11.lscache3.c.youtube.com +2404:6800:4005:5::8 v11.lscache4.c.youtube.com +2404:6800:4005:6::8 v11.lscache5.c.youtube.com +2404:6800:4005:6::8 v11.lscache6.c.youtube.com +2404:6800:4005:3::8 v11.lscache7.c.youtube.com +2404:6800:4005:7::8 v11.lscache8.c.youtube.com +2404:6800:4005::c v12.lscache1.c.youtube.com +2404:6800:4005::c v12.lscache2.c.youtube.com +2404:6800:4005:1::c v12.lscache3.c.youtube.com +2404:6800:4005:1::c v12.lscache4.c.youtube.com +2404:6800:4005:2::c v12.lscache5.c.youtube.com +2404:6800:4005:2::c v12.lscache6.c.youtube.com +2404:6800:4005:7::c v12.lscache7.c.youtube.com +2404:6800:4005:3::c v12.lscache8.c.youtube.com +2404:6800:4005:4::10 v13.lscache1.c.youtube.com +2404:6800:4005::10 v13.lscache2.c.youtube.com +2404:6800:4005:5::10 v13.lscache3.c.youtube.com +2404:6800:4005:1::10 v13.lscache4.c.youtube.com +2404:6800:4005:6::10 v13.lscache5.c.youtube.com +2404:6800:4005:6::10 v13.lscache6.c.youtube.com +2404:6800:4005:7::10 v13.lscache7.c.youtube.com +2404:6800:4005:7::10 v13.lscache8.c.youtube.com +2404:6800:4005::14 v14.lscache1.c.youtube.com 
+2404:6800:4005:4::14 v14.lscache2.c.youtube.com +2404:6800:4005:5::14 v14.lscache3.c.youtube.com +2404:6800:4005:5::14 v14.lscache4.c.youtube.com +2404:6800:4005:6::14 v14.lscache5.c.youtube.com +2404:6800:4005:6::14 v14.lscache6.c.youtube.com +2404:6800:4005:7::14 v14.lscache7.c.youtube.com +2404:6800:4005:3::14 v14.lscache8.c.youtube.com +2404:6800:4005:4::18 v15.lscache1.c.youtube.com +2404:6800:4005:4::18 v15.lscache2.c.youtube.com +2404:6800:4005:5::18 v15.lscache3.c.youtube.com +2404:6800:4005:5::18 v15.lscache4.c.youtube.com +2404:6800:4005:6::18 v15.lscache5.c.youtube.com +2404:6800:4005:2::18 v15.lscache6.c.youtube.com +2404:6800:4005:3::18 v15.lscache7.c.youtube.com +2404:6800:4005:7::18 v15.lscache8.c.youtube.com +2404:6800:4005:4::9 v16.lscache1.c.youtube.com +2404:6800:4005::9 v16.lscache2.c.youtube.com +2404:6800:4005:5::9 v16.lscache3.c.youtube.com +2404:6800:4005:1::9 v16.lscache4.c.youtube.com +2404:6800:4005:6::9 v16.lscache5.c.youtube.com +2404:6800:4005:6::9 v16.lscache6.c.youtube.com +2404:6800:4005:7::9 v16.lscache7.c.youtube.com +2404:6800:4005:7::9 v16.lscache8.c.youtube.com +2404:6800:4005::d v17.lscache1.c.youtube.com +2404:6800:4005::d v17.lscache2.c.youtube.com +2404:6800:4005:1::d v17.lscache3.c.youtube.com +2404:6800:4005:5::d v17.lscache4.c.youtube.com +2404:6800:4005:6::d v17.lscache5.c.youtube.com +2404:6800:4005:2::d v17.lscache6.c.youtube.com +2404:6800:4005:3::d v17.lscache7.c.youtube.com +2404:6800:4005:7::d v17.lscache8.c.youtube.com +2404:6800:4005:4::11 v18.lscache1.c.youtube.com +2404:6800:4005::11 v18.lscache2.c.youtube.com +2404:6800:4005:1::11 v18.lscache3.c.youtube.com +2404:6800:4005:5::11 v18.lscache4.c.youtube.com +2404:6800:4005:2::11 v18.lscache5.c.youtube.com +2404:6800:4005:2::11 v18.lscache6.c.youtube.com +2404:6800:4005:7::11 v18.lscache7.c.youtube.com +2404:6800:4005:7::11 v18.lscache8.c.youtube.com +2404:6800:4005:4::15 v19.lscache1.c.youtube.com +2404:6800:4005:4::15 v19.lscache2.c.youtube.com 
+2404:6800:4005:1::15 v19.lscache3.c.youtube.com +2404:6800:4005:5::15 v19.lscache4.c.youtube.com +2404:6800:4005:6::15 v19.lscache5.c.youtube.com +2404:6800:4005:6::15 v19.lscache6.c.youtube.com +2404:6800:4005:7::15 v19.lscache7.c.youtube.com +2404:6800:4005:7::15 v19.lscache8.c.youtube.com +2404:6800:4005::19 v20.lscache1.c.youtube.com +2404:6800:4005:4::19 v20.lscache2.c.youtube.com +2404:6800:4005:1::19 v20.lscache3.c.youtube.com +2404:6800:4005:5::19 v20.lscache4.c.youtube.com +2404:6800:4005:6::19 v20.lscache5.c.youtube.com +2404:6800:4005:6::19 v20.lscache6.c.youtube.com +2404:6800:4005:3::19 v20.lscache7.c.youtube.com +2404:6800:4005:3::19 v20.lscache8.c.youtube.com +2404:6800:4005:4::6 v21.lscache1.c.youtube.com +2404:6800:4005:4::6 v21.lscache2.c.youtube.com +2404:6800:4005:1::6 v21.lscache3.c.youtube.com +2404:6800:4005:1::6 v21.lscache4.c.youtube.com +2404:6800:4005:2::6 v21.lscache5.c.youtube.com +2404:6800:4005:2::6 v21.lscache6.c.youtube.com +2404:6800:4005:3::6 v21.lscache7.c.youtube.com +2404:6800:4005:7::6 v21.lscache8.c.youtube.com +2404:6800:4005::a v22.lscache1.c.youtube.com +2404:6800:4005:4::a v22.lscache2.c.youtube.com +2404:6800:4005:5::a v22.lscache3.c.youtube.com +2404:6800:4005:5::a v22.lscache4.c.youtube.com +2404:6800:4005:2::a v22.lscache5.c.youtube.com +2404:6800:4005:2::a v22.lscache6.c.youtube.com +2404:6800:4005:3::a v22.lscache7.c.youtube.com +2404:6800:4005:3::a v22.lscache8.c.youtube.com +2404:6800:4005::e v23.lscache1.c.youtube.com +2404:6800:4005::e v23.lscache2.c.youtube.com +2404:6800:4005:1::e v23.lscache3.c.youtube.com +2404:6800:4005:5::e v23.lscache4.c.youtube.com +2404:6800:4005:6::e v23.lscache5.c.youtube.com +2404:6800:4005:2::e v23.lscache6.c.youtube.com +2404:6800:4005:7::e v23.lscache7.c.youtube.com +2404:6800:4005:3::e v23.lscache8.c.youtube.com +2404:6800:4005:4::12 v24.lscache1.c.youtube.com +2404:6800:4005:4::12 v24.lscache2.c.youtube.com +2404:6800:4005:5::12 v24.lscache3.c.youtube.com +2404:6800:4005:1::12 
v24.lscache4.c.youtube.com +2404:6800:4005:6::12 v24.lscache5.c.youtube.com +2404:6800:4005:2::12 v24.lscache6.c.youtube.com +2404:6800:4005:3::12 v24.lscache7.c.youtube.com +2404:6800:4005:7::12 v24.lscache8.c.youtube.com +2404:6800:4005::6 v1.nonxt1.c.youtube.com +2404:6800:4005:4::6 v1.nonxt2.c.youtube.com +2404:6800:4005:1::6 v1.nonxt3.c.youtube.com +2404:6800:4005:5::6 v1.nonxt4.c.youtube.com +2404:6800:4005:2::6 v1.nonxt5.c.youtube.com +2404:6800:4005:6::6 v1.nonxt6.c.youtube.com +2404:6800:4005:3::6 v1.nonxt7.c.youtube.com +2404:6800:4005:7::6 v1.nonxt8.c.youtube.com +2404:6800:4005:4::a v2.nonxt1.c.youtube.com +2404:6800:4005:4::a v2.nonxt2.c.youtube.com +2404:6800:4005:5::a v2.nonxt3.c.youtube.com +2404:6800:4005:1::a v2.nonxt4.c.youtube.com +2404:6800:4005:6::a v2.nonxt5.c.youtube.com +2404:6800:4005:6::a v2.nonxt6.c.youtube.com +2404:6800:4005:7::a v2.nonxt7.c.youtube.com +2404:6800:4005:7::a v2.nonxt8.c.youtube.com +2404:6800:4005:4::e v3.nonxt1.c.youtube.com +2404:6800:4005:4::e v3.nonxt2.c.youtube.com +2404:6800:4005:1::e v3.nonxt3.c.youtube.com +2404:6800:4005:5::e v3.nonxt4.c.youtube.com +2404:6800:4005:6::e v3.nonxt5.c.youtube.com +2404:6800:4005:2::e v3.nonxt6.c.youtube.com +2404:6800:4005:7::e v3.nonxt7.c.youtube.com +2404:6800:4005:7::e v3.nonxt8.c.youtube.com +2404:6800:4005::12 v4.nonxt1.c.youtube.com +2404:6800:4005::12 v4.nonxt2.c.youtube.com +2404:6800:4005:1::12 v4.nonxt3.c.youtube.com +2404:6800:4005:5::12 v4.nonxt4.c.youtube.com +2404:6800:4005:6::12 v4.nonxt5.c.youtube.com +2404:6800:4005:2::12 v4.nonxt6.c.youtube.com +2404:6800:4005:3::12 v4.nonxt7.c.youtube.com +2404:6800:4005:7::12 v4.nonxt8.c.youtube.com +2404:6800:4005:4::16 v5.nonxt1.c.youtube.com +2404:6800:4005::16 v5.nonxt2.c.youtube.com +2404:6800:4005:1::16 v5.nonxt3.c.youtube.com +2404:6800:4005:5::16 v5.nonxt4.c.youtube.com +2404:6800:4005:6::16 v5.nonxt5.c.youtube.com +2404:6800:4005:2::16 v5.nonxt6.c.youtube.com +2404:6800:4005:3::16 v5.nonxt7.c.youtube.com 
+2404:6800:4005:7::16 v5.nonxt8.c.youtube.com +2404:6800:4005:4::7 v6.nonxt1.c.youtube.com +2404:6800:4005:4::7 v6.nonxt2.c.youtube.com +2404:6800:4005:1::7 v6.nonxt3.c.youtube.com +2404:6800:4005:5::7 v6.nonxt4.c.youtube.com +2404:6800:4005:2::7 v6.nonxt5.c.youtube.com +2404:6800:4005:6::7 v6.nonxt6.c.youtube.com +2404:6800:4005:3::7 v6.nonxt7.c.youtube.com +2404:6800:4005:7::7 v6.nonxt8.c.youtube.com +2404:6800:4005:4::b v7.nonxt1.c.youtube.com +2404:6800:4005:4::b v7.nonxt2.c.youtube.com +2404:6800:4005:5::b v7.nonxt3.c.youtube.com +2404:6800:4005:5::b v7.nonxt4.c.youtube.com +2404:6800:4005:6::b v7.nonxt5.c.youtube.com +2404:6800:4005:6::b v7.nonxt6.c.youtube.com +2404:6800:4005:7::b v7.nonxt7.c.youtube.com +2404:6800:4005:7::b v7.nonxt8.c.youtube.com +2404:6800:4005::f v8.nonxt1.c.youtube.com +2404:6800:4005:4::f v8.nonxt2.c.youtube.com +2404:6800:4005:5::f v8.nonxt3.c.youtube.com +2404:6800:4005:5::f v8.nonxt4.c.youtube.com +2404:6800:4005:6::f v8.nonxt5.c.youtube.com +2404:6800:4005:2::f v8.nonxt6.c.youtube.com +2404:6800:4005:7::f v8.nonxt7.c.youtube.com +2404:6800:4005:3::f v8.nonxt8.c.youtube.com +2404:6800:4005::13 v9.nonxt1.c.youtube.com +2404:6800:4005:4::13 v9.nonxt2.c.youtube.com +2404:6800:4005:5::13 v9.nonxt3.c.youtube.com +2404:6800:4005:5::13 v9.nonxt4.c.youtube.com +2404:6800:4005:6::13 v9.nonxt5.c.youtube.com +2404:6800:4005:2::13 v9.nonxt6.c.youtube.com +2404:6800:4005:7::13 v9.nonxt7.c.youtube.com +2404:6800:4005:3::13 v9.nonxt8.c.youtube.com +2404:6800:4005:4::17 v10.nonxt1.c.youtube.com +2404:6800:4005:4::17 v10.nonxt2.c.youtube.com +2404:6800:4005:5::17 v10.nonxt3.c.youtube.com +2404:6800:4005:5::17 v10.nonxt4.c.youtube.com +2404:6800:4005:2::17 v10.nonxt5.c.youtube.com +2404:6800:4005:6::17 v10.nonxt6.c.youtube.com +2404:6800:4005:3::17 v10.nonxt7.c.youtube.com +2404:6800:4005:3::17 v10.nonxt8.c.youtube.com +2404:6800:4005:4::8 v11.nonxt1.c.youtube.com +2404:6800:4005:4::8 v11.nonxt2.c.youtube.com +2404:6800:4005:1::8 
v11.nonxt3.c.youtube.com +2404:6800:4005:1::8 v11.nonxt4.c.youtube.com +2404:6800:4005:2::8 v11.nonxt5.c.youtube.com +2404:6800:4005:2::8 v11.nonxt6.c.youtube.com +2404:6800:4005:3::8 v11.nonxt7.c.youtube.com +2404:6800:4005:7::8 v11.nonxt8.c.youtube.com +2404:6800:4005:4::c v12.nonxt1.c.youtube.com +2404:6800:4005::c v12.nonxt2.c.youtube.com +2404:6800:4005:5::c v12.nonxt3.c.youtube.com +2404:6800:4005:1::c v12.nonxt4.c.youtube.com +2404:6800:4005:6::c v12.nonxt5.c.youtube.com +2404:6800:4005:2::c v12.nonxt6.c.youtube.com +2404:6800:4005:3::c v12.nonxt7.c.youtube.com +2404:6800:4005:7::c v12.nonxt8.c.youtube.com +2404:6800:4005:4::10 v13.nonxt1.c.youtube.com +2404:6800:4005:4::10 v13.nonxt2.c.youtube.com +2404:6800:4005:5::10 v13.nonxt3.c.youtube.com +2404:6800:4005:5::10 v13.nonxt4.c.youtube.com +2404:6800:4005:6::10 v13.nonxt5.c.youtube.com +2404:6800:4005:2::10 v13.nonxt6.c.youtube.com +2404:6800:4005:7::10 v13.nonxt7.c.youtube.com +2404:6800:4005:3::10 v13.nonxt8.c.youtube.com +2404:6800:4005::14 v14.nonxt1.c.youtube.com +2404:6800:4005:4::14 v14.nonxt2.c.youtube.com +2404:6800:4005:5::14 v14.nonxt3.c.youtube.com +2404:6800:4005:5::14 v14.nonxt4.c.youtube.com +2404:6800:4005:6::14 v14.nonxt5.c.youtube.com +2404:6800:4005:6::14 v14.nonxt6.c.youtube.com +2404:6800:4005:7::14 v14.nonxt7.c.youtube.com +2404:6800:4005:3::14 v14.nonxt8.c.youtube.com +2404:6800:4005::18 v15.nonxt1.c.youtube.com +2404:6800:4005:4::18 v15.nonxt2.c.youtube.com +2404:6800:4005:5::18 v15.nonxt3.c.youtube.com +2404:6800:4005:1::18 v15.nonxt4.c.youtube.com +2404:6800:4005:6::18 v15.nonxt5.c.youtube.com +2404:6800:4005:2::18 v15.nonxt6.c.youtube.com +2404:6800:4005:3::18 v15.nonxt7.c.youtube.com +2404:6800:4005:7::18 v15.nonxt8.c.youtube.com +2404:6800:4005:4::9 v16.nonxt1.c.youtube.com +2404:6800:4005::9 v16.nonxt2.c.youtube.com +2404:6800:4005:1::9 v16.nonxt3.c.youtube.com +2404:6800:4005:5::9 v16.nonxt4.c.youtube.com +2404:6800:4005:6::9 v16.nonxt5.c.youtube.com +2404:6800:4005:6::9 
v16.nonxt6.c.youtube.com +2404:6800:4005:7::9 v16.nonxt7.c.youtube.com +2404:6800:4005:7::9 v16.nonxt8.c.youtube.com +2404:6800:4005::d v17.nonxt1.c.youtube.com +2404:6800:4005::d v17.nonxt2.c.youtube.com +2404:6800:4005:1::d v17.nonxt3.c.youtube.com +2404:6800:4005:1::d v17.nonxt4.c.youtube.com +2404:6800:4005:6::d v17.nonxt5.c.youtube.com +2404:6800:4005:2::d v17.nonxt6.c.youtube.com +2404:6800:4005:3::d v17.nonxt7.c.youtube.com +2404:6800:4005:3::d v17.nonxt8.c.youtube.com +2404:6800:4005:4::11 v18.nonxt1.c.youtube.com +2404:6800:4005:4::11 v18.nonxt2.c.youtube.com +2404:6800:4005:5::11 v18.nonxt3.c.youtube.com +2404:6800:4005:5::11 v18.nonxt4.c.youtube.com +2404:6800:4005:6::11 v18.nonxt5.c.youtube.com +2404:6800:4005:2::11 v18.nonxt6.c.youtube.com +2404:6800:4005:7::11 v18.nonxt7.c.youtube.com +2404:6800:4005:7::11 v18.nonxt8.c.youtube.com +2404:6800:4005::15 v19.nonxt1.c.youtube.com +2404:6800:4005::15 v19.nonxt2.c.youtube.com +2404:6800:4005:5::15 v19.nonxt3.c.youtube.com +2404:6800:4005:5::15 v19.nonxt4.c.youtube.com +2404:6800:4005:2::15 v19.nonxt5.c.youtube.com +2404:6800:4005:2::15 v19.nonxt6.c.youtube.com +2404:6800:4005:3::15 v19.nonxt7.c.youtube.com +2404:6800:4005:7::15 v19.nonxt8.c.youtube.com +2404:6800:4005::19 v20.nonxt1.c.youtube.com +2404:6800:4005::19 v20.nonxt2.c.youtube.com +2404:6800:4005:1::19 v20.nonxt3.c.youtube.com +2404:6800:4005:5::19 v20.nonxt4.c.youtube.com +2404:6800:4005:6::19 v20.nonxt5.c.youtube.com +2404:6800:4005:6::19 v20.nonxt6.c.youtube.com +2404:6800:4005:3::19 v20.nonxt7.c.youtube.com +2404:6800:4005:3::19 v20.nonxt8.c.youtube.com +2404:6800:4005::6 v21.nonxt1.c.youtube.com +2404:6800:4005::6 v21.nonxt2.c.youtube.com +2404:6800:4005:1::6 v21.nonxt3.c.youtube.com +2404:6800:4005:1::6 v21.nonxt4.c.youtube.com +2404:6800:4005:6::6 v21.nonxt5.c.youtube.com +2404:6800:4005:6::6 v21.nonxt6.c.youtube.com +2404:6800:4005:3::6 v21.nonxt7.c.youtube.com +2404:6800:4005:7::6 v21.nonxt8.c.youtube.com +2404:6800:4005:4::a 
v22.nonxt1.c.youtube.com +2404:6800:4005:4::a v22.nonxt2.c.youtube.com +2404:6800:4005:5::a v22.nonxt3.c.youtube.com +2404:6800:4005:1::a v22.nonxt4.c.youtube.com +2404:6800:4005:6::a v22.nonxt5.c.youtube.com +2404:6800:4005:6::a v22.nonxt6.c.youtube.com +2404:6800:4005:7::a v22.nonxt7.c.youtube.com +2404:6800:4005:7::a v22.nonxt8.c.youtube.com +2404:6800:4005:4::e v23.nonxt1.c.youtube.com +2404:6800:4005:4::e v23.nonxt2.c.youtube.com +2404:6800:4005:5::e v23.nonxt3.c.youtube.com +2404:6800:4005:5::e v23.nonxt4.c.youtube.com +2404:6800:4005:2::e v23.nonxt5.c.youtube.com +2404:6800:4005:6::e v23.nonxt6.c.youtube.com +2404:6800:4005:3::e v23.nonxt7.c.youtube.com +2404:6800:4005:7::e v23.nonxt8.c.youtube.com +2404:6800:4005::12 v24.nonxt1.c.youtube.com +2404:6800:4005::12 v24.nonxt2.c.youtube.com +2404:6800:4005:1::12 v24.nonxt3.c.youtube.com +2404:6800:4005:1::12 v24.nonxt4.c.youtube.com +2404:6800:4005:6::12 v24.nonxt5.c.youtube.com +2404:6800:4005:6::12 v24.nonxt6.c.youtube.com +2404:6800:4005:3::12 v24.nonxt7.c.youtube.com +2404:6800:4005:3::12 v24.nonxt8.c.youtube.com +2404:6800:4005::6 v1.cache1.c.youtube.com +2404:6800:4005:4::6 v1.cache2.c.youtube.com +2404:6800:4005:5::6 v1.cache3.c.youtube.com +2404:6800:4005:1::6 v1.cache4.c.youtube.com +2404:6800:4005:2::6 v1.cache5.c.youtube.com +2404:6800:4005:6::6 v1.cache6.c.youtube.com +2404:6800:4005:7::6 v1.cache7.c.youtube.com +2404:6800:4005:7::6 v1.cache8.c.youtube.com +2404:6800:4005:4::a v2.cache1.c.youtube.com +2404:6800:4005::a v2.cache2.c.youtube.com +2404:6800:4005:5::a v2.cache3.c.youtube.com +2404:6800:4005:5::a v2.cache4.c.youtube.com +2404:6800:4005:6::a v2.cache5.c.youtube.com +2404:6800:4005:6::a v2.cache6.c.youtube.com +2404:6800:4005:7::a v2.cache7.c.youtube.com +2404:6800:4005:7::a v2.cache8.c.youtube.com +2404:6800:4005::e v3.cache1.c.youtube.com +2404:6800:4005:4::e v3.cache2.c.youtube.com +2404:6800:4005:1::e v3.cache3.c.youtube.com +2404:6800:4005:5::e v3.cache4.c.youtube.com 
+2404:6800:4005:2::e v3.cache5.c.youtube.com +2404:6800:4005:6::e v3.cache6.c.youtube.com +2404:6800:4005:3::e v3.cache7.c.youtube.com +2404:6800:4005:3::e v3.cache8.c.youtube.com +2404:6800:4005:4::12 v4.cache1.c.youtube.com +2404:6800:4005:4::12 v4.cache2.c.youtube.com +2404:6800:4005:1::12 v4.cache3.c.youtube.com +2404:6800:4005:1::12 v4.cache4.c.youtube.com +2404:6800:4005:6::12 v4.cache5.c.youtube.com +2404:6800:4005:2::12 v4.cache6.c.youtube.com +2404:6800:4005:7::12 v4.cache7.c.youtube.com +2404:6800:4005:7::12 v4.cache8.c.youtube.com +2404:6800:4005::16 v5.cache1.c.youtube.com +2404:6800:4005:4::16 v5.cache2.c.youtube.com +2404:6800:4005:5::16 v5.cache3.c.youtube.com +2404:6800:4005:1::16 v5.cache4.c.youtube.com +2404:6800:4005:6::16 v5.cache5.c.youtube.com +2404:6800:4005:6::16 v5.cache6.c.youtube.com +2404:6800:4005:3::16 v5.cache7.c.youtube.com +2404:6800:4005:3::16 v5.cache8.c.youtube.com +2404:6800:4005::7 v6.cache1.c.youtube.com +2404:6800:4005:4::7 v6.cache2.c.youtube.com +2404:6800:4005:1::7 v6.cache3.c.youtube.com +2404:6800:4005:1::7 v6.cache4.c.youtube.com +2404:6800:4005:6::7 v6.cache5.c.youtube.com +2404:6800:4005:2::7 v6.cache6.c.youtube.com +2404:6800:4005:3::7 v6.cache7.c.youtube.com +2404:6800:4005:7::7 v6.cache8.c.youtube.com +2404:6800:4005:4::b v7.cache1.c.youtube.com +2404:6800:4005::b v7.cache2.c.youtube.com +2404:6800:4005:1::b v7.cache3.c.youtube.com +2404:6800:4005:1::b v7.cache4.c.youtube.com +2404:6800:4005:2::b v7.cache5.c.youtube.com +2404:6800:4005:2::b v7.cache6.c.youtube.com +2404:6800:4005:7::b v7.cache7.c.youtube.com +2404:6800:4005:7::b v7.cache8.c.youtube.com +2404:6800:4005:4::f v8.cache1.c.youtube.com +2404:6800:4005::f v8.cache2.c.youtube.com +2404:6800:4005:1::f v8.cache3.c.youtube.com +2404:6800:4005:5::f v8.cache4.c.youtube.com +2404:6800:4005:2::f v8.cache5.c.youtube.com +2404:6800:4005:2::f v8.cache6.c.youtube.com +2404:6800:4005:7::f v8.cache7.c.youtube.com +2404:6800:4005:3::f v8.cache8.c.youtube.com 
+2404:6800:4005::13 v9.cache1.c.youtube.com +2404:6800:4005:4::13 v9.cache2.c.youtube.com +2404:6800:4005:5::13 v9.cache3.c.youtube.com +2404:6800:4005:1::13 v9.cache4.c.youtube.com +2404:6800:4005:2::13 v9.cache5.c.youtube.com +2404:6800:4005:6::13 v9.cache6.c.youtube.com +2404:6800:4005:3::13 v9.cache7.c.youtube.com +2404:6800:4005:7::13 v9.cache8.c.youtube.com +2404:6800:4005:4::17 v10.cache1.c.youtube.com +2404:6800:4005::17 v10.cache2.c.youtube.com +2404:6800:4005:5::17 v10.cache3.c.youtube.com +2404:6800:4005:5::17 v10.cache4.c.youtube.com +2404:6800:4005:2::17 v10.cache5.c.youtube.com +2404:6800:4005:6::17 v10.cache6.c.youtube.com +2404:6800:4005:7::17 v10.cache7.c.youtube.com +2404:6800:4005:3::17 v10.cache8.c.youtube.com +2404:6800:4005::8 v11.cache1.c.youtube.com +2404:6800:4005::8 v11.cache2.c.youtube.com +2404:6800:4005:1::8 v11.cache3.c.youtube.com +2404:6800:4005:5::8 v11.cache4.c.youtube.com +2404:6800:4005:6::8 v11.cache5.c.youtube.com +2404:6800:4005:6::8 v11.cache6.c.youtube.com +2404:6800:4005:3::8 v11.cache7.c.youtube.com +2404:6800:4005:7::8 v11.cache8.c.youtube.com +2404:6800:4005::c v12.cache1.c.youtube.com +2404:6800:4005::c v12.cache2.c.youtube.com +2404:6800:4005:1::c v12.cache3.c.youtube.com +2404:6800:4005:1::c v12.cache4.c.youtube.com +2404:6800:4005:2::c v12.cache5.c.youtube.com +2404:6800:4005:2::c v12.cache6.c.youtube.com +2404:6800:4005:7::c v12.cache7.c.youtube.com +2404:6800:4005:3::c v12.cache8.c.youtube.com +2404:6800:4005:4::10 v13.cache1.c.youtube.com +2404:6800:4005::10 v13.cache2.c.youtube.com +2404:6800:4005:5::10 v13.cache3.c.youtube.com +2404:6800:4005:1::10 v13.cache4.c.youtube.com +2404:6800:4005:6::10 v13.cache5.c.youtube.com +2404:6800:4005:6::10 v13.cache6.c.youtube.com +2404:6800:4005:7::10 v13.cache7.c.youtube.com +2404:6800:4005:7::10 v13.cache8.c.youtube.com +2404:6800:4005::14 v14.cache1.c.youtube.com +2404:6800:4005:4::14 v14.cache2.c.youtube.com +2404:6800:4005:5::14 v14.cache3.c.youtube.com 
+2404:6800:4005:5::14 v14.cache4.c.youtube.com +2404:6800:4005:6::14 v14.cache5.c.youtube.com +2404:6800:4005:6::14 v14.cache6.c.youtube.com +2404:6800:4005:7::14 v14.cache7.c.youtube.com +2404:6800:4005:3::14 v14.cache8.c.youtube.com +2404:6800:4005:4::18 v15.cache1.c.youtube.com +2404:6800:4005:4::18 v15.cache2.c.youtube.com +2404:6800:4005:5::18 v15.cache3.c.youtube.com +2404:6800:4005:5::18 v15.cache4.c.youtube.com +2404:6800:4005:6::18 v15.cache5.c.youtube.com +2404:6800:4005:2::18 v15.cache6.c.youtube.com +2404:6800:4005:3::18 v15.cache7.c.youtube.com +2404:6800:4005:7::18 v15.cache8.c.youtube.com +2404:6800:4005:4::9 v16.cache1.c.youtube.com +2404:6800:4005::9 v16.cache2.c.youtube.com +2404:6800:4005:5::9 v16.cache3.c.youtube.com +2404:6800:4005:1::9 v16.cache4.c.youtube.com +2404:6800:4005:6::9 v16.cache5.c.youtube.com +2404:6800:4005:6::9 v16.cache6.c.youtube.com +2404:6800:4005:7::9 v16.cache7.c.youtube.com +2404:6800:4005:7::9 v16.cache8.c.youtube.com +2404:6800:4005::d v17.cache1.c.youtube.com +2404:6800:4005::d v17.cache2.c.youtube.com +2404:6800:4005:1::d v17.cache3.c.youtube.com +2404:6800:4005:5::d v17.cache4.c.youtube.com +2404:6800:4005:6::d v17.cache5.c.youtube.com +2404:6800:4005:2::d v17.cache6.c.youtube.com +2404:6800:4005:3::d v17.cache7.c.youtube.com +2404:6800:4005:7::d v17.cache8.c.youtube.com +2404:6800:4005:4::11 v18.cache1.c.youtube.com +2404:6800:4005::11 v18.cache2.c.youtube.com +2404:6800:4005:1::11 v18.cache3.c.youtube.com +2404:6800:4005:5::11 v18.cache4.c.youtube.com +2404:6800:4005:2::11 v18.cache5.c.youtube.com +2404:6800:4005:2::11 v18.cache6.c.youtube.com +2404:6800:4005:7::11 v18.cache7.c.youtube.com +2404:6800:4005:7::11 v18.cache8.c.youtube.com +2404:6800:4005:4::15 v19.cache1.c.youtube.com +2404:6800:4005:4::15 v19.cache2.c.youtube.com +2404:6800:4005:1::15 v19.cache3.c.youtube.com +2404:6800:4005:5::15 v19.cache4.c.youtube.com +2404:6800:4005:6::15 v19.cache5.c.youtube.com +2404:6800:4005:6::15 v19.cache6.c.youtube.com 
+2404:6800:4005:7::15 v19.cache7.c.youtube.com +2404:6800:4005:7::15 v19.cache8.c.youtube.com +2404:6800:4005::19 v20.cache1.c.youtube.com +2404:6800:4005:4::19 v20.cache2.c.youtube.com +2404:6800:4005:1::19 v20.cache3.c.youtube.com +2404:6800:4005:5::19 v20.cache4.c.youtube.com +2404:6800:4005:6::19 v20.cache5.c.youtube.com +2404:6800:4005:6::19 v20.cache6.c.youtube.com +2404:6800:4005:3::19 v20.cache7.c.youtube.com +2404:6800:4005:3::19 v20.cache8.c.youtube.com +2404:6800:4005:4::6 v21.cache1.c.youtube.com +2404:6800:4005:4::6 v21.cache2.c.youtube.com +2404:6800:4005:1::6 v21.cache3.c.youtube.com +2404:6800:4005:1::6 v21.cache4.c.youtube.com +2404:6800:4005:2::6 v21.cache5.c.youtube.com +2404:6800:4005:2::6 v21.cache6.c.youtube.com +2404:6800:4005:3::6 v21.cache7.c.youtube.com +2404:6800:4005:7::6 v21.cache8.c.youtube.com +2404:6800:4005::a v22.cache1.c.youtube.com +2404:6800:4005:4::a v22.cache2.c.youtube.com +2404:6800:4005:5::a v22.cache3.c.youtube.com +2404:6800:4005:5::a v22.cache4.c.youtube.com +2404:6800:4005:2::a v22.cache5.c.youtube.com +2404:6800:4005:2::a v22.cache6.c.youtube.com +2404:6800:4005:3::a v22.cache7.c.youtube.com +2404:6800:4005:3::a v22.cache8.c.youtube.com +2404:6800:4005::e v23.cache1.c.youtube.com +2404:6800:4005::e v23.cache2.c.youtube.com +2404:6800:4005:1::e v23.cache3.c.youtube.com +2404:6800:4005:5::e v23.cache4.c.youtube.com +2404:6800:4005:6::e v23.cache5.c.youtube.com +2404:6800:4005:2::e v23.cache6.c.youtube.com +2404:6800:4005:7::e v23.cache7.c.youtube.com +2404:6800:4005:3::e v23.cache8.c.youtube.com +2404:6800:4005:4::12 v24.cache1.c.youtube.com +2404:6800:4005:4::12 v24.cache2.c.youtube.com +2404:6800:4005:5::12 v24.cache3.c.youtube.com +2404:6800:4005:1::12 v24.cache4.c.youtube.com +2404:6800:4005:6::12 v24.cache5.c.youtube.com +2404:6800:4005:2::12 v24.cache6.c.youtube.com +2404:6800:4005:3::12 v24.cache7.c.youtube.com +2404:6800:4005:7::12 v24.cache8.c.youtube.com +2404:6800:4007::6 tc.v1.cache1.c.youtube.com 
+2404:6800:4007::6 tc.v1.cache2.c.youtube.com +2404:6800:4007::6 tc.v1.cache3.c.youtube.com +2404:6800:4007::6 tc.v1.cache4.c.youtube.com +2404:6800:4007::6 tc.v1.cache5.c.youtube.com +2404:6800:4007::6 tc.v1.cache6.c.youtube.com +2404:6800:4007::6 tc.v1.cache7.c.youtube.com +2404:6800:4007::6 tc.v1.cache8.c.youtube.com +2404:6800:4007::a tc.v2.cache1.c.youtube.com +2404:6800:4007::a tc.v2.cache2.c.youtube.com +2404:6800:4007::a tc.v2.cache3.c.youtube.com +2404:6800:4007::a tc.v2.cache4.c.youtube.com +2404:6800:4007::a tc.v2.cache5.c.youtube.com +2404:6800:4007::a tc.v2.cache6.c.youtube.com +2404:6800:4007::a tc.v2.cache7.c.youtube.com +2404:6800:4007::a tc.v2.cache8.c.youtube.com +2404:6800:4007::e tc.v3.cache1.c.youtube.com +2404:6800:4007::e tc.v3.cache2.c.youtube.com +2404:6800:4007::e tc.v3.cache3.c.youtube.com +2404:6800:4007::e tc.v3.cache4.c.youtube.com +2404:6800:4007::e tc.v3.cache5.c.youtube.com +2404:6800:4007::e tc.v3.cache6.c.youtube.com +2404:6800:4007::e tc.v3.cache7.c.youtube.com +2404:6800:4007::e tc.v3.cache8.c.youtube.com +2404:6800:4007::12 tc.v4.cache1.c.youtube.com +2404:6800:4007::12 tc.v4.cache2.c.youtube.com +2404:6800:4007::12 tc.v4.cache3.c.youtube.com +2404:6800:4007::12 tc.v4.cache4.c.youtube.com +2404:6800:4007::12 tc.v4.cache5.c.youtube.com +2404:6800:4007::12 tc.v4.cache6.c.youtube.com +2404:6800:4007::12 tc.v4.cache7.c.youtube.com +2404:6800:4007::12 tc.v4.cache8.c.youtube.com +2404:6800:4007::16 tc.v5.cache1.c.youtube.com +2404:6800:4007::16 tc.v5.cache2.c.youtube.com +2404:6800:4007::16 tc.v5.cache3.c.youtube.com +2404:6800:4007::16 tc.v5.cache4.c.youtube.com +2404:6800:4007::16 tc.v5.cache5.c.youtube.com +2404:6800:4007::16 tc.v5.cache6.c.youtube.com +2404:6800:4007::16 tc.v5.cache7.c.youtube.com +2404:6800:4007::16 tc.v5.cache8.c.youtube.com +2404:6800:4007::7 tc.v6.cache1.c.youtube.com +2404:6800:4007::7 tc.v6.cache2.c.youtube.com +2404:6800:4007::7 tc.v6.cache3.c.youtube.com +2404:6800:4007::7 tc.v6.cache4.c.youtube.com 
+2404:6800:4007::7 tc.v6.cache5.c.youtube.com +2404:6800:4007::7 tc.v6.cache6.c.youtube.com +2404:6800:4007::7 tc.v6.cache7.c.youtube.com +2404:6800:4007::7 tc.v6.cache8.c.youtube.com +2404:6800:4007::b tc.v7.cache1.c.youtube.com +2404:6800:4007::b tc.v7.cache2.c.youtube.com +2404:6800:4007::b tc.v7.cache3.c.youtube.com +2404:6800:4007::b tc.v7.cache4.c.youtube.com +2404:6800:4007::b tc.v7.cache5.c.youtube.com +2404:6800:4007::b tc.v7.cache6.c.youtube.com +2404:6800:4007::b tc.v7.cache7.c.youtube.com +2404:6800:4007::b tc.v7.cache8.c.youtube.com +2404:6800:4007::f tc.v8.cache1.c.youtube.com +2404:6800:4007::f tc.v8.cache2.c.youtube.com +2404:6800:4007::f tc.v8.cache3.c.youtube.com +2404:6800:4007::f tc.v8.cache4.c.youtube.com +2404:6800:4007::f tc.v8.cache5.c.youtube.com +2404:6800:4007::f tc.v8.cache6.c.youtube.com +2404:6800:4007::f tc.v8.cache7.c.youtube.com +2404:6800:4007::f tc.v8.cache8.c.youtube.com +2404:6800:4007::13 tc.v9.cache1.c.youtube.com +2404:6800:4007::13 tc.v9.cache2.c.youtube.com +2404:6800:4007::13 tc.v9.cache3.c.youtube.com +2404:6800:4007::13 tc.v9.cache4.c.youtube.com +2404:6800:4007::13 tc.v9.cache5.c.youtube.com +2404:6800:4007::13 tc.v9.cache6.c.youtube.com +2404:6800:4007::13 tc.v9.cache7.c.youtube.com +2404:6800:4007::13 tc.v9.cache8.c.youtube.com +2404:6800:4007::17 tc.v10.cache1.c.youtube.com +2404:6800:4007::17 tc.v10.cache2.c.youtube.com +2404:6800:4007::17 tc.v10.cache3.c.youtube.com +2404:6800:4007::17 tc.v10.cache4.c.youtube.com +2404:6800:4007::17 tc.v10.cache5.c.youtube.com +2404:6800:4007::17 tc.v10.cache6.c.youtube.com +2404:6800:4007::17 tc.v10.cache7.c.youtube.com +2404:6800:4007::17 tc.v10.cache8.c.youtube.com +2404:6800:4007::8 tc.v11.cache1.c.youtube.com +2404:6800:4007::8 tc.v11.cache2.c.youtube.com +2404:6800:4007::8 tc.v11.cache3.c.youtube.com +2404:6800:4007::8 tc.v11.cache4.c.youtube.com +2404:6800:4007::8 tc.v11.cache5.c.youtube.com +2404:6800:4007::8 tc.v11.cache6.c.youtube.com +2404:6800:4007::8 
tc.v11.cache7.c.youtube.com +2404:6800:4007::8 tc.v11.cache8.c.youtube.com +2404:6800:4007::c tc.v12.cache1.c.youtube.com +2404:6800:4007::c tc.v12.cache2.c.youtube.com +2404:6800:4007::c tc.v12.cache3.c.youtube.com +2404:6800:4007::c tc.v12.cache4.c.youtube.com +2404:6800:4007::c tc.v12.cache5.c.youtube.com +2404:6800:4007::c tc.v12.cache6.c.youtube.com +2404:6800:4007::c tc.v12.cache7.c.youtube.com +2404:6800:4007::c tc.v12.cache8.c.youtube.com +2404:6800:4007::10 tc.v13.cache1.c.youtube.com +2404:6800:4007::10 tc.v13.cache2.c.youtube.com +2404:6800:4007::10 tc.v13.cache3.c.youtube.com +2404:6800:4007::10 tc.v13.cache4.c.youtube.com +2404:6800:4007::10 tc.v13.cache5.c.youtube.com +2404:6800:4007::10 tc.v13.cache6.c.youtube.com +2404:6800:4007::10 tc.v13.cache7.c.youtube.com +2404:6800:4007::10 tc.v13.cache8.c.youtube.com +2404:6800:4007::14 tc.v14.cache1.c.youtube.com +2404:6800:4007::14 tc.v14.cache2.c.youtube.com +2404:6800:4007::14 tc.v14.cache3.c.youtube.com +2404:6800:4007::14 tc.v14.cache4.c.youtube.com +2404:6800:4007::14 tc.v14.cache5.c.youtube.com +2404:6800:4007::14 tc.v14.cache6.c.youtube.com +2404:6800:4007::14 tc.v14.cache7.c.youtube.com +2404:6800:4007::14 tc.v14.cache8.c.youtube.com +2404:6800:4007::18 tc.v15.cache1.c.youtube.com +2404:6800:4007::18 tc.v15.cache2.c.youtube.com +2404:6800:4007::18 tc.v15.cache3.c.youtube.com +2404:6800:4007::18 tc.v15.cache4.c.youtube.com +2404:6800:4007::18 tc.v15.cache5.c.youtube.com +2404:6800:4007::18 tc.v15.cache6.c.youtube.com +2404:6800:4007::18 tc.v15.cache7.c.youtube.com +2404:6800:4007::18 tc.v15.cache8.c.youtube.com +2404:6800:4007::9 tc.v16.cache1.c.youtube.com +2404:6800:4007::9 tc.v16.cache2.c.youtube.com +2404:6800:4007::9 tc.v16.cache3.c.youtube.com +2404:6800:4007::9 tc.v16.cache4.c.youtube.com +2404:6800:4007::9 tc.v16.cache5.c.youtube.com +2404:6800:4007::9 tc.v16.cache6.c.youtube.com +2404:6800:4007::9 tc.v16.cache7.c.youtube.com +2404:6800:4007::9 tc.v16.cache8.c.youtube.com +2404:6800:4007::d 
tc.v17.cache1.c.youtube.com +2404:6800:4007::d tc.v17.cache2.c.youtube.com +2404:6800:4007::d tc.v17.cache3.c.youtube.com +2404:6800:4007::d tc.v17.cache4.c.youtube.com +2404:6800:4007::d tc.v17.cache5.c.youtube.com +2404:6800:4007::d tc.v17.cache6.c.youtube.com +2404:6800:4007::d tc.v17.cache7.c.youtube.com +2404:6800:4007::d tc.v17.cache8.c.youtube.com +2404:6800:4007::11 tc.v18.cache1.c.youtube.com +2404:6800:4007::11 tc.v18.cache2.c.youtube.com +2404:6800:4007::11 tc.v18.cache3.c.youtube.com +2404:6800:4007::11 tc.v18.cache4.c.youtube.com +2404:6800:4007::11 tc.v18.cache5.c.youtube.com +2404:6800:4007::11 tc.v18.cache6.c.youtube.com +2404:6800:4007::11 tc.v18.cache7.c.youtube.com +2404:6800:4007::11 tc.v18.cache8.c.youtube.com +2404:6800:4007::15 tc.v19.cache1.c.youtube.com +2404:6800:4007::15 tc.v19.cache2.c.youtube.com +2404:6800:4007::15 tc.v19.cache3.c.youtube.com +2404:6800:4007::15 tc.v19.cache4.c.youtube.com +2404:6800:4007::15 tc.v19.cache5.c.youtube.com +2404:6800:4007::15 tc.v19.cache6.c.youtube.com +2404:6800:4007::15 tc.v19.cache7.c.youtube.com +2404:6800:4007::15 tc.v19.cache8.c.youtube.com +2404:6800:4007::19 tc.v20.cache1.c.youtube.com +2404:6800:4007::19 tc.v20.cache2.c.youtube.com +2404:6800:4007::19 tc.v20.cache3.c.youtube.com +2404:6800:4007::19 tc.v20.cache4.c.youtube.com +2404:6800:4007::19 tc.v20.cache5.c.youtube.com +2404:6800:4007::19 tc.v20.cache6.c.youtube.com +2404:6800:4007::19 tc.v20.cache7.c.youtube.com +2404:6800:4007::19 tc.v20.cache8.c.youtube.com +2404:6800:4007::6 tc.v21.cache1.c.youtube.com +2404:6800:4007::6 tc.v21.cache2.c.youtube.com +2404:6800:4007::6 tc.v21.cache3.c.youtube.com +2404:6800:4007::6 tc.v21.cache4.c.youtube.com +2404:6800:4007::6 tc.v21.cache5.c.youtube.com +2404:6800:4007::6 tc.v21.cache6.c.youtube.com +2404:6800:4007::6 tc.v21.cache7.c.youtube.com +2404:6800:4007::6 tc.v21.cache8.c.youtube.com +2404:6800:4007::a tc.v22.cache1.c.youtube.com +2404:6800:4007::a tc.v22.cache2.c.youtube.com +2404:6800:4007::a 
tc.v22.cache3.c.youtube.com +2404:6800:4007::a tc.v22.cache4.c.youtube.com +2404:6800:4007::a tc.v22.cache5.c.youtube.com +2404:6800:4007::a tc.v22.cache6.c.youtube.com +2404:6800:4007::a tc.v22.cache7.c.youtube.com +2404:6800:4007::a tc.v22.cache8.c.youtube.com +2404:6800:4007::e tc.v23.cache1.c.youtube.com +2404:6800:4007::e tc.v23.cache2.c.youtube.com +2404:6800:4007::e tc.v23.cache3.c.youtube.com +2404:6800:4007::e tc.v23.cache4.c.youtube.com +2404:6800:4007::e tc.v23.cache5.c.youtube.com +2404:6800:4007::e tc.v23.cache6.c.youtube.com +2404:6800:4007::e tc.v23.cache7.c.youtube.com +2404:6800:4007::e tc.v23.cache8.c.youtube.com +2404:6800:4007::12 tc.v24.cache1.c.youtube.com +2404:6800:4007::12 tc.v24.cache2.c.youtube.com +2404:6800:4007::12 tc.v24.cache3.c.youtube.com +2404:6800:4007::12 tc.v24.cache4.c.youtube.com +2404:6800:4007::12 tc.v24.cache5.c.youtube.com +2404:6800:4007::12 tc.v24.cache6.c.youtube.com +2404:6800:4007::12 tc.v24.cache7.c.youtube.com +2404:6800:4007::12 tc.v24.cache8.c.youtube.com +2404:6800:4008:2::6 r1.tpe05s03.c.youtube.com +2404:6800:4008:3::6 r1.tpe05s04.c.youtube.com +2404:6800:4008:2::7 r2.tpe05s03.c.youtube.com +2404:6800:4008:3::7 r2.tpe05s04.c.youtube.com +2404:6800:4008:2::8 r3.tpe05s03.c.youtube.com +2404:6800:4008:3::8 r3.tpe05s04.c.youtube.com +2404:6800:4008:2::9 r4.tpe05s03.c.youtube.com +2404:6800:4008:3::9 r4.tpe05s04.c.youtube.com +2404:6800:4008:2::a r5.tpe05s03.c.youtube.com +2404:6800:4008:3::a r5.tpe05s04.c.youtube.com +2404:6800:4008:2::b r6.tpe05s03.c.youtube.com +2404:6800:4008:3::b r6.tpe05s04.c.youtube.com +2404:6800:4008:2::c r7.tpe05s03.c.youtube.com +2404:6800:4008:3::c r7.tpe05s04.c.youtube.com +2404:6800:4008:2::d r8.tpe05s03.c.youtube.com +2404:6800:4008:3::d r8.tpe05s04.c.youtube.com +2404:6800:4008:2::e r9.tpe05s03.c.youtube.com +2404:6800:4008:3::e r9.tpe05s04.c.youtube.com +2404:6800:4008:2::f r10.tpe05s03.c.youtube.com +2404:6800:4008:3::f r10.tpe05s04.c.youtube.com +2404:6800:4008:2::10 
r11.tpe05s03.c.youtube.com +2404:6800:4008:3::10 r11.tpe05s04.c.youtube.com +2404:6800:4008:2::11 r12.tpe05s03.c.youtube.com +2404:6800:4008:3::11 r12.tpe05s04.c.youtube.com +2404:6800:4008:2::12 r13.tpe05s03.c.youtube.com +2404:6800:4008:3::12 r13.tpe05s04.c.youtube.com +2404:6800:4008:2::13 r14.tpe05s03.c.youtube.com +2404:6800:4008:3::13 r14.tpe05s04.c.youtube.com +2404:6800:4008:2::14 r15.tpe05s03.c.youtube.com +2404:6800:4008:3::14 r15.tpe05s04.c.youtube.com +2404:6800:4008:2::15 r16.tpe05s03.c.youtube.com +2404:6800:4008:3::15 r16.tpe05s04.c.youtube.com +2404:6800:4008:2::16 r17.tpe05s03.c.youtube.com +2404:6800:4008:3::16 r17.tpe05s04.c.youtube.com +2404:6800:4008:2::17 r18.tpe05s03.c.youtube.com +2404:6800:4008:3::17 r18.tpe05s04.c.youtube.com +2404:6800:4008:2::18 r19.tpe05s03.c.youtube.com +2404:6800:4008:3::18 r19.tpe05s04.c.youtube.com +2404:6800:4008:2::19 r20.tpe05s03.c.youtube.com +2404:6800:4008:3::19 r20.tpe05s04.c.youtube.com +2404:6800:4005:4::6 r1.hkg03s05.c.youtube.com +2404:6800:4005:5::6 r1.hkg03s06.c.youtube.com +2404:6800:4005:6::6 r1.hkg03s07.c.youtube.com +2404:6800:4005:7::6 r1.hkg03s08.c.youtube.com +2404:6800:4005:4::7 r2.hkg03s05.c.youtube.com +2404:6800:4005:5::7 r2.hkg03s06.c.youtube.com +2404:6800:4005:6::7 r2.hkg03s07.c.youtube.com +2404:6800:4005:7::7 r2.hkg03s08.c.youtube.com +2404:6800:4005:4::8 r3.hkg03s05.c.youtube.com +2404:6800:4005:5::8 r3.hkg03s06.c.youtube.com +2404:6800:4005:6::8 r3.hkg03s07.c.youtube.com +2404:6800:4005:7::8 r3.hkg03s08.c.youtube.com +2404:6800:4005:4::9 r4.hkg03s05.c.youtube.com +2404:6800:4005:5::9 r4.hkg03s06.c.youtube.com +2404:6800:4005:6::9 r4.hkg03s07.c.youtube.com +2404:6800:4005:7::9 r4.hkg03s08.c.youtube.com +2404:6800:4005:4::a r5.hkg03s05.c.youtube.com +2404:6800:4005:5::a r5.hkg03s06.c.youtube.com +2404:6800:4005:6::a r5.hkg03s07.c.youtube.com +2404:6800:4005:7::a r5.hkg03s08.c.youtube.com +2404:6800:4005:4::b r6.hkg03s05.c.youtube.com +2404:6800:4005:5::b r6.hkg03s06.c.youtube.com 
+2404:6800:4005:6::b r6.hkg03s07.c.youtube.com +2404:6800:4005:7::b r6.hkg03s08.c.youtube.com +2404:6800:4005:4::c r7.hkg03s05.c.youtube.com +2404:6800:4005:5::c r7.hkg03s06.c.youtube.com +2404:6800:4005:6::c r7.hkg03s07.c.youtube.com +2404:6800:4005:7::c r7.hkg03s08.c.youtube.com +2404:6800:4005:4::d r8.hkg03s05.c.youtube.com +2404:6800:4005:5::d r8.hkg03s06.c.youtube.com +2404:6800:4005:6::d r8.hkg03s07.c.youtube.com +2404:6800:4005:7::d r8.hkg03s08.c.youtube.com +2404:6800:4005:4::e r9.hkg03s05.c.youtube.com +2404:6800:4005:5::e r9.hkg03s06.c.youtube.com +2404:6800:4005:6::e r9.hkg03s07.c.youtube.com +2404:6800:4005:7::e r9.hkg03s08.c.youtube.com +2404:6800:4005:4::f r10.hkg03s05.c.youtube.com +2404:6800:4005:5::f r10.hkg03s06.c.youtube.com +2404:6800:4005:6::f r10.hkg03s07.c.youtube.com +2404:6800:4005:7::f r10.hkg03s08.c.youtube.com +2404:6800:4005:4::10 r11.hkg03s05.c.youtube.com +2404:6800:4005:5::10 r11.hkg03s06.c.youtube.com +2404:6800:4005:6::10 r11.hkg03s07.c.youtube.com +2404:6800:4005:7::10 r11.hkg03s08.c.youtube.com +2404:6800:4005:4::11 r12.hkg03s05.c.youtube.com +2404:6800:4005:5::11 r12.hkg03s06.c.youtube.com +2404:6800:4005:6::11 r12.hkg03s07.c.youtube.com +2404:6800:4005:7::11 r12.hkg03s08.c.youtube.com +2404:6800:4005:4::12 r13.hkg03s05.c.youtube.com +2404:6800:4005:5::12 r13.hkg03s06.c.youtube.com +2404:6800:4005:6::12 r13.hkg03s07.c.youtube.com +2404:6800:4005:7::12 r13.hkg03s08.c.youtube.com +2404:6800:4005:4::13 r14.hkg03s05.c.youtube.com +2404:6800:4005:5::13 r14.hkg03s06.c.youtube.com +2404:6800:4005:6::13 r14.hkg03s07.c.youtube.com +2404:6800:4005:7::13 r14.hkg03s08.c.youtube.com +2404:6800:4005:4::14 r15.hkg03s05.c.youtube.com +2404:6800:4005:5::14 r15.hkg03s06.c.youtube.com +2404:6800:4005:6::14 r15.hkg03s07.c.youtube.com +2404:6800:4005:7::14 r15.hkg03s08.c.youtube.com +2404:6800:4005:4::15 r16.hkg03s05.c.youtube.com +2404:6800:4005:5::15 r16.hkg03s06.c.youtube.com +2404:6800:4005:6::15 r16.hkg03s07.c.youtube.com +2404:6800:4005:7::15 
r16.hkg03s08.c.youtube.com +2404:6800:4005:4::16 r17.hkg03s05.c.youtube.com +2404:6800:4005:5::16 r17.hkg03s06.c.youtube.com +2404:6800:4005:6::16 r17.hkg03s07.c.youtube.com +2404:6800:4005:7::16 r17.hkg03s08.c.youtube.com +2404:6800:4005:4::17 r18.hkg03s05.c.youtube.com +2404:6800:4005:5::17 r18.hkg03s06.c.youtube.com +2404:6800:4005:6::17 r18.hkg03s07.c.youtube.com +2404:6800:4005:7::17 r18.hkg03s08.c.youtube.com +2404:6800:4005:4::18 r19.hkg03s05.c.youtube.com +2404:6800:4005:5::18 r19.hkg03s06.c.youtube.com +2404:6800:4005:6::18 r19.hkg03s07.c.youtube.com +2404:6800:4005:7::18 r19.hkg03s08.c.youtube.com +2404:6800:4005:4::19 r20.hkg03s05.c.youtube.com +2404:6800:4005:5::19 r20.hkg03s06.c.youtube.com +2404:6800:4005:6::19 r20.hkg03s07.c.youtube.com +2404:6800:4005:7::19 r20.hkg03s08.c.youtube.com +2404:6800:4005::6 r1.hkg05s01.c.youtube.com +2404:6800:4005:1::6 r1.hkg05s02.c.youtube.com +2404:6800:4005:2::6 r1.hkg05s03.c.youtube.com +2404:6800:4005:3::6 r1.hkg05s04.c.youtube.com +2404:6800:4005::7 r2.hkg05s01.c.youtube.com +2404:6800:4005:1::7 r2.hkg05s02.c.youtube.com +2404:6800:4005:2::7 r2.hkg05s03.c.youtube.com +2404:6800:4005:3::7 r2.hkg05s04.c.youtube.com +2404:6800:4005::8 r3.hkg05s01.c.youtube.com +2404:6800:4005:1::8 r3.hkg05s02.c.youtube.com +2404:6800:4005:2::8 r3.hkg05s03.c.youtube.com +2404:6800:4005:3::8 r3.hkg05s04.c.youtube.com +2404:6800:4005::9 r4.hkg05s01.c.youtube.com +2404:6800:4005:1::9 r4.hkg05s02.c.youtube.com +2404:6800:4005:2::9 r4.hkg05s03.c.youtube.com +2404:6800:4005:3::9 r4.hkg05s04.c.youtube.com +2404:6800:4005::a r5.hkg05s01.c.youtube.com +2404:6800:4005:1::a r5.hkg05s02.c.youtube.com +2404:6800:4005:2::a r5.hkg05s03.c.youtube.com +2404:6800:4005:3::a r5.hkg05s04.c.youtube.com +2404:6800:4005::b r6.hkg05s01.c.youtube.com +2404:6800:4005:1::b r6.hkg05s02.c.youtube.com +2404:6800:4005:2::b r6.hkg05s03.c.youtube.com +2404:6800:4005:3::b r6.hkg05s04.c.youtube.com +2404:6800:4005::c r7.hkg05s01.c.youtube.com +2404:6800:4005:1::c 
r7.hkg05s02.c.youtube.com +2404:6800:4005:2::c r7.hkg05s03.c.youtube.com +2404:6800:4005:3::c r7.hkg05s04.c.youtube.com +2404:6800:4005::d r8.hkg05s01.c.youtube.com +2404:6800:4005:1::d r8.hkg05s02.c.youtube.com +2404:6800:4005:2::d r8.hkg05s03.c.youtube.com +2404:6800:4005:3::d r8.hkg05s04.c.youtube.com +2404:6800:4005::e r9.hkg05s01.c.youtube.com +2404:6800:4005:1::e r9.hkg05s02.c.youtube.com +2404:6800:4005:2::e r9.hkg05s03.c.youtube.com +2404:6800:4005:3::e r9.hkg05s04.c.youtube.com +2404:6800:4005::f r10.hkg05s01.c.youtube.com +2404:6800:4005:1::f r10.hkg05s02.c.youtube.com +2404:6800:4005:2::f r10.hkg05s03.c.youtube.com +2404:6800:4005:3::f r10.hkg05s04.c.youtube.com +2404:6800:4005::10 r11.hkg05s01.c.youtube.com +2404:6800:4005:1::10 r11.hkg05s02.c.youtube.com +2404:6800:4005:2::10 r11.hkg05s03.c.youtube.com +2404:6800:4005:3::10 r11.hkg05s04.c.youtube.com +2404:6800:4005::11 r12.hkg05s01.c.youtube.com +2404:6800:4005:1::11 r12.hkg05s02.c.youtube.com +2404:6800:4005:2::11 r12.hkg05s03.c.youtube.com +2404:6800:4005:3::11 r12.hkg05s04.c.youtube.com +2404:6800:4005::12 r13.hkg05s01.c.youtube.com +2404:6800:4005:1::12 r13.hkg05s02.c.youtube.com +2404:6800:4005:2::12 r13.hkg05s03.c.youtube.com +2404:6800:4005:3::12 r13.hkg05s04.c.youtube.com +2404:6800:4005::13 r14.hkg05s01.c.youtube.com +2404:6800:4005:1::13 r14.hkg05s02.c.youtube.com +2404:6800:4005:2::13 r14.hkg05s03.c.youtube.com +2404:6800:4005:3::13 r14.hkg05s04.c.youtube.com +2404:6800:4005::14 r15.hkg05s01.c.youtube.com +2404:6800:4005:1::14 r15.hkg05s02.c.youtube.com +2404:6800:4005:2::14 r15.hkg05s03.c.youtube.com +2404:6800:4005:3::14 r15.hkg05s04.c.youtube.com +2404:6800:4005::15 r16.hkg05s01.c.youtube.com +2404:6800:4005:1::15 r16.hkg05s02.c.youtube.com +2404:6800:4005:2::15 r16.hkg05s03.c.youtube.com +2404:6800:4005:3::15 r16.hkg05s04.c.youtube.com +2404:6800:4005::16 r17.hkg05s01.c.youtube.com +2404:6800:4005:1::16 r17.hkg05s02.c.youtube.com +2404:6800:4005:2::16 r17.hkg05s03.c.youtube.com 
+2404:6800:4005:3::16 r17.hkg05s04.c.youtube.com +2404:6800:4005::17 r18.hkg05s01.c.youtube.com +2404:6800:4005:1::17 r18.hkg05s02.c.youtube.com +2404:6800:4005:2::17 r18.hkg05s03.c.youtube.com +2404:6800:4005:3::17 r18.hkg05s04.c.youtube.com +2404:6800:4005::18 r19.hkg05s01.c.youtube.com +2404:6800:4005:1::18 r19.hkg05s02.c.youtube.com +2404:6800:4005:2::18 r19.hkg05s03.c.youtube.com +2404:6800:4005:3::18 r19.hkg05s04.c.youtube.com +2404:6800:4005::19 r20.hkg05s01.c.youtube.com +2404:6800:4005:1::19 r20.hkg05s02.c.youtube.com +2404:6800:4005:2::19 r20.hkg05s03.c.youtube.com +2404:6800:4005:3::19 r20.hkg05s04.c.youtube.com +2404:6800:4007::6 r1.maa03s08.c.youtube.com +2404:6800:4007:1::6 r1.maa03s09.c.youtube.com +2404:6800:4007::7 r2.maa03s08.c.youtube.com +2404:6800:4007:1::7 r2.maa03s09.c.youtube.com +2404:6800:4007::8 r3.maa03s08.c.youtube.com +2404:6800:4007:1::8 r3.maa03s09.c.youtube.com +2404:6800:4007::9 r4.maa03s08.c.youtube.com +2404:6800:4007:1::9 r4.maa03s09.c.youtube.com +2404:6800:4007::a r5.maa03s08.c.youtube.com +2404:6800:4007:1::a r5.maa03s09.c.youtube.com +2404:6800:4007::b r6.maa03s08.c.youtube.com +2404:6800:4007:1::b r6.maa03s09.c.youtube.com +2404:6800:4007::c r7.maa03s08.c.youtube.com +2404:6800:4007:1::c r7.maa03s09.c.youtube.com +2404:6800:4007::d r8.maa03s08.c.youtube.com +2404:6800:4007:1::d r8.maa03s09.c.youtube.com +2404:6800:4007::e r9.maa03s08.c.youtube.com +2404:6800:4007:1::e r9.maa03s09.c.youtube.com +2404:6800:4007::f r10.maa03s08.c.youtube.com +2404:6800:4007:1::f r10.maa03s09.c.youtube.com +2404:6800:4007::10 r11.maa03s08.c.youtube.com +2404:6800:4007:1::10 r11.maa03s09.c.youtube.com +2404:6800:4007::11 r12.maa03s08.c.youtube.com +2404:6800:4007:1::11 r12.maa03s09.c.youtube.com +2404:6800:4007::12 r13.maa03s08.c.youtube.com +2404:6800:4007:1::12 r13.maa03s09.c.youtube.com +2404:6800:4007::13 r14.maa03s08.c.youtube.com +2404:6800:4007:1::13 r14.maa03s09.c.youtube.com +2404:6800:4007::14 r15.maa03s08.c.youtube.com 
+2404:6800:4007:1::14 r15.maa03s09.c.youtube.com +2404:6800:4007::15 r16.maa03s08.c.youtube.com +2404:6800:4007:1::15 r16.maa03s09.c.youtube.com +2404:6800:4007::16 r17.maa03s08.c.youtube.com +2404:6800:4007:1::16 r17.maa03s09.c.youtube.com +2404:6800:4007::17 r18.maa03s08.c.youtube.com +2404:6800:4007:1::17 r18.maa03s09.c.youtube.com +2404:6800:4007::18 r19.maa03s08.c.youtube.com +2404:6800:4007:1::18 r19.maa03s09.c.youtube.com +2404:6800:4007::19 r20.maa03s08.c.youtube.com +2404:6800:4007:1::19 r20.maa03s09.c.youtube.com + +#AdWords/AdSense 广告服务 +#2404:6800:8005::62 www.googlesyndication.com +2404:6800:8005::60 www.googleadservices.com +2404:6800:8005::62 pagead2.googlesyndication.com +2404:6800:8005::62 pagead.google.com +2404:6800:8005::a4 pagead2.googleadservices.com +2404:6800:8005::62 pagead.l.google.com +2404:6800:8005::68 buttons.googlesyndication.com +2404:6800:8005::62 domains.googlesyndication.com +2404:6800:8005::62 afd.l.google.com +2404:6800:8005::98 tpc.googlesyndication.com +2404:6800:8005::98 pagead-tpc.l.google.com +2404:6800:4005:4::6 v1.cache1.c.googlesyndication.com +2404:6800:4005:4::6 v1.cache2.c.googlesyndication.com +2404:6800:4005:1::6 v1.cache3.c.googlesyndication.com +2404:6800:4005:1::6 v1.cache4.c.googlesyndication.com +2404:6800:4005:6::6 v1.cache5.c.googlesyndication.com +2404:6800:4005:6::6 v1.cache6.c.googlesyndication.com +2404:6800:4005:3::6 v1.cache7.c.googlesyndication.com +2404:6800:4005:3::6 v1.cache8.c.googlesyndication.com +2404:6800:4005::a v2.cache1.c.googlesyndication.com +2404:6800:4005::a v2.cache2.c.googlesyndication.com +2404:6800:4005:5::a v2.cache3.c.googlesyndication.com +2404:6800:4005:5::a v2.cache4.c.googlesyndication.com +2404:6800:4005:2::a v2.cache5.c.googlesyndication.com +2404:6800:4005:2::a v2.cache6.c.googlesyndication.com +2404:6800:4005:7::a v2.cache7.c.googlesyndication.com +2404:6800:4005:7::a v2.cache8.c.googlesyndication.com +2404:6800:4005::e v3.cache1.c.googlesyndication.com +2404:6800:4005::e 
v3.cache2.c.googlesyndication.com +2404:6800:4005:1::e v3.cache3.c.googlesyndication.com +2404:6800:4005:1::e v3.cache4.c.googlesyndication.com +2404:6800:4005:6::e v3.cache5.c.googlesyndication.com +2404:6800:4005:6::e v3.cache6.c.googlesyndication.com +2404:6800:4005:3::e v3.cache7.c.googlesyndication.com +2404:6800:4005:7::e v3.cache8.c.googlesyndication.com +2404:6800:4005:4::12 v4.cache1.c.googlesyndication.com +2404:6800:4005::12 v4.cache2.c.googlesyndication.com +2404:6800:4005:1::12 v4.cache3.c.googlesyndication.com +2404:6800:4005:1::12 v4.cache4.c.googlesyndication.com +2404:6800:4005:2::12 v4.cache5.c.googlesyndication.com +2404:6800:4005:6::12 v4.cache6.c.googlesyndication.com +2404:6800:4005:7::12 v4.cache7.c.googlesyndication.com +2404:6800:4005:3::12 v4.cache8.c.googlesyndication.com +2404:6800:4005:4::16 v5.cache1.c.googlesyndication.com +2404:6800:4005::16 v5.cache2.c.googlesyndication.com +2404:6800:4005:5::16 v5.cache3.c.googlesyndication.com +2404:6800:4005:5::16 v5.cache4.c.googlesyndication.com +2404:6800:4005:2::16 v5.cache5.c.googlesyndication.com +2404:6800:4005:6::16 v5.cache6.c.googlesyndication.com +2404:6800:4005:7::16 v5.cache7.c.googlesyndication.com +2404:6800:4005:7::16 v5.cache8.c.googlesyndication.com +2404:6800:4005:4::7 v6.cache1.c.googlesyndication.com +2404:6800:4005:4::7 v6.cache2.c.googlesyndication.com +2404:6800:4005:1::7 v6.cache3.c.googlesyndication.com +2404:6800:4005:1::7 v6.cache4.c.googlesyndication.com +2404:6800:4005:2::7 v6.cache5.c.googlesyndication.com +2404:6800:4005:6::7 v6.cache6.c.googlesyndication.com +2404:6800:4005:7::7 v6.cache7.c.googlesyndication.com +2404:6800:4005:3::7 v6.cache8.c.googlesyndication.com +2404:6800:4005:4::b v7.cache1.c.googlesyndication.com +2404:6800:4005:4::b v7.cache2.c.googlesyndication.com +2404:6800:4005:5::b v7.cache3.c.googlesyndication.com +2404:6800:4005:5::b v7.cache4.c.googlesyndication.com +2404:6800:4005:2::b v7.cache5.c.googlesyndication.com +2404:6800:4005:2::b 
v7.cache6.c.googlesyndication.com +2404:6800:4005:7::b v7.cache7.c.googlesyndication.com +2404:6800:4005:7::b v7.cache8.c.googlesyndication.com +2404:6800:4005:4::f v8.cache1.c.googlesyndication.com +2404:6800:4005::f v8.cache2.c.googlesyndication.com +2404:6800:4005:1::f v8.cache3.c.googlesyndication.com +2404:6800:4005:5::f v8.cache4.c.googlesyndication.com +2404:6800:4005:2::f v8.cache5.c.googlesyndication.com +2404:6800:4005:2::f v8.cache6.c.googlesyndication.com +2404:6800:4005:3::f v8.cache7.c.googlesyndication.com +2404:6800:4005:3::f v8.cache8.c.googlesyndication.com +2404:6800:4005:4::13 v9.cache1.c.googlesyndication.com +2404:6800:4005:4::13 v9.cache2.c.googlesyndication.com +2404:6800:4005:1::13 v9.cache3.c.googlesyndication.com +2404:6800:4005:5::13 v9.cache4.c.googlesyndication.com +2404:6800:4005:6::13 v9.cache5.c.googlesyndication.com +2404:6800:4005:2::13 v9.cache6.c.googlesyndication.com +2404:6800:4005:3::13 v9.cache7.c.googlesyndication.com +2404:6800:4005:3::13 v9.cache8.c.googlesyndication.com +2404:6800:4005:4::17 v10.cache1.c.googlesyndication.com +2404:6800:4005:4::17 v10.cache2.c.googlesyndication.com +2404:6800:4005:1::17 v10.cache3.c.googlesyndication.com +2404:6800:4005:5::17 v10.cache4.c.googlesyndication.com +2404:6800:4005:6::17 v10.cache5.c.googlesyndication.com +2404:6800:4005:2::17 v10.cache6.c.googlesyndication.com +2404:6800:4005:7::17 v10.cache7.c.googlesyndication.com +2404:6800:4005:7::17 v10.cache8.c.googlesyndication.com +2404:6800:4005::8 v11.cache1.c.googlesyndication.com +2404:6800:4005::8 v11.cache2.c.googlesyndication.com +2404:6800:4005:1::8 v11.cache3.c.googlesyndication.com +2404:6800:4005:5::8 v11.cache4.c.googlesyndication.com +2404:6800:4005:6::8 v11.cache5.c.googlesyndication.com +2404:6800:4005:2::8 v11.cache6.c.googlesyndication.com +2404:6800:4005:7::8 v11.cache7.c.googlesyndication.com +2404:6800:4005:3::8 v11.cache8.c.googlesyndication.com +2404:6800:4005::c v12.cache1.c.googlesyndication.com 
+2404:6800:4005:4::c v12.cache2.c.googlesyndication.com +2404:6800:4005:1::c v12.cache3.c.googlesyndication.com +2404:6800:4005:1::c v12.cache4.c.googlesyndication.com +2404:6800:4005:2::c v12.cache5.c.googlesyndication.com +2404:6800:4005:6::c v12.cache6.c.googlesyndication.com +2404:6800:4005:7::c v12.cache7.c.googlesyndication.com +2404:6800:4005:7::c v12.cache8.c.googlesyndication.com +2404:6800:4005::10 v13.cache1.c.googlesyndication.com +2404:6800:4005::10 v13.cache2.c.googlesyndication.com +2404:6800:4005:1::10 v13.cache3.c.googlesyndication.com +2404:6800:4005:1::10 v13.cache4.c.googlesyndication.com +2404:6800:4005:6::10 v13.cache5.c.googlesyndication.com +2404:6800:4005:6::10 v13.cache6.c.googlesyndication.com +2404:6800:4005:7::10 v13.cache7.c.googlesyndication.com +2404:6800:4005:7::10 v13.cache8.c.googlesyndication.com +2404:6800:4005:4::14 v14.cache1.c.googlesyndication.com +2404:6800:4005:4::14 v14.cache2.c.googlesyndication.com +2404:6800:4005:1::14 v14.cache3.c.googlesyndication.com +2404:6800:4005:1::14 v14.cache4.c.googlesyndication.com +2404:6800:4005:6::14 v14.cache5.c.googlesyndication.com +2404:6800:4005:2::14 v14.cache6.c.googlesyndication.com +2404:6800:4005:3::14 v14.cache7.c.googlesyndication.com +2404:6800:4005:7::14 v14.cache8.c.googlesyndication.com +2404:6800:4005::18 v15.cache1.c.googlesyndication.com +2404:6800:4005::18 v15.cache2.c.googlesyndication.com +2404:6800:4005:5::18 v15.cache3.c.googlesyndication.com +2404:6800:4005:1::18 v15.cache4.c.googlesyndication.com +2404:6800:4005:6::18 v15.cache5.c.googlesyndication.com +2404:6800:4005:6::18 v15.cache6.c.googlesyndication.com +2404:6800:4005:3::18 v15.cache7.c.googlesyndication.com +2404:6800:4005:7::18 v15.cache8.c.googlesyndication.com +2404:6800:4005::9 v16.cache1.c.googlesyndication.com +2404:6800:4005::9 v16.cache2.c.googlesyndication.com +2404:6800:4005:1::9 v16.cache3.c.googlesyndication.com +2404:6800:4005:1::9 v16.cache4.c.googlesyndication.com +2404:6800:4005:2::9 
v16.cache5.c.googlesyndication.com +2404:6800:4005:6::9 v16.cache6.c.googlesyndication.com +2404:6800:4005:3::9 v16.cache7.c.googlesyndication.com +2404:6800:4005:3::9 v16.cache8.c.googlesyndication.com +2404:6800:4005:4::d v17.cache1.c.googlesyndication.com +2404:6800:4005:4::d v17.cache2.c.googlesyndication.com +2404:6800:4005:5::d v17.cache3.c.googlesyndication.com +2404:6800:4005:5::d v17.cache4.c.googlesyndication.com +2404:6800:4005:6::d v17.cache5.c.googlesyndication.com +2404:6800:4005:2::d v17.cache6.c.googlesyndication.com +2404:6800:4005:3::d v17.cache7.c.googlesyndication.com +2404:6800:4005:3::d v17.cache8.c.googlesyndication.com +2404:6800:4005::11 v18.cache1.c.googlesyndication.com +2404:6800:4005::11 v18.cache2.c.googlesyndication.com +2404:6800:4005:5::11 v18.cache3.c.googlesyndication.com +2404:6800:4005:5::11 v18.cache4.c.googlesyndication.com +2404:6800:4005:6::11 v18.cache5.c.googlesyndication.com +2404:6800:4005:2::11 v18.cache6.c.googlesyndication.com +2404:6800:4005:7::11 v18.cache7.c.googlesyndication.com +2404:6800:4005:3::11 v18.cache8.c.googlesyndication.com +2404:6800:4005:4::15 v19.cache1.c.googlesyndication.com +2404:6800:4005:4::15 v19.cache2.c.googlesyndication.com +2404:6800:4005:1::15 v19.cache3.c.googlesyndication.com +2404:6800:4005:1::15 v19.cache4.c.googlesyndication.com +2404:6800:4005:6::15 v19.cache5.c.googlesyndication.com +2404:6800:4005:2::15 v19.cache6.c.googlesyndication.com +2404:6800:4005:7::15 v19.cache7.c.googlesyndication.com +2404:6800:4005:7::15 v19.cache8.c.googlesyndication.com +2404:6800:4005:4::19 v20.cache1.c.googlesyndication.com +2404:6800:4005::19 v20.cache2.c.googlesyndication.com +2404:6800:4005:5::19 v20.cache3.c.googlesyndication.com +2404:6800:4005:1::19 v20.cache4.c.googlesyndication.com +2404:6800:4005:2::19 v20.cache5.c.googlesyndication.com +2404:6800:4005:6::19 v20.cache6.c.googlesyndication.com +2404:6800:4005:3::19 v20.cache7.c.googlesyndication.com +2404:6800:4005:7::19 
v20.cache8.c.googlesyndication.com +2404:6800:4005:4::6 v21.cache1.c.googlesyndication.com +2404:6800:4005:4::6 v21.cache2.c.googlesyndication.com +2404:6800:4005:5::6 v21.cache3.c.googlesyndication.com +2404:6800:4005:5::6 v21.cache4.c.googlesyndication.com +2404:6800:4005:2::6 v21.cache5.c.googlesyndication.com +2404:6800:4005:6::6 v21.cache6.c.googlesyndication.com +2404:6800:4005:3::6 v21.cache7.c.googlesyndication.com +2404:6800:4005:3::6 v21.cache8.c.googlesyndication.com +2404:6800:4005:4::a v22.cache1.c.googlesyndication.com +2404:6800:4005::a v22.cache2.c.googlesyndication.com +2404:6800:4005:5::a v22.cache3.c.googlesyndication.com +2404:6800:4005:1::a v22.cache4.c.googlesyndication.com +2404:6800:4005:6::a v22.cache5.c.googlesyndication.com +2404:6800:4005:2::a v22.cache6.c.googlesyndication.com +2404:6800:4005:3::a v22.cache7.c.googlesyndication.com +2404:6800:4005:7::a v22.cache8.c.googlesyndication.com +2404:6800:4005:4::e v23.cache1.c.googlesyndication.com +2404:6800:4005:4::e v23.cache2.c.googlesyndication.com +2404:6800:4005:1::e v23.cache3.c.googlesyndication.com +2404:6800:4005:1::e v23.cache4.c.googlesyndication.com +2404:6800:4005:6::e v23.cache5.c.googlesyndication.com +2404:6800:4005:2::e v23.cache6.c.googlesyndication.com +2404:6800:4005:7::e v23.cache7.c.googlesyndication.com +2404:6800:4005:7::e v23.cache8.c.googlesyndication.com +2404:6800:4005::12 v24.cache1.c.googlesyndication.com +2404:6800:4005:4::12 v24.cache2.c.googlesyndication.com +2404:6800:4005:5::12 v24.cache3.c.googlesyndication.com +2404:6800:4005:1::12 v24.cache4.c.googlesyndication.com +2404:6800:4005:6::12 v24.cache5.c.googlesyndication.com +2404:6800:4005:6::12 v24.cache6.c.googlesyndication.com +2404:6800:4005:7::12 v24.cache7.c.googlesyndication.com +2404:6800:4005:3::12 v24.cache8.c.googlesyndication.com +2404:6800:4005:4::6 v1.lscache1.c.googlesyndication.com +2404:6800:4005:4::6 v1.lscache2.c.googlesyndication.com +2404:6800:4005:1::6 
v1.lscache3.c.googlesyndication.com +2404:6800:4005:1::6 v1.lscache4.c.googlesyndication.com +2404:6800:4005:6::6 v1.lscache5.c.googlesyndication.com +2404:6800:4005:6::6 v1.lscache6.c.googlesyndication.com +2404:6800:4005:3::6 v1.lscache7.c.googlesyndication.com +2404:6800:4005:3::6 v1.lscache8.c.googlesyndication.com +2404:6800:4005::a v2.lscache1.c.googlesyndication.com +2404:6800:4005::a v2.lscache2.c.googlesyndication.com +2404:6800:4005:5::a v2.lscache3.c.googlesyndication.com +2404:6800:4005:5::a v2.lscache4.c.googlesyndication.com +2404:6800:4005:2::a v2.lscache5.c.googlesyndication.com +2404:6800:4005:2::a v2.lscache6.c.googlesyndication.com +2404:6800:4005:7::a v2.lscache7.c.googlesyndication.com +2404:6800:4005:7::a v2.lscache8.c.googlesyndication.com +2404:6800:4005::e v3.lscache1.c.googlesyndication.com +2404:6800:4005::e v3.lscache2.c.googlesyndication.com +2404:6800:4005:1::e v3.lscache3.c.googlesyndication.com +2404:6800:4005:1::e v3.lscache4.c.googlesyndication.com +2404:6800:4005:6::e v3.lscache5.c.googlesyndication.com +2404:6800:4005:6::e v3.lscache6.c.googlesyndication.com +2404:6800:4005:3::e v3.lscache7.c.googlesyndication.com +2404:6800:4005:7::e v3.lscache8.c.googlesyndication.com +2404:6800:4005:4::12 v4.lscache1.c.googlesyndication.com +2404:6800:4005::12 v4.lscache2.c.googlesyndication.com +2404:6800:4005:1::12 v4.lscache3.c.googlesyndication.com +2404:6800:4005:1::12 v4.lscache4.c.googlesyndication.com +2404:6800:4005:2::12 v4.lscache5.c.googlesyndication.com +2404:6800:4005:6::12 v4.lscache6.c.googlesyndication.com +2404:6800:4005:7::12 v4.lscache7.c.googlesyndication.com +2404:6800:4005:3::12 v4.lscache8.c.googlesyndication.com +2404:6800:4005:4::16 v5.lscache1.c.googlesyndication.com +2404:6800:4005::16 v5.lscache2.c.googlesyndication.com +2404:6800:4005:5::16 v5.lscache3.c.googlesyndication.com +2404:6800:4005:5::16 v5.lscache4.c.googlesyndication.com +2404:6800:4005:2::16 v5.lscache5.c.googlesyndication.com +2404:6800:4005:6::16 
v5.lscache6.c.googlesyndication.com +2404:6800:4005:7::16 v5.lscache7.c.googlesyndication.com +2404:6800:4005:7::16 v5.lscache8.c.googlesyndication.com +2404:6800:4005:4::7 v6.lscache1.c.googlesyndication.com +2404:6800:4005:4::7 v6.lscache2.c.googlesyndication.com +2404:6800:4005:1::7 v6.lscache3.c.googlesyndication.com +2404:6800:4005:1::7 v6.lscache4.c.googlesyndication.com +2404:6800:4005:2::7 v6.lscache5.c.googlesyndication.com +2404:6800:4005:6::7 v6.lscache6.c.googlesyndication.com +2404:6800:4005:7::7 v6.lscache7.c.googlesyndication.com +2404:6800:4005:3::7 v6.lscache8.c.googlesyndication.com +2404:6800:4005:4::b v7.lscache1.c.googlesyndication.com +2404:6800:4005:4::b v7.lscache2.c.googlesyndication.com +2404:6800:4005:5::b v7.lscache3.c.googlesyndication.com +2404:6800:4005:5::b v7.lscache4.c.googlesyndication.com +2404:6800:4005:2::b v7.lscache5.c.googlesyndication.com +2404:6800:4005:2::b v7.lscache6.c.googlesyndication.com +2404:6800:4005:7::b v7.lscache7.c.googlesyndication.com +2404:6800:4005:7::b v7.lscache8.c.googlesyndication.com +2404:6800:4005:4::f v8.lscache1.c.googlesyndication.com +2404:6800:4005::f v8.lscache2.c.googlesyndication.com +2404:6800:4005:1::f v8.lscache3.c.googlesyndication.com +2404:6800:4005:5::f v8.lscache4.c.googlesyndication.com +2404:6800:4005:2::f v8.lscache5.c.googlesyndication.com +2404:6800:4005:2::f v8.lscache6.c.googlesyndication.com +2404:6800:4005:3::f v8.lscache7.c.googlesyndication.com +2404:6800:4005:3::f v8.lscache8.c.googlesyndication.com +2404:6800:4005:4::13 v9.lscache1.c.googlesyndication.com +2404:6800:4005:4::13 v9.lscache2.c.googlesyndication.com +2404:6800:4005:1::13 v9.lscache3.c.googlesyndication.com +2404:6800:4005:5::13 v9.lscache4.c.googlesyndication.com +2404:6800:4005:6::13 v9.lscache5.c.googlesyndication.com +2404:6800:4005:2::13 v9.lscache6.c.googlesyndication.com +2404:6800:4005:3::13 v9.lscache7.c.googlesyndication.com +2404:6800:4005:3::13 v9.lscache8.c.googlesyndication.com 
+2404:6800:4005:4::17 v10.lscache1.c.googlesyndication.com +2404:6800:4005:4::17 v10.lscache2.c.googlesyndication.com +2404:6800:4005:1::17 v10.lscache3.c.googlesyndication.com +2404:6800:4005:5::17 v10.lscache4.c.googlesyndication.com +2404:6800:4005:6::17 v10.lscache5.c.googlesyndication.com +2404:6800:4005:2::17 v10.lscache6.c.googlesyndication.com +2404:6800:4005:7::17 v10.lscache7.c.googlesyndication.com +2404:6800:4005:7::17 v10.lscache8.c.googlesyndication.com +2404:6800:4005::8 v11.lscache1.c.googlesyndication.com +2404:6800:4005::8 v11.lscache2.c.googlesyndication.com +2404:6800:4005:1::8 v11.lscache3.c.googlesyndication.com +2404:6800:4005:5::8 v11.lscache4.c.googlesyndication.com +2404:6800:4005:6::8 v11.lscache5.c.googlesyndication.com +2404:6800:4005:2::8 v11.lscache6.c.googlesyndication.com +2404:6800:4005:7::8 v11.lscache7.c.googlesyndication.com +2404:6800:4005:3::8 v11.lscache8.c.googlesyndication.com +2404:6800:4005::c v12.lscache1.c.googlesyndication.com +2404:6800:4005:4::c v12.lscache2.c.googlesyndication.com +2404:6800:4005:1::c v12.lscache3.c.googlesyndication.com +2404:6800:4005:1::c v12.lscache4.c.googlesyndication.com +2404:6800:4005:2::c v12.lscache5.c.googlesyndication.com +2404:6800:4005:6::c v12.lscache6.c.googlesyndication.com +2404:6800:4005:7::c v12.lscache7.c.googlesyndication.com +2404:6800:4005:7::c v12.lscache8.c.googlesyndication.com +2404:6800:4005::10 v13.lscache1.c.googlesyndication.com +2404:6800:4005::10 v13.lscache2.c.googlesyndication.com +2404:6800:4005:1::10 v13.lscache3.c.googlesyndication.com +2404:6800:4005:1::10 v13.lscache4.c.googlesyndication.com +2404:6800:4005:6::10 v13.lscache5.c.googlesyndication.com +2404:6800:4005:6::10 v13.lscache6.c.googlesyndication.com +2404:6800:4005:7::10 v13.lscache7.c.googlesyndication.com +2404:6800:4005:7::10 v13.lscache8.c.googlesyndication.com +2404:6800:4005:4::14 v14.lscache1.c.googlesyndication.com +2404:6800:4005:4::14 v14.lscache2.c.googlesyndication.com 
+2404:6800:4005:1::14 v14.lscache3.c.googlesyndication.com +2404:6800:4005:1::14 v14.lscache4.c.googlesyndication.com +2404:6800:4005:6::14 v14.lscache5.c.googlesyndication.com +2404:6800:4005:2::14 v14.lscache6.c.googlesyndication.com +2404:6800:4005:3::14 v14.lscache7.c.googlesyndication.com +2404:6800:4005:7::14 v14.lscache8.c.googlesyndication.com +2404:6800:4005::18 v15.lscache1.c.googlesyndication.com +2404:6800:4005::18 v15.lscache2.c.googlesyndication.com +2404:6800:4005:5::18 v15.lscache3.c.googlesyndication.com +2404:6800:4005:1::18 v15.lscache4.c.googlesyndication.com +2404:6800:4005:6::18 v15.lscache5.c.googlesyndication.com +2404:6800:4005:6::18 v15.lscache6.c.googlesyndication.com +2404:6800:4005:3::18 v15.lscache7.c.googlesyndication.com +2404:6800:4005:7::18 v15.lscache8.c.googlesyndication.com +2404:6800:4005::9 v16.lscache1.c.googlesyndication.com +2404:6800:4005::9 v16.lscache2.c.googlesyndication.com +2404:6800:4005:1::9 v16.lscache3.c.googlesyndication.com +2404:6800:4005:1::9 v16.lscache4.c.googlesyndication.com +2404:6800:4005:2::9 v16.lscache5.c.googlesyndication.com +2404:6800:4005:6::9 v16.lscache6.c.googlesyndication.com +2404:6800:4005:3::9 v16.lscache7.c.googlesyndication.com +2404:6800:4005:3::9 v16.lscache8.c.googlesyndication.com +2404:6800:4005:4::d v17.lscache1.c.googlesyndication.com +2404:6800:4005:4::d v17.lscache2.c.googlesyndication.com +2404:6800:4005:5::d v17.lscache3.c.googlesyndication.com +2404:6800:4005:5::d v17.lscache4.c.googlesyndication.com +2404:6800:4005:6::d v17.lscache5.c.googlesyndication.com +2404:6800:4005:2::d v17.lscache6.c.googlesyndication.com +2404:6800:4005:3::d v17.lscache7.c.googlesyndication.com +2404:6800:4005:3::d v17.lscache8.c.googlesyndication.com +2404:6800:4005::11 v18.lscache1.c.googlesyndication.com +2404:6800:4005::11 v18.lscache2.c.googlesyndication.com +2404:6800:4005:5::11 v18.lscache3.c.googlesyndication.com +2404:6800:4005:5::11 v18.lscache4.c.googlesyndication.com +2404:6800:4005:6::11 
v18.lscache5.c.googlesyndication.com +2404:6800:4005:2::11 v18.lscache6.c.googlesyndication.com +2404:6800:4005:7::11 v18.lscache7.c.googlesyndication.com +2404:6800:4005:3::11 v18.lscache8.c.googlesyndication.com +2404:6800:4005:4::15 v19.lscache1.c.googlesyndication.com +2404:6800:4005:4::15 v19.lscache2.c.googlesyndication.com +2404:6800:4005:1::15 v19.lscache3.c.googlesyndication.com +2404:6800:4005:1::15 v19.lscache4.c.googlesyndication.com +2404:6800:4005:6::15 v19.lscache5.c.googlesyndication.com +2404:6800:4005:2::15 v19.lscache6.c.googlesyndication.com +2404:6800:4005:7::15 v19.lscache7.c.googlesyndication.com +2404:6800:4005:7::15 v19.lscache8.c.googlesyndication.com +2404:6800:4005:4::19 v20.lscache1.c.googlesyndication.com +2404:6800:4005::19 v20.lscache2.c.googlesyndication.com +2404:6800:4005:5::19 v20.lscache3.c.googlesyndication.com +2404:6800:4005:1::19 v20.lscache4.c.googlesyndication.com +2404:6800:4005:2::19 v20.lscache5.c.googlesyndication.com +2404:6800:4005:6::19 v20.lscache6.c.googlesyndication.com +2404:6800:4005:3::19 v20.lscache7.c.googlesyndication.com +2404:6800:4005:7::19 v20.lscache8.c.googlesyndication.com +2404:6800:4005:4::6 v21.lscache1.c.googlesyndication.com +2404:6800:4005:4::6 v21.lscache2.c.googlesyndication.com +2404:6800:4005:5::6 v21.lscache3.c.googlesyndication.com +2404:6800:4005:5::6 v21.lscache4.c.googlesyndication.com +2404:6800:4005:2::6 v21.lscache5.c.googlesyndication.com +2404:6800:4005:6::6 v21.lscache6.c.googlesyndication.com +2404:6800:4005:3::6 v21.lscache7.c.googlesyndication.com +2404:6800:4005:3::6 v21.lscache8.c.googlesyndication.com +2404:6800:4005:4::a v22.lscache1.c.googlesyndication.com +2404:6800:4005::a v22.lscache2.c.googlesyndication.com +2404:6800:4005:5::a v22.lscache3.c.googlesyndication.com +2404:6800:4005:1::a v22.lscache4.c.googlesyndication.com +2404:6800:4005:6::a v22.lscache5.c.googlesyndication.com +2404:6800:4005:2::a v22.lscache6.c.googlesyndication.com +2404:6800:4005:3::a 
v22.lscache7.c.googlesyndication.com +2404:6800:4005:7::a v22.lscache8.c.googlesyndication.com +2404:6800:4005:4::e v23.lscache1.c.googlesyndication.com +2404:6800:4005:4::e v23.lscache2.c.googlesyndication.com +2404:6800:4005:1::e v23.lscache3.c.googlesyndication.com +2404:6800:4005:1::e v23.lscache4.c.googlesyndication.com +2404:6800:4005:6::e v23.lscache5.c.googlesyndication.com +2404:6800:4005:2::e v23.lscache6.c.googlesyndication.com +2404:6800:4005:7::e v23.lscache7.c.googlesyndication.com +2404:6800:4005:7::e v23.lscache8.c.googlesyndication.com +2404:6800:4005::12 v24.lscache1.c.googlesyndication.com +2404:6800:4005:4::12 v24.lscache2.c.googlesyndication.com +2404:6800:4005:5::12 v24.lscache3.c.googlesyndication.com +2404:6800:4005:1::12 v24.lscache4.c.googlesyndication.com +2404:6800:4005:6::12 v24.lscache5.c.googlesyndication.com +2404:6800:4005:6::12 v24.lscache6.c.googlesyndication.com +2404:6800:4005:7::12 v24.lscache7.c.googlesyndication.com +2404:6800:4005:3::12 v24.lscache8.c.googlesyndication.com +2404:6800:8005::70 adwords.google.com +2404:6800:8005::41 adwords.google.sk +2404:6800:8005::a7 partner.googleadservices.com +2404:6800:8005::a7 partnerad.l.google.com +#DoubleClick +2404:6800:8005::94 ad.doubleclick.net +2404:6800:8005::95 ad-g.doubleclick.net +2404:6800:8005::95 ad-apac.doubleclick.net +2404:6800:8005::95 dart-apac.l.doubleclick.net +2404:6800:8005::95 dart.l.doubleclick.net +2404:6800:8005::95 n4061ad.hk.doubleclick.net +2404:6800:8005::95 fls.doubleclick.net +2404:6800:8005::9a googleads.g.doubleclick.net +2404:6800:8005::9b feedads.g.doubleclick.net +2404:6800:8005::9c pubads.g.doubleclick.net +2404:6800:8005::9c www.googletagservices.com +2404:6800:8005::9c partnerad.l.doubleclick.net +2404:6800:8005::9d s0.2mdn.net +2404:6800:8005::9d s1.2mdn.net +2404:6800:8005::90 fls.uk.doubleclick.net +2404:6800:8005::8e *.au.doubleclick.net +2404:6800:8005::8f *.de.doubleclick.net +2404:6800:8005::90 *.uk.doubleclick.net +2404:6800:8005::90 
*.fr.doubleclick.net +2404:6800:8005::92 *.jp.doubleclick.net + +#Analytics 分析 +2404:6800:8005::71 www.google-analytics.com +2404:6800:8005::61 ssl.google-analytics.com +2404:6800:8005::61 *.google-analytics.com +2404:6800:8005::b8 service.urchin.com +2404:6800:8005::71 www-google-analytics.l.google.com +2404:6800:8005::61 ssl-google-analytics.l.google.com +2404:6800:8005::93 analytics.google.com + +#Android Google 移动操作系统 +2404:6800:8005::62 www.android.com +2404:6800:8005::62 android.com +2404:6800:8005::8a developer.android.com +2404:6800:8005::62 source.android.com +2404:6800:8005::62 market.android.com +#2404:6800:8005::8b android.clients.google.com #未实际测试过 + +#Google Art Project +2404:6800:8005::79 www.googleartproject.com +2404:6800:8005::62 art-project-media.appspot.com + +#FeedBurner +2404:6800:8005::62 feedburner.google.com +2404:6800:8005::62 www.feedburner.com +2404:6800:8005::62 feeds.feedburner.com +2404:6800:8005::62 feeds2.feedburner.com +2404:6800:8005::76 feedproxy.google.com #Feed 跳转代理 + +#The Go Programming Language Go 编程语言 +2404:6800:8005::62 golang.org +2404:6800:8005::62 www.golang.org +2404:6800:8005::79 blog.golang.org + +#Goo.gl Google短网址服务 +2404:6800:8005::65 goo.gl +2404:6800:8005::65 g.co + +#html5rocks HTML5 推广专题网站 +2404:6800:8005::79 www.html5rocks.com +2404:6800:8005::79 slides.html5rocks.com +2404:6800:8005::79 playground.html5rocks.com +2404:6800:8005::79 studio.html5rocks.com + +#Google.org Google 公益 +2404:6800:8005::79 blog.google.org + +#Panoramio +2404:6800:8002::8d www.panoramio.com +2404:6800:8005::80 static.panoramio.com +2404:6800:8005::79 blog.panoramio.com + +#Recaptcha 验证码服务 +2404:6800:8005::69 www.recaptcha.net +2404:6800:8005::93 api.recaptcha.net + +#Google Wave Federation Protocol · GWave 协议开源计划 +2404:6800:8005::79 www.waveprotocol.org + +#The WebM Project WebM 开放多媒体项目 +2404:6800:8005::65 webmproject.org +2404:6800:8005::65 www.webmproject.org +2404:6800:8005::79 lists.webmproject.org +2404:6800:8005::79 
blog.webmproject.org +2404:6800:8005::79 www.webm-ccl.org + +#Chrome 相关专题网站 +2404:6800:8005::8b www.chromercise.com +2404:6800:8005::79 www.chromeexperiments.com +2404:6800:8005::79 www.20thingsilearned.com + +#IPv6 专题 +2404:6800:8005::65 ipv6test.google.com +2404:6800:8005::65 ipv4.google.com +#2404:6800:8005::68 ??????.ds.ipv6test.google.com #四级域名为随机六位数字 +#2404:6800:8005::93 p2.*.*.??????.i2.ds.ipv6-exp.l.google.com +2404:6800:8005::69 ds.ipv6test.l.google.com +2404:6800:8005::6a ds.ipv6test.l.google.com + +#其他专题站 +2404:6800:8005::66 www.data-vocabulary.org +2404:6800:8005::65 www.googleinsidesearch.com +2404:6800:8005::79 www.teachparentstech.org +2404:6800:8005::76 www.agoogleaday.com +2404:6800:8005::79 www.gosetsuden.jp +2404:6800:8005::79 www.emailintervention.com +2404:6800:8005::79 www.thegooglepuzzle.com +2404:6800:8005::79 www.thegobridgeoglepuzzle.com +2404:6800:8005::8a www.oneworldmanystories.com +2404:6800:8005::79 www.googlezeitgeist.com +2404:6800:8005::66 www.googleinsidesearch.com + +###其他主流服务 +#bit.ly / j.mp +#2001:418:e801::10:1 bit.ly +#2001:418:e801::11:1 bit.ly +#2001:418:e801::12:1 bit.ly +#2001:418:e801::15:1 bit.ly +#2001:418:e801::10:2 j.mp +#2001:418:e801::11:2 j.mp +#2001:418:e801::12:2 j.mp +#2001:418:e801::15:2 j.mp +#2001:418:e801::10:2 bitly.com +#2001:418:e801::11:2 bitly.com +#2001:418:e801::13:2 bitly.com +#2001:418:e801::15:2 bitly.com +#2001:418:e801::10:7 api.bit.ly +#2001:418:e801::11:7 api.bit.ly +#2001:418:e801::12:7 api.bit.ly +#2001:418:e801::15:7 api.bit.ly +#2001:418:e801::10:7 api.bitly.com +#2001:418:e801::11:7 api.bitly.com +#2001:418:e801::12:7 api.bitly.com +#2001:418:e801::15:7 api.bitly.com + +#Facebook 脸谱网(尚未完全部署) +2620:0:1cfe:face:b00c::3 www.v6.facebook.com #Facebook(官方指定主机名) +2620:0:1cfe:face:b00c::3 attachments.facebook.com +2620:0:1cfe:face:b00c::6 apps.v6.facebook.com #网页应用 +2620:0:1cfe:face:b00c::6 apps.facebook.com +2620:0:1cfe:face:b00c::3 graph.facebook.com +2620:0:1cfe:face:b00c::4 
login.v6.facebook.com #登录(官方指定主机名) +2620:0:1c18:0:face:b00c:0:2 www.facebook.com +2620:0:1c00:0:face:b00c:0:1 zh-cn.facebook.com +2620:0:1c00:0:face:b00c:0:1 zh-tw.facebook.com +2620:0:1cfe:face:b00c::3 facebook.com #Facebook(非官方指定但可用,不推荐) +2620:0:1cfe:face:b00c::4 login.facebook.com #登录(非官方指定但可用,不推荐,会有证书警告) +2620:0:1cfe:face:b00c::3 12.channel.facebook.com +2620:0:1cfe:face:b00c::3 12.ctest.facebook.com +#如需正常使用页面底部聊天功能,请通过 pac 代理 *.12.channel.facebook.com (不支持 ipv6) +2620:0:1c00:0:face:b00c:0:5 developers.facebook.com +2620:0:1cfe:face:b00c::3 developers.v6.facebook.com +2620:0:1cfe:face:b00c::3 external.ak.fbcdn.net +2620:0:1cfe:face:b00c::3 error.12.channel.facebook.com +2620:0:1cfe:face:b00c::3 pixel.facebook.com +2620:0:1cfe:face:b00c::3 pixel.v6.facebook.com +2620:0:1cfe:face:b00c::7 m.v6.facebook.com #移动版 +2620:0:1cfe:face:b00c::7 m.facebook.com +2610:d0:face::9 www.lisp6.facebook.com +2610:d0:face::9 m.lisp6.facebook.com +2620:0:1c00:0:face:b00c:0:3 check6.facebook.com +#Facebook CDN +2620:0:1cfe:face:b00c::3 static.ak.fbcdn.net #网站资源(图像、脚本等)新机制尚未部署至 ipv6 +#2620:0:1cfe:face:b00c::4 s-static.ak.facebook.com #https 连接用网站资源,证书有问题 + +#Plurk +2607:f128:47::1:1 plurk.com +2607:f128:47::1:1 www.plurk.com +2607:f128:47::1:1 ipv6.plurk.com + +###其他IPv6网站 +##CN 中国大陆 +#GuAo.hk 谷奥 +2001:470:a803::250 www.guao.hk +2001:470:a803::250 guao.hk +2600:3c00::f03c:91ff:fe93:67f2 img.gd + +#kenengba.com 可能吧 +2001:470:18:5f5::2 www.kenengba.com +2001:470:18:5f5::2 m.kenengba.com + +#Shooter 射手网 +2001:da8:205::6 shooter.cn +2001:da8:205::6 edu.shooter.cn +2001:da8:205::6 edufile0.shooter.cn + +#Yegle 一阁 +2001:470:0:19b::b869:805d yegle.net + +##US 美国 +#Mozilla +2620:101:8001:5::2:4 www.mozilla.org +2620:101:8001:5::2:5 www.mozilla.com +2620:101:8001:5::2:6 addons.mozilla.org +2620:101:8001:5::2:8 www.firefox.com + +#Microsoft 微软 +2002:836b:9820::836b:9886 www.ipv6.microsoft.com + +#WikiMedia 维基媒体 +#2620:0:860:2:21d:9ff:fe33:f235 static.wikipedia.org #Wikipedia Static HTML Dumps 
+#2620:0:860:2:21d:9ff:fe33:f235 hume.wikimedia.org #Wikipedia Static HTML Dumps +#2620:0:860:2:230:48ff:fe5a:eb1e download.wikimedia.org #Wikimedia Downloads +#2620:0:860:2:230:48ff:fe5a:eb1e download.wikipedia.org #Wikimedia Downloads +#2620:0:860:2:230:48ff:fe5a:eb1e dumps.wikimedia.org #Wikimedia Downloads +#2620:0:860:2:230:48ff:fe5a:eb1e dataset2.wikimedia.org #Wikimedia Downloads +2620:0:862:1::25:1 lists.wikimedia.org +#2620:0:862:1:209:3dff:fe10:d5d2 svn.wikimedia.org #Wikimedia Subversion +#2620:0:862:1:209:3dff:fe10:d5d2 mayflower.esams.wikimedia.org +2620:0:862:1:6666:6666:6666:6666 ipv6and4.labs.wikimedia.org #Wikimedia IPv6 AAAA record testing + +#ARIN +2001:500:4:13::81 www.arin.net +2001:500:4:13::80 www.arin.net + +##AU 澳大利亚 +#APNIC +2001:dc0:2001:11::211 www.apnic.net +2001:dc0:2001::14 wq.apnic.net + +##JP 日本 +#JAIST 北陸先端科学技術大学院大学 +2001:200:141:feed::feed ftp.jaist.ac.jp +2001:200:141:feed::feed jaist.dl.sourceforge.net + +##NL 荷兰 +#Ubuntu Archive +2001:7b8:3:37::21:2 ipv6.torrent.ubuntu.com #IPv6 Tracker +2001:7b8:3:37::21:2 nl.archive.ubuntu.com #Archive Mirror NL + +#其他托管在 Google 的服务 +#BlogSpot 上的其他常用博客 +2404:6800:8005::62 googlesystem.blogspot.com #Google Operating System +2404:6800:8005::62 ipv6-or-no-ipv6.blogspot.com #IPv6 Related Stuff +2404:6800:8005::62 gfwrev.blogspot.com #GFW 技术评论 +2404:6800:8005::62 kamonka.blogspot.com #NHK 纪录片精选 + +#Google Code 上的其他常用项目 +2404:6800:8005::52 ipv6-hosts.googlecode.com +2404:6800:8005::52 kibbles.googlecode.com + +# GAE 上的常用应用 +2404:6800:8002::8d wave-api.appspot.com #Google Wave API +2404:6800:8002::8d wave-skynet.appspot.com #SkyNet +2404:6800:8002::8d mytracks.appspot.com #My Tracks for Android +2404:6800:8002::8d autoproxy2pac.appspot.com #自动代理配置脚本生成器 +2404:6800:8002::8d fakegdict.appspot.com #Fake Google Dictionary + +# 托管在 Google 上的其他常用域名 +2404:6800:8005::79 wdyl.com +2404:6800:8005::79 www.wdyl.com +2404:6800:8005::79 blog.twitter.com +2404:6800:8005::79 blog.opensocial.org + +### IPv6/IPv4 转换代理 
+# 以下地址均非各网站官方地址,属于 LITNET-NAT64 网段提供转换代理服务(技术细节)。 +# DNS64 服务器: +2001:778::37 dns64.litnet.lt + +#bit.ly / j.mp +2001:778:0:ffff:64:0:a88f:ac35 bit.ly +2001:778:0:ffff:64:0:a88f:ac35 j.mp +2001:778:0:ffff:64:0:a88f:ac36 api.bit.ly +2001:778:0:ffff:64:0:a88f:ac36 api.bitly.com +2001:778:0:ffff:64:0:a88f:ac39 bitly.com + +#Facebook CDN +2001:778:0:ffff:64:0:5f64:926e s-static.ak.facebook.com +2001:778:0:ffff:64:0:42dc:9747 0-12.channel.facebook.com +2001:778:0:ffff:64:0:42dc:9747 1-12.channel.facebook.com +2001:778:0:ffff:64:0:42dc:9747 2-12.channel.facebook.com +2001:778:0:ffff:64:0:42dc:9747 3-12.channel.facebook.com +2001:778:0:ffff:64:0:42dc:9747 4-12.channel.facebook.com +2001:778:0:ffff:64:0:42dc:9747 5-12.channel.facebook.com +2001:778:0:ffff:64:0:42dc:9747 6-12.channel.facebook.com +2001:778:0:ffff:64:0:5435:a7ce s-external.ak.fbcdn.net +2001:778:0:ffff:64:0:42dc:9a1d hphotos-iad1.fbcdn.net +2001:778:0:ffff:64:0:453f:b897 hphotos-ash1.fbcdn.net +2001:778:0:ffff:64:0:42dc:9a23 hphotos-ash2.fbcdn.net +2001:778:0:ffff:64:0:45ab:f00a hphotos-ash4.fbcdn.net +2001:778:0:ffff:64:0:453f:b723 hphotos-sjc1.fbcdn.net +2001:778:0:ffff:64:0:453f:b4bb hphotos-snc1.fbcdn.net +2001:778:0:ffff:64:0:453f:b703 hphotos-snc3.fbcdn.net +2001:778:0:ffff:64:0:453f:b743 hphotos-snc4.fbcdn.net +2001:778:0:ffff:64:0:42dc:9717 hphotos-snc6.fbcdn.net +2001:778:0:ffff:64:0:45ab:e318 hphotos-snc7.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4408 creative.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4421 creative.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f53 photos-a.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f73 photos-e.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f7b photos-i.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f80 photos-m.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f92 photos-q.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f99 photos-u.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f99 photos-y.ak.fbcdn.net#photos-a.ak.facebook.com.edgesuite.net +2001:778:0:ffff:64:0:c12d:f59 photos-b.ak.fbcdn.net 
+2001:778:0:ffff:64:0:c12d:f60 photos-f.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f63 photos-j.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f91 photos-n.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f91 photos-r.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f91 photos-v.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f91 photos-z.ak.fbcdn.net#photos-b.ak.facebook.com.edgesuite.net +2001:778:0:ffff:64:0:c12d:f52 photos-c.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f58 photos-g.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f5a photos-k.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f89 photos-o.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f8b photos-s.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f8b photos-w.ak.fbcdn.net#photos-c.ak.facebook.com.edgesuite.net +2001:778:0:ffff:64:0:c12d:f61 photos-d.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f6b photos-h.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f71 photos-l.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f88 photos-p.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f90 photos-t.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f90 photos-x.ak.fbcdn.net#photos-d.ak.facebook.com.edgesuite.net +2001:778:0:ffff:64:0:5c7b:4491 a1.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:449b a1.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:44a2 a1.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:44a3 a1.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:44a9 a1.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:44ab a1.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:44b1 a1.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:446b a2.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4473 a2.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4479 a2.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:447a a2.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4480 a2.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4481 a2.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4498 a2.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:449a a2.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:44a0 a2.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4510 
a3.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4520 a3.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4523 a3.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:452b a3.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:450a a4.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:450b a4.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4528 a4.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4529 a4.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4469 a5.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4471 a5.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4473 a5.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4491 a5.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:449a a5.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:44a0 a5.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:44a1 a5.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:44a2 a5.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:44b1 a5.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:446a a6.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4473 a6.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4478 a6.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:447a a6.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:447b a6.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4482 a6.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4483 a6.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:448a a6.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:44b3 a6.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7a:d4d9 a7.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7a:d4db a7.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7a:d4e3 a7.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7a:d509 a7.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7a:d513 a7.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7a:d522 a7.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7a:d523 a7.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7a:d529 a7.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7a:d52a a7.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7a:d4c9 a8.sphotos.ak.fbcdn.net 
+2001:778:0:ffff:64:0:5c7a:d4ca a8.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7a:d4d1 a8.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7a:d4e2 a8.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7a:d4ea a8.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7a:d4fa a8.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7a:d508 a8.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7a:d509 a8.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7a:d52b a8.sphotos.ak.fbcdn.net +2001:778:0:ffff:64:0:40d6:ce19 profile.ak.fbcdn.net +2001:778:0:ffff:64:0:40d6:ce1a profile.ak.fbcdn.net +2001:778:0:ffff:64:0:40d6:ce20 profile.ak.fbcdn.net +2001:778:0:ffff:64:0:40d6:ce29 profile.ak.fbcdn.net +2001:778:0:ffff:64:0:40d6:ce08 profile.ak.fbcdn.net +2001:778:0:ffff:64:0:40d6:ce09 profile.ak.fbcdn.net +2001:778:0:ffff:64:0:40d6:ce12 profile.ak.fbcdn.net +2001:778:0:ffff:64:0:40d6:ce13 profile.ak.fbcdn.net +2001:778:0:ffff:64:0:40d6:ce18 profile.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:442b static.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4422 static.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4478 vthumb.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:447b vthumb.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4481 vthumb.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4483 vthumb.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:448b vthumb.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4498 vthumb.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:4499 vthumb.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:449a vthumb.ak.fbcdn.net +2001:778:0:ffff:64:0:5c7b:44b0 vthumb.ak.fbcdn.net +2001:778:0:ffff:64:0:c12d:f58 fbcdn-photos-a.akamaihd.net +2001:778:0:ffff:64:0:c12d:f59 fbcdn-photos-a.akamaihd.net +2001:778:0:ffff:64:0:c12d:f60 fbcdn-photos-a.akamaihd.net +2001:778:0:ffff:64:0:c12d:f63 fbcdn-photos-a.akamaihd.net +2001:778:0:ffff:64:0:c12d:f70 fbcdn-photos-a.akamaihd.net +2001:778:0:ffff:64:0:c12d:f73 fbcdn-photos-a.akamaihd.net +2001:778:0:ffff:64:0:c12d:f80 fbcdn-photos-a.akamaihd.net +2001:778:0:ffff:64:0:c12d:f8b fbcdn-photos-a.akamaihd.net +2001:778:0:ffff:64:0:c12d:f92 
fbcdn-photos-a.akamaihd.net +2001:778:0:ffff:64:0:40d6:ce0a fbcdn-profile-a.akamaihd.net +2001:778:0:ffff:64:0:40d6:ce21 fbcdn-profile-a.akamaihd.net +2001:778:0:ffff:64:0:40d6:ce22 fbcdn-profile-a.akamaihd.net +2001:778:0:ffff:64:0:40d6:ce29 fbcdn-profile-a.akamaihd.net +2001:778:0:ffff:64:0:5c7a:d4fb fbcdn-sphotos-a.akamaihd.net +2001:778:0:ffff:64:0:5c7a:d4e0 fbcdn-sphotos-a.akamaihd.net +2001:778:0:ffff:64:0:5c7a:d4e2 fbcdn-sphotos-a.akamaihd.net +2001:778:0:ffff:64:0:5c7b:4470 fbcdn-vthumb-a.akamaihd.net +2001:778:0:ffff:64:0:5c7b:4472 fbcdn-vthumb-a.akamaihd.net +2001:778:0:ffff:64:0:5c7b:447b fbcdn-vthumb-a.akamaihd.net +2001:778:0:ffff:64:0:5c7b:4483 fbcdn-vthumb-a.akamaihd.net +2001:778:0:ffff:64:0:5c7b:4488 fbcdn-vthumb-a.akamaihd.net +2001:778:0:ffff:64:0:5c7b:448b fbcdn-vthumb-a.akamaihd.net +2001:778:0:ffff:64:0:5c7b:4498 fbcdn-vthumb-a.akamaihd.net +2001:778:0:ffff:64:0:5c7b:44b0 fbcdn-vthumb-a.akamaihd.net +2001:778:0:ffff:64:0:5c7b:44b3 fbcdn-vthumb-a.akamaihd.net + +#Twitter +2001:778:0:ffff:64:0:c73b:95e6 twitter.com +2001:778:0:ffff:64:0:c73b:940a twitter.com +2001:778:0:ffff:64:0:c73b:9452 twitter.com +2001:778:0:ffff:64:0:c73b:9415 assets0.twitter.com +2001:778:0:ffff:64:0:c73b:945c assets0.twitter.com +2001:778:0:ffff:64:0:c73b:95c7 assets0.twitter.com +2001:778:0:ffff:64:0:c73b:95e7 assets0.twitter.com +2001:778:0:ffff:64:0:c73b:9415 assets1.twitter.com +2001:778:0:ffff:64:0:c73b:945c assets1.twitter.com +2001:778:0:ffff:64:0:c73b:95c7 assets1.twitter.com +2001:778:0:ffff:64:0:c73b:95e7 assets1.twitter.com +2001:778:0:ffff:64:0:c73b:9415 assets2.twitter.com +2001:778:0:ffff:64:0:c73b:945c assets2.twitter.com +2001:778:0:ffff:64:0:c73b:95c7 assets2.twitter.com +2001:778:0:ffff:64:0:c73b:95e7 assets2.twitter.com +2001:778:0:ffff:64:0:c73b:9415 assets3.twitter.com +2001:778:0:ffff:64:0:c73b:945c assets3.twitter.com +2001:778:0:ffff:64:0:c73b:95c7 assets3.twitter.com +2001:778:0:ffff:64:0:c73b:95e7 assets3.twitter.com 
+2001:778:0:ffff:64:0:c73b:9415 assets4.twitter.com +2001:778:0:ffff:64:0:c73b:945c assets4.twitter.com +2001:778:0:ffff:64:0:c73b:95c7 assets4.twitter.com +2001:778:0:ffff:64:0:c73b:95e7 assets4.twitter.com +2001:778:0:ffff:64:0:c73b:95e8 api.twitter.com +2001:778:0:ffff:64:0:c73b:9414 api.twitter.com +2001:778:0:ffff:64:0:c73b:9457 api.twitter.com +2001:778:0:ffff:64:0:c73b:95c8 api.twitter.com +2001:778:0:ffff:64:0:210:2537 cdn.api.twitter.com +2001:778:0:ffff:64:0:5c7b:4429 urls.api.twitter.com +2001:778:0:ffff:64:0:5c7b:4439 urls.api.twitter.com +2001:778:0:ffff:64:0:c73b:9454 business.twitter.com +2001:778:0:ffff:64:0:c73b:95d0 business.twitter.com +2001:778:0:ffff:64:0:c73b:95f3 business.twitter.com +2001:778:0:ffff:64:0:c73b:940b business.twitter.com +2001:778:0:ffff:64:0:c73b:9589 dev.twitter.com +2001:778:0:ffff:64:0:c73b:94ce help.twitter.com +2001:778:0:ffff:64:0:c73b:95f0 m.twitter.com +2001:778:0:ffff:64:0:c73b:9460 m.twitter.com +2001:778:0:ffff:64:0:adcb:d34a media.twitter.com +2001:778:0:ffff:64:0:c73b:95f0 mobile.twitter.com +2001:778:0:ffff:64:0:c73b:9460 mobile.twitter.com +2001:778:0:ffff:64:0:c73b:9454 oauth.twitter.com +2001:778:0:ffff:64:0:c73b:95d0 oauth.twitter.com +2001:778:0:ffff:64:0:c73b:95f3 oauth.twitter.com +2001:778:0:ffff:64:0:c73b:940b oauth.twitter.com +2001:778:0:ffff:64:0:210:2137 platform.twitter.com +2001:778:0:ffff:64:0:214:b52b platform0.twitter.com +2001:778:0:ffff:64:0:214:b551 platform0.twitter.com +2001:778:0:ffff:64:0:214:b511 platform1.twitter.com +2001:778:0:ffff:64:0:214:b548 platform1.twitter.com +2001:778:0:ffff:64:0:214:b543 platform2.twitter.com +2001:778:0:ffff:64:0:214:b549 platform2.twitter.com +2001:778:0:ffff:64:0:214:b551 platform3.twitter.com +2001:778:0:ffff:64:0:214:b559 platform3.twitter.com +2001:778:0:ffff:64:0:c73b:9454 scribe.twitter.com +2001:778:0:ffff:64:0:c73b:95d0 scribe.twitter.com +2001:778:0:ffff:64:0:c73b:95f3 scribe.twitter.com +2001:778:0:ffff:64:0:c73b:940b scribe.twitter.com 
+2001:778:0:ffff:64:0:c73b:94c9 search.twitter.com +2001:778:0:ffff:64:0:c73b:9415 static.twitter.com +2001:778:0:ffff:64:0:c73b:945c static.twitter.com +2001:778:0:ffff:64:0:c73b:95c7 static.twitter.com +2001:778:0:ffff:64:0:c73b:95e7 static.twitter.com +2001:778:0:ffff:64:0:4820:e708 status.twitter.com +2001:778:0:ffff:64:0:c73b:948a stream.twitter.com +2001:778:0:ffff:64:0:c73b:94ce support.twitter.com +2001:778:0:ffff:64:0:c73b:948b userstream.twitter.com +2001:778:0:ffff:64:0:c73b:94d5 upload.twitter.com +2001:778:0:ffff:64:0:c73b:940a www.twitter.com +2001:778:0:ffff:64:0:c73b:95c6 www.twitter.com +2001:778:0:ffff:64:0:c73b:95e6 www.twitter.com +2001:778:0:ffff:64:0:c73b:940c t.co +2001:778:0:ffff:64:0:214:b632 twimg0-a.akamaihd.net +2001:778:0:ffff:64:0:214:b618 twimg0-a.akamaihd.net +2001:778:0:ffff:64:0:5e7f:4c35 si0.twimg.com +2001:778:0:ffff:64:0:214:b618 a0.twimg.com +2001:778:0:ffff:64:0:214:b632 a0.twimg.com +2001:778:0:ffff:64:0:214:b633 a1.twimg.com +2001:778:0:ffff:64:0:214:b61b a1.twimg.com +2001:778:0:ffff:64:0:214:b631 a2.twimg.com +2001:778:0:ffff:64:0:214:b619 a2.twimg.com +2001:778:0:ffff:64:0:214:b612 a3.twimg.com +2001:778:0:ffff:64:0:214:b630 a3.twimg.com +2001:778:0:ffff:64:0:80c:d47e a4.twimg.com +2001:778:0:ffff:64:0:81b:807e a4.twimg.com +2001:778:0:ffff:64:0:417:2c7e a4.twimg.com +2001:778:0:ffff:64:0:214:b610 a5.twimg.com +2001:778:0:ffff:64:0:214:b60a a5.twimg.com +2001:778:0:ffff:64:0:c0dd:6d51 p.twimg.com +2001:778:0:ffff:64:0:5e7f:4c35 si0.twimg.com +2001:778:0:ffff:64:0:5e7f:4c35 si1.twimg.com +2001:778:0:ffff:64:0:5e7f:4c35 si2.twimg.com +2001:778:0:ffff:64:0:5c7b:9790 si3.twimg.com +2001:778:0:ffff:64:0:5e7f:4c35 si4.twimg.com +2001:778:0:ffff:64:0:5c7b:9790 si5.twimg.com +2001:778:0:ffff:64:0:cdfb:dbe5 widgets.twimg.com +2001:778:0:ffff:64:0:cdfb:db4a widgets.twimg.com +2001:778:0:ffff:64:0:cdfb:db73 widgets.twimg.com +2001:778:0:ffff:64:0:cdfb:db8e widgets.twimg.com +2001:778:0:ffff:64:0:cdfb:db90 widgets.twimg.com 
+2001:778:0:ffff:64:0:cdfb:dbac widgets.twimg.com +2001:778:0:ffff:64:0:cdfb:dbbb widgets.twimg.com +2001:778:0:ffff:64:0:cdfb:dbde widgets.twimg.com + +#WikiMedia 维基媒体 +2001:778:0:ffff:64:0:5bc6:aee9 bits.wikimedia.org +2001:778:0:ffff:64:0:5bc6:aee9 bits-geo.wikimedia.org +2001:778:0:ffff:64:0:d050:9894 blog.wikimedia.org +2001:778:0:ffff:64:0:5bc6:aee0 commons.wikimedia.org #维基共享资源 +2001:778:0:ffff:64:0:d050:98b9 dataset2.wikimedia.org +2001:778:0:ffff:64:0:d050:98b9 download.wikimedia.org +2001:778:0:ffff:64:0:d050:98b9 dumps.wikimedia.org +2001:778:0:ffff:64:0:d050:98a2 en.planet.wikimedia.org +2001:778:0:ffff:64:0:d050:9893 formey.wikimedia.org +2001:778:0:ffff:64:0:d050:98a1 ganglia.wikimedia.org +2001:778:0:ffff:64:0:5bc6:aee9 geoiplookup.wikimedia.org +2001:778:0:ffff:64:0:d050:9894 hooper.wikimedia.org +2620:0:860:2:21d:9ff:fe33:f235 hume.wikimedia.org +2001:778:0:ffff:64:0:5bc6:aee0 incubator.wikimedia.org +2001:778:0:ffff:64:0:d050:9885 mail.wikimedia.org +2001:778:0:ffff:64:0:5bc6:aee0 meta.wikimedia.org #元维基 +2001:778:0:ffff:64:0:d050:9886 secure.wikimedia.org +2001:778:0:ffff:64:0:5bc6:aee0 species.wikimedia.org #维基物种 +2001:778:0:ffff:64:0:d050:98a1 spence.wikimedia.org +2001:778:0:ffff:64:0:d050:98a2 singer.wikimedia.org +2620:0:860:2:21d:9ff:fe33:f235 static.wikipedia.org +2001:778:0:ffff:64:0:d050:9893 svn.wikimedia.org +2001:778:0:ffff:64:0:5bc6:aeea upload.wikimedia.org +2001:778:0:ffff:64:0:5bc6:aee0 www.wikimedia.org +2001:778:0:ffff:64:0:d050:98d1 wikimediafoundation.org +2001:778:0:ffff:64:0:5bc6:aee7 beta.wikiversity.org #维基学院 +2001:778:0:ffff:64:0:5bc6:aee8 www.mediawiki.org +2001:778:0:ffff:64:0:5bc6:aee0 wikimedia-lb.wikimedia.org +2001:778:0:ffff:64:0:5bc6:aee1 wikipedia-lb.wikimedia.org +2001:778:0:ffff:64:0:5bc6:aee2 wiktionary-lb.wikimedia.org +2001:778:0:ffff:64:0:5bc6:aee3 wikiquote-lb.wikimedia.org +2001:778:0:ffff:64:0:5bc6:aee4 wikibooks-lb.wikimedia.org +2001:778:0:ffff:64:0:5bc6:aee5 wikisource-lb.wikimedia.org 
+2001:778:0:ffff:64:0:5bc6:aee6 wikinews-lb.wikimedia.org +2001:778:0:ffff:64:0:5bc6:aee7 wikiversity-lb.wikimedia.org +2001:778:0:ffff:64:0:5bc6:aee8 mediawiki-lb.wikimedia.org +2001:778:0:ffff:64:0:d050:9aec m.wikimedia.org +#WikiBooks 维基教科书 +2001:778:0:ffff:64:0:5bc6:aee4 aa.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 af.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ak.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 als.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ang.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ar.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 as.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ast.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ay.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 az.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ba.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 be.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 bg.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 bi.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 bm.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 bn.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 bo.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 bs.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ca.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ch.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 co.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 cs.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 cv.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 cy.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 da.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 de.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 el.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 en.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 eo.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 es.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 et.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 eu.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 fa.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 fi.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 fr.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 
fy.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ga.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 gl.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 gn.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 got.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 gu.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 he.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 hi.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 hr.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 hu.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 hy.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ia.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 id.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ie.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 is.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 it.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ja.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ka.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 kk.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 km.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 kn.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ko.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ks.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ku.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ky.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 la.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 lb.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 li.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ln.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 lt.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 lv.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 mg.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 mi.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 mk.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ml.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 mn.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 mr.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ms.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 my.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 na.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 nah.wikibooks.org 
+2001:778:0:ffff:64:0:5bc6:aee4 nds.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ne.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 nl.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 no.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 oc.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 pa.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 pl.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ps.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 pt.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 qu.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 rm.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ro.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ru.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 sa.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 se.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 si.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 simple.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 sk.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 sl.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 sq.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 sr.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 su.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 sv.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 sw.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ta.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 te.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 tg.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 th.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 tk.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 tl.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 tr.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 tt.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ug.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 uk.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 ur.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 uz.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 vi.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 vo.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 wa.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 xh.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 
yo.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 za.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 zh.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 zh-min-nan.wikibooks.org +2001:778:0:ffff:64:0:5bc6:aee4 zu.wikibooks.org +#WikiNews 维基新闻 +2001:778:0:ffff:64:0:5bc6:aee6 ar.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 bg.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 bs.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 ca.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 cs.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 de.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 el.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 en.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 eo.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 es.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 fa.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 fi.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 fr.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 he.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 hu.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 it.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 ja.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 ko.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 nl.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 no.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 pl.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 pt.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 ro.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 ru.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 sd.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 sq.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 sr.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 sv.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 ta.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 th.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 tr.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 uk.wikinews.org +2001:778:0:ffff:64:0:5bc6:aee6 zh.wikinews.org +#WikiPedia 维基百科 +2001:778:0:ffff:64:0:d050:98c9 wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 www.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 aa.wikipedia.org 
+2001:778:0:ffff:64:0:5bc6:aee1 ab.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ace.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 af.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ak.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 als.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 am.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 an.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ang.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ar.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 arc.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 arz.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 as.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ast.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 av.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ay.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 az.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ba.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 bar.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 bat-smg.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 bcl.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 be.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 be-x-old.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 bg.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 bh.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 bi.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 bm.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 bn.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 bo.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 bpy.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 br.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 bs.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 bug.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 bxr.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ca.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 cbk-zam.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 cdo.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ce.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ceb.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ch.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 chr.wikipedia.org 
+2001:778:0:ffff:64:0:5bc6:aee1 chy.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ckb.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 co.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 cr.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 crh.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 cs.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 csb.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 cu.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 cv.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 cy.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 da.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 de.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 diq.wikipedia.org +2001:778:0:ffff:64:0:d050:98b9 download.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 dsb.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 dv.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 dz.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ee.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 el.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 eml.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 en.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 eo.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 es.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 et.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 eu.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ext.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 fa.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ff.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 fi.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 fiu-vro.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 fj.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 fo.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 fr.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 frp.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 fur.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 fy.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ga.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 gan.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 gd.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 gl.wikipedia.org 
+2001:778:0:ffff:64:0:5bc6:aee1 glk.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 gn.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 got.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 gu.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 gv.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ha.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 hak.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 haw.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 he.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 hi.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 hif.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 hr.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 hsb.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ht.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 hu.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 hy.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ia.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 id.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ie.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ig.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ik.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ilo.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 io.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 is.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 it.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 iu.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ja.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 jbo.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 jv.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ka.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 kaa.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 kab.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 kg.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ki.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 kk.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 kl.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 km.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 kn.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ko.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 krc.wikipedia.org 
+2001:778:0:ffff:64:0:5bc6:aee1 ks.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ksh.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ku.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 kv.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 kw.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ky.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 la.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 lad.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 lb.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 lbe.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 lg.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 li.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 lij.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 lmo.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ln.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 lo.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 lt.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 lv.wikipedia.org +2001:778:0:ffff:64:0:d050:9885 mail.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 map-bms.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 mdf.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 mg.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 mh.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 mhr.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 mi.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 mk.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ml.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 mn.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 mr.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ms.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 mt.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 mwl.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 my.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 myv.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 mzn.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 na.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 nah.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 nap.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 nds.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 nds-nl.wikipedia.org 
+2001:778:0:ffff:64:0:5bc6:aee1 ne.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 new.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ng.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 nl.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 nn.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 no.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 nov.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 nrm.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 nv.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ny.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 oc.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 om.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 or.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 os.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 pa.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 pag.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 pam.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 pap.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 pcd.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 pdc.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 pi.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 pih.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 pl.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 pms.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 pnb.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 pnt.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ps.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 pt.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 qu.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 rm.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 rmy.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 rn.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ro.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 roa-rup.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 roa-tara.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ru.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 rw.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 sa.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 sah.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 sc.wikipedia.org 
+2001:778:0:ffff:64:0:5bc6:aee1 scn.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 sco.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 sd.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 se.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 sg.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 sh.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 si.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 simple.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 sk.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 sl.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 sm.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 sn.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 so.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 sq.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 sr.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 srn.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ss.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 st.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 stq.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 su.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 sv.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 sw.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 szl.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ta.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 te.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 tet.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 tg.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 th.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ti.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 tk.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 tl.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 tn.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 to.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 tpi.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 tr.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ts.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 tt.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 tum.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 tw.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ty.wikipedia.org 
+2001:778:0:ffff:64:0:5bc6:aee1 udm.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ug.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 uk.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ur.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 uz.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 ve.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 vec.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 vi.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 vls.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 vo.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 wa.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 war.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 wo.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 wuu.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 xal.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 xh.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 yi.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 yo.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 za.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 zea.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 zh.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 zh-classical.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 zh-min-nan.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 zh-yue.wikipedia.org +2001:778:0:ffff:64:0:5bc6:aee1 zu.wikipedia.org +#WikiPedia Mobile 维基百科移动版 +2001:778:0:ffff:64:0:d050:9aec m.wikipedia.org +2001:778:0:ffff:64:0:d050:9aec en.m.wikipedia.org +2001:778:0:ffff:64:0:d050:9aec zh.m.wikipedia.org +#WikiQuote 维基语录 +2001:778:0:ffff:64:0:5bc6:aee3 af.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 als.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 am.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 ang.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 ar.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 ast.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 az.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 be.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 bg.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 bm.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 br.wikiquote.org 
+2001:778:0:ffff:64:0:5bc6:aee3 bs.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 ca.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 co.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 cr.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 cs.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 cy.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 da.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 de.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 el.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 en.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 eo.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 es.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 et.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 eu.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 fa.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 fi.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 fr.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 ga.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 gl.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 gu.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 he.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 hi.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 hr.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 hu.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 hy.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 id.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 is.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 it.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 ja.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 ka.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 kk.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 kn.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 ko.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 kr.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 ks.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 ku.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 kw.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 ky.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 la.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 lb.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 
li.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 lt.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 ml.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 mr.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 na.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 nds.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 nl.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 nn.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 no.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 pl.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 pt.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 qu.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 ro.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 ru.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 simple.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 sk.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 sl.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 sq.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 sr.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 su.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 sv.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 ta.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 te.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 th.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 tk.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 tr.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 tt.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 ug.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 uk.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 ur.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 uz.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 vi.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 vo.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 wo.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 za.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 zh.wikiquote.org +2001:778:0:ffff:64:0:5bc6:aee3 zh-min-nan.wikiquote.org +#WikiSource 维基文库 +2001:778:0:ffff:64:0:5bc6:aee5 ang.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 ar.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 az.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 
bg.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 bn.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 br.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 bs.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 ca.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 cs.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 cy.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 da.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 de.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 el.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 en.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 eo.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 es.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 et.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 fa.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 fi.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 fo.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 fr.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 gl.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 he.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 hr.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 ht.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 hu.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 hy.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 id.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 is.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 it.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 ja.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 kn.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 ko.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 la.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 li.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 lt.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 mk.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 ml.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 nl.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 no.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 pl.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 pt.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 ro.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 
ru.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 sa.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 sah.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 sk.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 sl.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 sr.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 sv.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 ta.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 te.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 th.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 tr.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 uk.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 vec.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 vi.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 www.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 yi.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 zh.wikisource.org +2001:778:0:ffff:64:0:5bc6:aee5 zh-min-nan.wikisource.org +#Wiktionary 维基词典 +2001:778:0:ffff:64:0:5bc6:aee2 aa.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ab.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 af.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ak.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 als.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 am.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 an.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ang.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ar.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 as.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ast.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 av.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ay.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 az.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 be.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 bg.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 bh.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 bi.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 bm.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 bn.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 bo.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 br.wiktionary.org 
+2001:778:0:ffff:64:0:5bc6:aee2 bs.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ca.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ch.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 chr.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 co.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 cr.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 cs.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 csb.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 cy.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 da.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 de.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 dv.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 dz.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 el.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 en.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 eo.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 es.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 et.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 eu.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 fa.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 fi.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 fj.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 fo.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 fr.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 fy.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ga.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 gd.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 gl.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 gn.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 gu.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 gv.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ha.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 he.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 hi.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 hr.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 hsb.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 hu.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 hy.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ia.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 
id.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ie.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ik.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 io.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 is.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 it.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 iu.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ja.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 jbo.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 jv.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ka.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 kk.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 kl.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 km.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 kn.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ko.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ks.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ku.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 kw.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ky.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 la.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 lb.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 li.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ln.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 lo.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 lt.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 lv.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 mg.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 mh.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 mi.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 mk.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ml.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 mn.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 mo.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 mr.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ms.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 mt.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 my.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 na.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 nah.wiktionary.org 
+2001:778:0:ffff:64:0:5bc6:aee2 nds.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ne.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 nl.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 nn.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 no.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 oc.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 om.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 or.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 pa.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 pi.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 pl.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ps.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 pt.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 qu.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 rm.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 rn.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ro.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 roa-rup.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ru.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 rw.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 sa.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 sc.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 scn.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 sd.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 sg.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 sh.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 si.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 simple.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 sk.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 sl.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 sm.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 sn.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 so.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 sq.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 sr.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ss.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 st.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 su.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 sv.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 
sw.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ta.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 te.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 tg.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 th.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ti.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 tk.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 tl.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 tn.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 to.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 tpi.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 tr.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ts.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 tt.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 tw.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ug.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 uk.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 ur.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 uz.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 vi.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 vo.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 wa.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 wo.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 xh.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 yi.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 yo.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 za.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 zh.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 zh-min-nan.wiktionary.org +2001:778:0:ffff:64:0:5bc6:aee2 zu.wiktionary.org \ No newline at end of file diff --git a/users/home/templates/home/index.html b/users/home/templates/home/index.html new file mode 100644 index 0000000..d017475 --- /dev/null +++ b/users/home/templates/home/index.html @@ -0,0 +1,25 @@ +{% extends 'layout.html' %} + +{% block body_class %}homepage{% endblock %} + +{% block main_content %} +

    实验室新闻 & 通知

    + +{% for notification in notifications %} + +
    +

    {{ notification.title }}

    +

    + 发布时间:{{ notification.created_at|date:"Y-m-d H:i" }} | + 更新时间:{{ notification.updated_at|date:"Y-m-d H:i" }} +

    +
    {{ notification.content|safe }}
    +
    + +{% empty %} + +

    暂时没有任何新闻和通知。

    + +{% endfor %} + +{% endblock main_content %} diff --git a/users/home/views.py b/users/home/views.py new file mode 100644 index 0000000..832412b --- /dev/null +++ b/users/home/views.py @@ -0,0 +1,8 @@ +from annoying.decorators import render_to +from notification.models import Notification + +@render_to('home/index.html') +def index(request): + notifications = Notification.objects.order_by('-updated_at')[:10] + + return { 'notifications': notifications } diff --git a/users/home/views.pyc b/users/home/views.pyc new file mode 100644 index 0000000..00d32e9 Binary files /dev/null and b/users/home/views.pyc differ diff --git a/users/manage.py b/users/manage.py new file mode 100644 index 0000000..3e4eedc --- /dev/null +++ b/users/manage.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python +from django.core.management import execute_manager +import imp +try: + imp.find_module('settings') # Assumed to be in the same directory. +except ImportError: + import sys + sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. 
It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__) + sys.exit(1) + +import settings + +if __name__ == "__main__": + execute_manager(settings) diff --git a/users/notification/__init__.py b/users/notification/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/notification/__init__.pyc b/users/notification/__init__.pyc new file mode 100644 index 0000000..f72473f Binary files /dev/null and b/users/notification/__init__.pyc differ diff --git a/users/notification/admin.py b/users/notification/admin.py new file mode 100644 index 0000000..52c2330 --- /dev/null +++ b/users/notification/admin.py @@ -0,0 +1,4 @@ +from django.contrib import admin +from notification.models import Notification + +admin.site.register(Notification) diff --git a/users/notification/admin.pyc b/users/notification/admin.pyc new file mode 100644 index 0000000..8aede93 Binary files /dev/null and b/users/notification/admin.pyc differ diff --git a/users/notification/migrations/0001_initial.py b/users/notification/migrations/0001_initial.py new file mode 100644 index 0000000..902882b --- /dev/null +++ b/users/notification/migrations/0001_initial.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +import datetime +from south.db import db +from south.v2 import SchemaMigration +from django.db import models + + +class Migration(SchemaMigration): + + def forwards(self, orm): + # Adding model 'Notification' + db.create_table('notification_notification', ( + ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), + ('title', self.gf('django.db.models.fields.CharField')(max_length=255)), + ('content', self.gf('django.db.models.fields.TextField')()), + ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), + ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), + )) + db.send_create_signal('notification', ['Notification']) + + def 
backwards(self, orm): + # Deleting model 'Notification' + db.delete_table('notification_notification') + + models = { + 'notification.notification': { + 'Meta': {'object_name': 'Notification'}, + 'content': ('django.db.models.fields.TextField', [], {}), + 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}), + 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) + } + } + + complete_apps = ['notification'] \ No newline at end of file diff --git a/users/notification/migrations/0001_initial.pyc b/users/notification/migrations/0001_initial.pyc new file mode 100644 index 0000000..454ed0d Binary files /dev/null and b/users/notification/migrations/0001_initial.pyc differ diff --git a/users/notification/migrations/__init__.py b/users/notification/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/notification/migrations/__init__.pyc b/users/notification/migrations/__init__.pyc new file mode 100644 index 0000000..ac6133b Binary files /dev/null and b/users/notification/migrations/__init__.pyc differ diff --git a/users/notification/models.py b/users/notification/models.py new file mode 100644 index 0000000..cdcb7f2 --- /dev/null +++ b/users/notification/models.py @@ -0,0 +1,17 @@ +# encoding: utf-8 + +from django.db import models + + +class Notification(models.Model): + title = models.CharField(max_length=255, verbose_name=u'标题') + content = models.TextField(verbose_name=u'内容') + created_at = models.DateTimeField(auto_now_add=True) + updated_at = models.DateTimeField(auto_now=True) + + class Meta: + verbose_name = u'通知' + verbose_name_plural = u'通知' + + def __unicode__(self): + return self.title diff --git a/users/notification/models.pyc b/users/notification/models.pyc new file mode 100644 index 
0000000..ca6a070 Binary files /dev/null and b/users/notification/models.pyc differ diff --git a/users/requirements.txt b/users/requirements.txt new file mode 100644 index 0000000..7c07c9a --- /dev/null +++ b/users/requirements.txt @@ -0,0 +1,5 @@ +django +django-debug-toolbar +django-annoying +-e hg+http://bitbucket.org/andrewgodwin/south#egg=south +beautifulsoup diff --git a/users/sass/.sass-cache/65d043d6342f1f9e995199fa3272102f9c4385c3/ie.sassc b/users/sass/.sass-cache/65d043d6342f1f9e995199fa3272102f9c4385c3/ie.sassc new file mode 100644 index 0000000..de2c5a6 Binary files /dev/null and b/users/sass/.sass-cache/65d043d6342f1f9e995199fa3272102f9c4385c3/ie.sassc differ diff --git a/users/sass/.sass-cache/68f837f754e6ba7c7deaeaedd1291ccfbb9e2e65/config.rbc b/users/sass/.sass-cache/68f837f754e6ba7c7deaeaedd1291ccfbb9e2e65/config.rbc new file mode 100644 index 0000000..c9dd922 Binary files /dev/null and b/users/sass/.sass-cache/68f837f754e6ba7c7deaeaedd1291ccfbb9e2e65/config.rbc differ diff --git a/users/sass/.sass-cache/68f837f754e6ba7c7deaeaedd1291ccfbb9e2e65/ie.sassc b/users/sass/.sass-cache/68f837f754e6ba7c7deaeaedd1291ccfbb9e2e65/ie.sassc new file mode 100644 index 0000000..59fde9f Binary files /dev/null and b/users/sass/.sass-cache/68f837f754e6ba7c7deaeaedd1291ccfbb9e2e65/ie.sassc differ diff --git a/users/sass/assets/css/.css b/users/sass/assets/css/.css new file mode 100644 index 0000000..e69de29 diff --git a/users/sass/config.rb b/users/sass/config.rb new file mode 100644 index 0000000..80eb547 --- /dev/null +++ b/users/sass/config.rb @@ -0,0 +1,17 @@ +# Require any additional compass plugins here. 
+ +# Set this to the root of your project when deployed: +http_path = "/" +css_dir = "assets/css" +sass_dir = "sass" +images_dir = "assets/images" +javascripts_dir = "assets/js" + +# You can select your preferred output style here (can be overridden via the command line): +# output_style = :expanded or :nested or :compact or :compressed +output_style = :expanded + +# To disable debugging comments that display the original location of your selectors. Uncomment: +# line_comments = false + +preferred_syntax = :sass diff --git a/users/sass/ie.sass b/users/sass/ie.sass new file mode 100644 index 0000000..5bad66b --- /dev/null +++ b/users/sass/ie.sass @@ -0,0 +1,3 @@ +@import blueprint + ++blueprint-ie diff --git a/users/sass/screen.sass b/users/sass/screen.sass new file mode 100644 index 0000000..dbbb2de --- /dev/null +++ b/users/sass/screen.sass @@ -0,0 +1,170 @@ +@import compass/css3 +@import blueprint/reset +@import blueprint + +$blueprint-grid-columns : 14 +$blueprint-container-size : 960px +$blueprint-grid-margin : 20px +$blueprint-grid-width: ($blueprint-container-size + $blueprint-grid-margin) / $blueprint-grid-columns - $blueprint-grid-margin + +$blueprint-font-family: Helvetica, Arial, "Microsoft Yahei", "WenQuanYi Micro Hei", STXihei, SimHei, sans-serif +$blueprint-font-size: 12px + ++blueprint-typography ++blueprint-interaction ++blueprint-utilities + +body + background: url(../images/afterdark.png) + +a + text-decoration: none + +dl + dt + @extend .text-shadow + +column(1) + width: 160px + font-size: 16px + font-weight: bold + text-align: justify + clear: left + dd + +column(5, true) + font-size: 14px + margin: 2px 0 0 0 + width: 280px + +ul, ol, p + margin: 0 + padding: 0 + +.button-like + padding: 8px 14px + +border-radius + border: none + background-color: #2459a0 + color: #fff + + &:hover + cursor: pointer + background-color: #ccc + color: #333 + +form + label, input[type=text], input[type=password], input[type=submit] + display: block + font-size: 1.2em 
+ input[type=text], input[type=password] + width: 240px + padding: 7px + border: 1px solid #ccc + outline: none + + &:focus + border: 1px solid #ee4400 + input[type=submit] + margin-top: 10px + @extend .button-like + .errorlist + margin-bottom: 10px + list-style-type: none + color: red + +thead th + background-color: lighten(#2459a0, 30%) + color: #fff + + &:first-child + +border-left-radius + &:last-child + +border-right-radius + +h2 + color: #e40 + +text-shadow(#999, 1px, 1px, 0px) + +#page + +container + margin-top: 20px + padding: 30px 40px + background-color: #fff + +border-radius(10px) + +#logo h1 + +column(14, true) + +text-shadow(#aaa, 0, 0, 5px) + +#navigation + +column(14, true) + margin: 15px 0 30px 0 + list-style-type: none + + li + float: left + width: 180px + margin-right: 2px + + a + display: block + padding: 5px + font-size: 16px + line-height: 1.8 + text-align: center + background-color: #2459a0 + color: #fff + + &:hover + background-color: #ccc + color: #333 + + &:first-child a + +border-left-radius(10px) + &:last-child a + +border-right-radius(10px) + +#flash-messages + clear: both + +#main + +column(14, true) + margin-bottom: 20px + +#main-content + +column(9.8) + +append(0.2) + +border(#ddd, 2px) + border-right: 2px dotted #ddd + +#main-sidebar + +prepend(0.2) + +column(3.8, true) + +#footer + +column(14, true) + padding-top: 10px + text-align: center + color: #999 + border-top: 2px dotted #ddd + +#contacts-list + list-style-type: none + font-size: 16px + +body.homepage + .notification + h3 + margin-bottom: 8px + .date + color: #999 + font-size: 0.9em + .content + margin-top: 10px + font-size: 1.1em + +body.contacts-me + #actions + margin-top: 15px + a + @extend .button-like + margin-right: 15px diff --git a/users/settings.py b/users/settings.py new file mode 100644 index 0000000..8df6834 --- /dev/null +++ b/users/settings.py @@ -0,0 +1,102 @@ +# encoding: utf-8 +import os +PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__)) 
+relative_to_project_root = lambda *x: os.path.join(PROJECT_ROOT, *x) + +DEBUG = True +TEMPLATE_DEBUG = DEBUG + +ADMINS = () +MANAGERS = ADMINS + +DATABASES = { + 'default': { + 'ENGINE': 'mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'. + 'NAME': 'istweb_db', + 'USER': 'istweb_proj', # Not used with sqlite3. + 'PASSWORD': '123456', # Not used with sqlite3. + 'HOST': '', # Set to empty string for localhost. Not used with sqlite3. + 'PORT': '', # Set to empty string for default. Not used with sqlite3. + } +} + +MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage' + +TIME_ZONE = 'Asia/Shanghai' +LANGUAGE_CODE = 'zh-cn' +USE_I18N = True +USE_L10N = True + +SITE_ID = 1 + +MEDIA_ROOT = relative_to_project_root('uploads') +MEDIA_URL = '' + +STATIC_ROOT = relative_to_project_root('static') +STATIC_URL = '/static/' +ADMIN_MEDIA_PREFIX = '/static/admin/' + +STATICFILES_DIRS = ( + relative_to_project_root('assets'), +) + +STATICFILES_FINDERS = ( + 'django.contrib.staticfiles.finders.FileSystemFinder', + 'django.contrib.staticfiles.finders.AppDirectoriesFinder', +) + +# Make this unique, and don't share it with anybody. 
+SECRET_KEY = 'YymbZIjMfikrwqPnWARCVaplc`[zeJto\\OKFXgvShLuUx_^HTQ' + +TEMPLATE_DIRS = ( + relative_to_project_root('templates'), +) + +TEMPLATE_LOADERS = ( + 'django.template.loaders.filesystem.Loader', + 'django.template.loaders.app_directories.Loader', +) + +MIDDLEWARE_CLASSES = ( + 'django.middleware.common.CommonMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'users.middleware.RestrictAccessMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', +) + +TEMPLATE_CONTEXT_PROCESSORS = ( + 'django.contrib.auth.context_processors.auth', + 'django.contrib.messages.context_processors.messages', + 'django.core.context_processors.static', + 'contacts.views.contacts_list', +) + +INSTALLED_APPS = ( + 'south', + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.messages', + 'django.contrib.admin', + 'django.contrib.staticfiles', + 'home', + 'users', + 'contacts', + 'notification', +) + +ROOT_URLCONF = 'istweb.urls' + +LOGIN_URL = '/login' +LOGOUT_URL = '/logout' +LOGIN_REDIRECT_URL = '/' +EXCLUDED_URLS = ( + '/admin', + '/login', + '/favicon.ico', + '/static/', +) + +RESTRICTED_URLS = () diff --git a/users/show_urls.py b/users/show_urls.py new file mode 100644 index 0000000..24ac19b --- /dev/null +++ b/users/show_urls.py @@ -0,0 +1,21 @@ +import os +os.environ['DJANGO_SETTINGS_MODULE'] = 'settings' + +import urls + +def show_urls(urllist, depth=0): + for entry in urllist: + if hasattr(entry, 'name'): + print " " * depth, entry.name, entry.regex.pattern + else: + print " " * depth, entry.regex.pattern + if hasattr(entry, 'url_patterns'): + show_urls(entry.url_patterns, depth + 1) + + +def main(): + show_urls(urls.urlpatterns) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/users/src/pip-delete-this-directory.txt 
b/users/src/pip-delete-this-directory.txt new file mode 100644 index 0000000..c8883ea --- /dev/null +++ b/users/src/pip-delete-this-directory.txt @@ -0,0 +1,5 @@ +This file is placed here by pip to indicate the source was put +here by pip. + +Once this package is successfully installed this source code will be +deleted (unless you remove this file). diff --git a/users/src/south/.hg/00changelog.i b/users/src/south/.hg/00changelog.i new file mode 100644 index 0000000..d3a8311 Binary files /dev/null and b/users/src/south/.hg/00changelog.i differ diff --git a/users/src/south/.hg/branch b/users/src/south/.hg/branch new file mode 100644 index 0000000..4ad96d5 --- /dev/null +++ b/users/src/south/.hg/branch @@ -0,0 +1 @@ +default diff --git a/users/src/south/.hg/cache/branchheads b/users/src/south/.hg/cache/branchheads new file mode 100644 index 0000000..8aa753b --- /dev/null +++ b/users/src/south/.hg/cache/branchheads @@ -0,0 +1,13 @@ +e1867ac2b350d9c110f054f4ad0d3af6620dc1c5 995 +35a2411f80e518b129a96dac408731641b747a67 autofakefirst +8f2c38e8ea4e4bd6f7d77a9c66ff32f1ba426c1b bug-700 +f2a261316ee356b42f8961bfaded624b4fe777d5 schemamigration_pep8_output +6a48630b7bd12d7347ad1e5fe423f5523d414b26 default +e1867ac2b350d9c110f054f4ad0d3af6620dc1c5 default +ac5e8aa150525347a4323a2c5e98c6fa901bdc63 rebase +8651d1202de634293ff7baa9710c617b62e8ba57 rebase +99ba041f8b585d361bcb6c2a0ede8cd27629b672 migrate_+_or_- +516b011fe68c233222995929d3dfc8fa564b6b6c 0.7 +780045ce7bf5a76f9c9bc37d2c18080b6136f637 output_failing_sql +fba423187735e4b375a7e7e6d4706be038606aea list_migrations_with_time_applied +e1ac69aed52648bcc0c0cfcd81fa8979ec72e2bb noparsing diff --git a/users/src/south/.hg/dirstate b/users/src/south/.hg/dirstate new file mode 100644 index 0000000..6019a65 Binary files /dev/null and b/users/src/south/.hg/dirstate differ diff --git a/users/src/south/.hg/hgrc b/users/src/south/.hg/hgrc new file mode 100644 index 0000000..b6437ae --- /dev/null +++ b/users/src/south/.hg/hgrc @@ -0,0 
+1,3 @@ +[paths] +default = http://bitbucket.org/andrewgodwin/south + diff --git a/users/src/south/.hg/requires b/users/src/south/.hg/requires new file mode 100644 index 0000000..ca69271 --- /dev/null +++ b/users/src/south/.hg/requires @@ -0,0 +1,4 @@ +revlogv1 +store +fncache +dotencode diff --git a/users/src/south/.hg/store/00changelog.d b/users/src/south/.hg/store/00changelog.d new file mode 100644 index 0000000..9f0cc92 Binary files /dev/null and b/users/src/south/.hg/store/00changelog.d differ diff --git a/users/src/south/.hg/store/00changelog.i b/users/src/south/.hg/store/00changelog.i new file mode 100644 index 0000000..f8d03b1 Binary files /dev/null and b/users/src/south/.hg/store/00changelog.i differ diff --git a/users/src/south/.hg/store/00manifest.d b/users/src/south/.hg/store/00manifest.d new file mode 100644 index 0000000..011c555 Binary files /dev/null and b/users/src/south/.hg/store/00manifest.d differ diff --git a/users/src/south/.hg/store/00manifest.i b/users/src/south/.hg/store/00manifest.i new file mode 100644 index 0000000..eed1311 Binary files /dev/null and b/users/src/south/.hg/store/00manifest.i differ diff --git a/users/src/south/.hg/store/data/____init____.py.i b/users/src/south/.hg/store/data/____init____.py.i new file mode 100644 index 0000000..fd29cac Binary files /dev/null and b/users/src/south/.hg/store/data/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/_l_i_c_e_n_s_e.i b/users/src/south/.hg/store/data/_l_i_c_e_n_s_e.i new file mode 100644 index 0000000..d46c284 Binary files /dev/null and b/users/src/south/.hg/store/data/_l_i_c_e_n_s_e.i differ diff --git a/users/src/south/.hg/store/data/_r_e_a_d_m_e.i b/users/src/south/.hg/store/data/_r_e_a_d_m_e.i new file mode 100644 index 0000000..bfbad2f Binary files /dev/null and b/users/src/south/.hg/store/data/_r_e_a_d_m_e.i differ diff --git a/users/src/south/.hg/store/data/clean.sh.i b/users/src/south/.hg/store/data/clean.sh.i new file mode 100644 index 0000000..9676d2d 
Binary files /dev/null and b/users/src/south/.hg/store/data/clean.sh.i differ diff --git a/users/src/south/.hg/store/data/db/____init____.py.i b/users/src/south/.hg/store/data/db/____init____.py.i new file mode 100644 index 0000000..a73d219 Binary files /dev/null and b/users/src/south/.hg/store/data/db/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/db/generic.py.i b/users/src/south/.hg/store/data/db/generic.py.i new file mode 100644 index 0000000..1782f36 Binary files /dev/null and b/users/src/south/.hg/store/data/db/generic.py.i differ diff --git a/users/src/south/.hg/store/data/db/mysql.py.i b/users/src/south/.hg/store/data/db/mysql.py.i new file mode 100644 index 0000000..88a688f Binary files /dev/null and b/users/src/south/.hg/store/data/db/mysql.py.i differ diff --git a/users/src/south/.hg/store/data/db/postgresql__psycopg2.py.i b/users/src/south/.hg/store/data/db/postgresql__psycopg2.py.i new file mode 100644 index 0000000..9adbe31 Binary files /dev/null and b/users/src/south/.hg/store/data/db/postgresql__psycopg2.py.i differ diff --git a/users/src/south/.hg/store/data/db/sql__server/____init____.py.i b/users/src/south/.hg/store/data/db/sql__server/____init____.py.i new file mode 100644 index 0000000..07efbc6 Binary files /dev/null and b/users/src/south/.hg/store/data/db/sql__server/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/db/sql__server/pyodbc.py.i b/users/src/south/.hg/store/data/db/sql__server/pyodbc.py.i new file mode 100644 index 0000000..7fe6ff0 Binary files /dev/null and b/users/src/south/.hg/store/data/db/sql__server/pyodbc.py.i differ diff --git a/users/src/south/.hg/store/data/db/sqlite3.py.i b/users/src/south/.hg/store/data/db/sqlite3.py.i new file mode 100644 index 0000000..123a294 Binary files /dev/null and b/users/src/south/.hg/store/data/db/sqlite3.py.i differ diff --git a/users/src/south/.hg/store/data/docs/__static/empty.i b/users/src/south/.hg/store/data/docs/__static/empty.i new file mode 
100644 index 0000000..f68c5b4 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/__static/empty.i differ diff --git a/users/src/south/.hg/store/data/docs/__templates/empty.i b/users/src/south/.hg/store/data/docs/__templates/empty.i new file mode 100644 index 0000000..f68c5b4 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/__templates/empty.i differ diff --git a/users/src/south/.hg/store/data/docs/__theme/nature/static/nature.css__t.i b/users/src/south/.hg/store/data/docs/__theme/nature/static/nature.css__t.i new file mode 100644 index 0000000..91796ad Binary files /dev/null and b/users/src/south/.hg/store/data/docs/__theme/nature/static/nature.css__t.i differ diff --git a/users/src/south/.hg/store/data/docs/__theme/nature/static/pygments.css.i b/users/src/south/.hg/store/data/docs/__theme/nature/static/pygments.css.i new file mode 100644 index 0000000..5c1a2ef Binary files /dev/null and b/users/src/south/.hg/store/data/docs/__theme/nature/static/pygments.css.i differ diff --git a/users/src/south/.hg/store/data/docs/__theme/nature/theme.conf.i b/users/src/south/.hg/store/data/docs/__theme/nature/theme.conf.i new file mode 100644 index 0000000..6c71e19 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/__theme/nature/theme.conf.i differ diff --git a/users/src/south/.hg/store/data/docs/_c_h_a_n_g_e_l_o_g.i b/users/src/south/.hg/store/data/docs/_c_h_a_n_g_e_l_o_g.i new file mode 100644 index 0000000..98644bf Binary files /dev/null and b/users/src/south/.hg/store/data/docs/_c_h_a_n_g_e_l_o_g.i differ diff --git a/users/src/south/.hg/store/data/docs/_c_o_n_t_r_i_b_u_t_i_n_g.i b/users/src/south/.hg/store/data/docs/_c_o_n_t_r_i_b_u_t_i_n_g.i new file mode 100644 index 0000000..e82b916 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/_c_o_n_t_r_i_b_u_t_i_n_g.i differ diff --git a/users/src/south/.hg/store/data/docs/_l_i_c_e_n_s_e.i b/users/src/south/.hg/store/data/docs/_l_i_c_e_n_s_e.i new file mode 100644 index 
0000000..5c37be1 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/_l_i_c_e_n_s_e.i differ diff --git a/users/src/south/.hg/store/data/docs/_makefile.i b/users/src/south/.hg/store/data/docs/_makefile.i new file mode 100644 index 0000000..a7bbe20 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/_makefile.i differ diff --git a/users/src/south/.hg/store/data/docs/_r_e_a_d_m_e.i b/users/src/south/.hg/store/data/docs/_r_e_a_d_m_e.i new file mode 100644 index 0000000..487a78d Binary files /dev/null and b/users/src/south/.hg/store/data/docs/_r_e_a_d_m_e.i differ diff --git a/users/src/south/.hg/store/data/docs/about.rst.i b/users/src/south/.hg/store/data/docs/about.rst.i new file mode 100644 index 0000000..f06dec4 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/about.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/autodetector.rst.i b/users/src/south/.hg/store/data/docs/autodetector.rst.i new file mode 100644 index 0000000..408cd2d Binary files /dev/null and b/users/src/south/.hg/store/data/docs/autodetector.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/commands.rst.i b/users/src/south/.hg/store/data/docs/commands.rst.i new file mode 100644 index 0000000..b840470 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/commands.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/conf.py.i b/users/src/south/.hg/store/data/docs/conf.py.i new file mode 100644 index 0000000..93d888b Binary files /dev/null and b/users/src/south/.hg/store/data/docs/conf.py.i differ diff --git a/users/src/south/.hg/store/data/docs/convertinganapp.rst.i b/users/src/south/.hg/store/data/docs/convertinganapp.rst.i new file mode 100644 index 0000000..973da18 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/convertinganapp.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/customfields.rst.i b/users/src/south/.hg/store/data/docs/customfields.rst.i new file mode 100644 index 
0000000..52ae4fc Binary files /dev/null and b/users/src/south/.hg/store/data/docs/customfields.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/databaseapi.rst.i b/users/src/south/.hg/store/data/docs/databaseapi.rst.i new file mode 100644 index 0000000..9d779be Binary files /dev/null and b/users/src/south/.hg/store/data/docs/databaseapi.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/db.txt.i b/users/src/south/.hg/store/data/docs/db.txt.i new file mode 100644 index 0000000..6a56381 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/db.txt.i differ diff --git a/users/src/south/.hg/store/data/docs/dependencies.rst.i b/users/src/south/.hg/store/data/docs/dependencies.rst.i new file mode 100644 index 0000000..fef6afe Binary files /dev/null and b/users/src/south/.hg/store/data/docs/dependencies.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/extendingintrospection.rst.i b/users/src/south/.hg/store/data/docs/extendingintrospection.rst.i new file mode 100644 index 0000000..cc01c14 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/extendingintrospection.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/fixtures.rst.i b/users/src/south/.hg/store/data/docs/fixtures.rst.i new file mode 100644 index 0000000..b2dd92e Binary files /dev/null and b/users/src/south/.hg/store/data/docs/fixtures.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/generics.rst.i b/users/src/south/.hg/store/data/docs/generics.rst.i new file mode 100644 index 0000000..93afcdd Binary files /dev/null and b/users/src/south/.hg/store/data/docs/generics.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/index.rst.i b/users/src/south/.hg/store/data/docs/index.rst.i new file mode 100644 index 0000000..be8fd07 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/index.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/installation.rst.i 
b/users/src/south/.hg/store/data/docs/installation.rst.i new file mode 100644 index 0000000..8ab2755 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/installation.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/migrationstructure.rst.i b/users/src/south/.hg/store/data/docs/migrationstructure.rst.i new file mode 100644 index 0000000..eed2399 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/migrationstructure.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/ormfreezing.rst.i b/users/src/south/.hg/store/data/docs/ormfreezing.rst.i new file mode 100644 index 0000000..5e72757 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/ormfreezing.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/releasenotes/0.7.1.rst.i b/users/src/south/.hg/store/data/docs/releasenotes/0.7.1.rst.i new file mode 100644 index 0000000..4b2baf8 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/releasenotes/0.7.1.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/releasenotes/0.7.2.rst.i b/users/src/south/.hg/store/data/docs/releasenotes/0.7.2.rst.i new file mode 100644 index 0000000..7e1f095 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/releasenotes/0.7.2.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/releasenotes/0.7.3.rst.i b/users/src/south/.hg/store/data/docs/releasenotes/0.7.3.rst.i new file mode 100644 index 0000000..ae7ebec Binary files /dev/null and b/users/src/south/.hg/store/data/docs/releasenotes/0.7.3.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/releasenotes/0.7.rst.i b/users/src/south/.hg/store/data/docs/releasenotes/0.7.rst.i new file mode 100644 index 0000000..8c7961e Binary files /dev/null and b/users/src/south/.hg/store/data/docs/releasenotes/0.7.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/releasenotes/index.rst.i b/users/src/south/.hg/store/data/docs/releasenotes/index.rst.i new file mode 100644 index 
0000000..2f4ceb7 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/releasenotes/index.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/settings.rst.i b/users/src/south/.hg/store/data/docs/settings.rst.i new file mode 100644 index 0000000..53e21f1 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/settings.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/signals.rst.i b/users/src/south/.hg/store/data/docs/signals.rst.i new file mode 100644 index 0000000..cb38d57 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/signals.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/tutorial/index.rst.i b/users/src/south/.hg/store/data/docs/tutorial/index.rst.i new file mode 100644 index 0000000..cd1a380 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/tutorial/index.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/tutorial/part1.rst.i b/users/src/south/.hg/store/data/docs/tutorial/part1.rst.i new file mode 100644 index 0000000..775f62a Binary files /dev/null and b/users/src/south/.hg/store/data/docs/tutorial/part1.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/tutorial/part2.rst.i b/users/src/south/.hg/store/data/docs/tutorial/part2.rst.i new file mode 100644 index 0000000..5f72fce Binary files /dev/null and b/users/src/south/.hg/store/data/docs/tutorial/part2.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/tutorial/part3.rst.i b/users/src/south/.hg/store/data/docs/tutorial/part3.rst.i new file mode 100644 index 0000000..f6c91c9 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/tutorial/part3.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/tutorial/part4.rst.i b/users/src/south/.hg/store/data/docs/tutorial/part4.rst.i new file mode 100644 index 0000000..b34f16f Binary files /dev/null and b/users/src/south/.hg/store/data/docs/tutorial/part4.rst.i differ diff --git 
a/users/src/south/.hg/store/data/docs/tutorial/part5.rst.i b/users/src/south/.hg/store/data/docs/tutorial/part5.rst.i new file mode 100644 index 0000000..8dd1347 Binary files /dev/null and b/users/src/south/.hg/store/data/docs/tutorial/part5.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/unittests.rst.i b/users/src/south/.hg/store/data/docs/unittests.rst.i new file mode 100644 index 0000000..e2a2d8f Binary files /dev/null and b/users/src/south/.hg/store/data/docs/unittests.rst.i differ diff --git a/users/src/south/.hg/store/data/docs/whataremigrations.rst.i b/users/src/south/.hg/store/data/docs/whataremigrations.rst.i new file mode 100644 index 0000000..124554a Binary files /dev/null and b/users/src/south/.hg/store/data/docs/whataremigrations.rst.i differ diff --git a/users/src/south/.hg/store/data/install/_r_e_a_d_m_e.i b/users/src/south/.hg/store/data/install/_r_e_a_d_m_e.i new file mode 100644 index 0000000..d233e62 Binary files /dev/null and b/users/src/south/.hg/store/data/install/_r_e_a_d_m_e.i differ diff --git a/users/src/south/.hg/store/data/install/setup.py.i b/users/src/south/.hg/store/data/install/setup.py.i new file mode 100644 index 0000000..cff2d83 Binary files /dev/null and b/users/src/south/.hg/store/data/install/setup.py.i differ diff --git a/users/src/south/.hg/store/data/management/____init____.py.i b/users/src/south/.hg/store/data/management/____init____.py.i new file mode 100644 index 0000000..2431023 Binary files /dev/null and b/users/src/south/.hg/store/data/management/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/management/commands/____init____.py.i b/users/src/south/.hg/store/data/management/commands/____init____.py.i new file mode 100644 index 0000000..2431023 Binary files /dev/null and b/users/src/south/.hg/store/data/management/commands/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/management/commands/migrate.py.i 
b/users/src/south/.hg/store/data/management/commands/migrate.py.i new file mode 100644 index 0000000..2cf5255 Binary files /dev/null and b/users/src/south/.hg/store/data/management/commands/migrate.py.i differ diff --git a/users/src/south/.hg/store/data/management/commands/startmigration.py.i b/users/src/south/.hg/store/data/management/commands/startmigration.py.i new file mode 100644 index 0000000..caf9b5e Binary files /dev/null and b/users/src/south/.hg/store/data/management/commands/startmigration.py.i differ diff --git a/users/src/south/.hg/store/data/management/commands/startmigration2.py.i b/users/src/south/.hg/store/data/management/commands/startmigration2.py.i new file mode 100644 index 0000000..fc9a0ef Binary files /dev/null and b/users/src/south/.hg/store/data/management/commands/startmigration2.py.i differ diff --git a/users/src/south/.hg/store/data/management/commands/syncdb.py.i b/users/src/south/.hg/store/data/management/commands/syncdb.py.i new file mode 100644 index 0000000..d979cf1 Binary files /dev/null and b/users/src/south/.hg/store/data/management/commands/syncdb.py.i differ diff --git a/users/src/south/.hg/store/data/management/commands/test.py.i b/users/src/south/.hg/store/data/management/commands/test.py.i new file mode 100644 index 0000000..b712f8a Binary files /dev/null and b/users/src/south/.hg/store/data/management/commands/test.py.i differ diff --git a/users/src/south/.hg/store/data/migration.py.i b/users/src/south/.hg/store/data/migration.py.i new file mode 100644 index 0000000..0d90f44 Binary files /dev/null and b/users/src/south/.hg/store/data/migration.py.i differ diff --git a/users/src/south/.hg/store/data/models.py.i b/users/src/south/.hg/store/data/models.py.i new file mode 100644 index 0000000..5d616d3 Binary files /dev/null and b/users/src/south/.hg/store/data/models.py.i differ diff --git a/users/src/south/.hg/store/data/modelsparser.py.i b/users/src/south/.hg/store/data/modelsparser.py.i new file mode 100644 index 
0000000..cc9b41b Binary files /dev/null and b/users/src/south/.hg/store/data/modelsparser.py.i differ diff --git a/users/src/south/.hg/store/data/orm.py.i b/users/src/south/.hg/store/data/orm.py.i new file mode 100644 index 0000000..4811778 Binary files /dev/null and b/users/src/south/.hg/store/data/orm.py.i differ diff --git a/users/src/south/.hg/store/data/setup.py.i b/users/src/south/.hg/store/data/setup.py.i new file mode 100644 index 0000000..245877d Binary files /dev/null and b/users/src/south/.hg/store/data/setup.py.i differ diff --git a/users/src/south/.hg/store/data/south/____init____.py.i b/users/src/south/.hg/store/data/south/____init____.py.i new file mode 100644 index 0000000..290cb2f Binary files /dev/null and b/users/src/south/.hg/store/data/south/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/creator/____init____.py.i b/users/src/south/.hg/store/data/south/creator/____init____.py.i new file mode 100644 index 0000000..a53984e Binary files /dev/null and b/users/src/south/.hg/store/data/south/creator/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/creator/actions.py.i b/users/src/south/.hg/store/data/south/creator/actions.py.i new file mode 100644 index 0000000..bad7ae1 Binary files /dev/null and b/users/src/south/.hg/store/data/south/creator/actions.py.i differ diff --git a/users/src/south/.hg/store/data/south/creator/changes.py.i b/users/src/south/.hg/store/data/south/creator/changes.py.i new file mode 100644 index 0000000..d1cc0e6 Binary files /dev/null and b/users/src/south/.hg/store/data/south/creator/changes.py.i differ diff --git a/users/src/south/.hg/store/data/south/creator/freezer.py.i b/users/src/south/.hg/store/data/south/creator/freezer.py.i new file mode 100644 index 0000000..b2a101c Binary files /dev/null and b/users/src/south/.hg/store/data/south/creator/freezer.py.i differ diff --git a/users/src/south/.hg/store/data/south/db/____init____.py.i 
b/users/src/south/.hg/store/data/south/db/____init____.py.i new file mode 100644 index 0000000..8029452 Binary files /dev/null and b/users/src/south/.hg/store/data/south/db/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/db/generic.py.i b/users/src/south/.hg/store/data/south/db/generic.py.i new file mode 100644 index 0000000..3313bb8 Binary files /dev/null and b/users/src/south/.hg/store/data/south/db/generic.py.i differ diff --git a/users/src/south/.hg/store/data/south/db/mysql.py.i b/users/src/south/.hg/store/data/south/db/mysql.py.i new file mode 100644 index 0000000..bfbade4 Binary files /dev/null and b/users/src/south/.hg/store/data/south/db/mysql.py.i differ diff --git a/users/src/south/.hg/store/data/south/db/oracle.py.i b/users/src/south/.hg/store/data/south/db/oracle.py.i new file mode 100644 index 0000000..28edb55 Binary files /dev/null and b/users/src/south/.hg/store/data/south/db/oracle.py.i differ diff --git a/users/src/south/.hg/store/data/south/db/postgresql__psycopg2.py.i b/users/src/south/.hg/store/data/south/db/postgresql__psycopg2.py.i new file mode 100644 index 0000000..17b7e71 Binary files /dev/null and b/users/src/south/.hg/store/data/south/db/postgresql__psycopg2.py.i differ diff --git a/users/src/south/.hg/store/data/south/db/sql__server/____init____.py.i b/users/src/south/.hg/store/data/south/db/sql__server/____init____.py.i new file mode 100644 index 0000000..19d0535 Binary files /dev/null and b/users/src/south/.hg/store/data/south/db/sql__server/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/db/sql__server/pyodbc.py.i b/users/src/south/.hg/store/data/south/db/sql__server/pyodbc.py.i new file mode 100644 index 0000000..8f6aa52 Binary files /dev/null and b/users/src/south/.hg/store/data/south/db/sql__server/pyodbc.py.i differ diff --git a/users/src/south/.hg/store/data/south/db/sqlite3.py.i b/users/src/south/.hg/store/data/south/db/sqlite3.py.i new file mode 100644 index 
0000000..6654b96 Binary files /dev/null and b/users/src/south/.hg/store/data/south/db/sqlite3.py.i differ diff --git a/users/src/south/.hg/store/data/south/exceptions.py.i b/users/src/south/.hg/store/data/south/exceptions.py.i new file mode 100644 index 0000000..bd2c676 Binary files /dev/null and b/users/src/south/.hg/store/data/south/exceptions.py.i differ diff --git a/users/src/south/.hg/store/data/south/hacks/____init____.py.i b/users/src/south/.hg/store/data/south/hacks/____init____.py.i new file mode 100644 index 0000000..8dfac5e Binary files /dev/null and b/users/src/south/.hg/store/data/south/hacks/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/hacks/django__1__0.py.i b/users/src/south/.hg/store/data/south/hacks/django__1__0.py.i new file mode 100644 index 0000000..b95832b Binary files /dev/null and b/users/src/south/.hg/store/data/south/hacks/django__1__0.py.i differ diff --git a/users/src/south/.hg/store/data/south/introspection__plugins/____init____.py.i b/users/src/south/.hg/store/data/south/introspection__plugins/____init____.py.i new file mode 100644 index 0000000..d75a1c4 Binary files /dev/null and b/users/src/south/.hg/store/data/south/introspection__plugins/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/introspection__plugins/annoying__autoonetoone.py.i b/users/src/south/.hg/store/data/south/introspection__plugins/annoying__autoonetoone.py.i new file mode 100644 index 0000000..b612fe5 Binary files /dev/null and b/users/src/south/.hg/store/data/south/introspection__plugins/annoying__autoonetoone.py.i differ diff --git a/users/src/south/.hg/store/data/south/introspection__plugins/django__audit__log.py.i b/users/src/south/.hg/store/data/south/introspection__plugins/django__audit__log.py.i new file mode 100644 index 0000000..99d9dfe Binary files /dev/null and b/users/src/south/.hg/store/data/south/introspection__plugins/django__audit__log.py.i differ diff --git 
a/users/src/south/.hg/store/data/south/introspection__plugins/django__objectpermissions.py.i b/users/src/south/.hg/store/data/south/introspection__plugins/django__objectpermissions.py.i new file mode 100644 index 0000000..d86895f Binary files /dev/null and b/users/src/south/.hg/store/data/south/introspection__plugins/django__objectpermissions.py.i differ diff --git a/users/src/south/.hg/store/data/south/introspection__plugins/django__tagging.py.i b/users/src/south/.hg/store/data/south/introspection__plugins/django__tagging.py.i new file mode 100644 index 0000000..cb7e3ce Binary files /dev/null and b/users/src/south/.hg/store/data/south/introspection__plugins/django__tagging.py.i differ diff --git a/users/src/south/.hg/store/data/south/introspection__plugins/django__taggit.py.i b/users/src/south/.hg/store/data/south/introspection__plugins/django__taggit.py.i new file mode 100644 index 0000000..a4ec48c Binary files /dev/null and b/users/src/south/.hg/store/data/south/introspection__plugins/django__taggit.py.i differ diff --git a/users/src/south/.hg/store/data/south/introspection__plugins/django__timezones.py.i b/users/src/south/.hg/store/data/south/introspection__plugins/django__timezones.py.i new file mode 100644 index 0000000..fa149c4 Binary files /dev/null and b/users/src/south/.hg/store/data/south/introspection__plugins/django__timezones.py.i differ diff --git a/users/src/south/.hg/store/data/south/introspection__plugins/geodjango.py.i b/users/src/south/.hg/store/data/south/introspection__plugins/geodjango.py.i new file mode 100644 index 0000000..f9e5cc3 Binary files /dev/null and b/users/src/south/.hg/store/data/south/introspection__plugins/geodjango.py.i differ diff --git a/users/src/south/.hg/store/data/south/logger.py.i b/users/src/south/.hg/store/data/south/logger.py.i new file mode 100644 index 0000000..fb293c7 Binary files /dev/null and b/users/src/south/.hg/store/data/south/logger.py.i differ diff --git 
a/users/src/south/.hg/store/data/south/management/____init____.py.i b/users/src/south/.hg/store/data/south/management/____init____.py.i new file mode 100644 index 0000000..f376b26 Binary files /dev/null and b/users/src/south/.hg/store/data/south/management/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/management/commands/____init____.py.i b/users/src/south/.hg/store/data/south/management/commands/____init____.py.i new file mode 100644 index 0000000..f4982bc Binary files /dev/null and b/users/src/south/.hg/store/data/south/management/commands/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/management/commands/convert__to__south.py.i b/users/src/south/.hg/store/data/south/management/commands/convert__to__south.py.i new file mode 100644 index 0000000..b0b377e Binary files /dev/null and b/users/src/south/.hg/store/data/south/management/commands/convert__to__south.py.i differ diff --git a/users/src/south/.hg/store/data/south/management/commands/datamigration.py.i b/users/src/south/.hg/store/data/south/management/commands/datamigration.py.i new file mode 100644 index 0000000..830a165 Binary files /dev/null and b/users/src/south/.hg/store/data/south/management/commands/datamigration.py.i differ diff --git a/users/src/south/.hg/store/data/south/management/commands/graphmigrations.py.i b/users/src/south/.hg/store/data/south/management/commands/graphmigrations.py.i new file mode 100644 index 0000000..bd4a67d Binary files /dev/null and b/users/src/south/.hg/store/data/south/management/commands/graphmigrations.py.i differ diff --git a/users/src/south/.hg/store/data/south/management/commands/migrate.py.i b/users/src/south/.hg/store/data/south/management/commands/migrate.py.i new file mode 100644 index 0000000..fa6e99b Binary files /dev/null and b/users/src/south/.hg/store/data/south/management/commands/migrate.py.i differ diff --git a/users/src/south/.hg/store/data/south/management/commands/migrationcheck.py.i 
b/users/src/south/.hg/store/data/south/management/commands/migrationcheck.py.i new file mode 100644 index 0000000..6d44fdb Binary files /dev/null and b/users/src/south/.hg/store/data/south/management/commands/migrationcheck.py.i differ diff --git a/users/src/south/.hg/store/data/south/management/commands/schemamigration.py.i b/users/src/south/.hg/store/data/south/management/commands/schemamigration.py.i new file mode 100644 index 0000000..d875668 Binary files /dev/null and b/users/src/south/.hg/store/data/south/management/commands/schemamigration.py.i differ diff --git a/users/src/south/.hg/store/data/south/management/commands/startmigration.py.i b/users/src/south/.hg/store/data/south/management/commands/startmigration.py.i new file mode 100644 index 0000000..7d440e8 Binary files /dev/null and b/users/src/south/.hg/store/data/south/management/commands/startmigration.py.i differ diff --git a/users/src/south/.hg/store/data/south/management/commands/syncdb.py.i b/users/src/south/.hg/store/data/south/management/commands/syncdb.py.i new file mode 100644 index 0000000..cee3223 Binary files /dev/null and b/users/src/south/.hg/store/data/south/management/commands/syncdb.py.i differ diff --git a/users/src/south/.hg/store/data/south/management/commands/test.py.i b/users/src/south/.hg/store/data/south/management/commands/test.py.i new file mode 100644 index 0000000..aabf6cd Binary files /dev/null and b/users/src/south/.hg/store/data/south/management/commands/test.py.i differ diff --git a/users/src/south/.hg/store/data/south/management/commands/testserver.py.i b/users/src/south/.hg/store/data/south/management/commands/testserver.py.i new file mode 100644 index 0000000..d716371 Binary files /dev/null and b/users/src/south/.hg/store/data/south/management/commands/testserver.py.i differ diff --git a/users/src/south/.hg/store/data/south/migration.py.i b/users/src/south/.hg/store/data/south/migration.py.i new file mode 100644 index 0000000..7689b4d Binary files /dev/null and 
b/users/src/south/.hg/store/data/south/migration.py.i differ diff --git a/users/src/south/.hg/store/data/south/migration/____init____.py.i b/users/src/south/.hg/store/data/south/migration/____init____.py.i new file mode 100644 index 0000000..5a59ab9 Binary files /dev/null and b/users/src/south/.hg/store/data/south/migration/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/migration/base.py.i b/users/src/south/.hg/store/data/south/migration/base.py.i new file mode 100644 index 0000000..f171da5 Binary files /dev/null and b/users/src/south/.hg/store/data/south/migration/base.py.i differ diff --git a/users/src/south/.hg/store/data/south/migration/migrators.py.i b/users/src/south/.hg/store/data/south/migration/migrators.py.i new file mode 100644 index 0000000..753c1c7 Binary files /dev/null and b/users/src/south/.hg/store/data/south/migration/migrators.py.i differ diff --git a/users/src/south/.hg/store/data/south/migration/utils.py.i b/users/src/south/.hg/store/data/south/migration/utils.py.i new file mode 100644 index 0000000..0ffcef0 Binary files /dev/null and b/users/src/south/.hg/store/data/south/migration/utils.py.i differ diff --git a/users/src/south/.hg/store/data/south/models.py.i b/users/src/south/.hg/store/data/south/models.py.i new file mode 100644 index 0000000..0d4ef55 Binary files /dev/null and b/users/src/south/.hg/store/data/south/models.py.i differ diff --git a/users/src/south/.hg/store/data/south/modelsinspector.py.i b/users/src/south/.hg/store/data/south/modelsinspector.py.i new file mode 100644 index 0000000..b902484 Binary files /dev/null and b/users/src/south/.hg/store/data/south/modelsinspector.py.i differ diff --git a/users/src/south/.hg/store/data/south/modelsparser.py.i b/users/src/south/.hg/store/data/south/modelsparser.py.i new file mode 100644 index 0000000..9b12eba Binary files /dev/null and b/users/src/south/.hg/store/data/south/modelsparser.py.i differ diff --git a/users/src/south/.hg/store/data/south/orm.py.i 
b/users/src/south/.hg/store/data/south/orm.py.i new file mode 100644 index 0000000..5841529 Binary files /dev/null and b/users/src/south/.hg/store/data/south/orm.py.i differ diff --git a/users/src/south/.hg/store/data/south/signals.py.i b/users/src/south/.hg/store/data/south/signals.py.i new file mode 100644 index 0000000..864b8ae Binary files /dev/null and b/users/src/south/.hg/store/data/south/signals.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/____init____.py.i b/users/src/south/.hg/store/data/south/tests/____init____.py.i new file mode 100644 index 0000000..837ae2c Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/autodetection.py.i b/users/src/south/.hg/store/data/south/tests/autodetection.py.i new file mode 100644 index 0000000..482d47a Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/autodetection.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/brokenapp/____init____.py.i b/users/src/south/.hg/store/data/south/tests/brokenapp/____init____.py.i new file mode 100644 index 0000000..edb6a51 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/brokenapp/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/brokenapp/migrations/0001__depends__on__unmigrated.py.i b/users/src/south/.hg/store/data/south/tests/brokenapp/migrations/0001__depends__on__unmigrated.py.i new file mode 100644 index 0000000..b64a767 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/brokenapp/migrations/0001__depends__on__unmigrated.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/brokenapp/migrations/0002__depends__on__unknown.py.i b/users/src/south/.hg/store/data/south/tests/brokenapp/migrations/0002__depends__on__unknown.py.i new file mode 100644 index 0000000..2d47a5d Binary files /dev/null and 
b/users/src/south/.hg/store/data/south/tests/brokenapp/migrations/0002__depends__on__unknown.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/brokenapp/migrations/0003__depends__on__higher.py.i b/users/src/south/.hg/store/data/south/tests/brokenapp/migrations/0003__depends__on__higher.py.i new file mode 100644 index 0000000..dca0dc1 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/brokenapp/migrations/0003__depends__on__higher.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/brokenapp/migrations/0004__higher.py.i b/users/src/south/.hg/store/data/south/tests/brokenapp/migrations/0004__higher.py.i new file mode 100644 index 0000000..e1a5196 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/brokenapp/migrations/0004__higher.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/brokenapp/migrations/____init____.py.i b/users/src/south/.hg/store/data/south/tests/brokenapp/migrations/____init____.py.i new file mode 100644 index 0000000..edb6a51 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/brokenapp/migrations/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/brokenapp/models.py.i b/users/src/south/.hg/store/data/south/tests/brokenapp/models.py.i new file mode 100644 index 0000000..2675d50 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/brokenapp/models.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/circular__a/____init____.py.i b/users/src/south/.hg/store/data/south/tests/circular__a/____init____.py.i new file mode 100644 index 0000000..892913f Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/circular__a/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/circular__a/migrations/0001__first.py.i b/users/src/south/.hg/store/data/south/tests/circular__a/migrations/0001__first.py.i new file mode 100644 index 0000000..ebf6f43 
Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/circular__a/migrations/0001__first.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/circular__a/migrations/____init____.py.i b/users/src/south/.hg/store/data/south/tests/circular__a/migrations/____init____.py.i new file mode 100644 index 0000000..892913f Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/circular__a/migrations/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/circular__a/models.py.i b/users/src/south/.hg/store/data/south/tests/circular__a/models.py.i new file mode 100644 index 0000000..892913f Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/circular__a/models.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/circular__b/____init____.py.i b/users/src/south/.hg/store/data/south/tests/circular__b/____init____.py.i new file mode 100644 index 0000000..892913f Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/circular__b/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/circular__b/migrations/0001__first.py.i b/users/src/south/.hg/store/data/south/tests/circular__b/migrations/0001__first.py.i new file mode 100644 index 0000000..6452eb8 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/circular__b/migrations/0001__first.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/circular__b/migrations/____init____.py.i b/users/src/south/.hg/store/data/south/tests/circular__b/migrations/____init____.py.i new file mode 100644 index 0000000..892913f Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/circular__b/migrations/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/circular__b/models.py.i b/users/src/south/.hg/store/data/south/tests/circular__b/models.py.i new file mode 100644 index 0000000..892913f Binary files /dev/null and 
b/users/src/south/.hg/store/data/south/tests/circular__b/models.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/db.py.i b/users/src/south/.hg/store/data/south/tests/db.py.i new file mode 100644 index 0000000..d221a2c Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/db.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/db__mysql.py.i b/users/src/south/.hg/store/data/south/tests/db__mysql.py.i new file mode 100644 index 0000000..9f731be Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/db__mysql.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__a/____init____.py.i b/users/src/south/.hg/store/data/south/tests/deps__a/____init____.py.i new file mode 100644 index 0000000..5ba6574 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__a/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__a/migrations/0001__a.py.i b/users/src/south/.hg/store/data/south/tests/deps__a/migrations/0001__a.py.i new file mode 100644 index 0000000..52d1a88 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__a/migrations/0001__a.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__a/migrations/0002__a.py.i b/users/src/south/.hg/store/data/south/tests/deps__a/migrations/0002__a.py.i new file mode 100644 index 0000000..52d1a88 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__a/migrations/0002__a.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__a/migrations/0003__a.py.i b/users/src/south/.hg/store/data/south/tests/deps__a/migrations/0003__a.py.i new file mode 100644 index 0000000..52d1a88 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__a/migrations/0003__a.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__a/migrations/0004__a.py.i 
b/users/src/south/.hg/store/data/south/tests/deps__a/migrations/0004__a.py.i new file mode 100644 index 0000000..6c7e589 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__a/migrations/0004__a.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__a/migrations/0005__a.py.i b/users/src/south/.hg/store/data/south/tests/deps__a/migrations/0005__a.py.i new file mode 100644 index 0000000..52d1a88 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__a/migrations/0005__a.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__a/migrations/____init____.py.i b/users/src/south/.hg/store/data/south/tests/deps__a/migrations/____init____.py.i new file mode 100644 index 0000000..5ba6574 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__a/migrations/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__a/models.py.i b/users/src/south/.hg/store/data/south/tests/deps__a/models.py.i new file mode 100644 index 0000000..5ba6574 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__a/models.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__b/____init____.py.i b/users/src/south/.hg/store/data/south/tests/deps__b/____init____.py.i new file mode 100644 index 0000000..5ba6574 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__b/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__b/migrations/0001__b.py.i b/users/src/south/.hg/store/data/south/tests/deps__b/migrations/0001__b.py.i new file mode 100644 index 0000000..52d1a88 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__b/migrations/0001__b.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__b/migrations/0002__b.py.i b/users/src/south/.hg/store/data/south/tests/deps__b/migrations/0002__b.py.i new file mode 100644 index 
0000000..18b3497 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__b/migrations/0002__b.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__b/migrations/0003__b.py.i b/users/src/south/.hg/store/data/south/tests/deps__b/migrations/0003__b.py.i new file mode 100644 index 0000000..c08bf55 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__b/migrations/0003__b.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__b/migrations/0004__b.py.i b/users/src/south/.hg/store/data/south/tests/deps__b/migrations/0004__b.py.i new file mode 100644 index 0000000..52d1a88 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__b/migrations/0004__b.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__b/migrations/0005__b.py.i b/users/src/south/.hg/store/data/south/tests/deps__b/migrations/0005__b.py.i new file mode 100644 index 0000000..52d1a88 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__b/migrations/0005__b.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__b/migrations/____init____.py.i b/users/src/south/.hg/store/data/south/tests/deps__b/migrations/____init____.py.i new file mode 100644 index 0000000..5ba6574 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__b/migrations/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__b/models.py.i b/users/src/south/.hg/store/data/south/tests/deps__b/models.py.i new file mode 100644 index 0000000..5ba6574 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__b/models.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__c/____init____.py.i b/users/src/south/.hg/store/data/south/tests/deps__c/____init____.py.i new file mode 100644 index 0000000..5ba6574 Binary files /dev/null and 
b/users/src/south/.hg/store/data/south/tests/deps__c/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__c/migrations/0001__c.py.i b/users/src/south/.hg/store/data/south/tests/deps__c/migrations/0001__c.py.i new file mode 100644 index 0000000..52d1a88 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__c/migrations/0001__c.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__c/migrations/0002__c.py.i b/users/src/south/.hg/store/data/south/tests/deps__c/migrations/0002__c.py.i new file mode 100644 index 0000000..52d1a88 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__c/migrations/0002__c.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__c/migrations/0003__c.py.i b/users/src/south/.hg/store/data/south/tests/deps__c/migrations/0003__c.py.i new file mode 100644 index 0000000..52d1a88 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__c/migrations/0003__c.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__c/migrations/0004__c.py.i b/users/src/south/.hg/store/data/south/tests/deps__c/migrations/0004__c.py.i new file mode 100644 index 0000000..52d1a88 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__c/migrations/0004__c.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__c/migrations/0005__c.py.i b/users/src/south/.hg/store/data/south/tests/deps__c/migrations/0005__c.py.i new file mode 100644 index 0000000..18b3497 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__c/migrations/0005__c.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__c/migrations/____init____.py.i b/users/src/south/.hg/store/data/south/tests/deps__c/migrations/____init____.py.i new file mode 100644 index 0000000..5ba6574 Binary files /dev/null and 
b/users/src/south/.hg/store/data/south/tests/deps__c/migrations/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/deps__c/models.py.i b/users/src/south/.hg/store/data/south/tests/deps__c/models.py.i new file mode 100644 index 0000000..5ba6574 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/deps__c/models.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/emptyapp/____init____.py.i b/users/src/south/.hg/store/data/south/tests/emptyapp/____init____.py.i new file mode 100644 index 0000000..f8c1e58 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/emptyapp/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/emptyapp/migrations/____init____.py.i b/users/src/south/.hg/store/data/south/tests/emptyapp/migrations/____init____.py.i new file mode 100644 index 0000000..f8c1e58 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/emptyapp/migrations/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/emptyapp/models.py.i b/users/src/south/.hg/store/data/south/tests/emptyapp/models.py.i new file mode 100644 index 0000000..f8c1e58 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/emptyapp/models.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/fakeapp/____init____.py.i b/users/src/south/.hg/store/data/south/tests/fakeapp/____init____.py.i new file mode 100644 index 0000000..11278bf Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/fakeapp/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/fakeapp/migrations/0001__spam.py.i b/users/src/south/.hg/store/data/south/tests/fakeapp/migrations/0001__spam.py.i new file mode 100644 index 0000000..7e4390f Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/fakeapp/migrations/0001__spam.py.i differ diff --git 
a/users/src/south/.hg/store/data/south/tests/fakeapp/migrations/0002__eggs.py.i b/users/src/south/.hg/store/data/south/tests/fakeapp/migrations/0002__eggs.py.i new file mode 100644 index 0000000..dcaac0b Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/fakeapp/migrations/0002__eggs.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/fakeapp/migrations/0003__alter__spam.py.i b/users/src/south/.hg/store/data/south/tests/fakeapp/migrations/0003__alter__spam.py.i new file mode 100644 index 0000000..4620581 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/fakeapp/migrations/0003__alter__spam.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/fakeapp/migrations/____init____.py.i b/users/src/south/.hg/store/data/south/tests/fakeapp/migrations/____init____.py.i new file mode 100644 index 0000000..dca13fe Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/fakeapp/migrations/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/fakeapp/models.py.i b/users/src/south/.hg/store/data/south/tests/fakeapp/models.py.i new file mode 100644 index 0000000..b7fa67c Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/fakeapp/models.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/freezer.py.i b/users/src/south/.hg/store/data/south/tests/freezer.py.i new file mode 100644 index 0000000..fa2472b Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/freezer.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/inspector.py.i b/users/src/south/.hg/store/data/south/tests/inspector.py.i new file mode 100644 index 0000000..099f5b0 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/inspector.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/logger.py.i b/users/src/south/.hg/store/data/south/tests/logger.py.i new file mode 100644 index 0000000..f6e8ea9 Binary 
files /dev/null and b/users/src/south/.hg/store/data/south/tests/logger.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/logic.py.i b/users/src/south/.hg/store/data/south/tests/logic.py.i new file mode 100644 index 0000000..945d8da Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/logic.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/modelsparser.py.i b/users/src/south/.hg/store/data/south/tests/modelsparser.py.i new file mode 100644 index 0000000..ff280d1 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/modelsparser.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/otherfakeapp/____init____.py.i b/users/src/south/.hg/store/data/south/tests/otherfakeapp/____init____.py.i new file mode 100644 index 0000000..081870a Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/otherfakeapp/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/otherfakeapp/migrations/0001__first.py.i b/users/src/south/.hg/store/data/south/tests/otherfakeapp/migrations/0001__first.py.i new file mode 100644 index 0000000..16d667f Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/otherfakeapp/migrations/0001__first.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/otherfakeapp/migrations/0002__second.py.i b/users/src/south/.hg/store/data/south/tests/otherfakeapp/migrations/0002__second.py.i new file mode 100644 index 0000000..f32a4f7 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/otherfakeapp/migrations/0002__second.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/otherfakeapp/migrations/0003__third.py.i b/users/src/south/.hg/store/data/south/tests/otherfakeapp/migrations/0003__third.py.i new file mode 100644 index 0000000..d92ad5d Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/otherfakeapp/migrations/0003__third.py.i differ diff --git 
a/users/src/south/.hg/store/data/south/tests/otherfakeapp/migrations/____init____.py.i b/users/src/south/.hg/store/data/south/tests/otherfakeapp/migrations/____init____.py.i new file mode 100644 index 0000000..081870a Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/otherfakeapp/migrations/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/otherfakeapp/models.py.i b/users/src/south/.hg/store/data/south/tests/otherfakeapp/models.py.i new file mode 100644 index 0000000..1d07e2c Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/otherfakeapp/models.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/rebasedapp/____init____.py.i b/users/src/south/.hg/store/data/south/tests/rebasedapp/____init____.py.i new file mode 100644 index 0000000..e97097a Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/rebasedapp/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/rebasedapp/migrations/0001__bottom.py.i b/users/src/south/.hg/store/data/south/tests/rebasedapp/migrations/0001__bottom.py.i new file mode 100644 index 0000000..8713433 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/rebasedapp/migrations/0001__bottom.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/rebasedapp/migrations/0002__mid.py.i b/users/src/south/.hg/store/data/south/tests/rebasedapp/migrations/0002__mid.py.i new file mode 100644 index 0000000..8713433 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/rebasedapp/migrations/0002__mid.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/rebasedapp/migrations/0003__rebase.py.i b/users/src/south/.hg/store/data/south/tests/rebasedapp/migrations/0003__rebase.py.i new file mode 100644 index 0000000..b807ddf Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/rebasedapp/migrations/0003__rebase.py.i differ diff --git 
a/users/src/south/.hg/store/data/south/tests/rebasedapp/migrations/0004__top.py.i b/users/src/south/.hg/store/data/south/tests/rebasedapp/migrations/0004__top.py.i new file mode 100644 index 0000000..8713433 Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/rebasedapp/migrations/0004__top.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/rebasedapp/migrations/____init____.py.i b/users/src/south/.hg/store/data/south/tests/rebasedapp/migrations/____init____.py.i new file mode 100644 index 0000000..e97097a Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/rebasedapp/migrations/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/south/tests/rebasedapp/models.py.i b/users/src/south/.hg/store/data/south/tests/rebasedapp/models.py.i new file mode 100644 index 0000000..e97097a Binary files /dev/null and b/users/src/south/.hg/store/data/south/tests/rebasedapp/models.py.i differ diff --git a/users/src/south/.hg/store/data/south/utils.py.i b/users/src/south/.hg/store/data/south/utils.py.i new file mode 100644 index 0000000..7fc9a68 Binary files /dev/null and b/users/src/south/.hg/store/data/south/utils.py.i differ diff --git a/users/src/south/.hg/store/data/south/v2.py.i b/users/src/south/.hg/store/data/south/v2.py.i new file mode 100644 index 0000000..3c23848 Binary files /dev/null and b/users/src/south/.hg/store/data/south/v2.py.i differ diff --git a/users/src/south/.hg/store/data/tests/____init____.py.i b/users/src/south/.hg/store/data/tests/____init____.py.i new file mode 100644 index 0000000..771c0d2 Binary files /dev/null and b/users/src/south/.hg/store/data/tests/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/tests/db.py.i b/users/src/south/.hg/store/data/tests/db.py.i new file mode 100644 index 0000000..b8cf152 Binary files /dev/null and b/users/src/south/.hg/store/data/tests/db.py.i differ diff --git a/users/src/south/.hg/store/data/tests/fakeapp/____init____.py.i 
b/users/src/south/.hg/store/data/tests/fakeapp/____init____.py.i new file mode 100644 index 0000000..5079c2d Binary files /dev/null and b/users/src/south/.hg/store/data/tests/fakeapp/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/tests/fakeapp/migrations/0001__spam.py.i b/users/src/south/.hg/store/data/tests/fakeapp/migrations/0001__spam.py.i new file mode 100644 index 0000000..8255068 Binary files /dev/null and b/users/src/south/.hg/store/data/tests/fakeapp/migrations/0001__spam.py.i differ diff --git a/users/src/south/.hg/store/data/tests/fakeapp/migrations/0002__eggs.py.i b/users/src/south/.hg/store/data/tests/fakeapp/migrations/0002__eggs.py.i new file mode 100644 index 0000000..d4ddd01 Binary files /dev/null and b/users/src/south/.hg/store/data/tests/fakeapp/migrations/0002__eggs.py.i differ diff --git a/users/src/south/.hg/store/data/tests/fakeapp/migrations/0003__alter__spam.py.i b/users/src/south/.hg/store/data/tests/fakeapp/migrations/0003__alter__spam.py.i new file mode 100644 index 0000000..46d9caa Binary files /dev/null and b/users/src/south/.hg/store/data/tests/fakeapp/migrations/0003__alter__spam.py.i differ diff --git a/users/src/south/.hg/store/data/tests/fakeapp/migrations/____init____.py.i b/users/src/south/.hg/store/data/tests/fakeapp/migrations/____init____.py.i new file mode 100644 index 0000000..5079c2d Binary files /dev/null and b/users/src/south/.hg/store/data/tests/fakeapp/migrations/____init____.py.i differ diff --git a/users/src/south/.hg/store/data/tests/fakeapp/models.py.i b/users/src/south/.hg/store/data/tests/fakeapp/models.py.i new file mode 100644 index 0000000..da1f9ac Binary files /dev/null and b/users/src/south/.hg/store/data/tests/fakeapp/models.py.i differ diff --git a/users/src/south/.hg/store/data/tests/logic.py.i b/users/src/south/.hg/store/data/tests/logic.py.i new file mode 100644 index 0000000..af85d3f Binary files /dev/null and b/users/src/south/.hg/store/data/tests/logic.py.i differ diff --git 
a/users/src/south/.hg/store/data/tests/modelsparser.py.i b/users/src/south/.hg/store/data/tests/modelsparser.py.i new file mode 100644 index 0000000..81cd494 Binary files /dev/null and b/users/src/south/.hg/store/data/tests/modelsparser.py.i differ diff --git a/users/src/south/.hg/store/data/~2ehgignore.i b/users/src/south/.hg/store/data/~2ehgignore.i new file mode 100644 index 0000000..9325011 Binary files /dev/null and b/users/src/south/.hg/store/data/~2ehgignore.i differ diff --git a/users/src/south/.hg/store/data/~2ehgtags.i b/users/src/south/.hg/store/data/~2ehgtags.i new file mode 100644 index 0000000..a8507ab Binary files /dev/null and b/users/src/south/.hg/store/data/~2ehgtags.i differ diff --git a/users/src/south/.hg/store/fncache b/users/src/south/.hg/store/fncache new file mode 100644 index 0000000..006cf33 --- /dev/null +++ b/users/src/south/.hg/store/fncache @@ -0,0 +1,197 @@ +data/south/tests/rebasedapp/migrations/0002_mid.py.i +data/south/__init__.py.i +data/management/commands/__init__.py.i +data/modelsparser.py.i +data/tests/db.py.i +data/south/tests/deps_b/migrations/0003_b.py.i +data/south/management/commands/migrationcheck.py.i +data/docs/_theme/nature/static/nature.css_t.i +data/south/introspection_plugins/django_objectpermissions.py.i +data/south/tests/rebasedapp/migrations/0001_bottom.py.i +data/management/commands/test.py.i +data/south/tests/deps_a/migrations/0003_a.py.i +data/south/migration/base.py.i +data/south/creator/freezer.py.i +data/south/tests/deps_a/migrations/0001_a.py.i +data/docs/about.rst.i +data/south/introspection_plugins/django_audit_log.py.i +data/docs/unittests.rst.i +data/south/tests/freezer.py.i +data/south/migration/utils.py.i +data/docs/_static/empty.i +data/south/migration.py.i +data/south/utils.py.i +data/docs/db.txt.i +data/clean.sh.i +data/south/tests/fakeapp/migrations/0003_alter_spam.py.i +data/south/tests/deps_b/models.py.i +data/docs/CHANGELOG.i +data/south/exceptions.py.i +data/docs/index.rst.i 
+data/tests/fakeapp/models.py.i +data/LICENSE.i +data/tests/fakeapp/__init__.py.i +data/south/tests/db.py.i +data/south/modelsparser.py.i +data/south/db/postgresql_psycopg2.py.i +data/south/db/sqlite3.py.i +data/.hgtags.i +data/south/creator/changes.py.i +data/south/tests/emptyapp/__init__.py.i +data/south/tests/deps_c/migrations/0004_c.py.i +data/south/tests/circular_b/__init__.py.i +data/docs/releasenotes/0.7.1.rst.i +data/south/tests/emptyapp/models.py.i +data/south/management/commands/testserver.py.i +data/south/tests/circular_b/migrations/__init__.py.i +data/south/tests/deps_a/models.py.i +data/south/tests/deps_a/migrations/__init__.py.i +data/docs/_templates/empty.i +data/docs/signals.rst.i +data/docs/README.i +data/db/__init__.py.i +data/south/tests/brokenapp/migrations/0004_higher.py.i +data/docs/convertinganapp.rst.i +data/south/tests/rebasedapp/migrations/0003_rebase.py.i +data/south/tests/brokenapp/__init__.py.i +data/south/tests/circular_a/migrations/__init__.py.i +data/tests/fakeapp/migrations/0003_alter_spam.py.i +data/south/modelsinspector.py.i +data/db/sql_server/pyodbc.py.i +data/south/tests/deps_b/migrations/__init__.py.i +data/docs/conf.py.i +data/docs/extendingintrospection.rst.i +data/__init__.py.i +data/tests/modelsparser.py.i +data/install/README.i +data/south/creator/actions.py.i +data/south/tests/fakeapp/migrations/0001_spam.py.i +data/south/tests/inspector.py.i +data/management/commands/migrate.py.i +data/tests/__init__.py.i +data/docs/commands.rst.i +data/south/db/sql_server/pyodbc.py.i +data/south/tests/deps_b/migrations/0004_b.py.i +data/docs/tutorial/part1.rst.i +data/south/tests/deps_b/migrations/0001_b.py.i +data/docs/generics.rst.i +data/docs/tutorial/part5.rst.i +data/management/commands/syncdb.py.i +data/south/tests/brokenapp/migrations/0003_depends_on_higher.py.i +data/south/management/commands/startmigration.py.i +data/docs/installation.rst.i +data/docs/dependencies.rst.i +data/south/tests/circular_a/migrations/0001_first.py.i 
+data/tests/fakeapp/migrations/0002_eggs.py.i +data/south/db/mysql.py.i +data/db/postgresql_psycopg2.py.i +data/docs/tutorial/index.rst.i +data/south/tests/brokenapp/migrations/0002_depends_on_unknown.py.i +data/docs/releasenotes/0.7.2.rst.i +data/docs/migrationstructure.rst.i +data/south/tests/__init__.py.i +data/south/tests/deps_c/migrations/0005_c.py.i +data/south/tests/deps_c/__init__.py.i +data/south/introspection_plugins/__init__.py.i +data/south/tests/otherfakeapp/migrations/0002_second.py.i +data/south/tests/rebasedapp/__init__.py.i +data/south/v2.py.i +data/south/tests/deps_c/models.py.i +data/south/tests/rebasedapp/migrations/__init__.py.i +data/south/tests/circular_b/migrations/0001_first.py.i +data/south/management/commands/schemamigration.py.i +data/docs/releasenotes/0.7.3.rst.i +data/docs/tutorial/part4.rst.i +data/setup.py.i +data/south/management/commands/syncdb.py.i +data/south/creator/__init__.py.i +data/docs/fixtures.rst.i +data/management/commands/startmigration.py.i +data/tests/logic.py.i +data/docs/customfields.rst.i +data/south/tests/deps_c/migrations/0003_c.py.i +data/docs/ormfreezing.rst.i +data/south/migration/migrators.py.i +data/south/tests/brokenapp/models.py.i +data/south/tests/rebasedapp/models.py.i +data/docs/whataremigrations.rst.i +data/docs/tutorial/part2.rst.i +data/south/orm.py.i +data/south/tests/fakeapp/migrations/0002_eggs.py.i +data/south/tests/otherfakeapp/migrations/0001_first.py.i +data/docs/_theme/nature/static/pygments.css.i +data/south/tests/rebasedapp/migrations/0004_top.py.i +data/tests/fakeapp/migrations/__init__.py.i +data/south/tests/deps_c/migrations/0002_c.py.i +data/south/tests/brokenapp/migrations/__init__.py.i +data/south/tests/brokenapp/migrations/0001_depends_on_unmigrated.py.i +data/south/tests/deps_a/migrations/0002_a.py.i +data/south/hacks/__init__.py.i +data/db/sqlite3.py.i +data/south/signals.py.i +data/south/tests/circular_a/models.py.i +data/docs/releasenotes/index.rst.i 
+data/south/management/commands/convert_to_south.py.i +data/south/tests/otherfakeapp/__init__.py.i +data/south/tests/circular_b/models.py.i +data/south/tests/deps_b/__init__.py.i +data/south/tests/deps_b/migrations/0005_b.py.i +data/south/db/sql_server/__init__.py.i +data/south/tests/otherfakeapp/migrations/__init__.py.i +data/south/tests/deps_a/migrations/0005_a.py.i +data/south/introspection_plugins/django_tagging.py.i +data/south/tests/modelsparser.py.i +data/south/tests/circular_a/__init__.py.i +data/south/tests/db_mysql.py.i +data/management/commands/startmigration2.py.i +data/db/mysql.py.i +data/docs/autodetector.rst.i +data/south/management/commands/test.py.i +data/south/tests/fakeapp/__init__.py.i +data/south/logger.py.i +data/docs/settings.rst.i +data/db/generic.py.i +data/docs/Makefile.i +data/docs/CONTRIBUTING.i +data/south/tests/deps_a/migrations/0004_a.py.i +data/south/tests/logger.py.i +data/README.i +data/models.py.i +data/south/tests/emptyapp/migrations/__init__.py.i +data/south/db/oracle.py.i +data/south/management/commands/migrate.py.i +data/tests/fakeapp/migrations/0001_spam.py.i +data/docs/databaseapi.rst.i +data/south/tests/otherfakeapp/models.py.i +data/south/tests/fakeapp/models.py.i +data/south/tests/logic.py.i +data/south/tests/deps_b/migrations/0002_b.py.i +data/south/db/__init__.py.i +data/south/introspection_plugins/django_timezones.py.i +data/south/introspection_plugins/django_taggit.py.i +data/south/models.py.i +data/docs/releasenotes/0.7.rst.i +data/south/tests/deps_c/migrations/__init__.py.i +data/south/migration/__init__.py.i +data/management/__init__.py.i +data/south/tests/otherfakeapp/migrations/0003_third.py.i +data/docs/LICENSE.i +data/south/tests/deps_a/__init__.py.i +data/orm.py.i +data/install/setup.py.i +data/south/introspection_plugins/geodjango.py.i +data/south/introspection_plugins/annoying_autoonetoone.py.i +data/db/sql_server/__init__.py.i +data/docs/_theme/nature/theme.conf.i +data/south/hacks/django_1_0.py.i 
+data/south/db/generic.py.i +data/.hgignore.i +data/migration.py.i +data/south/management/commands/datamigration.py.i +data/south/management/commands/graphmigrations.py.i +data/south/management/commands/__init__.py.i +data/south/tests/autodetection.py.i +data/south/management/__init__.py.i +data/south/tests/deps_c/migrations/0001_c.py.i +data/docs/tutorial/part3.rst.i +data/south/tests/fakeapp/migrations/__init__.py.i diff --git a/users/src/south/.hg/store/undo b/users/src/south/.hg/store/undo new file mode 100644 index 0000000..63c8666 Binary files /dev/null and b/users/src/south/.hg/store/undo differ diff --git a/users/src/south/.hg/undo.bookmarks b/users/src/south/.hg/undo.bookmarks new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/.hg/undo.branch b/users/src/south/.hg/undo.branch new file mode 100644 index 0000000..331d858 --- /dev/null +++ b/users/src/south/.hg/undo.branch @@ -0,0 +1 @@ +default \ No newline at end of file diff --git a/users/src/south/.hg/undo.desc b/users/src/south/.hg/undo.desc new file mode 100644 index 0000000..221d76b --- /dev/null +++ b/users/src/south/.hg/undo.desc @@ -0,0 +1,3 @@ +0 +pull +http://bitbucket.org/andrewgodwin/south diff --git a/users/src/south/.hg/undo.dirstate b/users/src/south/.hg/undo.dirstate new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/.hgignore b/users/src/south/.hgignore new file mode 100644 index 0000000..e758659 --- /dev/null +++ b/users/src/south/.hgignore @@ -0,0 +1,16 @@ +# Globbing is cool. 
+syntax: glob +# No python bytecode +*.pyc +# Ignore dist builds and egg info +dist/* +build +South.egg-info/* +# Ignore test log +south/tests/test.log +# Vim swap files +*.swp + +# Also ignore Sphinx files +syntax: regexp +^docs/\_build/ diff --git a/users/src/south/.hgtags b/users/src/south/.hgtags new file mode 100644 index 0000000..e0d1def --- /dev/null +++ b/users/src/south/.hgtags @@ -0,0 +1,72 @@ +2d3621db9559d0a7225448e3e1a25db5180bae15 0.4 +401d26c06d55e61987a99896dd295d589f4d987e 0.5 +1b7449c6d92c4a58c79afc24e4915ee55f205b0c 0.2 +7b837b0502aa246c17bccba54e2e843463e24a22 0.3 +0aa5b5fb6e742e78afc0e59147ad0a662f4bd9e1 stableish +0aa5b5fb6e742e78afc0e59147ad0a662f4bd9e1 stableish +b40737ec465d0883eab987e144c890fd1b65a68e stableish +b40737ec465d0883eab987e144c890fd1b65a68e stableish +8359559b3c6c41f12e01149de564b383b36899db stableish +8359559b3c6c41f12e01149de564b383b36899db stableish +246282228af45725657ac431cf1fdc1b43337345 stableish +246282228af45725657ac431cf1fdc1b43337345 stableish +54644bf72703ec85c0b58428e523051acc78d395 stableish +54644bf72703ec85c0b58428e523051acc78d395 stableish +dd5b53a69684a661cc6470a2c7a6fba65e64b169 stableish +dd5b53a69684a661cc6470a2c7a6fba65e64b169 stableish +791ae6ca917ffae932876774340b056567ceb40c stableish +791ae6ca917ffae932876774340b056567ceb40c stableish +11e74884e6c31fc25642fa3f2bd644df930019b4 stableish +11e74884e6c31fc25642fa3f2bd644df930019b4 stableish +bb2fb9c5991c13d14225424a8d63a455b04926eb stableish +bb2fb9c5991c13d14225424a8d63a455b04926eb stableish +92e1d7754d192043c6eb3c0f7538b7345b26f505 stableish +92e1d7754d192043c6eb3c0f7538b7345b26f505 stableish +dbbb515d88cbef75d92864f2ada0ba27b9c487a6 stableish +dbbb515d88cbef75d92864f2ada0ba27b9c487a6 stableish +48f193b5cfcc68e663bee6a320dac71758e6cf39 stableish +48f193b5cfcc68e663bee6a320dac71758e6cf39 0.6-rc1 +48f193b5cfcc68e663bee6a320dac71758e6cf39 stableish +0333951892018fd1ecc23d1147a5332b2574ca60 stableish +48f193b5cfcc68e663bee6a320dac71758e6cf39 0.6-rc1 
+0333951892018fd1ecc23d1147a5332b2574ca60 0.6-rc1 +8e48729e0eef357f218f194526967593ccfe2117 0.6 +0333951892018fd1ecc23d1147a5332b2574ca60 stableish +8e48729e0eef357f218f194526967593ccfe2117 stableish +8e48729e0eef357f218f194526967593ccfe2117 stableish +2e88bc1e247dee4bcea9455c1d69001375aabe11 stableish +68d43989205be40d726c847b24b8e21ca7a0c654 0.6.1 +2e88bc1e247dee4bcea9455c1d69001375aabe11 stableish +68d43989205be40d726c847b24b8e21ca7a0c654 stableish +68d43989205be40d726c847b24b8e21ca7a0c654 stableish +0ea73a5bb1f67b2c2718c7832065551085dec97e stableish +35f3f8efa71821515027276919ccd38e1e5f473c migration-refactor +0ea73a5bb1f67b2c2718c7832065551085dec97e stableish +2d053731761e7639f7be11fbdefc8d8da2c1bae2 stableish +751ee7d091f942ca175c870dbbf41f2b2e7f1459 stableish +751ee7d091f942ca175c870dbbf41f2b2e7f1459 stableish +95eb414d55616a910edbb299274d3ac4d5d85c3d stableish +95eb414d55616a910edbb299274d3ac4d5d85c3d 0.6.2 +95eb414d55616a910edbb299274d3ac4d5d85c3d stableish +7dc7f1ee13e779b509505c4d46b50f40512ca358 stableish +95eb414d55616a910edbb299274d3ac4d5d85c3d 0.6.2 +7dc7f1ee13e779b509505c4d46b50f40512ca358 0.6.2 +7dc7f1ee13e779b509505c4d46b50f40512ca358 stableish +63b8a8c528cdb747691cdb1dbf673045131b670c stableish +63b8a8c528cdb747691cdb1dbf673045131b670c stableish +6e99ee799d3fb3ce269b8ed8e84d8b10fc56905e stableish +4cea19eb55d960dcbbd95adaec2fb986b6ed24d8 0.7-rc1 +6e99ee799d3fb3ce269b8ed8e84d8b10fc56905e stableish +679d808d1208c13df1f19dd4593086fe961e3f9b stableish +679d808d1208c13df1f19dd4593086fe961e3f9b stableish +69b4986003d642cf374778ea8db85be7bd7c4950 stableish +69b4986003d642cf374778ea8db85be7bd7c4950 0.7 +c395caf4b3b7f0537ed99ed9c09754f35fc52d97 0.7.1 +69b4986003d642cf374778ea8db85be7bd7c4950 stableish +f9e1f1c0d6753bc581ea153892fd7da5eb4eef9b stableish +f9e1f1c0d6753bc581ea153892fd7da5eb4eef9b stableish +c1a9f39849fabd49ac5fc31cf51c00f3805b42ac stableish +7e4cbf82243f359651c5c14dca85cb331a282e3f 0.7.2 +c1a9f39849fabd49ac5fc31cf51c00f3805b42ac stableish 
+7e4cbf82243f359651c5c14dca85cb331a282e3f stableish +b35fdff583d19e534a648cb9a5fd03e866b6a475 0.7.3 diff --git a/users/src/south/LICENSE b/users/src/south/LICENSE new file mode 100644 index 0000000..1914f85 --- /dev/null +++ b/users/src/south/LICENSE @@ -0,0 +1,55 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
+ +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + + 1. You must give any other recipients of the Work or Derivative Works a copy of this License; and + + 2. You must cause any modified files to carry prominent notices stating that You changed the files; and + + 3. You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + + 4. 
If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. + +You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. 
However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS diff --git a/users/src/south/README b/users/src/south/README new file mode 100644 index 0000000..e96a165 --- /dev/null +++ b/users/src/south/README @@ -0,0 +1,10 @@ +This is South, a Django application to provide migrations in a sane way. + +By sane, we mean that the status of every migration is tracked individually, +rather than just the number of the top migration reached; this means South +can detect when you have an unapplied migration that's sitting in the middle +of a whole load of applied ones, and will let you apply it straight off, +or let you roll back to it, and apply from there forward. + +Documentation on South is currently available on our project site; +you can find it at http://south.aeracode.org/docs/ diff --git a/users/src/south/South.egg-info/PKG-INFO b/users/src/south/South.egg-info/PKG-INFO new file mode 100644 index 0000000..f83b147 --- /dev/null +++ b/users/src/south/South.egg-info/PKG-INFO @@ -0,0 +1,19 @@ +Metadata-Version: 1.0 +Name: South +Version: 0.7.3 +Summary: South: Migrations for Django +Home-page: http://south.aeracode.org/ +Author: Andrew Godwin & Andy McCurdy +Author-email: south@aeracode.org +License: UNKNOWN +Download-URL: http://south.aeracode.org/wiki/Download +Description: South is an intelligent database migrations library for the Django web framework. It is database-independent and DVCS-friendly, as well as a whole host of other features. 
+Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Framework :: Django +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: System Administrators +Classifier: Intended Audience :: System Administrators +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Topic :: Software Development diff --git a/users/src/south/South.egg-info/SOURCES.txt b/users/src/south/South.egg-info/SOURCES.txt new file mode 100644 index 0000000..caa01a5 --- /dev/null +++ b/users/src/south/South.egg-info/SOURCES.txt @@ -0,0 +1,114 @@ +README +South.egg-info/PKG-INFO +South.egg-info/SOURCES.txt +South.egg-info/dependency_links.txt +South.egg-info/top_level.txt +south/__init__.py +south/exceptions.py +south/logger.py +south/models.py +south/modelsinspector.py +south/orm.py +south/signals.py +south/utils.py +south/v2.py +south/creator/__init__.py +south/creator/actions.py +south/creator/changes.py +south/creator/freezer.py +south/db/__init__.py +south/db/generic.py +south/db/mysql.py +south/db/oracle.py +south/db/postgresql_psycopg2.py +south/db/sqlite3.py +south/db/sql_server/__init__.py +south/db/sql_server/pyodbc.py +south/hacks/__init__.py +south/hacks/django_1_0.py +south/introspection_plugins/__init__.py +south/introspection_plugins/annoying_autoonetoone.py +south/introspection_plugins/django_audit_log.py +south/introspection_plugins/django_objectpermissions.py +south/introspection_plugins/django_tagging.py +south/introspection_plugins/django_taggit.py +south/introspection_plugins/django_timezones.py +south/introspection_plugins/geodjango.py +south/management/__init__.py +south/management/commands/__init__.py +south/management/commands/convert_to_south.py +south/management/commands/datamigration.py +south/management/commands/graphmigrations.py +south/management/commands/migrate.py +south/management/commands/migrationcheck.py 
+south/management/commands/schemamigration.py +south/management/commands/startmigration.py +south/management/commands/syncdb.py +south/management/commands/test.py +south/management/commands/testserver.py +south/migration/__init__.py +south/migration/base.py +south/migration/migrators.py +south/migration/utils.py +south/tests/__init__.py +south/tests/autodetection.py +south/tests/db.py +south/tests/db_mysql.py +south/tests/freezer.py +south/tests/inspector.py +south/tests/logger.py +south/tests/logic.py +south/tests/brokenapp/__init__.py +south/tests/brokenapp/models.py +south/tests/brokenapp/migrations/0001_depends_on_unmigrated.py +south/tests/brokenapp/migrations/0002_depends_on_unknown.py +south/tests/brokenapp/migrations/0003_depends_on_higher.py +south/tests/brokenapp/migrations/0004_higher.py +south/tests/brokenapp/migrations/__init__.py +south/tests/circular_a/__init__.py +south/tests/circular_a/models.py +south/tests/circular_a/migrations/0001_first.py +south/tests/circular_a/migrations/__init__.py +south/tests/circular_b/__init__.py +south/tests/circular_b/models.py +south/tests/circular_b/migrations/0001_first.py +south/tests/circular_b/migrations/__init__.py +south/tests/deps_a/__init__.py +south/tests/deps_a/models.py +south/tests/deps_a/migrations/0001_a.py +south/tests/deps_a/migrations/0002_a.py +south/tests/deps_a/migrations/0003_a.py +south/tests/deps_a/migrations/0004_a.py +south/tests/deps_a/migrations/0005_a.py +south/tests/deps_a/migrations/__init__.py +south/tests/deps_b/__init__.py +south/tests/deps_b/models.py +south/tests/deps_b/migrations/0001_b.py +south/tests/deps_b/migrations/0002_b.py +south/tests/deps_b/migrations/0003_b.py +south/tests/deps_b/migrations/0004_b.py +south/tests/deps_b/migrations/0005_b.py +south/tests/deps_b/migrations/__init__.py +south/tests/deps_c/__init__.py +south/tests/deps_c/models.py +south/tests/deps_c/migrations/0001_c.py +south/tests/deps_c/migrations/0002_c.py +south/tests/deps_c/migrations/0003_c.py 
+south/tests/deps_c/migrations/0004_c.py +south/tests/deps_c/migrations/0005_c.py +south/tests/deps_c/migrations/__init__.py +south/tests/emptyapp/__init__.py +south/tests/emptyapp/models.py +south/tests/emptyapp/migrations/__init__.py +south/tests/fakeapp/__init__.py +south/tests/fakeapp/models.py +south/tests/fakeapp/migrations/0001_spam.py +south/tests/fakeapp/migrations/0002_eggs.py +south/tests/fakeapp/migrations/0003_alter_spam.py +south/tests/fakeapp/migrations/__init__.py +south/tests/otherfakeapp/__init__.py +south/tests/otherfakeapp/models.py +south/tests/otherfakeapp/migrations/0001_first.py +south/tests/otherfakeapp/migrations/0002_second.py +south/tests/otherfakeapp/migrations/0003_third.py +south/tests/otherfakeapp/migrations/__init__.py \ No newline at end of file diff --git a/users/src/south/South.egg-info/dependency_links.txt b/users/src/south/South.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/users/src/south/South.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/users/src/south/South.egg-info/top_level.txt b/users/src/south/South.egg-info/top_level.txt new file mode 100644 index 0000000..40e0ef0 --- /dev/null +++ b/users/src/south/South.egg-info/top_level.txt @@ -0,0 +1 @@ +south diff --git a/users/src/south/clean.sh b/users/src/south/clean.sh new file mode 100755 index 0000000..dfff1cd --- /dev/null +++ b/users/src/south/clean.sh @@ -0,0 +1,2 @@ +#!/bin/bash +find . -name "*.pyc" -print0 | xargs -0 rm -rf diff --git a/users/src/south/docs/Makefile b/users/src/south/docs/Makefile new file mode 100644 index 0000000..3436405 --- /dev/null +++ b/users/src/south/docs/Makefile @@ -0,0 +1,88 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = + +# Internal variables. 
+PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest + +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + +clean: + -rm -rf _build/* + +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html + @echo + @echo "Build finished. The HTML pages are in _build/html." + +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) _build/dirhtml + @echo + @echo "Build finished. The HTML pages are in _build/dirhtml." + +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) _build/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) _build/json + @echo + @echo "Build finished; now you can process the JSON files." + +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) _build/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in _build/htmlhelp." 
+ +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) _build/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in _build/qthelp, like this:" + @echo "# qcollectiongenerator _build/qthelp/South.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile _build/qthelp/South.qhc" + +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex + @echo + @echo "Build finished; the LaTeX files are in _build/latex." + @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ + "run these through (pdf)latex." + +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) _build/changes + @echo + @echo "The overview file is in _build/changes." + +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) _build/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in _build/linkcheck/output.txt." + +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) _build/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in _build/doctest/output.txt." 
diff --git a/users/src/south/docs/_static/empty b/users/src/south/docs/_static/empty new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/docs/_templates/empty b/users/src/south/docs/_templates/empty new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/docs/_theme/nature/static/nature.css_t b/users/src/south/docs/_theme/nature/static/nature.css_t new file mode 100644 index 0000000..03b0379 --- /dev/null +++ b/users/src/south/docs/_theme/nature/static/nature.css_t @@ -0,0 +1,229 @@ +/** + * Sphinx stylesheet -- default theme + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: Arial, sans-serif; + font-size: 100%; + background-color: #111; + color: #555; + margin: 0; + padding: 0; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 230px; +} + +hr{ + border: 1px solid #B1B4B6; +} + +div.document { + background-color: #eee; +} + +div.body { + background-color: #ffffff; + color: #3E4349; + padding: 0 30px 30px 30px; + font-size: 0.8em; +} + +div.footer { + color: #555; + width: 100%; + padding: 13px 0; + text-align: center; + font-size: 75%; +} + +div.footer a { + color: #444; + text-decoration: underline; +} + +div.related { + background-color: #6BA81E; + line-height: 32px; + color: #fff; + text-shadow: 0px 1px 0 #444; + font-size: 0.80em; +} + +div.related a { + color: #E2F3CC; +} + +div.sphinxsidebar { + font-size: 0.75em; + line-height: 1.5em; +} + +div.sphinxsidebarwrapper{ + padding: 20px 0; +} + +div.sphinxsidebar h3, +div.sphinxsidebar h4 { + font-family: Arial, sans-serif; + color: #222; + font-size: 1.2em; + font-weight: normal; + margin: 0; + padding: 5px 10px; + background-color: #ddd; + text-shadow: 1px 1px 0 white +} + +div.sphinxsidebar h4{ + font-size: 1.1em; +} + +div.sphinxsidebar h3 a { + color: #444; +} + + +div.sphinxsidebar p { + 
color: #888; + padding: 5px 20px; +} + +div.sphinxsidebar p.topless { +} + +div.sphinxsidebar ul { + margin: 10px 20px; + padding: 0; + color: #000; +} + +div.sphinxsidebar a { + color: #444; +} + +div.sphinxsidebar input { + border: 1px solid #ccc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar input[type=text]{ + margin-left: 20px; +} + +/* -- body styles ----------------------------------------------------------- */ + +a { + color: #005B81; + text-decoration: none; +} + +a:hover { + color: #E32E00; + text-decoration: underline; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: Arial, sans-serif; + background-color: #BED4EB; + font-weight: normal; + color: #212224; + margin: 30px 0px 10px 0px; + padding: 5px 0 5px 10px; + text-shadow: 0px 1px 0 white +} + +div.body h1 { border-top: 20px solid white; margin-top: 0; font-size: 200%; } +div.body h2 { font-size: 150%; background-color: #C8D5E3; } +div.body h3 { font-size: 120%; background-color: #D8DEE3; } +div.body h4 { font-size: 110%; background-color: #D8DEE3; } +div.body h5 { font-size: 100%; background-color: #D8DEE3; } +div.body h6 { font-size: 100%; background-color: #D8DEE3; } + +a.headerlink { + color: #c60f0f; + font-size: 0.8em; + padding: 0 4px 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + background-color: #c60f0f; + color: white; +} + +div.body p, div.body dd, div.body li { + line-height: 1.5em; +} + +div.admonition p.admonition-title + p { + display: inline; +} + +div.highlight{ + background-color: white; +} + +div.note { + background-color: #eee; + border: 1px solid #ccc; +} + +div.seealso { + background-color: #ffc; + border: 1px solid #ff6; +} + +div.topic { + background-color: #eee; +} + +div.warning { + background-color: #ffe4e4; + border: 1px solid #f66; +} + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre { + padding: 10px; + background-color: White; + 
color: #222; + line-height: 1.2em; + border: 1px solid #C6C9CB; + font-size: 1.2em; + margin: 1.5em 0 1.5em 0; + -webkit-box-shadow: 1px 1px 1px #d8d8d8; + -moz-box-shadow: 1px 1px 1px #d8d8d8; +} + +tt { + background-color: #ecf0f3; + color: #222; + padding: 1px 2px; + font-size: 1.2em; + font-family: monospace; +} diff --git a/users/src/south/docs/_theme/nature/static/pygments.css b/users/src/south/docs/_theme/nature/static/pygments.css new file mode 100644 index 0000000..652b761 --- /dev/null +++ b/users/src/south/docs/_theme/nature/static/pygments.css @@ -0,0 +1,54 @@ +.c { color: #999988; font-style: italic } /* Comment */ +.k { font-weight: bold } /* Keyword */ +.o { font-weight: bold } /* Operator */ +.cm { color: #999988; font-style: italic } /* Comment.Multiline */ +.cp { color: #999999; font-weight: bold } /* Comment.preproc */ +.c1 { color: #999988; font-style: italic } /* Comment.Single */ +.gd { color: #000000; background-color: #ffdddd } /* Generic.Deleted */ +.ge { font-style: italic } /* Generic.Emph */ +.gr { color: #aa0000 } /* Generic.Error */ +.gh { color: #999999 } /* Generic.Heading */ +.gi { color: #000000; background-color: #ddffdd } /* Generic.Inserted */ +.go { color: #111 } /* Generic.Output */ +.gp { color: #555555 } /* Generic.Prompt */ +.gs { font-weight: bold } /* Generic.Strong */ +.gu { color: #aaaaaa } /* Generic.Subheading */ +.gt { color: #aa0000 } /* Generic.Traceback */ +.kc { font-weight: bold } /* Keyword.Constant */ +.kd { font-weight: bold } /* Keyword.Declaration */ +.kp { font-weight: bold } /* Keyword.Pseudo */ +.kr { font-weight: bold } /* Keyword.Reserved */ +.kt { color: #445588; font-weight: bold } /* Keyword.Type */ +.m { color: #009999 } /* Literal.Number */ +.s { color: #bb8844 } /* Literal.String */ +.na { color: #008080 } /* Name.Attribute */ +.nb { color: #999999 } /* Name.Builtin */ +.nc { color: #445588; font-weight: bold } /* Name.Class */ +.no { color: #ff99ff } /* Name.Constant */ +.ni { color: #800080 } 
/* Name.Entity */ +.ne { color: #990000; font-weight: bold } /* Name.Exception */ +.nf { color: #990000; font-weight: bold } /* Name.Function */ +.nn { color: #555555 } /* Name.Namespace */ +.nt { color: #000080 } /* Name.Tag */ +.nv { color: purple } /* Name.Variable */ +.ow { font-weight: bold } /* Operator.Word */ +.mf { color: #009999 } /* Literal.Number.Float */ +.mh { color: #009999 } /* Literal.Number.Hex */ +.mi { color: #009999 } /* Literal.Number.Integer */ +.mo { color: #009999 } /* Literal.Number.Oct */ +.sb { color: #bb8844 } /* Literal.String.Backtick */ +.sc { color: #bb8844 } /* Literal.String.Char */ +.sd { color: #bb8844 } /* Literal.String.Doc */ +.s2 { color: #bb8844 } /* Literal.String.Double */ +.se { color: #bb8844 } /* Literal.String.Escape */ +.sh { color: #bb8844 } /* Literal.String.Heredoc */ +.si { color: #bb8844 } /* Literal.String.Interpol */ +.sx { color: #bb8844 } /* Literal.String.Other */ +.sr { color: #808000 } /* Literal.String.Regex */ +.s1 { color: #bb8844 } /* Literal.String.Single */ +.ss { color: #bb8844 } /* Literal.String.Symbol */ +.bp { color: #999999 } /* Name.Builtin.Pseudo */ +.vc { color: #ff99ff } /* Name.Variable.Class */ +.vg { color: #ff99ff } /* Name.Variable.Global */ +.vi { color: #ff99ff } /* Name.Variable.Instance */ +.il { color: #009999 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git a/users/src/south/docs/_theme/nature/theme.conf b/users/src/south/docs/_theme/nature/theme.conf new file mode 100644 index 0000000..1cc4004 --- /dev/null +++ b/users/src/south/docs/_theme/nature/theme.conf @@ -0,0 +1,4 @@ +[theme] +inherit = basic +stylesheet = nature.css +pygments_style = tango diff --git a/users/src/south/docs/about.rst b/users/src/south/docs/about.rst new file mode 100644 index 0000000..55478aa --- /dev/null +++ b/users/src/south/docs/about.rst @@ -0,0 +1,46 @@ +.. _about: + +About South +=========== + +South brings migrations to Django applications. 
Its main objectives are to +provide a simple, stable and database-independent migration layer to prevent +all the hassle schema changes over time bring to your Django applications. + +We try to make South both as easy-to-use and intuitive as possible, by making it +automate most of your schema-changing tasks, while at the same time providing a +powerful set of tools for large or complex projects; you can easily write your +own migrations by hand, or even use the database altering API directly. + +While South started as a relative unknown in the Django database-schema-altering +world, it has slowly risen in popularity and is now widely regarded as the most +popular schema migration tool for Django. + +Key features +------------ + +South has a few key features: + + - Automatic migration creation: South can see what's changed in your models.py + file and automatically write migrations that match your changes. + - Database independence: As far as possible, South is completely + database-agnostic, supporting five different database backends. + - App-savvy: South knows and works with the concept of Django apps, allowing + you to use migrations for some of your apps and leave the rest to carry on + using syncdb. + - VCS-proof: South will notice if someone else commits migrations to the same + app as you and they conflict. + +A brief history +--------------- + +South was originally developed at `Torchbox `_ in 2008, +when no existing solution provided the workflow and features that were needed. +It was open-sourced shortly thereafter, and quickly gained steam after the +Schema Evolution panel at DjangoCon 2008. + +Sometime in 2009, it became the most popular of the various migration +alternatives, and seems to have been going strong ever since. While there have +been growing calls to integrate South, or something like it, into Django itself, +such an integration has not yet been made, mostly due to the relative +immaturity of database migration solutions. 
diff --git a/users/src/south/docs/autodetector.rst b/users/src/south/docs/autodetector.rst new file mode 100644 index 0000000..d516983 --- /dev/null +++ b/users/src/south/docs/autodetector.rst @@ -0,0 +1,81 @@ + +.. _autodetector: + +The Autodetector +================ + +The autodetector is the part of South you'll probably be using the most, as well +as being the feature that people seem to like the most. + +The general use of the autodetector is covered in :ref:`tutorial-part-1`; this +is more of a reference of what it's capable of. + +When the autodetector runs, it compares your current models with those frozen +in your most recent migration on the app, and if it finds any changes, yields +one or more Actions to the South migration-file-writer. + +.. _autodetector-supported-actions: + +Supported Actions +----------------- + +Model creation and deletion +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +South will happily detect the creation and deletion of models; this is the +oldest and most well-worn feature of the autodetector, and so has very few +caveats. + +One thing to note is that, while South calls the post_syncdb hook on your +models (much like ``syncdb`` does), it calls it when it initially creates the +table, not at the end of the migration, so your hook might well get called +when the model doesn't have its full table. + +Consider moving your hook code into its own data migration, or use one of +our own :ref:`signals`. + + +Field addition and removal +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +South detects addition and removal of fields fine, and should correctly create +indexes and constraints for new fields. + +Note that when you add or remove a field, you need a default specified; there's +more explanation on this in the :ref:`tutorial-part-2-defaults` part of the +tutorial. + + +Field changes +^^^^^^^^^^^^^ + +South will detect if you change a field, and should correctly change the field +type, with one exception: + + - If you alter to a field with a CHECK constraint (e.g. 
``PositiveIntegerField``) + the constraint won't be added to the column (it is removed if you alter away, + however). This will be fixed in a future release. + + +ManyToMany addition and removal +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +ManyToMany fields are detected on addition and removal; when you add the field, +South will create the table the ManyToMany represents, and when you remove the +field, the table will be deleted. + +The one exception to this is when you have a 'through model' (i.e. you're using +the ``through=`` option) - since the table for the model is already created when +the model is detected, South does nothing with these types of ManyToMany fields. + + +Unique changes +^^^^^^^^^^^^^^ + +If you change the ``unique=`` attribute on a field, or the ``unique_together`` +in a model's Meta, South will detect and change the constraints on the database +accordingly (except on SQLite, where we don't yet have the code to edit UNIQUE +constraints). + + + diff --git a/users/src/south/docs/commands.rst b/users/src/south/docs/commands.rst new file mode 100644 index 0000000..cc7104a --- /dev/null +++ b/users/src/south/docs/commands.rst @@ -0,0 +1,259 @@ + +.. _commands: + +Command Reference +================= + +South is mainly used via the console and its three important commands: migrate, +schemamigration and datamigration. It also overrides a few parts of syncdb. + +migrate +------- + +The migrate command is used to control the migration of the system forwards or +backwards through the series of migrations for any given app. + +The most common use is:: + + ./manage.py migrate myapp + +This will migrate the app myapp forwards through all the migrations. +If you want to migrate all the apps at once, run:: + + ./manage.py migrate + +This has the same effect as calling the first example for every app, +and will deal with Dependencies properly. 
+ +You can also specify a specific migration to migrate to:: + + ./manage.py migrate myapp 0002_add_username + +Note that, if the system has already migrated past the specified migration, +it will roll back to it instead. If you want to migrate all the way back, +specify the special migration name zero:: + + ./manage.py migrate myapp zero + +You can also just give prefixes of migrations, to save typing:: + + ./manage.py migrate myapp 0002 + +But they must be unique:: + + $ ./manage.py migrate myapp 000 + Running migrations for myapp: + - Prefix 00 matches more than one migration: + 0001_initial + 0002_add_username + +Options +^^^^^^^ + + - ``--all``: Used instead of an app name, allows you to migrate all + applications to the same target. For example, + ``./manage.py migrate --all --fake 0001`` if you are converting a lot of apps. + - ``--list``: Shows what migrations are available, and puts a * next to + ones which have been applied. + - ``--merge``: Runs any missed (out-of-order) migrations without rolling + back to them. + - ``--no-initial-data``: Doesn't load in any initial data fixtures after a + full upwards migration, if there are any. + - ``--fake``: Records the migration sequence as having been applied, but + doesn't actually run it. Useful for :ref:`converting-an-app`. + - ``--db-dry-run``: Loads and runs the migration, but doesn't actually + access the database (the SQL generated is thrown away at the last minute). + The migration is also not recorded as being run; this is useful for + sanity-testing migrations to check API calls are correct. + +Conflict Resolution +^^^^^^^^^^^^^^^^^^^ + +South's migration system really comes into its own when you start getting +conflicting migrations - that is, migrations that have been applied in +the wrong sequence. + +One example is if Anne writes new migrations 0003_foo and 0004_bar, runs the +migration up to 0004 to make sure her local copy is up-to-date, and then updates +her code from (say) Subversion. 
In the meantime, her coworker Bob has written a +migration 0003_baz, which gets pulled in. + +Now, there's a problem. 0003_baz should have been applied before 0004_bar, +but it hasn't been; in this situation, South will helpfully say something like:: + + Running migrations for aeblog: + - Current migration: 5 (after 0004_bar) + - Target migration: 5 (after 0004_bar) + ! These migrations should have been applied already, but aren't: + - 0003_baz + ! Please re-run migrate with one of these switches: + --skip: Ignore this migration mismatch and keep going + --merge: Just apply the missing migrations out of order + If you want to roll back to the first of these migrations + and then roll forward, do: + ./manage.py migrate --skip 0002_add_username + ./manage.py migrate + +As you can see, you have two real options; ``--merge``, which will just apply +the missing migration and continue, and the two commands which roll back to +before the missing migration (using ``--skip`` to ignore the error we're dealing +with) and then migrating properly, in order, from there to the end. + +Using ``--skip`` by itself will let you continue, but isn't much of a solution; +South will still complain the next time you run a migrate without ``--skip``. + +Sometimes, even worse things happen and South finds out that an applied +migration has gone missing from the filesystem. In this scenario, it will +politely tell you to go fix the problem yourself, although in more recent +versions, you also have the option to tell South to wipe all records of the +missing migrations being applied. + +Initial Data and post_syncdb +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +South will load initial_data files in the same way as syncdb, but it loads them +at the end of every successful migration process, so ensure they are kept +up-to-date, along with the rest of your fixtures (something to help ease the +pain of migrating fixtures may appear shortly in South). 
+ +South also sends the post_syncdb signal when a model's table is first created +(this functionality requires that you generated those migrations with +startmigration). This behaviour is intended to mirror the behaviour of syncdb, +although for sanity reasons you may want to consider moving any setup code +connected to such a signal into a migration. + +schemamigration +--------------- + +*(In South 0.6 and below, this is called startmigration)* + +While migrate is the real meat and bones of South, schemamigration is by +comparison an entirely optional extra. It's a utility to help write some of +your migrations (specifically, the ones which change the schema) for +you; if you like, you can ignore it and write everything yourself, in which +case we wish you good luck, and happy typing. + +However, if you have a sense of reason, you'll realise that having the large +majority of your migrations written for you is undoubtedly a good thing. + +The main use of schemamigration is when you've just finished your shiny new +models.py and want to load up your database. In vanilla Django, you'd just run +syncdb - however, with migrations, you'll need a migration to create the tables. + +In this scenario, you just run:: + + ./manage.py schemamigration myapp --initial + +That will write one big migration to create all the tables for the models in +your app; just run ``./manage.py migrate`` to get it in and you're done in only +one more step than syncdb! + +Later on, you'll add models to your app, or change your fields. Each time you do +this, run schemamigration with the --auto flag:: + + ./manage.py schemamigration myapp --auto changed_user_model_bug_434 + +You can also manually specify changes:: + + ./manage.py schemamigration mitest some_cols --add-field User.age --add-model User + +See the tutorial for more. + +Finally, if you're writing a schema migration that South can't automatically create +for you (yet!) 
then you can just create a skeleton:: + +    ./manage.py schemamigration myapp my_new_column_migration --empty + +Note that if you're writing a data migration, you should use the +:ref:`commands-datamigration` command instead. + +Options +^^^^^^^ + +Note that you can combine as many ``--add-X`` options as you like. + + - ``--add-model``: Generates a creation migration for the given modelname. + - ``--add-field``: Generates an add-column migration for modelname.field. + - ``--add-index``: Generates an add-index migration for modelname.field. + - ``--initial``: Like having --model for every model in your app. + You should use this only for your first migration. + - ``--auto``: Generates a migration with automatically-detected actions. + - ``--stdout``: Writes the migration to stdout instead of a file. + +.. _commands-datamigration: + +datamigration +------------- + +*(In South 0.6 and below, this is called startmigration)* + +When you want to create a data migration, use this command to create a blank +template to write your migration with:: + + ./manage.py datamigration books capitalise_titles + +You can also freeze in additional apps if you want:: + + ./manage.py datamigration books capitalise_titles --freeze awards + +Options +^^^^^^^ + + - ``--freeze``: Use appname to freeze additional models into the app. + - ``--stdout``: Writes the migration to stdout instead of a file. + + +graphmigrations +--------------- + +*(New in South 0.7)* + +Run this command to generate a graphviz .dot file for your migrations; you +can then use this to generate a graph of your migrations' dependencies. + +Typical usage:: + + ./manage.py graphmigrations | dot -Tpng -omigrations.png + +This command can be particularly helpful to examine complex dependency sets +between lots of different apps [#]_. + + .. [#] This command was written and used for the first time while helping to + debug the rather complex set of dependencies in django-cms; it's + `quite a sight to behold `_. 
+ +Options +^^^^^^^ + +This command has no options. + + +syncdb +------ + +South overrides the Django syncdb command; as well as changing the output +to show apps delineated by their migration status, it also makes syncdb only +work on a subset of the apps - those without migrations. + +If you want to run syncdb on all of the apps, then use ``--all``, but be warned; +this will put your database schema and migrations out of sync. If you do this, +you *might* be able to fix it with:: + + ./manage.py migrate --fake + +Options +^^^^^^^ + + - ``--all``: Makes syncdb operate on all apps, not just unmigrated ones. + + +convert_to_south +---------------- + +An alias command that both creates an initial migration for an app and then +fake-applies it. Takes one argument, the app label of the app to convert:: + + ./manage.py convert_to_south myapp + +There's more documentation on how to use this in the :ref:`converting-an-app` +section. diff --git a/users/src/south/docs/conf.py b/users/src/south/docs/conf.py new file mode 100644 index 0000000..6132890 --- /dev/null +++ b/users/src/south/docs/conf.py @@ -0,0 +1,194 @@ +# -*- coding: utf-8 -*- +# +# South documentation build configuration file, created by +# sphinx-quickstart on Mon Feb 15 23:32:07 2010. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys, os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +#sys.path.append(os.path.abspath('.')) + +# -- General configuration ----------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. 
They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = [] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'South' +copyright = u'2010, Andrew Godwin' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '0.7' +# The full version, including alpha/beta/rc tags. +release = '0.7' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of documents that shouldn't be included in the build. +#unused_docs = [] + +# List of directories, relative to source directory, that shouldn't be searched +# for source files. +exclude_trees = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. 
+#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. Major themes that come with +# Sphinx are currently 'default' and 'sphinxdoc'. +html_theme = 'nature' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +html_theme_path = ["_theme"] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_use_modindex = True + +# If false, no index is generated. 
+#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = '' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'Southdoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +# The paper size ('letter' or 'a4'). +#latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +#latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', 'South.tex', u'South Documentation', + u'Andrew Godwin', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +#latex_preamble = '' + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_use_modindex = True diff --git a/users/src/south/docs/convertinganapp.rst b/users/src/south/docs/convertinganapp.rst new file mode 100644 index 0000000..cf259ea --- /dev/null +++ b/users/src/south/docs/convertinganapp.rst @@ -0,0 +1,37 @@ + +.. 
_converting-an-app: + +Converting An App +================= + +Converting an app to use South is very easy: + + - Edit your settings.py and put 'south' into `INSTALLED_APPS` + (assuming you've installed it to the right place) + + - Run ``./manage.py syncdb`` to load the South table into the database. + Note that syncdb looks different now - South modifies it. + + - Run ``./manage.py convert_to_south myapp`` - South will automatically make and + pretend to apply your first migration. + +Note that you'll need to convert before you make any changes; South detects +changes by comparing against the frozen state of the last migration, so it +cannot detect changes from before you converted to using South. + +Converting other installations and servers +------------------------------------------ + +The convert_to_south command only works entirely on the first machine you run it +on. Once you've committed the initial migrations it made into your VCS, +you'll have to run ``./manage.py migrate myapp 0001 --fake`` on every machine that +has a copy of the codebase (make sure they were up-to-date with models and +schema first). + +(For the interested, this is required because the initial migration that +convert_to_south makes will try and create all the existing tables; instead, you +tell South that it's already applied using --fake, so the next migrations +apply correctly.) + +Remember that new installations of the codebase after this don't need these +steps; you need only do a syncdb then a normal migrate. diff --git a/users/src/south/docs/customfields.rst b/users/src/south/docs/customfields.rst new file mode 100644 index 0000000..75e40a7 --- /dev/null +++ b/users/src/south/docs/customfields.rst @@ -0,0 +1,244 @@ + +.. 
_custom-fields: + +Custom Fields +============= + +The Problem +----------- + +South stores field definitions by storing both their class and the arguments that +need to be passed to the field's constructor, so it can recreate the field +instance simply by calling the class with the stored arguments. + +However, since Python offers no way to get the arguments used in a class' +constructor directly, South uses something called the *model introspector* to +work out what arguments fields were passed. This knows what variables the +arguments are stored into on the field, and using this knowledge, can +reconstruct the arguments directly. + +This isn't the case for custom fields [#]_, however; South has never seen them +before, and it can't guess at which variables mean what arguments, or what +arguments are even needed; it only knows the rules for Django's internal fields +and those of common third-party apps (those which are either South-aware, or +which South ships with a rules module for, such as django-tagging). + +.. [#] 'Custom Fields' in this context refers to any field that is not part + of Django's core. GeoDjango fields are part of the core, but ones in + third-party apps are 'custom'. Note also that a field is considered + custom even if it inherits directly from a core field and doesn't + override anything; there's no way for South to reliably tell that it does + so. + +The Solution +------------ + +There are two ways to tell South how to work with a custom field; if it's +similar in form to other fields (in that it has a set type and a few options) +you'll probably want to :ref:`extend South's introspection rules +`. + +However, if it's particularly odd - such as a field which takes fields as +arguments, or dynamically changes based on other factors - you'll probably find +it easier to :ref:`add a south_field_triple method `. + + +.. 
_extending-introspection: + +Extending Introspection +======================= + +(Note: This is also featured in the tutorial in :ref:`tutorial-part-4`) + +South does the majority of its field introspection using a set of simple rules; +South works out what class a field is, and then runs all rules which have been +defined for either that class or a parent class of it. + +This way, all of the common options (such as ``null=``) are defined against the +main ``Field`` class (which all fields inherit from), while specific options +(such as ``max_length``) are defined on the specific fields they apply to +(in this case, ``CharField``). + +If your custom field inherits from a core Django field, or another field for +which there are already introspection rules, and it doesn't add any new +attributes, then you probably won't have to add any rules for it, as it will +inherit all those from its parents. In this case, a call like this should work:: + + from south.modelsinspector import add_introspection_rules + add_introspection_rules([], ["^myapp\.stuff\.fields\.SomeNewField"]) + +Note that you must always specify a field as allowed, even if specifies no +new rules of its own - the alternative is that South must presume all fields +without any new rules specified only have the options of their parents, which +is wrong some of the time. + +Thus, there are two stages to adding support for your custom field to South; +firstly, adding some rules for the new arguments it introduces (or possibly +not adding any), and secondly, adding its field name to the list of patterns +South knows are safe to introspect. + +Rules +----- + +Rules are what make up the core logic of the introspector; you'll need to pass +South a (possibly empty) list of them. They consist of a tuple, containing: + + - A tuple or list of one or more classes to which the rules apply (remember, the rules + apply to the specified classes and all subclasses of them). 
+ + - Rules for recovering positional arguments, in order of the arguments (you are + strongly advised not to use this feature, and use keyword argument instead). + + - A dictionary of keyword argument rules, with the key being the name of the + keyword argument, and the value being the rule. + +Each rule is itself a list or tuple with two elements: + + - The first element is the name of the attribute the value is taken from - if + a field stored its max_length argument as ``self.max_length``, say, this + would be ``"max_length"``. + + - The second element is a (possibly empty) dictionary of options describing the + various different variations on handling of the value. + +An example (this is the South rule for the many-to-one relationships in core +Django):: + + rules = [ + ( + (models.ForeignKey, models.OneToOneField), + [], + { + "to": ["rel.to", {}], + "to_field": ["rel.field_name", {"default_attr": "rel.to._meta.pk.name"}], + "related_name": ["rel.related_name", {"default": None}], + "db_index": ["db_index", {"default": True}], + }, + ) + ] + +You'll notice that you're allowed to have dots in the attribute name; ForeignKeys, +for example, store their destination model as ``self.rel.to``, so the attribute +name is ``"rel.to"``. + +The various options are detailed below; most of them allow you to specify the +default value for a parameter, so arguments can be omitted for clarity where +they're not necessary. + +.. _is-value-keyword: + +The one special case is the ``is_value`` keyword; if this is present and True, +then the first item in the list will be interpreted as the actual value, rather +than the attribute path to it on the field. For example:: + + "frozen_by_south": [True, {"is_value": True}], + +Parameters +^^^^^^^^^^ + + - default: The default value of this field (directly as a Python object). + If the value retrieved ends up being this, the keyword will be omitted + from the frozen result. 
For example, the base Field class' "null" attribute + has {'default':False}, so it's usually omitted, much like in the models. + + - default_attr: Similar to default, but the value given is another attribute + to compare to for the default. This is used in to_field above, as this + attribute's default value is the other model's pk name. + + - default_attr_concat: For when your default value is even more complex, + default_attr_concat is a list where the first element is a format string, + and the rest is a list of attribute names whose values should be formatted + into the string. + + - ignore_if: Specifies an attribute that, if it coerces to true, causes this + keyword to be omitted. Useful for ``db_index``, which has + ``{'ignore_if': 'primary_key'}``, since it's always True in that case. + + - ignore_dynamics: If this is True, any value that is "dynamic" - such as model + instances - will cause the field to be omitted instead. Used internally + for the ``default`` keyword. + + - is_value: If present, the 'attribute name' is instead used directly as the + value. See :ref:`above ` for more info. + + +Field name patterns +------------------- + +The second of the two steps is to tell South that your field is now safe to +introspect (as you've made sure you've added all the rules it needs). + +Internally, South just has a long list of regular expressions it checks fields' +classes against; all you need to do is provide extra arguments to this list. + +Example (this is in the GeoDjango module South ships with, and presumes +``rules`` is the rules triple you defined previously):: + + from south.modelsinspector import add_introspection_rules + add_introspection_rules(rules, ["^django\.contrib\.gis"]) + +Additionally, you can ignore some fields completely if you know they're not +needed. For example, django-taggit has a manager that actually shows up as a +fake field (this makes the API for using it much nicer, but confuses South to no +end). 
The django-taggit module we ship with contains this rule to ignore it:: + + from south.modelsinspector import add_ignored_fields + add_ignored_fields(["^taggit\.managers"]) + +Where to put the code +--------------------- + +You need to put the call to ``add_introspection_rules`` somewhere where it will +get called before South runs; it's probably a good choice to have it either in +your ``models.py`` file or the module the custom fields are defined in. + +General Caveats +--------------- + +If you have a custom field which adds other fields to the model dynamically +(i.e. it overrides contribute_to_class and adds more fields onto the model), +you'll need to write your introspection rules appropriately, to make South +ignore the extra fields at migration-freezing time, or to add a flag to your +field which tells it not to make the new fields again. An example can be +found `here `_. + +.. _south-field-triple: + +south_field_triple +================== + +There are some cases where introspection of fields just isn't enough; +for example, field classes which dynamically change their database column +type based on options, or other odd things. + +Note: :ref:`Extending the introspector ` is often far +cleaner and easier than this method. + +The method to implement for these fields is ``south_field_triple()``. + +It should return the standard triple of:: + + ('full.path.to.SomeFieldClass', ['positionalArg1', '"positionalArg2"'], {'kwarg':'"value"'}) + +(this is the same format used by the :ref:`ORM Freezer `; +South will just use your output verbatim). + +Note that the strings are ones that will be passed into eval, so for this +reason, a variable reference would be ``'foo'`` while a string +would be ``'"foo"'``. + +Example +------- + +Here's an example of this method for django-modeltranslation's TranslationField. 
+This custom field stores the type it's wrapping in an attribute of itself, +so we'll just use that:: + + def south_field_triple(self): + "Returns a suitable description of this field for South." + # We'll just introspect the _actual_ field. + from south.modelsinspector import introspector + field_class = self.translated_field.__class__.__module__ + "." + self.translated_field.__class__.__name__ + args, kwargs = introspector(self.translated_field) + # That's our definition! + return (field_class, args, kwargs) diff --git a/users/src/south/docs/databaseapi.rst b/users/src/south/docs/databaseapi.rst new file mode 100644 index 0000000..edb52b2 --- /dev/null +++ b/users/src/south/docs/databaseapi.rst @@ -0,0 +1,596 @@ + +.. _database-api: + +Database API +============ + +South ships with a full database-agnostic API for performing schema changes +on databases, much like Django's ORM provides data manipulation support. + +Currently, South supports: + + - PostgreSQL + - MySQL + - SQLite + - Microsoft SQL Server (beta support) + - Oracle (alpha support) + + +.. _accessing-the-api: + +Accessing The API +----------------- + +South automatically exposes the correct set of database API operations as +``south.db.db``; it detects which database backend you're using from your +Django settings file. It's usually imported using:: + + from south.db import db + +If you're using multiple database support (Django 1.2 and higher), +there's a corresponding ``south.db.dbs`` dictionary +which contains a DatabaseOperations object (the object which has the methods +defined above) for each database alias in your configuration file:: + + from south.db import dbs + dbs['users'].create_table(...) + +You can tell which backend you're talking to inside of a migration by examining +``db.backend_name`` - it will be one of ``postgres``, ``mysql``, ``sqlite3``, +``pyodbc`` or ``oracle``. 
+ + +Database-Specific Issues +------------------------ + +South provides a large amount of features, and not all features are supported by +all database backends. + + - PostgreSQL supports all of the South features; if you're unsure which database + engine to pick, it's the one we recommend for migrating on. + + - MySQL doesn't have transaction support for schema modification, meaning that + if a migration fails to apply, the database is left in an inconsistent state, + and you'll probably have to manually fix it. South will try and sanity-check + migrations in a dry-run phase, and give you hints of what to do when it + fails, however. + + - SQLite doesn't natively support much schema altering at all, but South + has workarounds to allow deletion/altering of columns. Unique indexes are + still unsupported, however; South will silently ignore any such commands. + + - SQL Server has been supported for a while, and works in theory, but the + implementation itself may have bugs, as it's a contributed module and isn't + under primary development. Patches and bug reports are welcome. + + - Oracle is a new module as of the 0.7 release, and so is very much alpha. + The most common operations work, but others may be missing completely; + we welcome bug reports and patches against it (as with all other modules). + + +Methods +------- + +These are how you perform changes on the database. See :ref:`accessing-the-api` +to see how to get access to the ``db`` object. + +.. contents:: + :local: + :depth: 1 + + + +db.add_column +^^^^^^^^^^^^^ + +:: + + db.add_column(table_name, field_name, field, keep_default=True) + +Adds a column called ``field_name`` to the table ``table_name``, of the type +specified by the field instance field. + +If ``keep_default`` is True, then any default value specified on the field will +be added to the database schema for that column permanently. If not, then the +default is only used when adding the column, and then dropped afterwards. 
+ +Note that the default value for fields given here is only ever used when +adding the column to a non-empty table; the default used by the ORM in your +application is the one specified on the field in your models.py file, as Django +handles adding default values before the query hits the database. + +The only case where having the default stored in the database as well would make +a difference would be where you are interacting with the database from somewhere +else, or Django doesn't know about the added column at all. + +Also, note that the name you give for the column is the **field name**, not the +column name - if the field you pass in is a ForeignKey, for example, the +real column name will have _id on the end. + +Examples +"""""""" + +A normal column addition (the column is nullable, so all existing rows will have +it set to NULL):: + + db.add_column('core_profile', 'height', models.IntegerField(null=True)) + +Providing a default value instead, so all current rows will get this value for +'height':: + + db.add_column('core_profile', 'height', models.IntegerField(default=-1)) + +Same as above, but the default is not left in the database schema:: + + db.add_column('core_profile', 'height', models.IntegerField(default=-1), keep_default=False) + + + +db.alter_column +^^^^^^^^^^^^^^^ + +:: + + db.alter_column(table_name, column_name, field, explicit_name=True) + +Alters the column ``column_name`` on the table ``table_name`` to match +``field``. Note that this cannot alter all field attributes; for example, if +you want to make a field ``unique=True``, you should instead use +``db.add_index`` with ``unique=True``, and if you want to make it a primary +key, you should look into ``db.drop_primary_key`` and ``db.create_primary_key``. + +If explicit_name is false, ForeignKey? fields will have _id appended to the end +of the given column name - this lets you address fields as they are represented +in the model itself, rather than as the column name. 
+ +Examples +"""""""" + +A simple change of the length of a VARCHAR column:: + + # Assume the table was created with name = models.CharField(max_length=50) + db.alter_column('core_nation', 'name', models.CharField(max_length=200)) + +We can also change it to a compatible field type:: + + db.alter_column('core_nation', 'name', models.TextField()) + +If we have a ForeignKey? named 'user', we can address it without the implicit '_id' on the end:: + + db.alter_column('core_profile', 'user', models.ForeignKey(orm['auth.User'], null=True, blank=True), explicit_name=False) + +Or you can specify the same operation with an explicit name:: + + db.alter_column('core_profile', 'user_id', models.ForeignKey(orm['auth.User'], null=True, blank=True)) + + + +db.clear_table +^^^^^^^^^^^^^^ + +:: + + db.clear_table(table_name) + +Deletes all rows from the table (truncation). Never used by South's +autogenerators, but can prove useful if you're writing data migrations. + +Examples +"""""""" + +Clear all cached geocode results, as the schema is changing:: + + db.clear_table('core_geocoded') + db.add_column('core_geocoded', ...) + + + +db.commit_transaction +^^^^^^^^^^^^^^^^^^^^^ + +:: + + db.commit_transaction() + +Commits the transaction started at a ``db.start_transaction`` call. + + + +db.create_index +^^^^^^^^^^^^^^^ + +:: + + db.create_index(table_name, column_names, unique=False, db_tablespace='') + +Creates an index on the list of columns ``column_names`` on the table +``table_name``. + +By default, the index is simply for speed; if you would like a unique index, +then specify ``unique=True``, although you're better off using +``db.create_unique`` for that. + +``db_tablespace`` is an Oracle-specific option, and it's likely you won't need +to use it. 
+ +Examples +"""""""" + +Creating an index on the 'name' column:: + + db.create_index('core_profile', ['name']) + +Creating a unique index on the combination of 'name' and 'age' columns:: + + db.create_index('core_profile', ['name', 'age'], unique=True) + + + +db.create_primary_key +^^^^^^^^^^^^^^^^^^^^^ + +:: + + db.create_primary_key(table_name, columns) + +Creates a primary key spanning the given ``columns`` for the table. Remember, +you can only have one primary key per table; use ``db.delete_primary_key`` +first if you already have one. + +Examples +"""""""" + +Swapping from the ``id`` to ``uuid`` as a primary key:: + + db.delete_primary_key('core_upload') + db.create_primary_key('core_upload', ['uuid']) + +Adding a new composite primary key on "first name" and "last name":: + + db.create_primary_key('core_people', ['first_name', 'last_name']) + + + +db.create_table +^^^^^^^^^^^^^^^ + +:: + + db.create_table(table_name, fields) + fields = ((field_name, models.SomeField(somearg=4)), ...) + +This call creates a table called *table_name* in the database with the schema +specified by *fields*, which is a tuple of ``(field_name, field_instance)`` +tuples. + +Note that this call will not automatically add an id column; +you are responsible for doing that. + +We recommend you create calls to this function using ``schemamigration``, either +in ``--auto`` mode, or by using ``--add-model``. 
+ +Examples +"""""""" + +A simple table, with one field, name, and the default id column:: + + db.create_table('core_planet', ( + ('id', models.AutoField(primary_key=True)), + ('name', models.CharField(unique=True, max_length=50)), + )) + +A more complex table, which uses the ORM Freezer for its foreign keys:: + + db.create_table('core_nation', ( + ('name', models.CharField(max_length=255)), + ('short_name', models.CharField(max_length=50)), + ('slug', models.SlugField(unique=True)), + ('planet', models.ForeignKey(orm.Planet, related_name="nations")), + ('flag', models.ForeignKey(orm.Flag, related_name="nations")), + ('planet_name', models.CharField(max_length=50)), + ('id', models.AutoField(primary_key=True)), + )) + + + +db.create_unique +^^^^^^^^^^^^^^^^ + +:: + + create_unique(table_name, columns) + +Creates a unique index or constraint on the list of columns ``columns`` on the +table ``table_name``. + +Examples +"""""""" + +Declare the pair of fields ``first_name`` and ``last_name`` to be unique:: + + db.create_unique('core_people', ['first_name', 'last_name']) + + + +db.delete_column +^^^^^^^^^^^^^^^^ + +:: + + db.delete_column(table_name, column_name) + +Deletes the column ``column_name`` from the table ``table_name``. + +Examples +"""""""" + +Delete a column from a table:: + + db.delete_column('core_nation', 'title') + + + +db.delete_index +^^^^^^^^^^^^^^^ + +:: + + db.delete_index(table_name, column_names, db_tablespace='') + +Deletes an index created by db.create_index or one of the other South +functions. Pass the column_names in exactly the same order as the other call +to ensure this works; we use a hashing algorithm to make sure you can delete +migrations by only specifying column names. + +db_tablespace is an Oracle-specific option. 
+ +Examples +"""""""" + +Deleting an index on 'name':: + + db.delete_index('core_profile', ['name']) + +Deleting the unique index on the combination of 'name' and 'age' columns +(from the db.create_index examples):: + + db.delete_index('core_profile', ['name', 'age']) + + + +db.delete_foreign_key +^^^^^^^^^^^^^^^^^^^^^ + +:: + + delete_foreign_key(table_name, column) + +Drops any foreign key constraints on the given column, if the database backend +supported them in the first place. + +Examples +"""""""" + +Remove the foreign key constraint from user_id: + + db.delete_foreign_key('core_people', 'user_id') + + + +db.delete_primary_key +^^^^^^^^^^^^^^^^^^^^^ + +:: + + db.delete_primary_key(table_name) + +Deletes the current primary key constraint on the table. Does not remove the +columns the primary key was using. + +Examples +"""""""" + +Swapping from the ``id`` to ``uuid`` as a primary key:: + + db.delete_primary_key('core_upload') + db.create_primary_key('core_upload', ['uuid']) + + + +db.delete_table +^^^^^^^^^^^^^^^ + +:: + + db.delete_table(table_name, cascade=True) + +Deletes (drops) the named table from the database. If cascade is True, drops any +related constraints as well. + +Examples +"""""""" + +Usual call:: + + db.delete_table("core_planet") + +Not cascading (beware, may fail):: + + db.delete_table("core_planet", cascade=False) + + + +db.delete_unique +^^^^^^^^^^^^^^^^ + +:: + + delete_unique(table_name, columns) + +Deletes a unique index or constraint on the list of columns ``columns`` on the +table ``table_name``. The constraint/index. must already exist. 
+ +Examples +"""""""" + +Declare the pair of fields ``first_name`` and ``last_name`` to no longer +be unique:: + + db.delete_unique('core_people', ['first_name', 'last_name']) + + + +db.execute +^^^^^^^^^^ + +:: + + db.execute(sql, params=[]) + +Executes the **single** raw SQL statement ``sql`` on the database; optionally +use params to replace the %s instances in sql (this is the recommended way of +doing parameters, as it escapes them correctly for all databases). + +If you want to execute a series of SQL statements instead, use +``db.execute_many``. + +Note that you should avoid using raw SQL wherever possible, as it will break the +database abstraction in many cases. If you want to handle data, consider using +the ORM Freezer, and remember that many operations such as creating indexes and +changing primary keys have functions in the DB layer. + +If there's a common operation you'd like to see added to the DB abstraction +layer in South, consider asking on the mailing list or creating a ticket. + +Examples +"""""""" + +VACUUMing a table:: + + db.execute("VACUUM ANALYZE core_profile") + +Updating values (this sort of task should really be done using the frozen ORM):: + + db.execute("UPDATE core_profile SET name = %s WHERE name = %s", ["andy", "andrew"]) + + + +db.execute_many +^^^^^^^^^^^^^^^ + +:: + + db.execute_many(sql, regex=r"(?mx) ([^';]* (?:'[^']*'[^';]*)*)", comment_regex=r"(?mx) (?:^\s*$)|(?:--.*$)") + +Executes the given multi-statement SQL string ``sql``. The two parameters are +the regular expressions for splitting up statements (``regex``) and removing +comments (``comment_regex``). We recommend you leave these at their default +values, as they work on almost all SQL files. + +If you only want to execute a single SQL statement, consider using +``db.execute``, as it offers parameter escaping, and the regexes sometimes get +the splitting wrong. 
+ +Examples +"""""""" + +Run the PostGIS initialisation file:: + + db.execute_many(open("/path/to/lwpostgis.sql").read()) + + + +db.rename_column +^^^^^^^^^^^^^^^^ + +:: + + db.rename_column(table_name, column_name, new_column_name) + +Renames the column ``column_name`` in table ``table_name`` to +``new_column_name``. + +Examples +"""""""" + +Simple rename:: + + db.rename_column('core_nation', 'name', 'title') + + + +db.rename_table +^^^^^^^^^^^^^^^ + +:: + + db.rename_table(table_name, new_table_name) + +Renames the table table_name to the new name new_table_name. + +This won't affect what tables your models are looking for, of course; +this is useful, for example, if you've renamed a model +(and don't want to specify the old table name in Meta). + +Examples +"""""""" + +Simple rename:: + + db.rename_table('core_profile', 'core_userprofile') + + + +db.rollback_transaction +^^^^^^^^^^^^^^^^^^^^^^^ + +:: + + db.rollback_transaction() + +Rolls back the transaction started at a ``db.start_transaction`` call. + + + +db.send_create_signal +^^^^^^^^^^^^^^^^^^^^^ + +:: + + db.send_create_signal(app_label, model_names) + +Sends the post_syncdb signal for the given models ``model_names`` in the app +``app_label``. + +This signal is used by various bits of django internals - such as contenttypes +- to hook new models into themselves, so you should really call it after the +relevant ``db.create_table`` call. ``startmigration`` will add this +automatically for you. + +Note that the signals are not sent until the end of the whole migration +sequence, so your handlers will not get called until all migrations are done. +This is so that your handlers can deal with the most recent version of the +model's schema, rather than the one in the migration where the signal is +originally sent. 
+ +Examples +"""""""" + +Sending a signal for the 'Profile' and 'Planet' models in my app 'core':: + + db.send_create_signal('core', ['Profile', 'Planet']) + + + +db.start_transaction +^^^^^^^^^^^^^^^^^^^^ + +:: + + db.start_transaction() + +Wraps the following code (until it meets a ``db.rollback_transaction`` or +``db.commit_transaction`` call) in a transaction. + + + diff --git a/users/src/south/docs/dependencies.rst b/users/src/south/docs/dependencies.rst new file mode 100644 index 0000000..2616e3c --- /dev/null +++ b/users/src/south/docs/dependencies.rst @@ -0,0 +1,57 @@ + +.. _dependencies: + +Dependencies +============ + +Migrations for apps are nice 'n all, but when you start writing a large project, +with a lot of apps, you realise you have foreign key relationships between apps +and working out what order migrations would need to be applied in for each app +is just painful. + +Luckily, we also had this problem, so South has a dependency system. Inside a +migration, you can declare that it depends on having another app having run a +certain migration first; for example, if my app "forum" depends on the +"accounts" app having created its user profile table, we can do:: + + # forum/migrations/0002_post.py + class Migration: + + depends_on = ( + ("accounts", "0003_add_user_profile"), + ) + + def forwards(self): + .... + +Then, if you try and migrate to or beyond 0002_post in the forum app, it will +first make sure accounts is migrated at least up to 0003_add_user_profile, +and if not will migrate it for you. + +Dependencies also work in reverse; South knows not to undo that +0003_add_user_profile migration until it has undone the 0002_post migration. 
+ +You can have multiple dependencies, and all sorts of wacky structures; +there are, however, two rules: + + - No circular dependencies (two or more migrations depending on each other) + - No upwards dependencies in the same app (so you can't make 0002_post in the forum app depend on 0003_room in the same app, either directly or through a dependency chain. + +Reverse Dependencies +-------------------- + +South also supports "reverse dependencies" - a dependecy where you say your +migration must be run before another, rather than vice-versa. This is useful +if you're trying to run a migration before another in a separate, third-party +(or unchangeable) code. + +Declaring these is just like the other kind, except you use needed_by:: + + class Migration: + + needed_by = ( + ("accounts", "0005_make_fks"), + ) + + def forwards(self): + .... \ No newline at end of file diff --git a/users/src/south/docs/fixtures.rst b/users/src/south/docs/fixtures.rst new file mode 100644 index 0000000..3aa673e --- /dev/null +++ b/users/src/south/docs/fixtures.rst @@ -0,0 +1,36 @@ +Fixtures +======== + +A few things change when you're using fixtures with South. + +initial_data +------------ + +Much like syncdb, South will load the initial_data fixture when an app has been +successfully migrated to the latest migration for an app. Note that the data in +the fixture will not be available before then; South only applies it at the end, +as it may not match the current database schema. + +Fixtures from migrations +------------------------ + +If you need to load a fixture as part of your database setup - say, you have a +migration that depends on it being around - the best thing to do is to write a +new migration to load the fixture in. That way, the fixture will always be +loaded at the correct time. 
+ +To make such a migration, first make a blank migration:: + + ./manage.py datamigration appname load_myfixture + +Then, open the new migration file, and restructure your forwards() method +so it looks like this:: + + def forwards(self, orm): + from django.core.management import call_command + call_command("loaddata", "my_fixture.json") + +(you'll have to leave backwards() empty, +as there's not much you can do to reverse this). + +Then, when this migration is run, it will load the given fixture. diff --git a/users/src/south/docs/generics.rst b/users/src/south/docs/generics.rst new file mode 100644 index 0000000..9764f52 --- /dev/null +++ b/users/src/south/docs/generics.rst @@ -0,0 +1,18 @@ +Generic Relations +================= + +Generic relations' fields are be frozen, but unfortunately +not the GenericForeignKey itself (see :ref:`orm-freezing` for a reason why). +To add it back onto a model, add the import +for generic at the top of the migration and then in the body of forwards() put:: + + gfk = generic.GenericForeignKey() + gfk.contribute_to_class(orm.FooModel, "object_link") + +This will add the GenericForeignKey onto the model as model.object_link. +You can pass the optional content_type and id field names into the +constructor as usual. + +Also, be careful when using ContentType; make sure to use the frozen +orm['contenttypes.ContentType'], and don't import it directly, +otherwise comparisons may fail. \ No newline at end of file diff --git a/users/src/south/docs/index.rst b/users/src/south/docs/index.rst new file mode 100644 index 0000000..f2d3acf --- /dev/null +++ b/users/src/south/docs/index.rst @@ -0,0 +1,45 @@ + +.. _index: + +South documentation +=================== + +South is a tool to provide consistent, easy-to-use and database-agnostic +migrations for Django applications. 
+ +This is the documentation for the current version (0.7); previous versions' +documentation was written directly into our wiki, but is mostly a subset of +what is written here. + +If you want to view the old docs (for example, if you need something that was +only in 0.6) they're `still available `_. + +Support +------- + +For initial help with problems, see our `mailing list `_, or `#django-south `_ on `freenode `_. If you find a real bug, then `file a new ticket `_. + +Contents +-------- + +.. toctree:: + :maxdepth: 2 + + about + whataremigrations + installation + tutorial/index + databaseapi + convertinganapp + migrationstructure + dependencies + commands + unittests + ormfreezing + generics + customfields + autodetector + signals + fixtures + settings + releasenotes/index \ No newline at end of file diff --git a/users/src/south/docs/installation.rst b/users/src/south/docs/installation.rst new file mode 100644 index 0000000..d371561 --- /dev/null +++ b/users/src/south/docs/installation.rst @@ -0,0 +1,132 @@ + +.. _installation: + +Installation +============ + +South's current release is :ref:`0.7.3 <0-7-3-release-notes>`. + +There are a few different ways to install South: + + - :ref:`Using easy_install ` (or pip), which is recommended if you want stable releases. + - :ref:`Using a Mercurial checkout `, recommended if you want cutting-edge features. + - :ref:`Using our downloadable archives `, useful if you don't have easy_install or Mercurial. + +Some Linux distributions are also starting to include South in their package +repositories; if you're running unstable Debian you can +``apt-get install python-django-south``, and on new Fedoras you can use +``yum install Django-south``. Note that this may give you an older version - +check the version before using the packages. + +South should work with versions of Django from 0.97-pre through to 1.2, although +some features (such as multi-db) may not be available for older Django versions. + + +.. 
_installation-easy-install: + +Using easy_install +------------------ + +If you have easy_install available on your system, just type:: + + easy_install South + +If you've already got an old version of South, and want to upgrade, use:: + + easy_install -U South + +That's all that's needed to install the package; you'll now want to +:ref:`configure your Django installation `. + + +.. _installation-mercurial: + +Using Mercurial +--------------- + +You can install directly from our Mercurial repo, allowing you to recieve +updates and bugfixes as soon as they're made. You'll need Mercurial installed +on your system; if it's not already, you'll want to get it. The package name +is ``mercurial`` on most Linux distributions; OSX and Windows users can download +packages from http://mercurial.berkwood.com. + +Make sure you're in a directory where you want the ``south`` directory to +appear, and run:: + + hg clone http://bitbucket.org/andrewgodwin/south/ + +To update an existing Mercurial checkout to the newest version, run:: + + hg pull + hg up -C tip + +(Rather than running from tip, you can also use the ``stableish`` tag, which is +manually set on reasonably stable trunk commits, or pick a version number tag.) + +Once you have this directory, move onto :ref:`installation-from-directory`. + + +.. _installation-archives: + +Using downloadable archives +--------------------------- + +If you're averse to using Mercurial, and don't have easy_install available, then +you can install from one of our ``.tar.gz`` files. + +First, download the archive of your choice from +`our releases page `_, and extract it to +create a ``south`` folder. Then, proceed with our instructions for +:ref:`installation-from-directory`. + + + +.. _installation-from-directory: + +Installing from a directory +--------------------------- + +If you've obtained a copy of South using either Mercurial or a downloadable +archive, you'll need to install the copy you have system-wide. 
Try running:: + + python setup.py develop + +If that fails, you don't have ``setuptools`` or an equivalent installed; either +install them, or run:: + + python setup.py install + +Note that ``develop`` sets the installed version to run from the directory you +just created, while ``install`` copies all the files to Python's +``site-packages`` folder, meaning that if you update your checkout you'll need +to re-run ``install``. + +You could also install South locally for only one project, by either including +with your project and modifying ``sys.path`` in your settings file, or +(preferably) by using virtualenv, pip and a requirements.txt. A tutorial in how +to use these is outside the scope of this documentation, but `there are +tutorials elsewhere `_. + +Once you've done one of those, you'll want to +:ref:`configure your Django installation `. + + +.. _installation-configure: + +Configuring your Django installation +------------------------------------ + +Now you've installed South system-wide, you'll need to configure Django to use +it. Doing so is simple; just edit your ``settings.py`` and add ``'south'`` to +the end of ``INSTALLED_APPS``. + +If Django doesn't seem to pick this up, check that you're not overriding +``INSTALLED_APPS`` elsewhere, and that you can run ``import south`` from inside +``./manage.py shell`` with no errors. + +Once South is added in, you'll need to run ``./manage.py syncdb`` to make the +South migration-tracking tables (South doesn't use migrations for +its own models, for various reasons). + +Now South is loaded into your project and ready to go, you'll probably want to +take a look at our :ref:`tutorial`. diff --git a/users/src/south/docs/migrationstructure.rst b/users/src/south/docs/migrationstructure.rst new file mode 100644 index 0000000..7a89e08 --- /dev/null +++ b/users/src/south/docs/migrationstructure.rst @@ -0,0 +1,49 @@ + +.. 
_migration-structure: + +Migration Structure +=================== + +Migrations are, at the most basic level, files inside your app's migrations/ +directory. + +When South loads migrations, it loads all the python files inside migrations/ +in ASCII sort order (e.g. 1 is before 10 is before 2), and expects to find a +class called Migration inside each one, with at least a ``forwards()`` +and ``backwards()`` method. + +When South wants to apply a migration, it simply calls the ``forwards()`` +method, and similarly when it wants to roll back a migration it calls +``backwards()``. It's up to you what you do inside these methods; the usual +thing is to do database changes, but you don't have to. + +Sort Order +---------- + +Since migrations are loaded in ASCII sort order, they won't be applied in the +correct order if you call them ``1_first, 2_second, ..., 10_tenth``. +(10 sorts before 2). + +Rather than force a specific naming convention, we suggest that if you want to +use numerical migrations in this fashion (as we suggest you do) that you prefix +the numbers with zeroes like so: ``0001_first, 0002_second, 0010_tenth``. + +All of South's automatic creation code will follow this scheme. + +Transactions +------------ + +Whenever ``forwards()`` or ``backwards()`` is called it is called inside a +database transaction, which is committed if the method executes successfully +or rolled back if it raises an error. + +If you need to use two or more transactions inside a migration, either use +two separate migrations (if you think it's appropriate), or have a snippet +like this where you want a new transaction:: + + db.commit_transaction() # Commit the first transaction + db.start_transaction() # Start the second, committed on completion + +Note that you must commit and start the next transaction if you are making +both data and column changes. If you don't do this, you'll end up with your +database hating you for asking it the impossible. 
diff --git a/users/src/south/docs/ormfreezing.rst b/users/src/south/docs/ormfreezing.rst new file mode 100644 index 0000000..d5ba882 --- /dev/null +++ b/users/src/south/docs/ormfreezing.rst @@ -0,0 +1,87 @@ + +.. _orm-freezing: + +ORM Freezing +============ + +South freezes the state of the ORM and models whenever you do a migration, +meaning that when your migrations run in the future, they see the models and +fields they're expecting (the ones that were around when they were created), +rather than the current set (which could be months or even years newer). + +This is accomplished by serialising the models into a large dictionary called +``models`` at the bottom of every migration. It's easy to see; it's the large +chunk of dense code at the bottom. + +Rationale behind the serialisation +---------------------------------- + +South doesn't freeze every aspect of a model; for example, it doesn't +preserve new managers, or custom model methods, as these would require +serialising the python code that runs those method (and the code that depends on, +and so forth). + +If you want custom methods in your migration, you'll have to copy the code in, +including any imports it relies on to work. +Remember, however, for every import that you add, you're promising to keep +that import valid for the life for the migration. + +We also use a human-readable format that's easy to change; since South relies +on the frozen models not only for reacreating the ORM but also for detecting +changes, it's really useful to be able to edit them now and again (and also +serves as a valuable debugging tool if you attach failing migrations to a +ticket). + +Serialisation format +-------------------- + +``models`` is a dict of ``{'appname.modelname': fields}``, and ``fields`` is a +dict of ``{'fieldname': (fieldclass, positional_args, kwd_args)}``. ``'Meta'`` +is also a valid entry in fields, in which case the value should be a dict +of its attributes. 
+ +Make note that the entries in positional_args and kwd_args are +**strings passed into eval**; thus, a string would be ``'"hello"'``. +We strongly recommend you use schemamigration/datamigration to freeze things. + +Accessing the ORM +----------------- + +From inside a migration, you can access models from the frozen ORM in two ways. +If the model you're accessing is part of the same app, you can simply call:: + + orm.ModelName + +Otherwise, you'll need to specify the app name as well, using:: + + orm['myapp.ModelName'] + +For example, if you wanted to get a user with ID 1, you could use:: + + orm['auth.User'].objects.get(id=1) + +Note that you can only access models that have been frozen; South automatically +includes anything that could be reaches via foreign keys or many-to-many +relationships, but if you want to add other models in, simply pass ``--freeze appname`` +to the ``./manage.py datamigration`` command. + +Also note that the ``backwards()`` method gets the ORM as frozen by the previous +migration except for migrations that define ``symmetrical = True`` (new in South 1.0) + +Frozen Meta Attributes +---------------------- + +As well as freezing fields (for which South has a whole slew of rules on +what to freeze - see :ref:`extending-introspection`), it also freezes certain +meta attributes of a model (the ones which we think will have an impact on the +table schema or your frozen ORM use). + +Currently, South freezes:: + + db_table + db_tablespace + unique_together + ordering + +If there's something else you think should be frozen in the Meta, but which +isn't, file a bug and we'll look into it. \ No newline at end of file diff --git a/users/src/south/docs/releasenotes/0.7.1.rst b/users/src/south/docs/releasenotes/0.7.1.rst new file mode 100644 index 0000000..1059a68 --- /dev/null +++ b/users/src/south/docs/releasenotes/0.7.1.rst @@ -0,0 +1,61 @@ + +.. 
_0-7-1-release-notes: + +=========== +South 0.7.1 +=========== + +This is a minor new release of South, and the first bugfix release for the +:ref:`0.7 series <0-7-release-notes>`. + + +Backwards incompatible changes +============================== + +None. + + +Changes +============= + +South tests +----------- + +South's internal test suite now doesn't run by default (the ``SKIP_SOUTH_TESTS`` +setting now defaults to True). This is mainly because the test suite is meant +to be run in isolation (the test framework continually changes +``INSTALLED_APPS`` and fiddles with the ORM as it runs, among other things), and +was causing compatability problems with other applications. + +If you wish to run the tests still, simply set ``SKIP_SOUTH_TESTS = False``. + +Data Migrations +--------------- + +There was an annoying issue that caused failing data migrations under MySQL +to suddenly run their backwards() method and produce an error completely +unrelated to the original problem. This has been fixed. + +Commands +-------- + +``./manage.py migrate`` has gained a new ``--ignore-ghost-migrations``, which +will temporarily silence South's complaining about missing migrations on disk +if you really know what you're doing (i.e. temporary branch switching). + +In addition, --noinput is now correctly respected for the "./manage.py migrate" +command. + +Dependencies +------------ + +A bug and some nondeterminism in the new dependency engine has been fixed +(previously, dependencies were sometimes calculated wrongly, and the +non-determinism meant that this only happened on certain architectures). + +Other changes +------------- + +A whole assortment of minor bugs has been fixed; for the complete list, see +the `milestone in our Trac `_. + diff --git a/users/src/south/docs/releasenotes/0.7.2.rst b/users/src/south/docs/releasenotes/0.7.2.rst new file mode 100644 index 0000000..dd23fc0 --- /dev/null +++ b/users/src/south/docs/releasenotes/0.7.2.rst @@ -0,0 +1,51 @@ + +.. 
_0-7-2-release-notes: + +=========== +South 0.7.2 +=========== + +This is a minor new release of South, and the second bugfix release for the +:ref:`0.7 series <0-7-release-notes>`. + + +Backwards incompatible changes +============================== + +None. + + +Changes +======= + +Ordering of actions +------------------- + +A few issues with ordering of index deletion versus field/table deletion have +now been fixed, so hopefully things will delete or migrate backwards first time. + +blank +----- + +If you have a CharField or TextField with blank=True, you now no longer need +to specify a default value. In addition, changes to blank no longer trigger +an alteration migration for that field, since it doesn't affect the database. + +Schemas +------- + +South should now work if you aren't using the 'public' schema; you'll need to +set the SCHEMA database setting first, though. + +Arguments +--------- + +A bit of tidying up has been done for arguments; `migrate` now accepts `--noinput` +and `convert_to_south` accepts the ghost migration options. + +Other changes +------------- + +A whole assortment of minor bugs has been fixed; for the complete list, see +the `milestone in our Trac `_. + diff --git a/users/src/south/docs/releasenotes/0.7.3.rst b/users/src/south/docs/releasenotes/0.7.3.rst new file mode 100644 index 0000000..a554c78 --- /dev/null +++ b/users/src/south/docs/releasenotes/0.7.3.rst @@ -0,0 +1,37 @@ + +.. _0-7-3-release-notes: + +=========== +South 0.7.3 +=========== + +This is a minor new release of South, and the third bugfix release for the +:ref:`0.7 series <0-7-release-notes>`. + + +Backwards incompatible changes +============================== + +None. + + +More NULL safety checks +======================= + +South now also checks if you're converting a field to/from NULL and makes you add defaults as appropriate. 
+ +Circular Dependency Fixes +========================= + +South's circular-dependency-checking engine has had some fixes to stop false positives. + +PyODBC backend improvements +=========================== + +Thanks to Shai Berger, the MSSQL backend has had some much-needed improvements. + +Various other improvements +========================== + +Fixes to generated migration names, table name escaping, WSGI compatability, MySQL foreign key checks, and 2.4 compatability. + diff --git a/users/src/south/docs/releasenotes/0.7.rst b/users/src/south/docs/releasenotes/0.7.rst new file mode 100644 index 0000000..0c806ae --- /dev/null +++ b/users/src/south/docs/releasenotes/0.7.rst @@ -0,0 +1,130 @@ + +.. _0-7-release-notes: + +========= +South 0.7 +========= + +This is a major new release of South. A lot of work has been done to the +internals, and a few annoying remnants from South's history have +finally been eradicated. + + +Backwards incompatible changes +============================== + +Tests now run with migrations by default, not using syncdb for everything as in +0.6. This is the behaviour most people expect; to turn it off again, +set ``SOUTH_TESTS_MIGRATE`` to False (migrating everything can be slow). + +In addition, you may note that some or all of your custom fields don't work when +you upgrade; read more about this at :ref:`custom-fields`. You may also wish to +change your old migration files and insert the full path to custom field classes +in the ``models`` dictionary entries, to prevent future issues. + +Finally, migration names must now not contain any dashes (or other characters +invalid in Python module names) - if they do, you'll need to rename them and +also fix the appropriate entries in your south_migrationhistory table. 
+ + +Major changes +============= + +Core Refactoring +---------------- + +The entire migration and dependency engine has been refactored to be +class-based, rather than the mess of functions and variables it was before, +and will now be a lot easier to maintain, as well as being nice and quick. + +Much thanks to Simon Law for doing a lot of the legwork on this one. + +Command Changes +--------------- + +The ``startmigration`` command (which used to be one massive file) has been +removed, and refactored into new commands: + + - ``schemamigration``, which is very similar to the old ``startmigration`` + - ``datamigration``, which should be used to create new data migrations + +In addition, the ``--model`` argument to ``startmigration`` is now +``--add-model`` on ``schemamigration``, for consistency with the other +arguments, and ``schemamigration`` no longer requires a migration name; if you +don't provide one, it will autogenerate a reasonably sensible one. + +Finally, South now detects when you're adding a column that needs a default +value, and prompts you for it, rather than crashing when you tried to apply +the migration, like before. + +Django Support +-------------- + +This version of South fully supports Django 1.2 (as well as 1.1 and 1.0), +and has some limited multi-db functionality (migrate has gained a --database +option) [#]_. + +.. [#] Note that multi-db functionality is unavailable if using South 0.7 with + earlier versions of Django. + +Custom Fields +------------- + +Custom fields are no longer parsed if they don't introspect; instead, an +error is raised every time. This is because parsing was causing scenarios where +migrations sometimes worked, and then failed mysteriously later; the new +solution means they'll always work or fail. 
+ +This does have the unfortunate side-effect of making South not "magically" make +your simpler custom fields work any more; we're trying to help by shipping +introspection modules for the more common third-party apps with South, but you +may also want to read the new +:ref:`reference for your own introspection rules `, or +:ref:`our new tutorial chapter on it `. + +Migration Directories +--------------------- + +You can now set custom migration directories (actually done as Python modules) +if you need per-project migrations for an app, or if you are using third-party +apps and don't want to store the migrations with the app. + +You simply need to set the new :ref:`setting-south-migration-modules` setting. + +Supported Databases +------------------- + +SQLite now has full, near-bulletproof support for altering columns, +deleting columns, and other basic operations SQLite doesn't support natively. + +Oracle now has alpha support. + +Migrations Files +---------------- + +Migrations files no longer import from appname.models; model classes are now +referred to by their full path, and retrieved using ``Migration.gf`` - this +means a field now looks like:: + + self.gf('django.db.models.fields.TextField')(blank=True) + +Also, migration classes should now inherit from ``south.v2.SchemaMigration`` or +``south.v2.DataMigration``. This doesn't do much at the moment, but is designed +so we can easily change the migration API in future and keep backwards +compatibility. + +Bugfixes and minor changes +-------------------------- + +There's also an assorted array of bugfixes; see the `milestone status page +`_ for details. + +Thanks +====== + +This release wouldn't have been possible without: + + - Simon Law, who wrote most of the migration refactor and now knows too much about how our dependencies work + - `Torchbox `_, who sponsored Andrew's work on the startmigration refactor, the rest of the migration refactor, and a lot of other small things. 
+ - Ilya Roitburg, who contributed the Oracle database module. + diff --git a/users/src/south/docs/releasenotes/index.rst b/users/src/south/docs/releasenotes/index.rst new file mode 100644 index 0000000..81c96b5 --- /dev/null +++ b/users/src/south/docs/releasenotes/index.rst @@ -0,0 +1,12 @@ +Release Notes +============= + +Release notes from various versions of South. + +.. toctree:: + :maxdepth: 1 + + 0.7 + 0.7.1 + 0.7.2 + 0.7.3 diff --git a/users/src/south/docs/settings.rst b/users/src/south/docs/settings.rst new file mode 100644 index 0000000..79b2c1c --- /dev/null +++ b/users/src/south/docs/settings.rst @@ -0,0 +1,120 @@ + +.. _settings: + +Settings +======== + +South has its own clutch of custom settings you can use to tweak its operation. +As with normal Django settings, these go in ``settings.py``, or a variant thereof. + +SKIP_SOUTH_TESTS +---------------- + +South has a somewhat fragile test suite, as it has to fiddle with +``INSTALLED_APPS`` at runtime to load in its own testing apps. If the South +tests are failing for you, and you'd rather they be ignored +(by your CI system or similar, in particular) set this to ``True``. +Defaults to ``False``. + +SOUTH_DATABASE_ADAPTER +---------------------- + +*(Django 1.1 and below)* + +If set, overrides the database module South uses for generating DDL commands. +Defaults to ``south.db.``. + +SOUTH_DATABASE_ADAPTERS +----------------------- + +*(Django 1.2 and above)* + +A dictionary with database aliases as keys and the database module South will +use as values. South defaults to using the internal ``south.db`` modules. + +MySQL STORAGE_ENGINE +-------------------- + +If (database-specific) ``STORAGE_ENGINE`` is set, South will tell MySQL to use +the given storage engine for new items. + +For Django versions before 1.2 the (global) setting is ``DATABASE_STORAGE_ENGINE``. + +Example for Django 1.2 and above:: + + DATABASES = { + 'default': { + ... 
+ 'STORAGE_ENGINE': 'INNODB' + } + } + +For Django before 1.2:: + + DATABASE_STORAGE_ENGINE = 'INNODB' + +SOUTH_AUTO_FREEZE_APP +--------------------- + +When set, South freezes a migration's app and appends it to the bottom of the +migration file (the default behaviour, and required for ``--auto`` to work). +If you want to manually pass in ``--freeze appname`` instead, or just don't +like the clutter, set this to ``False``. Defaults to ``True``. + +SOUTH_TESTS_MIGRATE +------------------- + +If this is ``False``, South's test runner integration will make the test +database be created using syncdb, rather than via migrations (the default). +Set this to ``False`` if you have migrations which take too long to migrate +every time tests run, but be wary if you rely on migrations to do special things. +Defaults to ``True`` in 0.7 and above, ``False`` in 0.6 and below. + +SOUTH_LOGGING_ON +---------------- + +If this is True the SQL run by South is logged to a file. +You must also set ``SOUTH_LOGGING_FILE`` to a valid file that you want to log to. + +SOUTH_LOGGING_FILE +------------------ + +See SOUTH_LOGGING_ON for more info. + +A sample setting would be:: + + SOUTH_LOGGING_FILE = os.path.join(os.path.dirname(__file__),"south.log") + + +.. _setting-south-migration-modules: + +SOUTH_MIGRATION_MODULES +----------------------- + +*(South 0.7 and higher)* + +A dictionary of alternative migration modules for apps. By default, apps look +for their migrations in ".migrations", but you can override this here, +if you have project-specific migrations sets. + +Note that the keys in this dictionary are 'app labels', not the full paths to apps; +for example, were I to provide a migrations directory for ``django.contrib.auth``, +I'd want to use ``auth`` as the key here. 
+ +Example:: + + SOUTH_MIGRATION_MODULES = { + 'books': 'myproject.migrations.books', + } + +Additionally, you can use this setting to turn off migrations for certain apps, +by saying their migrations are in some nonexistent module; for example:: + + SOUTH_MIGRATION_MODULES = { + 'books': 'ignore', + } + +SOUTH_USE_PYC +------------- + +If set to ``True``, South will also use .pyc files for migrations. Useful if you distribute your code only in .pyc format. diff --git a/users/src/south/docs/signals.rst b/users/src/south/docs/signals.rst new file mode 100644 index 0000000..5fec21e --- /dev/null +++ b/users/src/south/docs/signals.rst @@ -0,0 +1,37 @@ + +.. _signals: + +Signals +======= + +South offers its own signals, if you want to write code which executes before +or after migrations. They're available from ``south.signals``. + + +pre_migrate +----------- + +Sent just before South starts running migrations for an app. + +Provides one argument, ``app``, a string containing the app's label. + + +post_migrate +------------ + +Sent just after South successfully finishes running migrations for an app. Note +that if the migrations fail in the middle of executing, this will not get called. + +Provides one argument, ``app``, a string containing the app's label. + + +ran_migration +------------- + +Sent just after South successfully runs a single migration file; can easily be +sent multiple times in one run of South, possibly hundreds of times if you +have hundreds of migrations, and are doing a fresh install. + +Provides three arguments, ``app``, a string containing the app's label, +``migration``, a string containing the name of the migration file without the +file extension, and ``method``, which is either ``"forwards"`` or ``"backwards"``. 
\ No newline at end of file diff --git a/users/src/south/docs/tutorial/index.rst b/users/src/south/docs/tutorial/index.rst new file mode 100644 index 0000000..c0b316c --- /dev/null +++ b/users/src/south/docs/tutorial/index.rst @@ -0,0 +1,22 @@ + +.. _tutorial: + +Tutorial +======== + +This is South's new tutorial, designed to give a reasonably comprehensive +grounding in all of the basic features of South. We recommend you work through +from the beginning if you're new, or dip in if you already know about some of +the features. + +Advanced features have their own extensive documentation, as well; the +:ref:`main page ` gives you an overview of all the topics. + +.. toctree:: + :maxdepth: 2 + + part1 + part2 + part3 + part4 + part5 \ No newline at end of file diff --git a/users/src/south/docs/tutorial/part1.rst b/users/src/south/docs/tutorial/part1.rst new file mode 100644 index 0000000..064deff --- /dev/null +++ b/users/src/south/docs/tutorial/part1.rst @@ -0,0 +1,137 @@ + +.. _tutorial-part-1: + +Part 1: The Basics +================== + +Welcome to the South tutorial; here, we'll try and cover all the basic usage of +South, as well as giving you some general hints about what else to do. + +If you've never heard of the idea of a migrations library, then please read +:ref:`what-are-migrations` first; that will help you get a better understanding +of what both South (and others, such as django-evolution) are trying to achieve. + +This tutorial assumes you have South installed correctly; if not, see the +:ref:`installation instructions `. + +Starting off +------------ + +In this tutorial, we'll follow the process of using migrations on a brand new +app. Don't worry about converting your existing apps; we'll cover that in the +next part. + +The first thing to note is that South is per-application; migrations are stored +along with the app's code [#]_. 
If an app doesn't have any migrations defined, +South will ignore it, and it will behave as normal (that is, using syncdb). + +.. [#] You can also :ref:`store them elsewhere ` if you like. + +So, find a project to work in (or make a new one, and set it up with a database +and other settings), and let's create our new app:: + + ./manage.py startapp southtut + +As usual, this should make a new directory ``southtut/``. First, add it to +``INSTALLED_APPS``, then open up the newly-created ``southtut/models.py``, +and create a new model:: + + from django.db import models + + class Knight(models.Model): + name = models.CharField(max_length=100) + of_the_round_table = models.BooleanField() + +It's quite simple, but it'll do. Now, instead of running ``syncdb`` to create +a table for the model in our database, we'll create a migration for it. + +The First Migration +------------------- + +South has several ways of creating migrations; some are automatic, some are +manual. As a basic user, you'll probably use the two automatic ways - ``--auto`` +and ``--initial``. + +``--auto`` looks at the previous migration, works out what's changed, and +creates a migration which applies the differences - for example, if you add a +field to a model, ``--auto`` will notice this, and make a migration which +creates a new column for that field on its model's table. + +However, you'll notice that ``--auto`` needs a previous migration - our new +app doesn't have one. Instead, in this case, we need to use ``--initial``, which +will create tables and indexes for all of the models in the app; it's what you +use first, much like ``syncdb``, and ``--auto`` is then used afterwards for +each change. + +So, let's create our first migration:: + + $ ./manage.py schemamigration southtut --initial + Creating migrations directory at '/home/andrew/Programs/litret/southtut/migrations'... + Creating __init__.py in '/home/andrew/Programs/litret/southtut/migrations'... 
+ + Added model southtut.Knight + Created 0001_initial.py. You can now apply this migration with: ./manage.py migrate southtut + +(If this fails complaining that ``south_migrationhistory`` does not exist, you +forgot to run syncdb :ref:`after you installed South `.) + +As you can see, that's created a migrations directory for us, and made a new +migration inside it. All we need to do now is apply our new migration:: + + $ ./manage.py migrate southtut + Running migrations for southtut: + - Migrating forwards to 0001_initial. + > southtut:0001_initial + - Loading initial data for southtut. + +With that, South has created the new table for our model; check if you like, and +try adding a few Knights using ``./manage.py shell``. + + +Changing the model +------------------ + +So far, we've done nothing that ``syncdb`` couldn't accomplish; time to change +that (or rather, our model). Let's add another field to our model:: + + from django.db import models + + class Knight(models.Model): + name = models.CharField(max_length=100) + of_the_round_table = models.BooleanField() + dances_whenever_able = models.BooleanField() + +Now, if we weren't using migrations, making this new column appear on our +``southtut_knight`` table would be annoying at best. However, with South, we +need only do two, quick steps: make a migration for the change, then apply it. + +First, make the new migration, using the --auto feature:: + + $ ./manage.py schemamigration southtut --auto + + Added field dances_whenever_able on southtut.Knight + Created 0002_auto__add_field_knight_dances_whenever_able.py. You can now apply this migration with: ./manage.py migrate southtut + +*(Notice that South has automatically picked a name for this migration; you +can instead give migrations custom names by providing it as another argument)* + +Now, apply it:: + + $ ./manage.py migrate southtut + Running migrations for southtut: + - Migrating forwards to 0002_auto__add_field_knight_dances_whenever_able. 
+ > southtut:0002_auto__add_field_knight_dances_whenever_able + - Loading initial data for southtut. + +With that, our new column is created; again, go and check, you'll be able to +add Knights who can dance whenever they're able. + +Converting existing apps +------------------------ + +Sometimes, especially when introducing South into a project, you will want +to use it for existing apps - ones for which the tables have already been created. + +This is different from adding migrations to an all-new app, and you should +see the :ref:`converting-an-app` page for more information on how to do it. + +Once you're happy with this basic usage of South, move on to +:ref:`tutorial-part-2`. \ No newline at end of file diff --git a/users/src/south/docs/tutorial/part2.rst b/users/src/south/docs/tutorial/part2.rst new file mode 100644 index 0000000..b6e8865 --- /dev/null +++ b/users/src/south/docs/tutorial/part2.rst @@ -0,0 +1,154 @@ + +.. _tutorial-part-2: + +Part 2: Advanced Changes +======================== + +Now you've done a simple change to the model, let's look at some of the more +advanced changes you can do with South. + +.. _tutorial-part-2-defaults: + +Defaults +-------- + +Firstly, let's deal with more tricky column types. In the previous part, we +added a ``BooleanField`` to the table - this is easy for a database to handle, +as it has a default value (of ``False``) specified, so that's the value that +gets used for the column in all of the existing rows. + +However, some columns don't have a default defined. If the column is nullable - +that is, ``null=True`` - then the existing rows will have NULL in the new +column. Otherwise, if you've given no default, but the column is ``NOT NULL`` +(i.e. ``null=False``, the default), there's no value the database can put in +the new column, and so you won't be able to reliably add the column [#]_. + +.. 
[#] Some database backends will let you add the column anyway if the table + is empty, while some will refuse outright in this scenario. + +If South detects such a situation, it will pop up and ask you what to do; let's +make it do so. + +First, change your model to add a new field that has no default, but is also +not nullable:: + + from django.db import models + + class Knight(models.Model): + name = models.CharField(max_length=100) + of_the_round_table = models.BooleanField() + dances_whenever_able = models.BooleanField() + shrubberies = models.IntegerField(null=False) + +Now, let's try and get South to automatically generate a migration for that:: + + ./manage.py schemamigration southtut --auto + ? The field 'Knight.shrubberies' does not have a default specified, yet is NOT NULL. + ? Since you are adding or removing this field, you MUST specify a default + ? value to use for existing rows. Would you like to: + ? 1. Quit now, and add a default to the field in models.py + ? 2. Specify a one-off value to use for existing columns now + ? Please select a choice: + +South presents you with two options; if you select choice one, the command will +quit without doing anything, and you should edit your ``models.py`` and add a +default to the new field. + +If you select choice two, you'll get a Python prompt, where you should enter the +default value you want to use for this migration. The default you enter will +only ever be used for the currently-existing rows - this is a good option if +you don't want the field on your model to have a default value. + +We'll select choice two, and use ``0`` as our default (it is an IntegerField, +after all):: + + ? Please select a choice: 2 + ? Please enter Python code for your one-off default value. + ? The datetime module is available, so you can do e.g. datetime.date.today() + >>> 0 + + Added field shrubberies on southtut.Knight + Created 0003_auto__add_field_knight_shrubberies.py. 
You can now apply this migration with: ./manage.py migrate southtut + +If you look at the generated migration, you'll see that there's a default +specified for the new field, so your database won't cry. Finish off by running +the migration:: + + $ ./manage.py migrate southtut + Running migrations for southtut: + - Migrating forwards to 0003_auto__add_field_knight_shrubberies. + > southtut:0003_auto__add_field_knight_shrubberies + - Loading initial data for southtut. + + +Uniques +------- + +As well as detecting new fields (and also ones you've removed), South also +detects most changes to fields, including changing their ``unique`` attributes. + +First, let's make our Knights have unique names:: + + from django.db import models + + class Knight(models.Model): + name = models.CharField(max_length=100, unique=True) + of_the_round_table = models.BooleanField() + dances_whenever_able = models.BooleanField() + shrubberies = models.IntegerField(null=False) + +Run the automatic migration creator:: + + $ ./manage.py schemamigration --auto southtut + + Added unique constraint for ['name'] on southtut.Knight + Created 0004_auto__add_unique_knight_name.py. You can now apply this migration with: ./manage.py migrate southtut + +As you can see, it's detected the change in ``unique``; you can now apply it:: + + $ ./manage.py migrate southtut + Running migrations for southtut: + - Migrating forwards to 0004_auto__add_unique_knight_name. + > southtut:0004_auto__add_unique_knight_name + - Loading initial data for southtut. + +South also detects changes to ``unique_together`` in your model's ``Meta`` in +the same way. + + +ManyToMany fields +----------------- + +South should automatically detect ManyToMany fields; when you add the field, +South will create the table the ManyToMany represents, and when you remove the +field, the table will be deleted. + +The one exception to this is when you have a 'through model' (i.e. 
you're using +the ``through=`` option) - since the table for the model is already created when +the model is detected, South does nothing with these types of ManyToMany fields. + +Custom fields +------------- + +If you've looked closely at the migration files, you'll see that South stores +field definitions by storing their class, and the arguments that need to be +passed to the field's constructor. + +Since Python offers no way to get the arguments used in a class' constructor +directly, South uses something called the *model introspector* to work out +what arguments fields were passed. This knows what variables the arguments +are stored into on the field, and using this knowledge, can reconstruct the +arguments directly. + +Because custom fields (either those written by you, or included with third-party +apps) are all different, South can't work out how to get their arguments without +extra help, so if you try to add, change or remove custom fields, South will +bail out and say that you need to give it rules for your custom fields; this +topic is covered in detail in :ref:`custom-fields`. + +More? +----- + +South supports most operations you'll do on your models day-to-day; if you're +interested, there's a :ref:`full list of what the autodetector supports +`. + +You'll probably want to read :ref:`tutorial-part-3` next. diff --git a/users/src/south/docs/tutorial/part3.rst b/users/src/south/docs/tutorial/part3.rst new file mode 100644 index 0000000..29b9d36 --- /dev/null +++ b/users/src/south/docs/tutorial/part3.rst @@ -0,0 +1,209 @@ + +.. _tutorial-part-3: + +Part 3: Advanced Commands and Data Migrations +============================================= + +Listing current migrations +-------------------------- + +It can be very useful to know what migrations you currently have applied, and +which ones are available. For this reason, there's ``./manage.py migrate --list``. 
+ +Run against our project from before, we get:: + + $ ./manage.py migrate --list + + southtut + (*) 0001_initial + (*) 0002_auto__add_field_knight_dances_whenever_able + (*) 0003_auto__add_field_knight_shrubberies + (*) 0004_auto__add_unique_knight_name + +The output has an asterisk ``(*)`` next to a migration name if it has been +applied, and an empty space ``( )`` if not [#]_. + +If you have a lot of apps or migrations, you can also specify an app name +to show just the migrations from that app. + +.. [#] An interesting side effect of this is that you can run the command + ``./manage.py migrate --list |grep -v "*"`` to see which migrations + are unapplied, and need running. + +Data migrations +--------------- + +The previous parts have only covered *schema migrations* - migrations which +change the layout of your columns and indexes. There's also another kind of +migration, the so-called *data migration*. + +Data migrations are used to change the data stored in your database to match +a new schema, or feature. For example, if you've been storing passwords in +plain text [#]_, and you're moving to salted and hashed passwords, you might +have these three steps (where each step corresponds to a migration): + + - Create two new columns, ``password_salt`` and ``password_hash`` (a schema + migration). + - Using the contents of the old ``password`` column, calculate salts and hashes + for each user (a data migration) + - Remove the old ``password`` column (a schema migration). + +.. [#] If you're actually storing passwords in plaintext, please convert. Now. + +The first and last migrations you already know how to do; make the relevant +changes in the models.py file, and run ``./manage.py schemamigration --auto +myapp``. Remember that you need to add the two columns separately to deleting +the old column, as otherwise the old column won't be around for us to get data +out of, and you'll have lost all your users' passwords [#]_. + +.. 
[#] Always, always, backup your database before doing any kind of potentially + destructive migration. One time, it *will* go wrong. + +Let's follow a real example. Make a new app, and call it ``southtut2``. Add it +to ``INSTALLED_APPS``, and then give it this model:: + + from django.db import models + + class User(models.Model): + + username = models.CharField(max_length=255) + password = models.CharField(max_length=60) + name = models.TextField() + +Make an initial migration for it, apply it, and then add a record:: + + $ ./manage.py schemamigration --initial southtut2 + Creating migrations directory at '/home/andrew/Programs/litret/southtut2/migrations'... + Creating __init__.py in '/home/andrew/Programs/litret/southtut2/migrations'... + + Added model southtut2.User + Created 0001_initial.py. You can now apply this migration with: ./manage.py migrate southtut2 + + $ ./manage.py migrate southtut2 + Running migrations for southtut2: + - Migrating forwards to 0001_initial. + > southtut2:0001_initial + - Loading initial data for southtut2. + + $ ./manage.py shell + In [1]: from southtut2.models import User + + In [2]: User.objects.create(username="andrew", password="ihopetheycantseethis", name="Andrew Godwin") + Out[2]: + + In [3]: User.objects.get(id=1).password + Out[3]: u'ihopetheycantseethis' + +As you can see, the password is clearly visible, which isn't good. Let's move +to password hashing, while keeping everyone's password valid. 
Firstly, +modify the model so it looks like this:: + + from django.db import models + import sha + + class User(models.Model): + + username = models.CharField(max_length=255) + password = models.CharField(max_length=60) + password_salt = models.CharField(max_length=8, null=True) + password_hash = models.CharField(max_length=40, null=True) + name = models.TextField() + + def check_password(self, password): + return sha.sha(self.password_salt + password).hexdigest() == self.password_hash + +Make a schema migration that will create our two new columns (notice that +they've both been added as ``null=True``; once they have data, we'll +alter them to be ``null=False``):: + + $ ./manage.py schemamigration southtut2 --auto + + Added field password_salt on southtut2.User + + Added field password_hash on southtut2.User + Created 0002_auto__add_field_user_password_salt__add_field_user_password_hash.py. You can now apply this migration with: ./manage.py migrate southtut2 + +Now, the second migration is more interesting. Firstly, we need to create a +skeleton data migration (unlike schema migrations, South can't write these for +you):: + + $ ./manage.py datamigration southtut2 hash_passwords + Created 0003_hash_passwords.py. + +If you open up the file, you'll see that South has made the shell of a migration; +the models definitions are there, the forwards() and backwards() functions are +these, but there's no code in either. 
We'll write some code to port the +passwords over in the forwards function:: + + def forwards(self, orm): + import random, sha, string + for user in orm.User.objects.all(): + user.password_salt = "".join([random.choice(string.letters) for i in range(8)]) + user.password_hash = sha.sha(user.password_salt + user.password).hexdigest() + user.save() + +Notice that we use ``orm.User`` to access the User model - this gives us the +version of User from when this migration was created, so if we want to run +the migration in future, it won't get a completely different, new, User model. + +If you want to access models from other apps in your data migration, use a +syntax like ``orm['contenttypes.ContentType']``. Models will be available if you +can somehow get to them via ForeignKey or ManyToMany traversal from your app's +models; if you want to freeze other models, simply pass ``--freeze appname`` on +the ``datamigration`` command line. + +We should also raise an error in the ``backwards()`` method, since this process +is by its very nature irreversible:: + + def backwards(self, orm): + raise RuntimeError("Cannot reverse this migration.") + +That looks good. Finally, remove the ``password`` field from your model, and +run ``schemamigration`` one last time to make a migration to remove that field:: + + $ ./manage.py schemamigration southtut2 --auto + ? The field 'User.password' does not have a default specified, yet is NOT NULL. + ? Since you are adding or removing this field, you MUST specify a default + ? value to use for existing rows. Would you like to: + ? 1. Quit now, and add a default to the field in models.py + ? 2. Specify a one-off value to use for existing columns now + ? Please select a choice: 2 + ? Please enter Python code for your one-off default value. + ? The datetime module is available, so you can do e.g. datetime.date.today() + >>> "" + - Deleted field password on southtut2.User + Created 0004_auto__del_field_user_password.py. 
You can now apply this migration with: ./manage.py migrate southtut2 + +Notice that South is asking for a default value for ``password``; if you were to +reverse this migration, it tries to re-add the ``password`` column, and thus +needs either a default value or for the field to be ``null=True``. Here, I've +fed it the empty string, as that's a reasonable default in this case. + +Finally, let's apply all three migrations:: + + $ ./manage.py migrate southtut2 + Running migrations for southtut2: + - Migrating forwards to 0004_auto__del_field_user_password. + > southtut2:0002_auto__add_field_user_password_salt__add_field_user_password_hash + > southtut2:0003_hash_passwords + > southtut2:0004_auto__del_field_user_password + - Loading initial data for southtut2. + +Looks good - we've added the new columns, migrated the passwords over, and then +deleted the old column. Let's check our data was preserved:: + + $ ./manage.py shell + In [1]: from southtut2.models import User + + In [2]: User.objects.get(id=1).check_password("ihopetheycantseethis") + Out[2]: True + + In [3]: User.objects.get(id=1).check_password("fakepass") + Out[3]: False + +That looks like a successful data migration! + +You can do a lot more with this inside a data migration; any model can be +available to you. The only caveat is that you won't have access to any custom +methods or managers on your models, as they're not preserved as part of the +freezing process (there's no way to do this generally); you'll have to copy any +code you want into the migration itself. Feel free to make them methods on +the ``Migration`` class; South ignores everything apart from ``forwards`` and +``backwards``. \ No newline at end of file diff --git a/users/src/south/docs/tutorial/part4.rst b/users/src/south/docs/tutorial/part4.rst new file mode 100644 index 0000000..372b3d4 --- /dev/null +++ b/users/src/south/docs/tutorial/part4.rst @@ -0,0 +1,152 @@ + +.. 
_tutorial-part-4: + +Part 4: Custom Fields +===================== + +South 0.7 introduced a reasonably radical change from previous versions. Before, +if you had a custom field, South would attempt to use magic [#]_ to determine +how to freeze that field, so it could be recreated in a migration. + +.. [#] And not very nice magic, either; a combination of regexes and the python + ``parser`` module. + +While this worked surprisingly well for most people, in a small percentage of +cases it would get it completely wrong - even worse, you wouldn't know it was +wrong until things changed a few weeks later. In the interests of both sanity +and having less magic, you must now tell South how to freeze your custom fields. + +Don't worry, it's pretty easy, and you only have to do it once per field. + +Our Field +--------- + +In this example, we'll be using a custom field which stores a list of tags in +the database. We'll just store them in a TEXT column, with some delimiter +separating the values (by default, we'll use ``|``, but they can pass in +something else as a keyword argument). + +Here's the field class; in my code, I put this in ``appname/fields.py`` +(for more on writing custom fields, see the +`Django docs `_):: + + from django.db import models + + class TagField(models.TextField): + + description = "Stores tags in a single database column." + + __metaclass__ = models.SubfieldBase + + def __init__(self, delimiter="|", *args, **kwargs): + self.delimiter = delimiter + super(TagField, self).__init__(*args, **kwargs) + + def to_python(self, value): + # If it's already a list, leave it + if isinstance(value, list): + return value + + # Otherwise, split by delimiter + return value.split(self.delimiter) + + def get_prep_value(self, value): + return self.delimiter.join(value) + +To tell South about a custom field, you need to tell it two things; that this +particular class is OK to use, and how to reconstruct the keyword arguments +from a Field instance. 
+ +Keyword Arguments +----------------- + +South freezes fields by storing their class name and module (so it can get the +field class itself) and the keyword arguments you used for that particular +instance (for example, ``CharField(max_length=50)`` is a different database +type to ``CharField(max_length=150)``). + +Since Python doesn't store the keyword arguments a class was passed, South has +to reconstruct them using the field instance. For example, we know that +``CharField``'s ``max_length`` attribute is stored as ``self.max_length``, while +``ForeignKeys`` store their ``to`` attribute (the model they point to - also the +first positional argument) as ``self.rel.to``. + +South knows all these rules for the core Django fields, but you need to tell it +about your own ones. The good news is that South will trace the inheritance tree +of your field class and add on rules from parent classes it knows about - thus, +you only need tell South about extra keyword arguments you've added, not every +possible argument the field could have. + +In our example, we've only specified one extra keyword: ``delimiter``. Here's +the code we'd add for South to work with our new field; I'll explain it in a +minute:: + + from south.modelsinspector import add_introspection_rules + add_introspection_rules([ + ( + [TagField], # Class(es) these apply to + [], # Positional arguments (not used) + { # Keyword argument + "delimiter": ["delimiter", {"default": "|"}], + }, + ), + ], ["^southtut\.fields\.TagField"]) + +As you can see, to tell South about your new fields, you need to call the +``south.modelsinspector.add_introspection_rules`` function. You should put this +code next to the definition of your field; the last thing you want is for the +field to get imported, but for this code to not run. + +``add_introspection_rules`` takes two arguments; a list of rules, and a list of +regular expressions. 
The regular expressions are used by South to see if a field +is allowed to be introspected; just having a rule that matches it isn't enough, +as rule inheritance means that any custom field class will have at least some +rules on it (as they will inherit from ``Field``, if not something more specific +like ``CharField``), and some custom fields can get by with only those +inherited rules (more on that shortly). + +The first argument is the list of rules. Each rule is a tuple (or list) with +three items: + + - A list of classes these rules apply to. You'll almost certainly have just + ``[MyField]`` here. + - Positional argument specification. This should always be left blank, as an + empty list - ``[]``. + - Keyword argument specification. This is a dictionary, with the key being the + name of the keyword argument, and the value being a tuple or list of + ``(attribute_name, options)``. + +The attribute name says where the value of the keyword can be found - in our +case, it's ``'delimiter'``, as we stored our keyword in ``self.delimiter``. (If +this was the ``ForeignKey`` rule, we'd put ``'rel.to'`` here) + +``options`` is a dictionary. You can safely leave it blank, but to make things +nicer, we can use it to specify the default value of this keyword - if the value +South finds matches this, it will leave out this keyword from the frozen +definition. This helps keep the frozen definitions shorter and more readable. + +Simple Inheritance +------------------ + +If your field inherits directly from another Django field - say ``CharField`` - +and doesn't add any new keyword arguments, there's no need to have any rules +in your ``add_introspection_rules``; you can just tell South that the field +is alright as it is:: + + class UpperCaseField(models.TextField): + "Makes sure its content is always upper-case." 
+ + def to_python(self, value): + return value.upper() + + def get_prep_value(self, value): + return value.upper() + + from south.modelsinspector import add_introspection_rules + add_introspection_rules([], ["^southtut\.fields\.UpperCaseField"]) + +More Information +---------------- + +There's more documentation on this subject, and on all the possible options, +in the :ref:`extending-introspection` section. \ No newline at end of file diff --git a/users/src/south/docs/tutorial/part5.rst b/users/src/south/docs/tutorial/part5.rst new file mode 100644 index 0000000..6e43bec --- /dev/null +++ b/users/src/south/docs/tutorial/part5.rst @@ -0,0 +1,98 @@ + +.. _tutorial-part-5: + +Part 5: Teams and Workflow +========================== + +Migrations are all about improving the workflow for the developers and database +administrators of projects, and we think it's very important that it doesn't add +too much overhead to your daily coding, while at the same time reducing headaches +caused by the inevitable changes in schema every project has. + +Firstly, note that migrations aren't a magic bullet. If you've suddenly decided +you're going to rearchitect your entire database schema, it might well be easier +to not write migrations and just start again, especially if you have no +production sites using the code (if you do, you might find custom +serialisation/unserialisation to be a better way of saving your data). + +With that in mind, migrations are really something you should be using the rest +of the time. Hopefully, the previous parts of the tutorial have got you familiar +with what can easily be achieved with them; we've tried to cover a good +percentage of use cases, and if you think something should be included, don't +hesitate to ask for it. 
+ + +Developer Workflow +------------------ + +As a developer, you should be doing things in this order: + + - Make the change to your models.py file (and affected code, such as + post_syncdb signal hooks) + - Make the migration + - Rinse, repeat. + +Don't try to make migrations before you make the changes; this will both +invalidate the frozen model data on the migration and make startmigration --auto +think nothing has changed. If you're making a large change, and want to split it +over several migrations, do each schema change to models.py separately, then make +the migration, and then make the next small change. + + +Team Workflow +------------- + +While migrations for an individual developer are useful, teams are perhaps the +real reason they exist. It's very likely more than one member of your team will +be making database changes, and migrations allow the other developers to apply +their schema changes effortlessly and reproducibly. + +You should keep all of your migrations in a VCS (for obvious reasons), and +encourage developers to run ``./manage.py migrate`` if they see a new migration come +in when they do an update or pull. + +The issue with teams and migrations occurs when more than one person makes a +migration in the same timeslot, and they both get committed without the other +having been applied. This is analogous to two people editing the same file in a +VCS at the same time, and like a VCS, South has ways of resolving the problem. + +If this happens, the first thing to note is that South will detect the problem, +and issue a message like this:: + + Inconsistent migration history + The following options are available: + --merge: will just attempt the migration ignoring any potential dependency conflicts. + +If you re-run migrate with ``--merge``, South will simply apply the migrations +that were missing out-of-order. 
This usually works, as teams are working on +separate models; if it doesn't, you'll need to look at the actual migration +changes and resolve them manually, as it's likely they'll conflict. + +The second thing to note is that, when you pull in someone else's model changes +complete with their own migration, you'll need to make a new empty migration +that has the changes from both branches of development frozen in (if you've +used mercurial, this is equivalent to a merge commit). To do so, simply run:: + + ./manage.py schemamigration --empty appname merge_models + +*(Note that merge_models is just a migration name; change it for whatever you +like)* + +The important message here is that *South is no substitute for team coordination* +- in fact, most of the features are there purely to warn you that you haven't +coordinated, and the simple merging on offer is only there for the easy cases. +Make sure your team know who is working on what, so they don't write migrations +that affect the same parts of the DB at the same time. + + +Complex Application Sets +------------------------ + +It's often the case that, with Django projects, there is a set of apps which +references each others' models.py files. This is, at its truest form, a +dependency, and to ensure your migrations for such sets of applications apply +sanely (i.e. the migrations that create the tables in one app happen before the +migration that adds ForeignKeys to them in another app), South has a +:ref:`Dependencies ` feature. Once you've added dependencies to +your migrations, South will ensure all prerequisites of a migration are +applied before applying the migration itself. 
diff --git a/users/src/south/docs/unittests.rst b/users/src/south/docs/unittests.rst new file mode 100644 index 0000000..e97f025 --- /dev/null +++ b/users/src/south/docs/unittests.rst @@ -0,0 +1,21 @@ +Unit Test Integration +===================== + +By default, South's syncdb command will also apply migrations if it's run in +non-interactive mode, which includes when you're running tests - it will run +every migration every time you run your tests. + +If you want the test runner to use syncdb instead of migrate - for example, if +your migrations are taking way too long to apply - simply set +``SOUTH_TESTS_MIGRATE = False`` in settings.py. + +South's own unit tests +---------------------- + +South has its own set of unit tests; these will also be run when you run +./manage.py test. They do some fiddling with Django internals to set up a +proper test environment; it's non-destructive, but if it's fouling up your own +tests please submit a ticket about it. + +You can also set ``SKIP_SOUTH_TESTS=True`` in settings.py to stop South's tests +running, should they be causing issues. \ No newline at end of file diff --git a/users/src/south/docs/whataremigrations.rst b/users/src/south/docs/whataremigrations.rst new file mode 100644 index 0000000..778b972 --- /dev/null +++ b/users/src/south/docs/whataremigrations.rst @@ -0,0 +1,27 @@ + +.. _what-are-migrations: + +What are migrations? +==================== + +For the uninitiated, migrations (also known as 'schema evolution' or +'mutations') are a way of changing your database schema from one version into +another. Django by itself can only do this by adding new models, but nearly all +projects will find themselves changing other aspects of models - be it adding a +new field to a model, or changing a database column to have null=True. + +South, and other solutions, provide a way of getting round this by giving you +the tools to easily and predictably upgrade your database schema. 
You write +migrations, which tell South how to upgrade from one version to the next, and by +stringing these migrations together you can move forwards (or backwards) through +the history of your database schema. + +In South, the migrations also form the way of creating your database initially +- the first migration simply migrates from an empty schema to your first tables. +This way, running through all the migrations brings your database up-to-date +with the most current version of the app, and if you already have an older +version, you simply need to run through the ones that appeared since last time. + +Running through the :ref:`tutorial ` will give you a good +idea of how migrations work and how they're useful to you, with some +solid examples. \ No newline at end of file diff --git a/users/src/south/setup.py b/users/src/south/setup.py new file mode 100755 index 0000000..1c7a622 --- /dev/null +++ b/users/src/south/setup.py @@ -0,0 +1,59 @@ +#!/usr/bin/python + +# Use setuptools if we can +try: + from setuptools.core import setup +except ImportError: + from distutils.core import setup +from south import __version__ + +setup( + name='South', + version=__version__, + description='South: Migrations for Django', + long_description='South is an intelligent database migrations library for the Django web framework. 
It is database-independent and DVCS-friendly, as well as a whole host of other features.', + author='Andrew Godwin & Andy McCurdy', + author_email='south@aeracode.org', + url='http://south.aeracode.org/', + download_url='http://south.aeracode.org/wiki/Download', + classifiers=[ + "Development Status :: 5 - Production/Stable", + "Framework :: Django", + "Intended Audience :: Developers", + "Intended Audience :: System Administrators", + "Intended Audience :: System Administrators", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + "Topic :: Software Development" + ], + packages=[ + 'south', + 'south.creator', + 'south.db', + 'south.management', + 'south.introspection_plugins', + 'south.hacks', + 'south.migration', + 'south.tests', + 'south.db.sql_server', + 'south.management.commands', + 'south.tests.circular_a', + 'south.tests.emptyapp', + 'south.tests.deps_a', + 'south.tests.fakeapp', + 'south.tests.brokenapp', + 'south.tests.circular_b', + 'south.tests.otherfakeapp', + 'south.tests.deps_c', + 'south.tests.deps_b', + 'south.tests.circular_a.migrations', + 'south.tests.emptyapp.migrations', + 'south.tests.deps_a.migrations', + 'south.tests.fakeapp.migrations', + 'south.tests.brokenapp.migrations', + 'south.tests.circular_b.migrations', + 'south.tests.otherfakeapp.migrations', + 'south.tests.deps_c.migrations', + 'south.tests.deps_b.migrations' + ], +) diff --git a/users/src/south/south/__init__.py b/users/src/south/south/__init__.py new file mode 100644 index 0000000..e6abf28 --- /dev/null +++ b/users/src/south/south/__init__.py @@ -0,0 +1,9 @@ +""" +South - Useable migrations for Django apps +""" + +__version__ = "0.7.3" +__authors__ = [ + "Andrew Godwin ", + "Andy McCurdy " +] diff --git a/users/src/south/south/__init__.pyc b/users/src/south/south/__init__.pyc new file mode 100644 index 0000000..64705dc Binary files /dev/null and b/users/src/south/south/__init__.pyc differ diff --git 
a/users/src/south/south/creator/__init__.py b/users/src/south/south/creator/__init__.py new file mode 100644 index 0000000..96a1a80 --- /dev/null +++ b/users/src/south/south/creator/__init__.py @@ -0,0 +1,5 @@ +""" +The creator module is responsible for making new migration files, either +as blank templates or autodetecting changes. It contains code that used to +all be in startmigration.py. +""" diff --git a/users/src/south/south/creator/actions.py b/users/src/south/south/creator/actions.py new file mode 100644 index 0000000..cc4eef3 --- /dev/null +++ b/users/src/south/south/creator/actions.py @@ -0,0 +1,539 @@ +""" +Actions - things like 'a model was removed' or 'a field was changed'. +Each one has a class, which can take the action description and insert code +blocks into the forwards() and backwards() methods, in the right place. +""" + +import sys +import datetime + +from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT +from django.db.models.fields import FieldDoesNotExist, NOT_PROVIDED, CharField, TextField + +from south import modelsinspector +from south.creator.freezer import remove_useless_attributes, model_key + +class Action(object): + """ + Generic base Action class. Contains utility methods for inserting into + the forwards() and backwards() method lists. + """ + + prepend_forwards = False + prepend_backwards = False + + def forwards_code(self): + raise NotImplementedError + + def backwards_code(self): + raise NotImplementedError + + def add_forwards(self, forwards): + if self.prepend_forwards: + forwards.insert(0, self.forwards_code()) + else: + forwards.append(self.forwards_code()) + + def add_backwards(self, backwards): + if self.prepend_backwards: + backwards.insert(0, self.backwards_code()) + else: + backwards.append(self.backwards_code()) + + def console_line(self): + "Returns the string to print on the console, e.g. 
' + Added field foo'" + raise NotImplementedError + + @classmethod + def triples_to_defs(cls, fields): + # Turn the (class, args, kwargs) format into a string + for field, triple in fields.items(): + fields[field] = cls.triple_to_def(triple) + return fields + + @classmethod + def triple_to_def(cls, triple): + "Turns a single triple into a definition." + return "self.gf(%r)(%s)" % ( + triple[0], # Field full path + ", ".join(triple[1] + ["%s=%s" % (kwd, val) for kwd, val in triple[2].items()]), # args and kwds + ) + + +class AddModel(Action): + """ + Addition of a model. Takes the Model subclass that is being created. + """ + + FORWARDS_TEMPLATE = ''' + # Adding model '%(model_name)s' + db.create_table(%(table_name)r, ( + %(field_defs)s + )) + db.send_create_signal(%(app_label)r, [%(model_name)r])'''[1:] + "\n" + + BACKWARDS_TEMPLATE = ''' + # Deleting model '%(model_name)s' + db.delete_table(%(table_name)r)'''[1:] + "\n" + + def __init__(self, model, model_def): + self.model = model + self.model_def = model_def + + def console_line(self): + "Returns the string to print on the console, e.g. ' + Added field foo'" + return " + Added model %s.%s" % ( + self.model._meta.app_label, + self.model._meta.object_name, + ) + + def forwards_code(self): + "Produces the code snippet that gets put into forwards()" + field_defs = ",\n ".join([ + "(%r, %s)" % (name, defn) for name, defn + in self.triples_to_defs(self.model_def).items() + ]) + "," + + return self.FORWARDS_TEMPLATE % { + "model_name": self.model._meta.object_name, + "table_name": self.model._meta.db_table, + "app_label": self.model._meta.app_label, + "field_defs": field_defs, + } + + def backwards_code(self): + "Produces the code snippet that gets put into backwards()" + return self.BACKWARDS_TEMPLATE % { + "model_name": self.model._meta.object_name, + "table_name": self.model._meta.db_table, + } + + +class DeleteModel(AddModel): + """ + Deletion of a model. Takes the Model subclass that is being created. 
+ """ + + def console_line(self): + "Returns the string to print on the console, e.g. ' + Added field foo'" + return " - Deleted model %s.%s" % ( + self.model._meta.app_label, + self.model._meta.object_name, + ) + + def forwards_code(self): + return AddModel.backwards_code(self) + + def backwards_code(self): + return AddModel.forwards_code(self) + + +class _NullIssuesField(object): + """ + A field that might need to ask a question about rogue NULL values. + """ + + allow_third_null_option = False + irreversible = False + + IRREVERSIBLE_TEMPLATE = ''' + # User chose to not deal with backwards NULL issues for '%(model_name)s.%(field_name)s' + raise RuntimeError("Cannot reverse this migration. '%(model_name)s.%(field_name)s' and its values cannot be restored.")''' + + def deal_with_not_null_no_default(self, field, field_def): + # If it's a CharField or TextField that's blank, skip this step. + if isinstance(field, (CharField, TextField)) and field.blank: + field_def[2]['default'] = repr("") + return + # Oh dear. Ask them what to do. + print " ? The field '%s.%s' does not have a default specified, yet is NOT NULL." % ( + self.model._meta.object_name, + field.name, + ) + print " ? Since you are %s, you MUST specify a default" % self.null_reason + print " ? value to use for existing rows. Would you like to:" + print " ? 1. Quit now, and add a default to the field in models.py" + print " ? 2. Specify a one-off value to use for existing columns now" + if self.allow_third_null_option: + print " ? 3. Disable the backwards migration by raising an exception." + while True: + choice = raw_input(" ? Please select a choice: ") + if choice == "1": + sys.exit(1) + elif choice == "2": + break + elif choice == "3" and self.allow_third_null_option: + break + else: + print " ! Invalid choice." 
+ if choice == "2": + self.add_one_time_default(field, field_def) + elif choice == "3": + self.irreversible = True + + def add_one_time_default(self, field, field_def): + # OK, they want to pick their own one-time default. Who are we to refuse? + print " ? Please enter Python code for your one-off default value." + print " ? The datetime module is available, so you can do e.g. datetime.date.today()" + while True: + code = raw_input(" >>> ") + if not code: + print " ! Please enter some code, or 'exit' (with no quotes) to exit." + elif code == "exit": + sys.exit(1) + else: + try: + result = eval(code, {}, {"datetime": datetime}) + except (SyntaxError, NameError), e: + print " ! Invalid input: %s" % e + else: + break + # Right, add the default in. + field_def[2]['default'] = repr(result) + + def irreversable_code(self, field): + return self.IRREVERSIBLE_TEMPLATE % { + "model_name": self.model._meta.object_name, + "table_name": self.model._meta.db_table, + "field_name": field.name, + "field_column": field.column, + } + + +class AddField(Action, _NullIssuesField): + """ + Adds a field to a model. Takes a Model class and the field name. 
+ """ + + null_reason = "adding this field" + + FORWARDS_TEMPLATE = ''' + # Adding field '%(model_name)s.%(field_name)s' + db.add_column(%(table_name)r, %(field_name)r, + %(field_def)s, + keep_default=False)'''[1:] + "\n" + + BACKWARDS_TEMPLATE = ''' + # Deleting field '%(model_name)s.%(field_name)s' + db.delete_column(%(table_name)r, %(field_column)r)'''[1:] + "\n" + + def __init__(self, model, field, field_def): + self.model = model + self.field = field + self.field_def = field_def + + # See if they've made a NOT NULL column but also have no default (far too common) + is_null = self.field.null + default = (self.field.default is not None) and (self.field.default is not NOT_PROVIDED) + + if not is_null and not default: + self.deal_with_not_null_no_default(self.field, self.field_def) + + def console_line(self): + "Returns the string to print on the console, e.g. ' + Added field foo'" + return " + Added field %s on %s.%s" % ( + self.field.name, + self.model._meta.app_label, + self.model._meta.object_name, + ) + + def forwards_code(self): + + return self.FORWARDS_TEMPLATE % { + "model_name": self.model._meta.object_name, + "table_name": self.model._meta.db_table, + "field_name": self.field.name, + "field_column": self.field.column, + "field_def": self.triple_to_def(self.field_def), + } + + def backwards_code(self): + return self.BACKWARDS_TEMPLATE % { + "model_name": self.model._meta.object_name, + "table_name": self.model._meta.db_table, + "field_name": self.field.name, + "field_column": self.field.column, + } + + +class DeleteField(AddField): + """ + Removes a field from a model. Takes a Model class and the field name. + """ + + null_reason = "removing this field" + allow_third_null_option = True + + def console_line(self): + "Returns the string to print on the console, e.g. 
' + Added field foo'" + return " - Deleted field %s on %s.%s" % ( + self.field.name, + self.model._meta.app_label, + self.model._meta.object_name, + ) + + def forwards_code(self): + return AddField.backwards_code(self) + + def backwards_code(self): + if not self.irreversible: + return AddField.forwards_code(self) + else: + return self.irreversable_code(self.field) + + +class ChangeField(Action, _NullIssuesField): + """ + Changes a field's type/options on a model. + """ + + null_reason = "making this field non-nullable" + + FORWARDS_TEMPLATE = BACKWARDS_TEMPLATE = ''' + # Changing field '%(model_name)s.%(field_name)s' + db.alter_column(%(table_name)r, %(field_column)r, %(field_def)s)''' + + RENAME_TEMPLATE = ''' + # Renaming column for '%(model_name)s.%(field_name)s' to match new field type. + db.rename_column(%(table_name)r, %(old_column)r, %(new_column)r)''' + + def __init__(self, model, old_field, new_field, old_def, new_def): + self.model = model + self.old_field = old_field + self.new_field = new_field + self.old_def = old_def + self.new_def = new_def + + # See if they've changed a not-null field to be null + new_default = (self.new_field.default is not None) and (self.new_field.default is not NOT_PROVIDED) + old_default = (self.old_field.default is not None) and (self.old_field.default is not NOT_PROVIDED) + if self.old_field.null and not self.new_field.null and not new_default: + self.deal_with_not_null_no_default(self.new_field, self.new_def) + if not self.old_field.null and self.new_field.null and not old_default: + self.null_reason = "making this field nullable" + self.allow_third_null_option = True + self.deal_with_not_null_no_default(self.old_field, self.old_def) + + def console_line(self): + "Returns the string to print on the console, e.g. 
' + Added field foo'" + return " ~ Changed field %s on %s.%s" % ( + self.new_field.name, + self.model._meta.app_label, + self.model._meta.object_name, + ) + + def _code(self, old_field, new_field, new_def): + + output = "" + + if self.old_field.column != self.new_field.column: + output += self.RENAME_TEMPLATE % { + "model_name": self.model._meta.object_name, + "table_name": self.model._meta.db_table, + "field_name": new_field.name, + "old_column": old_field.column, + "new_column": new_field.column, + } + + output += self.FORWARDS_TEMPLATE % { + "model_name": self.model._meta.object_name, + "table_name": self.model._meta.db_table, + "field_name": new_field.name, + "field_column": new_field.column, + "field_def": self.triple_to_def(new_def), + } + + return output + + def forwards_code(self): + return self._code(self.old_field, self.new_field, self.new_def) + + def backwards_code(self): + if not self.irreversible: + return self._code(self.new_field, self.old_field, self.old_def) + else: + return self.irreversable_code(self.old_field) + + +class AddUnique(Action): + """ + Adds a unique constraint to a model. Takes a Model class and the field names. + """ + + FORWARDS_TEMPLATE = ''' + # Adding unique constraint on '%(model_name)s', fields %(field_names)s + db.create_unique(%(table_name)r, %(fields)r)'''[1:] + "\n" + + BACKWARDS_TEMPLATE = ''' + # Removing unique constraint on '%(model_name)s', fields %(field_names)s + db.delete_unique(%(table_name)r, %(fields)r)'''[1:] + "\n" + + prepend_backwards = True + + def __init__(self, model, fields): + self.model = model + self.fields = fields + + def console_line(self): + "Returns the string to print on the console, e.g. 
' + Added field foo'" + return " + Added unique constraint for %s on %s.%s" % ( + [x.name for x in self.fields], + self.model._meta.app_label, + self.model._meta.object_name, + ) + + def forwards_code(self): + + return self.FORWARDS_TEMPLATE % { + "model_name": self.model._meta.object_name, + "table_name": self.model._meta.db_table, + "fields": [field.column for field in self.fields], + "field_names": [field.name for field in self.fields], + } + + def backwards_code(self): + return self.BACKWARDS_TEMPLATE % { + "model_name": self.model._meta.object_name, + "table_name": self.model._meta.db_table, + "fields": [field.column for field in self.fields], + "field_names": [field.name for field in self.fields], + } + + +class DeleteUnique(AddUnique): + """ + Removes a unique constraint from a model. Takes a Model class and the field names. + """ + + prepend_forwards = True + prepend_backwards = False + + def console_line(self): + "Returns the string to print on the console, e.g. ' + Added field foo'" + return " - Deleted unique constraint for %s on %s.%s" % ( + [x.name for x in self.fields], + self.model._meta.app_label, + self.model._meta.object_name, + ) + + def forwards_code(self): + return AddUnique.backwards_code(self) + + def backwards_code(self): + return AddUnique.forwards_code(self) + + +class AddIndex(AddUnique): + """ + Adds an index to a model field[s]. Takes a Model class and the field names. + """ + + FORWARDS_TEMPLATE = ''' + # Adding index on '%(model_name)s', fields %(field_names)s + db.create_index(%(table_name)r, %(fields)r)'''[1:] + "\n" + + BACKWARDS_TEMPLATE = ''' + # Removing index on '%(model_name)s', fields %(field_names)s + db.delete_index(%(table_name)r, %(fields)r)'''[1:] + "\n" + + def console_line(self): + "Returns the string to print on the console, e.g. 
' + Added field foo'" + return " + Added index for %s on %s.%s" % ( + [x.name for x in self.fields], + self.model._meta.app_label, + self.model._meta.object_name, + ) + + +class DeleteIndex(AddIndex): + """ + Deletes an index off a model field[s]. Takes a Model class and the field names. + """ + + def console_line(self): + "Returns the string to print on the console, e.g. ' + Added field foo'" + return " + Deleted index for %s on %s.%s" % ( + [x.name for x in self.fields], + self.model._meta.app_label, + self.model._meta.object_name, + ) + + def forwards_code(self): + return AddIndex.backwards_code(self) + + def backwards_code(self): + return AddIndex.forwards_code(self) + + +class AddM2M(Action): + """ + Adds a unique constraint to a model. Takes a Model class and the field names. + """ + + FORWARDS_TEMPLATE = ''' + # Adding M2M table for field %(field_name)s on '%(model_name)s' + db.create_table(%(table_name)r, ( + ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), + (%(left_field)r, models.ForeignKey(orm[%(left_model_key)r], null=False)), + (%(right_field)r, models.ForeignKey(orm[%(right_model_key)r], null=False)) + )) + db.create_unique(%(table_name)r, [%(left_column)r, %(right_column)r])'''[1:] + "\n" + + BACKWARDS_TEMPLATE = ''' + # Removing M2M table for field %(field_name)s on '%(model_name)s' + db.delete_table('%(table_name)s')'''[1:] + "\n" + + def __init__(self, model, field): + self.model = model + self.field = field + + def console_line(self): + "Returns the string to print on the console, e.g. 
' + Added field foo'" + return " + Added M2M table for %s on %s.%s" % ( + self.field.name, + self.model._meta.app_label, + self.model._meta.object_name, + ) + + def forwards_code(self): + + return self.FORWARDS_TEMPLATE % { + "model_name": self.model._meta.object_name, + "field_name": self.field.name, + "table_name": self.field.m2m_db_table(), + "left_field": self.field.m2m_column_name()[:-3], # Remove the _id part + "left_column": self.field.m2m_column_name(), + "left_model_key": model_key(self.model), + "right_field": self.field.m2m_reverse_name()[:-3], # Remove the _id part + "right_column": self.field.m2m_reverse_name(), + "right_model_key": model_key(self.field.rel.to), + } + + def backwards_code(self): + + return self.BACKWARDS_TEMPLATE % { + "model_name": self.model._meta.object_name, + "field_name": self.field.name, + "table_name": self.field.m2m_db_table(), + } + + +class DeleteM2M(AddM2M): + """ + Adds a unique constraint to a model. Takes a Model class and the field names. + """ + + def console_line(self): + "Returns the string to print on the console, e.g. ' + Added field foo'" + return " - Deleted M2M table for %s on %s.%s" % ( + self.field.name, + self.model._meta.app_label, + self.model._meta.object_name, + ) + + def forwards_code(self): + return AddM2M.backwards_code(self) + + def backwards_code(self): + return AddM2M.forwards_code(self) + diff --git a/users/src/south/south/creator/changes.py b/users/src/south/south/creator/changes.py new file mode 100644 index 0000000..114b749 --- /dev/null +++ b/users/src/south/south/creator/changes.py @@ -0,0 +1,485 @@ +""" +Contains things to detect changes - either using options passed in on the +commandline, or by using autodetection, etc. 
+""" + +from django.db import models +from django.contrib.contenttypes.generic import GenericRelation +from django.utils.datastructures import SortedDict + +from south.creator.freezer import remove_useless_attributes, freeze_apps, model_key +from south.utils import auto_through + +class BaseChanges(object): + """ + Base changes class. + """ + def suggest_name(self): + return '' + + def split_model_def(self, model, model_def): + """ + Given a model and its model def (a dict of field: triple), returns three + items: the real fields dict, the Meta dict, and the M2M fields dict. + """ + real_fields = SortedDict() + meta = SortedDict() + m2m_fields = SortedDict() + for name, triple in model_def.items(): + if name == "Meta": + meta = triple + elif isinstance(model._meta.get_field_by_name(name)[0], models.ManyToManyField): + m2m_fields[name] = triple + else: + real_fields[name] = triple + return real_fields, meta, m2m_fields + + def current_model_from_key(self, key): + app_label, model_name = key.split(".") + return models.get_model(app_label, model_name) + + def current_field_from_key(self, key, fieldname): + app_label, model_name = key.split(".") + # Special, for the magical field from order_with_respect_to + if fieldname == "_order": + field = models.IntegerField() + field.name = "_order" + field.attname = "_order" + field.column = "_order" + field.default = 0 + return field + # Otherwise, normal. + return models.get_model(app_label, model_name)._meta.get_field_by_name(fieldname)[0] + + +class AutoChanges(BaseChanges): + """ + Detects changes by 'diffing' two sets of frozen model definitions. + """ + + # Field types we don't generate add/remove field changes for. 
+ IGNORED_FIELD_TYPES = [ + GenericRelation, + ] + + def __init__(self, migrations, old_defs, old_orm, new_defs): + self.migrations = migrations + self.old_defs = old_defs + self.old_orm = old_orm + self.new_defs = new_defs + + def suggest_name(self): + parts = ["auto"] + for change_name, params in self.get_changes(): + if change_name == "AddModel": + parts.append("add_%s" % params['model']._meta.object_name.lower()) + elif change_name == "DeleteModel": + parts.append("del_%s" % params['model']._meta.object_name.lower()) + elif change_name == "AddField": + parts.append("add_field_%s_%s" % ( + params['model']._meta.object_name.lower(), + params['field'].name, + )) + elif change_name == "DeleteField": + parts.append("del_field_%s_%s" % ( + params['model']._meta.object_name.lower(), + params['field'].name, + )) + elif change_name == "ChangeField": + parts.append("chg_field_%s_%s" % ( + params['model']._meta.object_name.lower(), + params['new_field'].name, + )) + elif change_name == "AddUnique": + parts.append("add_unique_%s_%s" % ( + params['model']._meta.object_name.lower(), + "_".join([x.name for x in params['fields']]), + )) + elif change_name == "DeleteUnique": + parts.append("del_unique_%s_%s" % ( + params['model']._meta.object_name.lower(), + "_".join([x.name for x in params['fields']]), + )) + return ("__".join(parts))[:70] + + def get_changes(self): + """ + Returns the difference between the old and new sets of models as a 5-tuple: + added_models, deleted_models, added_fields, deleted_fields, changed_fields + """ + + deleted_models = set() + + # See if anything's vanished + for key in self.old_defs: + if key not in self.new_defs: + # We shouldn't delete it if it was managed=False + old_fields, old_meta, old_m2ms = self.split_model_def(self.old_orm[key], self.old_defs[key]) + if old_meta.get("managed", "True") != "False": + # Alright, delete it. 
+ yield ("DeleteModel", { + "model": self.old_orm[key], + "model_def": old_fields, + }) + # Also make sure we delete any M2Ms it had. + for fieldname in old_m2ms: + # Only delete its stuff if it wasn't a through=. + field = self.old_orm[key + ":" + fieldname] + if auto_through(field): + yield ("DeleteM2M", {"model": self.old_orm[key], "field": field}) + # And any unique constraints it had + unique_together = eval(old_meta.get("unique_together", "[]")) + if unique_together: + # If it's only a single tuple, make it into the longer one + if isinstance(unique_together[0], basestring): + unique_together = [unique_together] + # For each combination, make an action for it + for fields in unique_together: + yield ("DeleteUnique", { + "model": self.old_orm[key], + "fields": [self.old_orm[key]._meta.get_field_by_name(x)[0] for x in fields], + }) + # We always add it in here so we ignore it later + deleted_models.add(key) + + # Or appeared + for key in self.new_defs: + if key not in self.old_defs: + # We shouldn't add it if it's managed=False + new_fields, new_meta, new_m2ms = self.split_model_def(self.current_model_from_key(key), self.new_defs[key]) + if new_meta.get("managed", "True") != "False": + yield ("AddModel", { + "model": self.current_model_from_key(key), + "model_def": new_fields, + }) + # Also make sure we add any M2Ms it has. + for fieldname in new_m2ms: + # Only create its stuff if it wasn't a through=. 
+ field = self.current_field_from_key(key, fieldname) + if auto_through(field): + yield ("AddM2M", {"model": self.current_model_from_key(key), "field": field}) + # And any unique constraints it has + unique_together = eval(new_meta.get("unique_together", "[]")) + if unique_together: + # If it's only a single tuple, make it into the longer one + if isinstance(unique_together[0], basestring): + unique_together = [unique_together] + # For each combination, make an action for it + for fields in unique_together: + yield ("AddUnique", { + "model": self.current_model_from_key(key), + "fields": [self.current_model_from_key(key)._meta.get_field_by_name(x)[0] for x in fields], + }) + + # Now, for every model that's stayed the same, check its fields. + for key in self.old_defs: + if key not in deleted_models: + + old_fields, old_meta, old_m2ms = self.split_model_def(self.old_orm[key], self.old_defs[key]) + new_fields, new_meta, new_m2ms = self.split_model_def(self.current_model_from_key(key), self.new_defs[key]) + + # Find fields that have vanished. + for fieldname in old_fields: + if fieldname not in new_fields: + # Don't do it for any fields we're ignoring + field = self.old_orm[key + ":" + fieldname] + field_allowed = True + for field_type in self.IGNORED_FIELD_TYPES: + if isinstance(field, field_type): + field_allowed = False + if field_allowed: + # Looks alright. + yield ("DeleteField", { + "model": self.old_orm[key], + "field": field, + "field_def": old_fields[fieldname], + }) + + # And ones that have appeared + for fieldname in new_fields: + if fieldname not in old_fields: + # Don't do it for any fields we're ignoring + field = self.current_field_from_key(key, fieldname) + field_allowed = True + for field_type in self.IGNORED_FIELD_TYPES: + if isinstance(field, field_type): + field_allowed = False + if field_allowed: + # Looks alright. 
+ yield ("AddField", { + "model": self.current_model_from_key(key), + "field": field, + "field_def": new_fields[fieldname], + }) + + # Find M2Ms that have vanished + for fieldname in old_m2ms: + if fieldname not in new_m2ms: + # Only delete its stuff if it wasn't a through=. + field = self.old_orm[key + ":" + fieldname] + if auto_through(field): + yield ("DeleteM2M", {"model": self.old_orm[key], "field": field}) + + # Find M2Ms that have appeared + for fieldname in new_m2ms: + if fieldname not in old_m2ms: + # Only create its stuff if it wasn't a through=. + field = self.current_field_from_key(key, fieldname) + if auto_through(field): + yield ("AddM2M", {"model": self.current_model_from_key(key), "field": field}) + + # For the ones that exist in both models, see if they were changed + for fieldname in set(old_fields).intersection(set(new_fields)): + # Non-index changes + if self.different_attributes( + remove_useless_attributes(old_fields[fieldname], True, True), + remove_useless_attributes(new_fields[fieldname], True, True)): + yield ("ChangeField", { + "model": self.current_model_from_key(key), + "old_field": self.old_orm[key + ":" + fieldname], + "new_field": self.current_field_from_key(key, fieldname), + "old_def": old_fields[fieldname], + "new_def": new_fields[fieldname], + }) + # Index changes + old_field = self.old_orm[key + ":" + fieldname] + new_field = self.current_field_from_key(key, fieldname) + if not old_field.db_index and new_field.db_index: + # They've added an index. + yield ("AddIndex", { + "model": self.current_model_from_key(key), + "fields": [new_field], + }) + if old_field.db_index and not new_field.db_index: + # They've removed an index. 
+ yield ("DeleteIndex", { + "model": self.old_orm[key], + "fields": [old_field], + }) + # See if their uniques have changed + if old_field.unique != new_field.unique: + # Make sure we look at the one explicitly given to see what happened + if new_field.unique: + yield ("AddUnique", { + "model": self.current_model_from_key(key), + "fields": [new_field], + }) + else: + yield ("DeleteUnique", { + "model": self.old_orm[key], + "fields": [old_field], + }) + + # See if there's any M2Ms that have changed. + for fieldname in set(old_m2ms).intersection(set(new_m2ms)): + old_field = self.old_orm[key + ":" + fieldname] + new_field = self.current_field_from_key(key, fieldname) + # Have they _added_ a through= ? + if auto_through(old_field) and not auto_through(new_field): + yield ("DeleteM2M", {"model": self.old_orm[key], "field": old_field}) + # Have they _removed_ a through= ? + if not auto_through(old_field) and auto_through(new_field): + yield ("AddM2M", {"model": self.current_model_from_key(key), "field": new_field}) + + ## See if the unique_togethers have changed + # First, normalise them into lists of sets. 
+ old_unique_together = eval(old_meta.get("unique_together", "[]")) + new_unique_together = eval(new_meta.get("unique_together", "[]")) + if old_unique_together and isinstance(old_unique_together[0], basestring): + old_unique_together = [old_unique_together] + if new_unique_together and isinstance(new_unique_together[0], basestring): + new_unique_together = [new_unique_together] + old_unique_together = map(set, old_unique_together) + new_unique_together = map(set, new_unique_together) + # See if any appeared or disappeared + for item in old_unique_together: + if item not in new_unique_together: + yield ("DeleteUnique", { + "model": self.old_orm[key], + "fields": [self.old_orm[key + ":" + x] for x in item], + }) + for item in new_unique_together: + if item not in old_unique_together: + yield ("AddUnique", { + "model": self.current_model_from_key(key), + "fields": [self.current_field_from_key(key, x) for x in item], + }) + + @classmethod + def is_triple(cls, triple): + "Returns whether the argument is a triple." + return isinstance(triple, (list, tuple)) and len(triple) == 3 and \ + isinstance(triple[0], (str, unicode)) and \ + isinstance(triple[1], (list, tuple)) and \ + isinstance(triple[2], dict) + + @classmethod + def different_attributes(cls, old, new): + """ + Backwards-compat comparison that ignores orm. on the RHS and not the left + and which knows django.db.models.fields.CharField = models.CharField. + Has a whole load of tests in tests/autodetection.py. 
+ """ + + # If they're not triples, just do normal comparison + if not cls.is_triple(old) or not cls.is_triple(new): + return old != new + + # Expand them out into parts + old_field, old_pos, old_kwd = old + new_field, new_pos, new_kwd = new + + # Copy the positional and keyword arguments so we can compare them and pop off things + old_pos, new_pos = old_pos[:], new_pos[:] + old_kwd = dict(old_kwd.items()) + new_kwd = dict(new_kwd.items()) + + # Remove comparison of the existence of 'unique', that's done elsewhere. + # TODO: Make this work for custom fields where unique= means something else? + if "unique" in old_kwd: + del old_kwd['unique'] + if "unique" in new_kwd: + del new_kwd['unique'] + + # If the first bit is different, check it's not by dj.db.models... + if old_field != new_field: + if old_field.startswith("models.") and (new_field.startswith("django.db.models") \ + or new_field.startswith("django.contrib.gis")): + if old_field.split(".")[-1] != new_field.split(".")[-1]: + return True + else: + # Remove those fields from the final comparison + old_field = new_field = "" + + # If there's a positional argument in the first, and a 'to' in the second, + # see if they're actually comparable. + if (old_pos and "to" in new_kwd) and ("orm" in new_kwd['to'] and "orm" not in old_pos[0]): + # Do special comparison to fix #153 + try: + if old_pos[0] != new_kwd['to'].split("'")[1].split(".")[1]: + return True + except IndexError: + pass # Fall back to next comparison + # Remove those attrs from the final comparison + old_pos = old_pos[1:] + del new_kwd['to'] + + return old_field != new_field or old_pos != new_pos or old_kwd != new_kwd + + +class ManualChanges(BaseChanges): + """ + Detects changes by reading the command line. 
+ """ + + def __init__(self, migrations, added_models, added_fields, added_indexes): + self.migrations = migrations + self.added_models = added_models + self.added_fields = added_fields + self.added_indexes = added_indexes + + def suggest_name(self): + bits = [] + for model_name in self.added_models: + bits.append('add_model_%s' % model_name) + for field_name in self.added_fields: + bits.append('add_field_%s' % field_name) + for index_name in self.added_indexes: + bits.append('add_index_%s' % index_name) + return '_'.join(bits).replace('.', '_') + + def get_changes(self): + # Get the model defs so we can use them for the yield later + model_defs = freeze_apps([self.migrations.app_label()]) + # Make the model changes + for model_name in self.added_models: + model = models.get_model(self.migrations.app_label(), model_name) + real_fields, meta, m2m_fields = self.split_model_def(model, model_defs[model_key(model)]) + yield ("AddModel", { + "model": model, + "model_def": real_fields, + }) + # And the field changes + for field_desc in self.added_fields: + try: + model_name, field_name = field_desc.split(".") + except (TypeError, ValueError): + raise ValueError("%r is not a valid field description." % field_desc) + model = models.get_model(self.migrations.app_label(), model_name) + real_fields, meta, m2m_fields = self.split_model_def(model, model_defs[model_key(model)]) + yield ("AddField", { + "model": model, + "field": model._meta.get_field_by_name(field_name)[0], + "field_def": real_fields[field_name], + }) + # And the indexes + for field_desc in self.added_indexes: + try: + model_name, field_name = field_desc.split(".") + except (TypeError, ValueError): + print "%r is not a valid field description." % field_desc + model = models.get_model(self.migrations.app_label(), model_name) + yield ("AddIndex", { + "model": model, + "fields": [model._meta.get_field_by_name(field_name)[0]], + }) + + +class InitialChanges(BaseChanges): + """ + Creates all models; handles --initial. 
+ """ + def suggest_name(self): + return 'initial' + + def __init__(self, migrations): + self.migrations = migrations + + def get_changes(self): + # Get the frozen models for this app + model_defs = freeze_apps([self.migrations.app_label()]) + + for model in models.get_models(models.get_app(self.migrations.app_label())): + + # Don't do anything for unmanaged, abstract or proxy models + if model._meta.abstract or getattr(model._meta, "proxy", False) or not getattr(model._meta, "managed", True): + continue + + real_fields, meta, m2m_fields = self.split_model_def(model, model_defs[model_key(model)]) + + # Firstly, add the main table and fields + yield ("AddModel", { + "model": model, + "model_def": real_fields, + }) + + # Then, add any uniqueness that's around + if meta: + unique_together = eval(meta.get("unique_together", "[]")) + if unique_together: + # If it's only a single tuple, make it into the longer one + if isinstance(unique_together[0], basestring): + unique_together = [unique_together] + # For each combination, make an action for it + for fields in unique_together: + yield ("AddUnique", { + "model": model, + "fields": [model._meta.get_field_by_name(x)[0] for x in fields], + }) + + # Finally, see if there's some M2M action + for name, triple in m2m_fields.items(): + field = model._meta.get_field_by_name(name)[0] + # But only if it's not through=foo (#120) + if field.rel.through: + try: + # Django 1.1 and below + through_model = field.rel.through_model + except AttributeError: + # Django 1.2 + through_model = field.rel.through + if (not field.rel.through) or getattr(through_model._meta, "auto_created", False): + yield ("AddM2M", { + "model": model, + "field": field, + }) diff --git a/users/src/south/south/creator/freezer.py b/users/src/south/south/creator/freezer.py new file mode 100644 index 0000000..3c78e45 --- /dev/null +++ b/users/src/south/south/creator/freezer.py @@ -0,0 +1,190 @@ +""" +Handles freezing of models into FakeORMs. 
+""" + +import sys + +from django.db import models +from django.db.models.base import ModelBase, Model +from django.contrib.contenttypes.generic import GenericRelation + +from south.orm import FakeORM +from south.utils import get_attribute, auto_through +from south import modelsinspector + +def freeze_apps(apps): + """ + Takes a list of app labels, and returns a string of their frozen form. + """ + if isinstance(apps, basestring): + apps = [apps] + frozen_models = set() + # For each app, add in all its models + for app in apps: + for model in models.get_models(models.get_app(app)): + # Only add if it's not abstract or proxy + if not model._meta.abstract and not getattr(model._meta, "proxy", False): + frozen_models.add(model) + # Now, add all the dependencies + for model in list(frozen_models): + frozen_models.update(model_dependencies(model)) + # Serialise! + model_defs = {} + model_classes = {} + for model in frozen_models: + model_defs[model_key(model)] = prep_for_freeze(model) + model_classes[model_key(model)] = model + # Check for any custom fields that failed to freeze. + missing_fields = False + for key, fields in model_defs.items(): + for field_name, value in fields.items(): + if value is None: + missing_fields = True + model_class = model_classes[key] + field_class = model_class._meta.get_field_by_name(field_name)[0] + print " ! Cannot freeze field '%s.%s'" % (key, field_name) + print " ! (this field has class %s.%s)" % (field_class.__class__.__module__, field_class.__class__.__name__) + if missing_fields: + print "" + print " ! South cannot introspect some fields; this is probably because they are custom" + print " ! fields. If they worked in 0.6 or below, this is because we have removed the" + print " ! models parser (it often broke things)." + print " ! 
To fix this, read http://south.aeracode.org/wiki/MyFieldsDontWork" + sys.exit(1) + + return model_defs + +def freeze_apps_to_string(apps): + return pprint_frozen_models(freeze_apps(apps)) + +### + +def model_key(model): + "For a given model, return 'appname.modelname'." + return "%s.%s" % (model._meta.app_label, model._meta.object_name.lower()) + +def prep_for_freeze(model): + """ + Takes a model and returns the ready-to-serialise dict (all you need + to do is just pretty-print it). + """ + fields = modelsinspector.get_model_fields(model, m2m=True) + # Remove useless attributes (like 'choices') + for name, field in fields.items(): + fields[name] = remove_useless_attributes(field) + # See if there's a Meta + fields['Meta'] = remove_useless_meta(modelsinspector.get_model_meta(model)) + # Add in our own special items to track the object name and managed + fields['Meta']['object_name'] = model._meta.object_name # Special: not eval'able. + if not getattr(model._meta, "managed", True): + fields['Meta']['managed'] = repr(model._meta.managed) + return fields + +### Dependency resolvers + +def model_dependencies(model, checked_models=None): + """ + Returns a set of models this one depends on to be defined; things like + OneToOneFields as ID, ForeignKeys everywhere, etc. + """ + depends = set() + checked_models = checked_models or set() + # Get deps for each field + for field in model._meta.fields + model._meta.many_to_many: + depends.update(field_dependencies(field, checked_models)) + # Add in any non-abstract bases + for base in model.__bases__: + if issubclass(base, models.Model) and hasattr(base, '_meta') and not base._meta.abstract: + depends.add(base) + # Now recurse + new_to_check = depends - checked_models + while new_to_check: + checked_model = new_to_check.pop() + if checked_model == model or checked_model in checked_models: + continue + checked_models.add(checked_model) + deps = model_dependencies(checked_model, checked_models) + # Loop through dependencies... 
+ for dep in deps: + # If the new dep is not already checked, add to the queue + if (dep not in depends) and (dep not in new_to_check) and (dep not in checked_models): + new_to_check.add(dep) + depends.add(dep) + return depends + +def field_dependencies(field, checked_models=None): + checked_models = checked_models or set() + depends = set() + arg_defs, kwarg_defs = modelsinspector.matching_details(field) + for attrname, options in arg_defs + kwarg_defs.values(): + if options.get("ignore_if_auto_through", False) and auto_through(field): + continue + if options.get("is_value", False): + value = attrname + elif attrname == 'rel.through' and hasattr(getattr(field, 'rel', None), 'through_model'): + # Hack for django 1.1 and below, where the through model is stored + # in rel.through_model while rel.through stores only the model name. + value = field.rel.through_model + else: + try: + value = get_attribute(field, attrname) + except AttributeError: + if options.get("ignore_missing", False): + continue + raise + if isinstance(value, Model): + value = value.__class__ + if not isinstance(value, ModelBase): + continue + if getattr(value._meta, "proxy", False): + value = value._meta.proxy_for_model + if value in checked_models: + continue + checked_models.add(value) + depends.add(value) + depends.update(model_dependencies(value, checked_models)) + + return depends + +### Prettyprinters + +def pprint_frozen_models(models): + return "{\n %s\n }" % ",\n ".join([ + "%r: %s" % (name, pprint_fields(fields)) + for name, fields in sorted(models.items()) + ]) + +def pprint_fields(fields): + return "{\n %s\n }" % ",\n ".join([ + "%r: %r" % (name, defn) + for name, defn in sorted(fields.items()) + ]) + +### Output sanitisers + +USELESS_KEYWORDS = ["choices", "help_text", "verbose_name"] +USELESS_DB_KEYWORDS = ["related_name", "default", "blank"] # Important for ORM, not for DB. 
+INDEX_KEYWORDS = ["db_index"] + +def remove_useless_attributes(field, db=False, indexes=False): + "Removes useless (for database) attributes from the field's defn." + # Work out what to remove, and remove it. + keywords = USELESS_KEYWORDS[:] + if db: + keywords += USELESS_DB_KEYWORDS[:] + if indexes: + keywords += INDEX_KEYWORDS[:] + if field: + for name in keywords: + if name in field[2]: + del field[2][name] + return field + +USELESS_META = ["verbose_name", "verbose_name_plural"] +def remove_useless_meta(meta): + "Removes useless (for database) attributes from the table's meta." + if meta: + for name in USELESS_META: + if name in meta: + del meta[name] + return meta diff --git a/users/src/south/south/db/__init__.py b/users/src/south/south/db/__init__.py new file mode 100644 index 0000000..67b742b --- /dev/null +++ b/users/src/south/south/db/__init__.py @@ -0,0 +1,76 @@ + +# Establish the common DatabaseOperations instance, which we call 'db'. +# Much thanks to cmkmrr for a lot of the code base here + +from django.conf import settings +import sys + +# A few aliases, because there's FQMNs now +engine_modules = { + 'django.db.backends.postgresql_psycopg2': 'postgresql_psycopg2', + 'django.db.backends.sqlite3': 'sqlite3', + 'django.db.backends.mysql': 'mysql', + 'django.db.backends.oracle': 'oracle', + 'sql_server.pyodbc': 'sql_server.pyodbc', #django-pyodbc + 'sqlserver_ado': 'sql_server.pyodbc', #django-mssql + 'django.contrib.gis.db.backends.postgis': 'postgresql_psycopg2', + 'django.contrib.gis.db.backends.spatialite': 'sqlite3', + 'django.contrib.gis.db.backends.mysql': 'mysql', + 'django.contrib.gis.db.backends.oracle': 'oracle', +} + +# First, work out if we're multi-db or not, and which databases we have +try: + from django.db import DEFAULT_DB_ALIAS +except ImportError: + #### 1.1 or below #### + # We'll 'fake' multi-db; set the default alias + DEFAULT_DB_ALIAS = 'default' + # SOUTH_DATABASE_ADAPTER is an optional override if you have a different module + 
engine = getattr(settings, "SOUTH_DATABASE_ADAPTER", "south.db.%s" % settings.DATABASE_ENGINE) + # And then, we have one database with one engine + db_engines = {DEFAULT_DB_ALIAS: engine} +else: + #### 1.2 or above #### + # Loop over the defined databases, gathering up their engines + db_engines = dict([ + # Note we check to see if contrib.gis has overridden us. + (alias, "south.db.%s" % engine_modules.get(db_settings['ENGINE'], None)) + for alias, db_settings in settings.DATABASES.items() + ]) + # Update with any overrides + db_engines.update(getattr(settings, "SOUTH_DATABASE_ADAPTERS", {})) + # Check there's no None engines, or... + for alias, engine in db_engines.items(): + if engine is None: + # They've used a backend we don't support + sys.stderr.write( + ( + "There is no South database module for your database backend '%s'. " + \ + "Please either choose a supported database, check for " + \ + "SOUTH_DATABASE_ADAPTER[S] settings, " + \ + "or remove South from INSTALLED_APPS.\n" + ) % (settings.DATABASES[alias]['ENGINE'],) + ) + sys.exit(1) + +# Now, turn that into a dict of +dbs = {} +try: + for alias, module_name in db_engines.items(): + module = __import__(module_name, {}, {}, ['']) + dbs[alias] = module.DatabaseOperations(alias) +except ImportError: + # This error should only be triggered on 1.1 and below. + sys.stderr.write( + ( + "There is no South database module '%s' for your database. 
" + \ + "Please either choose a supported database, check for " + \ + "SOUTH_DATABASE_ADAPTER[S] settings, " + \ + "or remove South from INSTALLED_APPS.\n" + ) % (module_name,) + ) + sys.exit(1) + +# Finally, to make old migrations work, keep 'db' around as the default database +db = dbs[DEFAULT_DB_ALIAS] diff --git a/users/src/south/south/db/__init__.pyc b/users/src/south/south/db/__init__.pyc new file mode 100644 index 0000000..ebc449d Binary files /dev/null and b/users/src/south/south/db/__init__.pyc differ diff --git a/users/src/south/south/db/generic.py b/users/src/south/south/db/generic.py new file mode 100644 index 0000000..0d57fe7 --- /dev/null +++ b/users/src/south/south/db/generic.py @@ -0,0 +1,1068 @@ + +import datetime +import string +import random +import re +import sys + +from django.core.management.color import no_style +from django.db import transaction, models +from django.db.utils import DatabaseError +from django.db.backends.util import truncate_name +from django.db.models.fields import NOT_PROVIDED +from django.dispatch import dispatcher +from django.conf import settings +from django.utils.datastructures import SortedDict + +from south.logger import get_logger + +def alias(attrname): + """ + Returns a function which calls 'attrname' - for function aliasing. + We can't just use foo = bar, as this breaks subclassing. 
+ """ + def func(self, *args, **kwds): + return getattr(self, attrname)(*args, **kwds) + return func + +def invalidate_table_constraints(func): + def _cache_clear(self, table, *args, **opts): + self._set_cache(table, value=INVALID) + return func(self, table, *args, **opts) + return _cache_clear + +def delete_column_constraints(func): + def _column_rm(self, table, column, *args, **opts): + self._set_cache(table, column, value=[]) + return func(self, table, column, *args, **opts) + return _column_rm + +def copy_column_constraints(func): + def _column_cp(self, table, column_old, column_new, *args, **opts): + db_name = self._get_setting('NAME') + self._set_cache(table, column_new, value=self.lookup_constraint(db_name, table, column_old)) + return func(self, table, column_old, column_new, *args, **opts) + return _column_cp + +class INVALID(Exception): + def __repr__(self): + return 'INVALID' + +class DryRunError(ValueError): + pass + +class DatabaseOperations(object): + """ + Generic SQL implementation of the DatabaseOperations. + Some of this code comes from Django Evolution. + """ + + # We assume the generic DB can handle DDL transactions. MySQL wil change this. 
+ has_ddl_transactions = True + + alter_string_set_type = 'ALTER COLUMN %(column)s TYPE %(type)s' + alter_string_set_null = 'ALTER COLUMN %(column)s DROP NOT NULL' + alter_string_drop_null = 'ALTER COLUMN %(column)s SET NOT NULL' + has_check_constraints = True + delete_check_sql = 'ALTER TABLE %(table)s DROP CONSTRAINT %(constraint)s' + allows_combined_alters = True + add_column_string = 'ALTER TABLE %s ADD COLUMN %s;' + delete_unique_sql = "ALTER TABLE %s DROP CONSTRAINT %s" + delete_foreign_key_sql = 'ALTER TABLE %(table)s DROP CONSTRAINT %(constraint)s' + supports_foreign_keys = True + max_index_name_length = 63 + drop_index_string = 'DROP INDEX %(index_name)s' + delete_column_string = 'ALTER TABLE %s DROP COLUMN %s CASCADE;' + create_primary_key_string = "ALTER TABLE %(table)s ADD CONSTRAINT %(constraint)s PRIMARY KEY (%(columns)s)" + delete_primary_key_sql = "ALTER TABLE %(table)s DROP CONSTRAINT %(constraint)s" + add_check_constraint_fragment = "ADD CONSTRAINT %(constraint)s CHECK (%(check)s)" + rename_table_sql = "ALTER TABLE %s RENAME TO %s;" + backend_name = None + default_schema_name = "public" + + def __init__(self, db_alias): + self.debug = False + self.deferred_sql = [] + self.dry_run = False + self.pending_transactions = 0 + self.pending_create_signals = [] + self.db_alias = db_alias + self._constraint_cache = {} + self._initialised = False + + def lookup_constraint(self, db_name, table_name, column_name=None): + """ return a set() of constraints for db_name.table_name.column_name """ + def _lookup(): + table = self._constraint_cache[db_name][table_name] + if table is INVALID: + raise INVALID + elif column_name is None: + return table.items() + else: + return table[column_name] + + try: + ret = _lookup() + return ret + except INVALID, e: + del self._constraint_cache[db_name][table_name] + self._fill_constraint_cache(db_name, table_name) + except KeyError, e: + if self._is_valid_cache(db_name, table_name): + return [] + 
self._fill_constraint_cache(db_name, table_name) + + return self.lookup_constraint(db_name, table_name, column_name) + + def _set_cache(self, table_name, column_name=None, value=INVALID): + db_name = self._get_setting('NAME') + try: + if column_name is not None: + self._constraint_cache[db_name][table_name][column_name] = value + else: + self._constraint_cache[db_name][table_name] = value + except (LookupError, TypeError): + pass + + def _is_valid_cache(self, db_name, table_name): + # we cache per-table so if the table is there it is valid + try: + return self._constraint_cache[db_name][table_name] is not INVALID + except KeyError: + return False + + def _is_multidb(self): + try: + from django.db import connections + except ImportError: + return False + else: + return True + + def _get_connection(self): + """ + Returns a django connection for a given DB Alias + """ + if self._is_multidb(): + from django.db import connections + return connections[self.db_alias] + else: + from django.db import connection + return connection + + def _get_setting(self, setting_name): + """ + Allows code to get a setting (like, for example, STORAGE_ENGINE) + """ + setting_name = setting_name.upper() + connection = self._get_connection() + if self._is_multidb(): + # Django 1.2 and above + return connection.settings_dict[setting_name] + else: + # Django 1.1 and below + return getattr(settings, "DATABASE_%s" % setting_name) + + def _has_setting(self, setting_name): + """ + Existence-checking version of _get_setting. 
+ """ + try: + self._get_setting(setting_name) + except (KeyError, AttributeError): + return False + else: + return True + + def _get_schema_name(self): + try: + return self._get_setting('schema') + except (KeyError, AttributeError): + return self.default_schema_name + + + def _possibly_initialise(self): + if not self._initialised: + self.connection_init() + self._initialised = True + + def connection_init(self): + """ + Run before any SQL to let database-specific config be sent as a command, + e.g. which storage engine (MySQL) or transaction serialisability level. + """ + pass + + def quote_name(self, name): + """ + Uses the database backend to quote the given table/column name. + """ + return self._get_connection().ops.quote_name(name) + + def execute(self, sql, params=[]): + """ + Executes the given SQL statement, with optional parameters. + If the instance's debug attribute is True, prints out what it executes. + """ + + self._possibly_initialise() + + cursor = self._get_connection().cursor() + if self.debug: + print " = %s" % sql, params + + if self.dry_run: + return [] + + get_logger().debug('execute "%s" with params "%s"' % (sql, params)) + + try: + cursor.execute(sql, params) + except DatabaseError, e: + print >> sys.stderr, 'FATAL ERROR - The following SQL query failed: %s' % sql + print >> sys.stderr, 'The error was: %s' % e + sys.exit(1) + + try: + return cursor.fetchall() + except: + return [] + + + def execute_many(self, sql, regex=r"(?mx) ([^';]* (?:'[^']*'[^';]*)*)", comment_regex=r"(?mx) (?:^\s*$)|(?:--.*$)"): + """ + Takes a SQL file and executes it as many separate statements. + (Some backends, such as Postgres, don't work otherwise.) + """ + # Be warned: This function is full of dark magic. Make sure you really + # know regexes before trying to edit it. 
+ # First, strip comments + sql = "\n".join([x.strip().replace("%", "%%") for x in re.split(comment_regex, sql) if x.strip()]) + # Now execute each statement + for st in re.split(regex, sql)[1:][::2]: + self.execute(st) + + + def add_deferred_sql(self, sql): + """ + Add a SQL statement to the deferred list, that won't be executed until + this instance's execute_deferred_sql method is run. + """ + self.deferred_sql.append(sql) + + + def execute_deferred_sql(self): + """ + Executes all deferred SQL, resetting the deferred_sql list + """ + for sql in self.deferred_sql: + self.execute(sql) + + self.deferred_sql = [] + + + def clear_deferred_sql(self): + """ + Resets the deferred_sql list to empty. + """ + self.deferred_sql = [] + + + def clear_run_data(self, pending_creates = None): + """ + Resets variables to how they should be before a run. Used for dry runs. + If you want, pass in an old panding_creates to reset to. + """ + self.clear_deferred_sql() + self.pending_create_signals = pending_creates or [] + + + def get_pending_creates(self): + return self.pending_create_signals + + + @invalidate_table_constraints + def create_table(self, table_name, fields): + """ + Creates the table 'table_name'. 'fields' is a tuple of fields, + each repsented by a 2-part tuple of field name and a + django.db.models.fields.Field object + """ + + if len(table_name) > 63: + print " ! WARNING: You have a table name longer than 63 characters; this will not fully work on PostgreSQL or MySQL." + + columns = [ + self.column_sql(table_name, field_name, field) + for field_name, field in fields + ] + + self.execute('CREATE TABLE %s (%s);' % ( + self.quote_name(table_name), + ', '.join([col for col in columns if col]), + )) + + add_table = alias('create_table') # Alias for consistency's sake + + + @invalidate_table_constraints + def rename_table(self, old_table_name, table_name): + """ + Renames the table 'old_table_name' to 'table_name'. 
+ """ + if old_table_name == table_name: + # Short-circuit out. + return + params = (self.quote_name(old_table_name), self.quote_name(table_name)) + self.execute(self.rename_table_sql % params) + # Invalidate the not-yet-indexed table + self._set_cache(table_name, value=INVALID) + + + @invalidate_table_constraints + def delete_table(self, table_name, cascade=True): + """ + Deletes the table 'table_name'. + """ + params = (self.quote_name(table_name), ) + if cascade: + self.execute('DROP TABLE %s CASCADE;' % params) + else: + self.execute('DROP TABLE %s;' % params) + + drop_table = alias('delete_table') + + + @invalidate_table_constraints + def clear_table(self, table_name): + """ + Deletes all rows from 'table_name'. + """ + params = (self.quote_name(table_name), ) + self.execute('DELETE FROM %s;' % params) + + + + @invalidate_table_constraints + def add_column(self, table_name, name, field, keep_default=True): + """ + Adds the column 'name' to the table 'table_name'. + Uses the 'field' paramater, a django.db.models.fields.Field instance, + to generate the necessary sql + + @param table_name: The name of the table to add the column to + @param name: The name of the column to add + @param field: The field to use + """ + sql = self.column_sql(table_name, name, field) + if sql: + params = ( + self.quote_name(table_name), + sql, + ) + sql = self.add_column_string % params + self.execute(sql) + + # Now, drop the default if we need to + if not keep_default and field.default is not None: + field.default = NOT_PROVIDED + self.alter_column(table_name, name, field, explicit_name=False, ignore_constraints=True) + + + def _db_type_for_alter_column(self, field): + """ + Returns a field's type suitable for ALTER COLUMN. + By default it just returns field.db_type(). 
+ To be overriden by backend specific subclasses + @param field: The field to generate type for + """ + try: + return field.db_type(connection=self._get_connection()) + except TypeError: + return field.db_type() + + def _alter_add_column_mods(self, field, name, params, sqls): + """ + Subcommand of alter_column that modifies column definitions beyond + the type string -- e.g. adding constraints where they cannot be specified + as part of the type (overrideable) + """ + pass + + def _alter_set_defaults(self, field, name, params, sqls): + "Subcommand of alter_column that sets default values (overrideable)" + # Next, set any default + if not field.null and field.has_default(): + default = field.get_default() + sqls.append(('ALTER COLUMN %s SET DEFAULT %%s ' % (self.quote_name(name),), [default])) + else: + sqls.append(('ALTER COLUMN %s DROP DEFAULT' % (self.quote_name(name),), [])) + + @invalidate_table_constraints + def alter_column(self, table_name, name, field, explicit_name=True, ignore_constraints=False): + """ + Alters the given column name so it will match the given field. + Note that conversion between the two by the database must be possible. + Will not automatically add _id by default; to have this behavour, pass + explicit_name=False. + + @param table_name: The name of the table to add the column to + @param name: The name of the column to alter + @param field: The new field definition to use + """ + + if self.dry_run: + if self.debug: + print ' - no dry run output for alter_column() due to dynamic DDL, sorry' + return + + # hook for the field to do any resolution prior to it's attributes being queried + if hasattr(field, 'south_init'): + field.south_init() + + # Add _id or whatever if we need to + field.set_attributes_from_name(name) + if not explicit_name: + name = field.column + else: + field.column = name + + if not ignore_constraints: + # Drop all check constraints. 
Note that constraints will be added back + # with self.alter_string_set_type and self.alter_string_drop_null. + if self.has_check_constraints: + check_constraints = self._constraints_affecting_columns(table_name, [name], "CHECK") + for constraint in check_constraints: + self.execute(self.delete_check_sql % { + 'table': self.quote_name(table_name), + 'constraint': self.quote_name(constraint), + }) + + # Drop or add UNIQUE constraint + unique_constraint = list(self._constraints_affecting_columns(table_name, [name], "UNIQUE")) + if field.unique and not unique_constraint: + self.create_unique(table_name, [name]) + elif not field.unique and unique_constraint: + self.delete_unique(table_name, [name]) + + # Drop all foreign key constraints + try: + self.delete_foreign_key(table_name, name) + except ValueError: + # There weren't any + pass + + # First, change the type + params = { + "column": self.quote_name(name), + "type": self._db_type_for_alter_column(field), + "table_name": table_name + } + + # SQLs is a list of (SQL, values) pairs. + sqls = [] + + # Only alter the column if it has a type (Geometry ones sometimes don't) + if params["type"] is not None: + sqls.append((self.alter_string_set_type % params, [])) + + # Add any field- and backend- specific modifications + self._alter_add_column_mods(field, name, params, sqls) + # Next, nullity + if field.null: + sqls.append((self.alter_string_set_null % params, [])) + else: + sqls.append((self.alter_string_drop_null % params, [])) + + # Next, set any default + self._alter_set_defaults(field, name, params, sqls) + + # Finally, actually change the column + if self.allows_combined_alters: + sqls, values = zip(*sqls) + self.execute( + "ALTER TABLE %s %s;" % (self.quote_name(table_name), ", ".join(sqls)), + flatten(values), + ) + else: + # Databases like e.g. MySQL don't like more than one alter at once. 
+ for sql, values in sqls: + self.execute("ALTER TABLE %s %s;" % (self.quote_name(table_name), sql), values) + + if not ignore_constraints: + # Add back FK constraints if needed + if field.rel and self.supports_foreign_keys: + self.execute( + self.foreign_key_sql( + table_name, + field.column, + field.rel.to._meta.db_table, + field.rel.to._meta.get_field(field.rel.field_name).column + ) + ) + + def _fill_constraint_cache(self, db_name, table_name): + + schema = self._get_schema_name() + ifsc_tables = ["constraint_column_usage", "key_column_usage"] + + self._constraint_cache.setdefault(db_name, {}) + self._constraint_cache[db_name][table_name] = {} + + for ifsc_table in ifsc_tables: + rows = self.execute(""" + SELECT kc.constraint_name, kc.column_name, c.constraint_type + FROM information_schema.%s AS kc + JOIN information_schema.table_constraints AS c ON + kc.table_schema = c.table_schema AND + kc.table_name = c.table_name AND + kc.constraint_name = c.constraint_name + WHERE + kc.table_schema = %%s AND + kc.table_name = %%s + """ % ifsc_table, [schema, table_name]) + for constraint, column, kind in rows: + self._constraint_cache[db_name][table_name].setdefault(column, set()) + self._constraint_cache[db_name][table_name][column].add((kind, constraint)) + return + + def _constraints_affecting_columns(self, table_name, columns, type="UNIQUE"): + """ + Gets the names of the constraints affecting the given columns. + If columns is None, returns all constraints of the type on the table. 
+ """ + if self.dry_run: + raise DryRunError("Cannot get constraints for columns.") + + if columns is not None: + columns = set(map(lambda s: s.lower(), columns)) + + db_name = self._get_setting('NAME') + + cnames = {} + for col, constraints in self.lookup_constraint(db_name, table_name): + for kind, cname in constraints: + if kind == type: + cnames.setdefault(cname, set()) + cnames[cname].add(col.lower()) + + for cname, cols in cnames.items(): + if cols == columns or columns is None: + yield cname + + @invalidate_table_constraints + def create_unique(self, table_name, columns): + """ + Creates a UNIQUE constraint on the columns on the given table. + """ + + if not isinstance(columns, (list, tuple)): + columns = [columns] + + name = self.create_index_name(table_name, columns, suffix="_uniq") + + cols = ", ".join(map(self.quote_name, columns)) + self.execute("ALTER TABLE %s ADD CONSTRAINT %s UNIQUE (%s)" % ( + self.quote_name(table_name), + self.quote_name(name), + cols, + )) + return name + + @invalidate_table_constraints + def delete_unique(self, table_name, columns): + """ + Deletes a UNIQUE constraint on precisely the columns on the given table. + """ + + if not isinstance(columns, (list, tuple)): + columns = [columns] + + # Dry runs mean we can't do anything. + if self.dry_run: + if self.debug: + print ' - no dry run output for delete_unique_column() due to dynamic DDL, sorry' + return + + constraints = list(self._constraints_affecting_columns(table_name, columns)) + if not constraints: + raise ValueError("Cannot find a UNIQUE constraint on table %s, columns %r" % (table_name, columns)) + for constraint in constraints: + self.execute(self.delete_unique_sql % ( + self.quote_name(table_name), + self.quote_name(constraint), + )) + + + def column_sql(self, table_name, field_name, field, tablespace='', with_name=True, field_prepared=False): + """ + Creates the SQL snippet for a column. Used by add_column and add_table. 
+ """ + + # If the field hasn't already been told its attribute name, do so. + if not field_prepared: + field.set_attributes_from_name(field_name) + + # hook for the field to do any resolution prior to it's attributes being queried + if hasattr(field, 'south_init'): + field.south_init() + + # Possible hook to fiddle with the fields (e.g. defaults & TEXT on MySQL) + field = self._field_sanity(field) + + try: + sql = field.db_type(connection=self._get_connection()) + except TypeError: + sql = field.db_type() + + if sql: + + # Some callers, like the sqlite stuff, just want the extended type. + if with_name: + field_output = [self.quote_name(field.column), sql] + else: + field_output = [sql] + + field_output.append('%sNULL' % (not field.null and 'NOT ' or '')) + if field.primary_key: + field_output.append('PRIMARY KEY') + elif field.unique: + # Just use UNIQUE (no indexes any more, we have delete_unique) + field_output.append('UNIQUE') + + tablespace = field.db_tablespace or tablespace + if tablespace and getattr(self._get_connection().features, "supports_tablespaces", False) and field.unique: + # We must specify the index tablespace inline, because we + # won't be generating a CREATE INDEX statement for this field. + field_output.append(self._get_connection().ops.tablespace_sql(tablespace, inline=True)) + + sql = ' '.join(field_output) + sqlparams = () + # if the field is "NOT NULL" and a default value is provided, create the column with it + # this allows the addition of a NOT NULL field to a table with existing rows + if not getattr(field, '_suppress_default', False): + if field.has_default(): + default = field.get_default() + # If the default is actually None, don't add a default term + if default is not None: + # If the default is a callable, then call it! + if callable(default): + default = default() + + default = field.get_db_prep_save(default, connection=self._get_connection()) + # Now do some very cheap quoting. TODO: Redesign return values to avoid this. 
+ if isinstance(default, basestring): + default = "'%s'" % default.replace("'", "''") + # Escape any % signs in the output (bug #317) + if isinstance(default, basestring): + default = default.replace("%", "%%") + # Add it in + sql += " DEFAULT %s" + sqlparams = (default) + elif (not field.null and field.blank) or (field.get_default() == ''): + if field.empty_strings_allowed and self._get_connection().features.interprets_empty_strings_as_nulls: + sql += " DEFAULT ''" + # Error here would be nice, but doesn't seem to play fair. + #else: + # raise ValueError("Attempting to add a non null column that isn't character based without an explicit default value.") + + if field.rel and self.supports_foreign_keys: + self.add_deferred_sql( + self.foreign_key_sql( + table_name, + field.column, + field.rel.to._meta.db_table, + field.rel.to._meta.get_field(field.rel.field_name).column + ) + ) + + # Things like the contrib.gis module fields have this in 1.1 and below + if hasattr(field, 'post_create_sql'): + for stmt in field.post_create_sql(no_style(), table_name): + self.add_deferred_sql(stmt) + + # In 1.2 and above, you have to ask the DatabaseCreation stuff for it. + # This also creates normal indexes in 1.1. + if hasattr(self._get_connection().creation, "sql_indexes_for_field"): + # Make a fake model to pass in, with only db_table + model = self.mock_model("FakeModelForGISCreation", table_name) + for stmt in self._get_connection().creation.sql_indexes_for_field(model, field, no_style()): + self.add_deferred_sql(stmt) + + if sql: + return sql % sqlparams + else: + return None + + + def _field_sanity(self, field): + """ + Placeholder for DBMS-specific field alterations (some combos aren't valid, + e.g. 
DEFAULT and TEXT on MySQL) + """ + return field + + + def foreign_key_sql(self, from_table_name, from_column_name, to_table_name, to_column_name): + """ + Generates a full SQL statement to add a foreign key constraint + """ + constraint_name = '%s_refs_%s_%x' % (from_column_name, to_column_name, abs(hash((from_table_name, to_table_name)))) + return 'ALTER TABLE %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' % ( + self.quote_name(from_table_name), + self.quote_name(truncate_name(constraint_name, self._get_connection().ops.max_name_length())), + self.quote_name(from_column_name), + self.quote_name(to_table_name), + self.quote_name(to_column_name), + self._get_connection().ops.deferrable_sql() # Django knows this + ) + + + @invalidate_table_constraints + def delete_foreign_key(self, table_name, column): + "Drop a foreign key constraint" + if self.dry_run: + if self.debug: + print ' - no dry run output for delete_foreign_key() due to dynamic DDL, sorry' + return # We can't look at the DB to get the constraints + constraints = self._find_foreign_constraints(table_name, column) + if not constraints: + raise ValueError("Cannot find a FOREIGN KEY constraint on table %s, column %s" % (table_name, column)) + for constraint_name in constraints: + self.execute(self.delete_foreign_key_sql % { + "table": self.quote_name(table_name), + "constraint": self.quote_name(constraint_name), + }) + + drop_foreign_key = alias('delete_foreign_key') + + def _find_foreign_constraints(self, table_name, column_name=None): + return list(self._constraints_affecting_columns( + table_name, [column_name], "FOREIGN KEY")) + + def create_index_name(self, table_name, column_names, suffix=""): + """ + Generate a unique name for the index + """ + + table_name = table_name.replace('"', '').replace('.', '_') + index_unique_name = '' + + if len(column_names) > 1: + index_unique_name = '_%x' % abs(hash((table_name, ','.join(column_names)))) + + # If the index name is too long, truncate it + 
index_name = ('%s_%s%s%s' % (table_name, column_names[0], index_unique_name, suffix)).replace('"', '').replace('.', '_') + if len(index_name) > self.max_index_name_length: + part = ('_%s%s%s' % (column_names[0], index_unique_name, suffix)) + index_name = '%s%s' % (table_name[:(self.max_index_name_length-len(part))], part) + + return index_name + + + def create_index_sql(self, table_name, column_names, unique=False, db_tablespace=''): + """ + Generates a create index statement on 'table_name' for a list of 'column_names' + """ + if not column_names: + print "No column names supplied on which to create an index" + return '' + + connection = self._get_connection() + if db_tablespace and connection.features.supports_tablespaces: + tablespace_sql = ' ' + connection.ops.tablespace_sql(db_tablespace) + else: + tablespace_sql = '' + + index_name = self.create_index_name(table_name, column_names) + return 'CREATE %sINDEX %s ON %s (%s)%s;' % ( + unique and 'UNIQUE ' or '', + self.quote_name(index_name), + self.quote_name(table_name), + ','.join([self.quote_name(field) for field in column_names]), + tablespace_sql + ) + + @invalidate_table_constraints + def create_index(self, table_name, column_names, unique=False, db_tablespace=''): + """ Executes a create index statement """ + sql = self.create_index_sql(table_name, column_names, unique, db_tablespace) + self.execute(sql) + + + @invalidate_table_constraints + def delete_index(self, table_name, column_names, db_tablespace=''): + """ + Deletes an index created with create_index. + This is possible using only columns due to the deterministic + index naming function which relies on column names. 
+ """ + if isinstance(column_names, (str, unicode)): + column_names = [column_names] + name = self.create_index_name(table_name, column_names) + sql = self.drop_index_string % { + "index_name": self.quote_name(name), + "table_name": self.quote_name(table_name), + } + self.execute(sql) + + drop_index = alias('delete_index') + + + @delete_column_constraints + def delete_column(self, table_name, name): + """ + Deletes the column 'column_name' from the table 'table_name'. + """ + db_name = self._get_setting('NAME') + params = (self.quote_name(table_name), self.quote_name(name)) + self.execute(self.delete_column_string % params, []) + + drop_column = alias('delete_column') + + + def rename_column(self, table_name, old, new): + """ + Renames the column 'old' from the table 'table_name' to 'new'. + """ + raise NotImplementedError("rename_column has no generic SQL syntax") + + + @invalidate_table_constraints + def delete_primary_key(self, table_name): + """ + Drops the old primary key. + """ + # Dry runs mean we can't do anything. + if self.dry_run: + if self.debug: + print ' - no dry run output for delete_primary_key() due to dynamic DDL, sorry' + return + + constraints = list(self._constraints_affecting_columns(table_name, None, type="PRIMARY KEY")) + if not constraints: + raise ValueError("Cannot find a PRIMARY KEY constraint on table %s" % (table_name,)) + + for constraint in constraints: + self.execute(self.delete_primary_key_sql % { + "table": self.quote_name(table_name), + "constraint": self.quote_name(constraint), + }) + + drop_primary_key = alias('delete_primary_key') + + + @invalidate_table_constraints + def create_primary_key(self, table_name, columns): + """ + Creates a new primary key on the specified columns. 
+ """ + if not isinstance(columns, (list, tuple)): + columns = [columns] + self.execute(self.create_primary_key_string % { + "table": self.quote_name(table_name), + "constraint": self.quote_name(table_name+"_pkey"), + "columns": ", ".join(map(self.quote_name, columns)), + }) + + + def start_transaction(self): + """ + Makes sure the following commands are inside a transaction. + Must be followed by a (commit|rollback)_transaction call. + """ + if self.dry_run: + self.pending_transactions += 1 + transaction.commit_unless_managed() + transaction.enter_transaction_management() + transaction.managed(True) + + + def commit_transaction(self): + """ + Commits the current transaction. + Must be preceded by a start_transaction call. + """ + if self.dry_run: + return + transaction.commit() + transaction.leave_transaction_management() + + + def rollback_transaction(self): + """ + Rolls back the current transaction. + Must be preceded by a start_transaction call. + """ + if self.dry_run: + self.pending_transactions -= 1 + transaction.rollback() + transaction.leave_transaction_management() + + def rollback_transactions_dry_run(self): + """ + Rolls back all pending_transactions during this dry run. + """ + if not self.dry_run: + return + while self.pending_transactions > 0: + self.rollback_transaction() + if transaction.is_dirty(): + # Force an exception, if we're still in a dirty transaction. + # This means we are missing a COMMIT/ROLLBACK. + transaction.leave_transaction_management() + + + def send_create_signal(self, app_label, model_names): + self.pending_create_signals.append((app_label, model_names)) + + + def send_pending_create_signals(self, verbosity=0, interactive=False): + # Group app_labels together + signals = SortedDict() + for (app_label, model_names) in self.pending_create_signals: + try: + signals[app_label].extend(model_names) + except KeyError: + signals[app_label] = list(model_names) + # Send only one signal per app. 
+ for (app_label, model_names) in signals.iteritems(): + self.really_send_create_signal(app_label, list(set(model_names)), + verbosity=verbosity, + interactive=interactive) + self.pending_create_signals = [] + + + def really_send_create_signal(self, app_label, model_names, + verbosity=0, interactive=False): + """ + Sends a post_syncdb signal for the model specified. + + If the model is not found (perhaps it's been deleted?), + no signal is sent. + + TODO: The behavior of django.contrib.* apps seems flawed in that + they don't respect created_models. Rather, they blindly execute + over all models within the app sending the signal. This is a + patch we should push Django to make For now, this should work. + """ + + if self.debug: + print " - Sending post_syncdb signal for %s: %s" % (app_label, model_names) + + app = models.get_app(app_label) + if not app: + return + + created_models = [] + for model_name in model_names: + model = models.get_model(app_label, model_name) + if model: + created_models.append(model) + + if created_models: + + if hasattr(dispatcher, "send"): + # Older djangos + dispatcher.send(signal=models.signals.post_syncdb, sender=app, + app=app, created_models=created_models, + verbosity=verbosity, interactive=interactive) + else: + if self._is_multidb(): + # Django 1.2+ + models.signals.post_syncdb.send( + sender=app, + app=app, + created_models=created_models, + verbosity=verbosity, + interactive=interactive, + db=self.db_alias, + ) + else: + # Django 1.1 - 1.0 + models.signals.post_syncdb.send( + sender=app, + app=app, + created_models=created_models, + verbosity=verbosity, + interactive=interactive, + ) + + + def mock_model(self, model_name, db_table, db_tablespace='', + pk_field_name='id', pk_field_type=models.AutoField, + pk_field_args=[], pk_field_kwargs={}): + """ + Generates a MockModel class that provides enough information + to be used by a foreign key/many-to-many relationship. 
+ + Migrations should prefer to use these rather than actual models + as models could get deleted over time, but these can remain in + migration files forever. + + Depreciated. + """ + class MockOptions(object): + def __init__(self): + self.db_table = db_table + self.db_tablespace = db_tablespace or settings.DEFAULT_TABLESPACE + self.object_name = model_name + self.module_name = model_name.lower() + + if pk_field_type == models.AutoField: + pk_field_kwargs['primary_key'] = True + + self.pk = pk_field_type(*pk_field_args, **pk_field_kwargs) + self.pk.set_attributes_from_name(pk_field_name) + self.abstract = False + + def get_field_by_name(self, field_name): + # we only care about the pk field + return (self.pk, self.model, True, False) + + def get_field(self, name): + # we only care about the pk field + return self.pk + + class MockModel(object): + _meta = None + + # We need to return an actual class object here, not an instance + MockModel._meta = MockOptions() + MockModel._meta.model = MockModel + return MockModel + + def _db_positive_type_for_alter_column(self, field): + """ + A helper for subclasses overriding _db_type_for_alter_column: + Remove the check constraint from the type string for PositiveInteger + and PositiveSmallInteger fields. + @param field: The field to generate type for + """ + super_result = super(type(self), self)._db_type_for_alter_column(field) + if isinstance(field, (models.PositiveSmallIntegerField, models.PositiveIntegerField)): + return super_result.split(" ", 1)[0] + return super_result + + def _alter_add_positive_check(self, field, name, params, sqls): + """ + A helper for subclasses overriding _alter_add_column_mods: + Add a check constraint verifying positivity to PositiveInteger and + PositiveSmallInteger fields. 
+ """ + super(type(self), self)._alter_add_column_mods(field, name, params, sqls) + if isinstance(field, (models.PositiveSmallIntegerField, models.PositiveIntegerField)): + uniq_hash = abs(hash(tuple(params.values()))) + d = dict( + constraint = "CK_%s_PSTV_%s" % (name, hex(uniq_hash)[2:]), + check = "%s >= 0" % self.quote_name(name)) + sqls.append((self.add_check_constraint_fragment % d, [])) + + + +# Single-level flattening of lists +def flatten(ls): + nl = [] + for l in ls: + nl += l + return nl diff --git a/users/src/south/south/db/generic.pyc b/users/src/south/south/db/generic.pyc new file mode 100644 index 0000000..d9b5c21 Binary files /dev/null and b/users/src/south/south/db/generic.pyc differ diff --git a/users/src/south/south/db/mysql.py b/users/src/south/south/db/mysql.py new file mode 100644 index 0000000..0a6d915 --- /dev/null +++ b/users/src/south/south/db/mysql.py @@ -0,0 +1,280 @@ +# MySQL-specific implementations for south +# Original author: Andrew Godwin +# Patches by: F. Gabriel Gosselin + +from django.db import connection +from django.conf import settings +from south.db import generic +from south.db.generic import DryRunError, INVALID + +from south.logger import get_logger + +def delete_column_constraints(func): + """ + Decorates column operation functions for MySQL. + Deletes the constraints from the database and clears local cache. 
+ """ + def _column_rm(self, table_name, column_name, *args, **opts): + # Delete foreign key constraints + try: + self.delete_foreign_key(table_name, column_name) + except ValueError: + pass # If no foreign key on column, OK because it checks first + # Delete constraints referring to this column + try: + reverse = self._lookup_reverse_constraint(table_name, column_name) + for cname, rtable, rcolumn in reverse: + self.delete_foreign_key(rtable, rcolumn) + except DryRunError: + pass + return func(self, table_name, column_name, *args, **opts) + return _column_rm + +def copy_column_constraints(func): + """ + Decorates column operation functions for MySQL. + Determines existing constraints and copies them to a new column + """ + def _column_cp(self, table_name, column_old, column_new, *args, **opts): + # Copy foreign key constraint + try: + constraint = self._find_foreign_constraints(table_name, column_old)[0] + (ftable, fcolumn) = self._lookup_constraint_references(table_name, constraint) + if ftable and fcolumn: + fk_sql = self.foreign_key_sql( + table_name, column_new, ftable, fcolumn) + get_logger().debug("Foreign key SQL: " + fk_sql) + self.add_deferred_sql(fk_sql) + except IndexError: + pass # No constraint exists so ignore + except DryRunError: + pass + # Copy constraints referring to this column + try: + reverse = self._lookup_reverse_constraint(table_name, column_old) + for cname, rtable, rcolumn in reverse: + fk_sql = self.foreign_key_sql( + rtable, rcolumn, table_name, column_new) + self.add_deferred_sql(fk_sql) + except DryRunError: + pass + return func(self, table_name, column_old, column_new, *args, **opts) + return _column_cp + +def invalidate_table_constraints(func): + """ + For MySQL we grab all table constraints simultaneously, so this is + effective. + It further solves the issues of invalidating referred table constraints. 
+ """ + def _cache_clear(self, table, *args, **opts): + db_name = self._get_setting('NAME') + if db_name in self._constraint_cache: + del self._constraint_cache[db_name] + if db_name in self._reverse_cache: + del self._reverse_cache[db_name] + if db_name in self._constraint_references: + del self._constraint_references[db_name] + return func(self, table, *args, **opts) + return _cache_clear + +class DatabaseOperations(generic.DatabaseOperations): + """ + MySQL implementation of database operations. + + MySQL has no DDL transaction support This can confuse people when they ask + how to roll back - hence the dry runs, etc., found in the migration code. + """ + + backend_name = "mysql" + alter_string_set_type = '' + alter_string_set_null = 'MODIFY %(column)s %(type)s NULL;' + alter_string_drop_null = 'MODIFY %(column)s %(type)s NOT NULL;' + drop_index_string = 'DROP INDEX %(index_name)s ON %(table_name)s' + delete_primary_key_sql = "ALTER TABLE %(table)s DROP PRIMARY KEY" + delete_foreign_key_sql = "ALTER TABLE %(table)s DROP FOREIGN KEY %(constraint)s" + allows_combined_alters = False + has_ddl_transactions = False + has_check_constraints = False + delete_unique_sql = "ALTER TABLE %s DROP INDEX %s" + rename_table_sql = "RENAME TABLE %s TO %s;" + + geom_types = ['geometry', 'point', 'linestring', 'polygon'] + text_types = ['text', 'blob',] + + def __init__(self, db_alias): + self._constraint_references = {} + self._reverse_cache = {} + super(DatabaseOperations, self).__init__(db_alias) + + def _is_valid_cache(self, db_name, table_name): + cache = self._constraint_cache + # we cache the whole db so if there are any tables table_name is valid + return db_name in cache and cache[db_name].get(table_name, None) is not INVALID + + def _fill_constraint_cache(self, db_name, table_name): + # for MySQL grab all constraints for this database. It's just as cheap as a single column. 
+ self._constraint_cache[db_name] = {} + self._constraint_cache[db_name][table_name] = {} + self._reverse_cache[db_name] = {} + self._constraint_references[db_name] = {} + + name_query = """ + SELECT kc.`constraint_name`, kc.`column_name`, kc.`table_name`, + kc.`referenced_table_name`, kc.`referenced_column_name` + FROM information_schema.key_column_usage AS kc + WHERE + kc.table_schema = %s + """ + rows = self.execute(name_query, [db_name]) + if not rows: + return + cnames = {} + for constraint, column, table, ref_table, ref_column in rows: + key = (table, constraint) + cnames.setdefault(key, set()) + cnames[key].add((column, ref_table, ref_column)) + + type_query = """ + SELECT c.constraint_name, c.table_name, c.constraint_type + FROM information_schema.table_constraints AS c + WHERE + c.table_schema = %s + """ + rows = self.execute(type_query, [db_name]) + for constraint, table, kind in rows: + key = (table, constraint) + self._constraint_cache[db_name].setdefault(table, {}) + try: + cols = cnames[key] + except KeyError: + cols = set() + for column_set in cols: + (column, ref_table, ref_column) = column_set + self._constraint_cache[db_name][table].setdefault(column, set()) + if kind == 'FOREIGN KEY': + self._constraint_cache[db_name][table][column].add((kind, + constraint)) + # Create constraint lookup, see constraint_references + self._constraint_references[db_name][(table, + constraint)] = (ref_table, ref_column) + # Create reverse table lookup, reverse_lookup + self._reverse_cache[db_name].setdefault(ref_table, {}) + self._reverse_cache[db_name][ref_table].setdefault(ref_column, + set()) + self._reverse_cache[db_name][ref_table][ref_column].add( + (constraint, table, column)) + else: + self._constraint_cache[db_name][table][column].add((kind, + constraint)) + + def connection_init(self): + """ + Run before any SQL to let database-specific config be sent as a command, + e.g. which storage engine (MySQL) or transaction serialisability level. 
+ """ + cursor = self._get_connection().cursor() + if self._has_setting('STORAGE_ENGINE') and self._get_setting('STORAGE_ENGINE'): + cursor.execute("SET storage_engine=%s;" % self._get_setting('STORAGE_ENGINE')) + + def start_transaction(self): + super(DatabaseOperations, self).start_transaction() + self.execute("SET FOREIGN_KEY_CHECKS=0;") + + @copy_column_constraints + @delete_column_constraints + @invalidate_table_constraints + def rename_column(self, table_name, old, new): + if old == new or self.dry_run: + return [] + + rows = [x for x in self.execute('DESCRIBE %s' % (self.quote_name(table_name),)) if x[0] == old] + + if not rows: + raise ValueError("No column '%s' in '%s'." % (old, table_name)) + + params = ( + self.quote_name(table_name), + self.quote_name(old), + self.quote_name(new), + rows[0][1], + rows[0][2] == "YES" and "NULL" or "NOT NULL", + rows[0][4] and "DEFAULT " or "", + rows[0][4] and "%s" or "", + rows[0][5] or "", + ) + + sql = 'ALTER TABLE %s CHANGE COLUMN %s %s %s %s %s %s %s;' % params + + if rows[0][4]: + self.execute(sql, (rows[0][4],)) + else: + self.execute(sql) + + @delete_column_constraints + def delete_column(self, table_name, name): + super(DatabaseOperations, self).delete_column(table_name, name) + + @invalidate_table_constraints + def rename_table(self, old_table_name, table_name): + super(DatabaseOperations, self).rename_table(old_table_name, + table_name) + + @invalidate_table_constraints + def delete_table(self, table_name): + super(DatabaseOperations, self).delete_table(table_name) + + def _lookup_constraint_references(self, table_name, cname): + """ + Provided an existing table and constraint, returns tuple of (foreign + table, column) + """ + db_name = self._get_setting('NAME') + try: + return self._constraint_references[db_name][(table_name, cname)] + except KeyError: + return None + + def _lookup_reverse_constraint(self, table_name, column_name=None): + """Look for the column referenced by a foreign constraint""" + db_name 
= self._get_setting('NAME') + if self.dry_run: + raise DryRunError("Cannot get constraints for columns.") + + if not self._is_valid_cache(db_name, table_name): + # Piggy-back on lookup_constraint, ensures cache exists + self.lookup_constraint(db_name, table_name) + + try: + table = self._reverse_cache[db_name][table_name] + if column_name == None: + return [(y, tuple(y)) for x, y in table.items()] + else: + return tuple(table[column_name]) + except KeyError, e: + return [] + + def _field_sanity(self, field): + """ + This particular override stops us sending DEFAULTs for BLOB/TEXT columns. + """ + # MySQL does not support defaults for geometry columns also + type = self._db_type_for_alter_column(field).lower() + is_geom = True in [ type.find(t) > -1 for t in self.geom_types ] + is_text = True in [ type.find(t) > -1 for t in self.text_types ] + + if is_geom or is_text: + field._suppress_default = True + return field + + def _alter_set_defaults(self, field, name, params, sqls): + """ + MySQL does not support defaults on text or blob columns. 
+ """ + type = params['type'] + # MySQL does not support defaults for geometry columns also + is_geom = True in [ type.find(t) > -1 for t in self.geom_types ] + is_text = True in [ type.find(t) > -1 for t in self.text_types ] + if not is_geom and not is_text: + super(DatabaseOperations, self)._alter_set_defaults(field, name, params, sqls) + diff --git a/users/src/south/south/db/mysql.pyc b/users/src/south/south/db/mysql.pyc new file mode 100644 index 0000000..66a25f4 Binary files /dev/null and b/users/src/south/south/db/mysql.pyc differ diff --git a/users/src/south/south/db/oracle.py b/users/src/south/south/db/oracle.py new file mode 100644 index 0000000..82527cb --- /dev/null +++ b/users/src/south/south/db/oracle.py @@ -0,0 +1,262 @@ +import os.path +import sys +import re +import warnings +import cx_Oracle + + +from django.db import connection, models +from django.db.backends.util import truncate_name +from django.core.management.color import no_style +from django.db.models.fields import NOT_PROVIDED +from django.db.utils import DatabaseError + +# In revision r16016 function get_sequence_name has been transformed into +# method of DatabaseOperations class. To make code backward-compatible we +# need to handle both situations. +try: + from django.db.backends.oracle.base import get_sequence_name\ + as original_get_sequence_name +except ImportError: + original_get_sequence_name = None + +from south.db import generic + +warnings.warn("! WARNING: South's Oracle support is still alpha. " + "Be wary of possible bugs.") + +class DatabaseOperations(generic.DatabaseOperations): + """ + Oracle implementation of database operations. 
+ """ + backend_name = 'oracle' + + alter_string_set_type = 'ALTER TABLE %(table_name)s MODIFY %(column)s %(type)s %(nullity)s;' + alter_string_set_default = 'ALTER TABLE %(table_name)s MODIFY %(column)s DEFAULT %(default)s;' + add_column_string = 'ALTER TABLE %s ADD %s;' + delete_column_string = 'ALTER TABLE %s DROP COLUMN %s;' + add_constraint_string = 'ALTER TABLE %(table_name)s ADD CONSTRAINT %(constraint)s %(clause)s' + + allows_combined_alters = False + + constraints_dict = { + 'P': 'PRIMARY KEY', + 'U': 'UNIQUE', + 'C': 'CHECK', + 'R': 'FOREIGN KEY' + } + + def get_sequence_name(self, table_name): + if original_get_sequence_name is None: + return self._get_connection().ops._get_sequence_name(table_name) + else: + return original_get_sequence_name(table_name) + + def adj_column_sql(self, col): + # Fix boolean field values: need to be 1/0, not True/False + col = re.sub('DEFAULT True', 'DEFAULT 1', col) + col = re.sub('DEFAULT False', 'DEFAULT 0', col) + # Fix other things + col = re.sub('(?PCHECK \(.*\))(?P.*)(?PDEFAULT \d+)', + lambda mo: '%s %s%s'%(mo.group('default'), mo.group('constr'), mo.group('any')), col) #syntax fix for boolean/integer field only + col = re.sub('(?P(NOT )?NULL) (?P(.* )?)(?PDEFAULT.+)', + lambda mo: '%s %s %s'%(mo.group('default'),mo.group('not_null'),mo.group('misc') or ''), col) #fix order of NULL/NOT NULL and DEFAULT + return col + + def check_meta(self, table_name): + return table_name in [ m._meta.db_table for m in models.get_models() ] #caching provided by Django + + def normalize_name(self, name): + """ + Get the properly shortened and uppercased identifier as returned by quote_name(), but without the actual quotes. 
+ """ + nn = self.quote_name(name) + if nn[0] == '"' and nn[-1] == '"': + nn = nn[1:-1] + return nn + + @generic.invalidate_table_constraints + def create_table(self, table_name, fields): + qn = self.quote_name(table_name) + columns = [] + autoinc_sql = '' + + for field_name, field in fields: + col = self.column_sql(table_name, field_name, field) + if not col: + continue + col = self.adj_column_sql(col) + + columns.append(col) + if isinstance(field, models.AutoField): + autoinc_sql = connection.ops.autoinc_sql(table_name, field_name) + + sql = 'CREATE TABLE %s (%s);' % (qn, ', '.join([col for col in columns])) + self.execute(sql) + if autoinc_sql: + self.execute(autoinc_sql[0]) + self.execute(autoinc_sql[1]) + + @generic.invalidate_table_constraints + def delete_table(self, table_name, cascade=True): + qn = self.quote_name(table_name) + + # Note: PURGE is not valid syntax for Oracle 9i (it was added in 10) + if cascade: + self.execute('DROP TABLE %s CASCADE CONSTRAINTS;' % qn) + else: + self.execute('DROP TABLE %s;' % qn) + + # If the table has an AutoField a sequence was created. 
+ sequence_sql = """ +DECLARE + i INTEGER; +BEGIN + SELECT COUNT(*) INTO i FROM USER_CATALOG + WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE'; + IF i = 1 THEN + EXECUTE IMMEDIATE 'DROP SEQUENCE "%(sq_name)s"'; + END IF; +END; +/""" % {'sq_name': self.get_sequence_name(table_name)} + self.execute(sequence_sql) + + @generic.invalidate_table_constraints + def alter_column(self, table_name, name, field, explicit_name=True): + qn = self.quote_name(table_name) + + # hook for the field to do any resolution prior to it's attributes being queried + if hasattr(field, 'south_init'): + field.south_init() + field = self._field_sanity(field) + + # Add _id or whatever if we need to + field.set_attributes_from_name(name) + if not explicit_name: + name = field.column + qn_col = self.quote_name(name) + + # First, change the type + # This will actually also add any CHECK constraints needed, + # since e.g. 'type' for a BooleanField is 'NUMBER(1) CHECK (%(qn_column)s IN (0,1))' + params = { + 'table_name':qn, + 'column': qn_col, + 'type': self._db_type_for_alter_column(field), + 'nullity': 'NOT NULL', + 'default': 'NULL' + } + if field.null: + params['nullity'] = 'NULL' + + if not field.null and field.has_default(): + params['default'] = field.get_default() + + sql_templates = [ + (self.alter_string_set_type, params), + (self.alter_string_set_default, params.copy()), + ] + + # UNIQUE constraint + unique_constraint = list(self._constraints_affecting_columns(table_name, [name], 'UNIQUE')) + if field.unique and not unique_constraint: + self.create_unique(table_name, [name]) + elif not field.unique and unique_constraint: + self.delete_unique(table_name, [name]) + + # drop CHECK constraints. Make sure this is executed before the ALTER TABLE statements + # generated above, since those statements recreate the constraints we delete here. 
+ check_constraints = self._constraints_affecting_columns(table_name, [name], "CHECK") + for constraint in check_constraints: + self.execute(self.delete_check_sql % { + 'table': self.quote_name(table_name), + 'constraint': self.quote_name(constraint), + }) + + for sql_template, params in sql_templates: + try: + self.execute(sql_template % params) + except DatabaseError, exc: + description = str(exc) + # Oracle complains if a column is already NULL/NOT NULL + if 'ORA-01442' in description or 'ORA-01451' in description: + # so we just drop NULL/NOT NULL part from target sql and retry + params['nullity'] = '' + sql = sql_template % params + self.execute(sql) + else: + raise + + @generic.copy_column_constraints + @generic.delete_column_constraints + def rename_column(self, table_name, old, new): + if old == new: + # Short-circuit out + return [] + self.execute('ALTER TABLE %s RENAME COLUMN %s TO %s;' % ( + self.quote_name(table_name), + self.quote_name(old), + self.quote_name(new), + )) + + @generic.invalidate_table_constraints + def add_column(self, table_name, name, field, keep_default=True): + sql = self.column_sql(table_name, name, field) + sql = self.adj_column_sql(sql) + + if sql: + params = ( + self.quote_name(table_name), + sql + ) + sql = self.add_column_string % params + self.execute(sql) + + # Now, drop the default if we need to + if not keep_default and field.default is not None: + field.default = NOT_PROVIDED + self.alter_column(table_name, name, field, explicit_name=False) + + def delete_column(self, table_name, name): + return super(DatabaseOperations, self).delete_column(self.quote_name(table_name), name) + + def lookup_constraint(self, db_name, table_name, column_name=None): + if column_name: + # Column names in the constraint cache come from the database, + # make sure we use the properly shortened/uppercased version + # for lookup. 
+ column_name = self.normalize_name(column_name) + return super(DatabaseOperations, self).lookup_constraint(db_name, table_name, column_name) + + def _constraints_affecting_columns(self, table_name, columns, type="UNIQUE"): + if columns: + columns = [self.normalize_name(c) for c in columns] + return super(DatabaseOperations, self)._constraints_affecting_columns(table_name, columns, type) + + def _field_sanity(self, field): + """ + This particular override stops us sending DEFAULTs for BooleanField. + """ + if isinstance(field, models.BooleanField) and field.has_default(): + field.default = int(field.to_python(field.get_default())) + return field + + def _fill_constraint_cache(self, db_name, table_name): + self._constraint_cache.setdefault(db_name, {}) + self._constraint_cache[db_name][table_name] = {} + + rows = self.execute(""" + SELECT user_cons_columns.constraint_name, + user_cons_columns.column_name, + user_constraints.constraint_type + FROM user_constraints + JOIN user_cons_columns ON + user_constraints.table_name = user_cons_columns.table_name AND + user_constraints.constraint_name = user_cons_columns.constraint_name + WHERE user_constraints.table_name = '%s' + """ % self.normalize_name(table_name)) + + for constraint, column, kind in rows: + self._constraint_cache[db_name][table_name].setdefault(column, set()) + self._constraint_cache[db_name][table_name][column].add((self.constraints_dict[kind], constraint)) + return diff --git a/users/src/south/south/db/postgresql_psycopg2.py b/users/src/south/south/db/postgresql_psycopg2.py new file mode 100644 index 0000000..151bc88 --- /dev/null +++ b/users/src/south/south/db/postgresql_psycopg2.py @@ -0,0 +1,63 @@ + +from django.db import connection, models +from south.db import generic + +class DatabaseOperations(generic.DatabaseOperations): + + """ + PsycoPG2 implementation of database operations. 
+ """ + + backend_name = "postgres" + + @generic.copy_column_constraints + @generic.delete_column_constraints + def rename_column(self, table_name, old, new): + if old == new: + # Short-circuit out + return [] + self.execute('ALTER TABLE %s RENAME COLUMN %s TO %s;' % ( + self.quote_name(table_name), + self.quote_name(old), + self.quote_name(new), + )) + + @generic.invalidate_table_constraints + def rename_table(self, old_table_name, table_name): + "will rename the table and an associated ID sequence and primary key index" + # First, rename the table + generic.DatabaseOperations.rename_table(self, old_table_name, table_name) + # Then, try renaming the ID sequence + # (if you're using other AutoFields... your problem, unfortunately) + self.commit_transaction() + self.start_transaction() + try: + generic.DatabaseOperations.rename_table(self, old_table_name+"_id_seq", table_name+"_id_seq") + except: + if self.debug: + print " ~ No such sequence (ignoring error)" + self.rollback_transaction() + else: + self.commit_transaction() + self.start_transaction() + + # Rename primary key index, will not rename other indices on + # the table that are used by django (e.g. foreign keys). Until + # figure out how, you need to do this yourself. 
+ try: + generic.DatabaseOperations.rename_table(self, old_table_name+"_pkey", table_name+ "_pkey") + except: + if self.debug: + print " ~ No such primary key (ignoring error)" + self.rollback_transaction() + else: + self.commit_transaction() + self.start_transaction() + + + def rename_index(self, old_index_name, index_name): + "Rename an index individually" + generic.DatabaseOperations.rename_table(self, old_index_name, index_name) + + _db_type_for_alter_column = generic.alias("_db_positive_type_for_alter_column") + _alter_add_column_mods = generic.alias("_alter_add_positive_check") diff --git a/users/src/south/south/db/sql_server/__init__.py b/users/src/south/south/db/sql_server/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/south/db/sql_server/pyodbc.py b/users/src/south/south/db/sql_server/pyodbc.py new file mode 100644 index 0000000..0c0c147 --- /dev/null +++ b/users/src/south/south/db/sql_server/pyodbc.py @@ -0,0 +1,423 @@ +from datetime import date, datetime, time +from warnings import warn +from django.db import models +from django.db.models import fields +from south.db import generic +from south.db.generic import delete_column_constraints, invalidate_table_constraints, copy_column_constraints +from south.exceptions import ConstraintDropped + +class DatabaseOperations(generic.DatabaseOperations): + """ + django-pyodbc (sql_server.pyodbc) implementation of database operations. 
+ """ + + backend_name = "pyodbc" + + add_column_string = 'ALTER TABLE %s ADD %s;' + alter_string_set_type = 'ALTER COLUMN %(column)s %(type)s' + alter_string_set_null = 'ALTER COLUMN %(column)s %(type)s NULL' + alter_string_drop_null = 'ALTER COLUMN %(column)s %(type)s NOT NULL' + + allows_combined_alters = False + + drop_index_string = 'DROP INDEX %(index_name)s ON %(table_name)s' + drop_constraint_string = 'ALTER TABLE %(table_name)s DROP CONSTRAINT %(constraint_name)s' + delete_column_string = 'ALTER TABLE %s DROP COLUMN %s' + + #create_check_constraint_sql = "ALTER TABLE %(table)s " + \ + # generic.DatabaseOperations.add_check_constraint_fragment + create_foreign_key_sql = "ALTER TABLE %(table)s ADD CONSTRAINT %(constraint)s " + \ + "FOREIGN KEY (%(column)s) REFERENCES %(target)s" + create_unique_sql = "ALTER TABLE %(table)s ADD CONSTRAINT %(constraint)s UNIQUE (%(columns)s)" + + + default_schema_name = "dbo" + + + @delete_column_constraints + def delete_column(self, table_name, name): + q_table_name, q_name = (self.quote_name(table_name), self.quote_name(name)) + + # Zap the indexes + for ind in self._find_indexes_for_column(table_name,name): + params = {'table_name':q_table_name, 'index_name': ind} + sql = self.drop_index_string % params + self.execute(sql, []) + + # Zap the constraints + for const in self._find_constraints_for_column(table_name,name): + params = {'table_name':q_table_name, 'constraint_name': const} + sql = self.drop_constraint_string % params + self.execute(sql, []) + + # Zap default if exists + drop_default = self.drop_column_default_sql(table_name, name) + if drop_default: + sql = "ALTER TABLE [%s] %s" % (table_name, drop_default) + self.execute(sql, []) + + # Finally zap the column itself + self.execute(self.delete_column_string % (q_table_name, q_name), []) + + def _find_indexes_for_column(self, table_name, name): + "Find the indexes that apply to a column, needed when deleting" + + sql = """ + SELECT si.name, si.id, sik.colid, sc.name 
+ FROM dbo.sysindexes SI WITH (NOLOCK) + INNER JOIN dbo.sysindexkeys SIK WITH (NOLOCK) + ON SIK.id = Si.id + AND SIK.indid = SI.indid + INNER JOIN dbo.syscolumns SC WITH (NOLOCK) + ON SI.id = SC.id + AND SIK.colid = SC.colid + WHERE SI.indid !=0 + AND Si.id = OBJECT_ID('%s') + AND SC.name = '%s' + """ + idx = self.execute(sql % (table_name, name), []) + return [i[0] for i in idx] + + + def _find_constraints_for_column(self, table_name, name, just_names=True): + """ + Find the constraints that apply to a column, needed when deleting. Defaults not included. + This is more general than the parent _constraints_affecting_columns, as on MSSQL this + includes PK and FK constraints. + """ + + sql = """ + SELECT CC.[CONSTRAINT_NAME] + ,TC.[CONSTRAINT_TYPE] + ,CHK.[CHECK_CLAUSE] + ,RFD.TABLE_SCHEMA + ,RFD.TABLE_NAME + ,RFD.COLUMN_NAME + -- used for normalized names + ,CC.TABLE_NAME + ,CC.COLUMN_NAME + FROM [INFORMATION_SCHEMA].[TABLE_CONSTRAINTS] TC + JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE CC + ON TC.CONSTRAINT_CATALOG = CC.CONSTRAINT_CATALOG + AND TC.CONSTRAINT_SCHEMA = CC.CONSTRAINT_SCHEMA + AND TC.CONSTRAINT_NAME = CC.CONSTRAINT_NAME + LEFT JOIN INFORMATION_SCHEMA.CHECK_CONSTRAINTS CHK + ON CHK.CONSTRAINT_CATALOG = CC.CONSTRAINT_CATALOG + AND CHK.CONSTRAINT_SCHEMA = CC.CONSTRAINT_SCHEMA + AND CHK.CONSTRAINT_NAME = CC.CONSTRAINT_NAME + AND 'CHECK' = TC.CONSTRAINT_TYPE + LEFT JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS REF + ON REF.CONSTRAINT_CATALOG = CC.CONSTRAINT_CATALOG + AND REF.CONSTRAINT_SCHEMA = CC.CONSTRAINT_SCHEMA + AND REF.CONSTRAINT_NAME = CC.CONSTRAINT_NAME + AND 'FOREIGN KEY' = TC.CONSTRAINT_TYPE + LEFT JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE RFD + ON RFD.CONSTRAINT_CATALOG = REF.UNIQUE_CONSTRAINT_CATALOG + AND RFD.CONSTRAINT_SCHEMA = REF.UNIQUE_CONSTRAINT_SCHEMA + AND RFD.CONSTRAINT_NAME = REF.UNIQUE_CONSTRAINT_NAME + WHERE CC.CONSTRAINT_CATALOG = CC.TABLE_CATALOG + AND CC.CONSTRAINT_SCHEMA = CC.TABLE_SCHEMA + AND CC.TABLE_CATALOG = %s + 
AND CC.TABLE_SCHEMA = %s + AND CC.TABLE_NAME = %s + AND CC.COLUMN_NAME = %s + """ + db_name = self._get_setting('name') + schema_name = self._get_schema_name() + table = self.execute(sql, [db_name, schema_name, table_name, name]) + + if just_names: + return [r[0] for r in table] + + all = {} + for r in table: + cons_name, type = r[:2] + if type=='PRIMARY KEY' or type=='UNIQUE': + cons = all.setdefault(cons_name, (type,[])) + cons[1].append(r[7]) + elif type=='CHECK': + cons = (type, r[2]) + elif type=='FOREIGN KEY': + if cons_name in all: + raise NotImplementedError("Multiple-column foreign keys are not supported") + else: + cons = (type, r[3:6]) + else: + raise NotImplementedError("Don't know how to handle constraints of type "+ type) + all[cons_name] = cons + return all + + @invalidate_table_constraints + def alter_column(self, table_name, name, field, explicit_name=True, ignore_constraints=False): + """ + Alters the given column name so it will match the given field. + Note that conversion between the two by the database must be possible. + Will not automatically add _id by default; to have this behavour, pass + explicit_name=False. 
+ + @param table_name: The name of the table to add the column to + @param name: The name of the column to alter + @param field: The new field definition to use + """ + self._fix_field_definition(field) + + if not ignore_constraints: + qn = self.quote_name + sch = qn(self._get_schema_name()) + tab = qn(table_name) + table = ".".join([sch, tab]) + constraints = self._find_constraints_for_column(table_name, name, False) + for constraint in constraints.keys(): + params = dict(table_name = table, + constraint_name = qn(constraint)) + sql = self.drop_constraint_string % params + self.execute(sql, []) + + ret_val = super(DatabaseOperations, self).alter_column(table_name, name, field, explicit_name, ignore_constraints=True) + + if not ignore_constraints: + unique_field_handled = False + for cname, (ctype,args) in constraints.items(): + params = dict(table = table, + constraint = qn(cname)) + if ctype=='UNIQUE': + if len(args)==1: + unique_field_handled = True # + if len(args)>1 or field.unique: + params['columns'] = ", ".join(map(qn,args)) + sql = self.create_unique_sql % params + else: + continue + elif ctype=='PRIMARY KEY': + params['columns'] = ", ".join(map(qn,args)) + sql = self.create_primary_key_string % params + elif ctype=='FOREIGN KEY': + continue + # Foreign keys taken care of below + #target = "%s.%s(%s)" % tuple(map(qn,args)) + #params.update(column = qn(name), target = target) + #sql = self.create_foreign_key_sql % params + elif ctype=='CHECK': + warn(ConstraintDropped("CHECK "+ args, table_name, name)) + continue + #TODO: Some check constraints should be restored; but not before the generic + # backend restores them. 
+ #params['check'] = args + #sql = self.create_check_constraint_sql % params + else: + raise NotImplementedError("Don't know how to handle constraints of type "+ type) + self.execute(sql, []) + # Create unique constraint if necessary + if field.unique and not unique_field_handled: + self.create_unique(table_name, (name,)) + # Create foreign key if necessary + if field.rel and self.supports_foreign_keys: + self.execute( + self.foreign_key_sql( + table_name, + field.column, + field.rel.to._meta.db_table, + field.rel.to._meta.get_field(field.rel.field_name).column + ) + ) + + + return ret_val + + def _alter_set_defaults(self, field, name, params, sqls): + "Subcommand of alter_column that sets default values (overrideable)" + # First drop the current default if one exists + table_name = self.quote_name(params['table_name']) + drop_default = self.drop_column_default_sql(table_name, name) + if drop_default: + sqls.append((drop_default, [])) + + # Next, set any default + + if field.has_default(): + default = field.get_default() + literal = self._value_to_unquoted_literal(field, default) + sqls.append(('ADD DEFAULT %s for %s' % (self._quote_string(literal), self.quote_name(name),), [])) + + def _value_to_unquoted_literal(self, field, value): + # Start with the field's own translation + conn = self._get_connection() + value = field.get_db_prep_save(value, connection=conn) + # This is still a Python object -- nobody expects to need a literal. + if isinstance(value, basestring): + return smart_unicode(value) + elif isinstance(value, (date,time,datetime)): + return value.isoformat() + else: + #TODO: Anybody else needs special translations? 
+ return str(value) + + def _quote_string(self, s): + return "'" + s.replace("'","''") + "'" + + + def drop_column_default_sql(self, table_name, name, q_name=None): + "MSSQL specific drop default, which is a pain" + + sql = """ + SELECT object_name(cdefault) + FROM syscolumns + WHERE id = object_id('%s') + AND name = '%s' + """ + cons = self.execute(sql % (table_name, name), []) + if cons and cons[0] and cons[0][0]: + return "DROP CONSTRAINT %s" % cons[0][0] + return None + + def _fix_field_definition(self, field): + if isinstance(field, (fields.BooleanField, fields.NullBooleanField)): + if field.default == True: + field.default = 1 + if field.default == False: + field.default = 0 + + # This is copied from South's generic add_column, with two modifications: + # 1) The sql-server-specific call to _fix_field_definition + # 2) Removing a default, when needed, by calling drop_default and not the more general alter_column + @invalidate_table_constraints + def add_column(self, table_name, name, field, keep_default=True): + """ + Adds the column 'name' to the table 'table_name'. 
+ Uses the 'field' paramater, a django.db.models.fields.Field instance, + to generate the necessary sql + + @param table_name: The name of the table to add the column to + @param name: The name of the column to add + @param field: The field to use + """ + self._fix_field_definition(field) + sql = self.column_sql(table_name, name, field) + if sql: + params = ( + self.quote_name(table_name), + sql, + ) + sql = self.add_column_string % params + self.execute(sql) + + # Now, drop the default if we need to + if not keep_default and field.default is not None: + field.default = fields.NOT_PROVIDED + #self.alter_column(table_name, name, field, explicit_name=False, ignore_constraints=True) + self.drop_default(table_name, name, field) + + @invalidate_table_constraints + def drop_default(self, table_name, name, field): + fragment = self.drop_column_default_sql(table_name, name) + if fragment: + table_name = self.quote_name(table_name) + sql = " ".join(["ALTER TABLE", table_name, fragment]) + self.execute(sql) + + + @invalidate_table_constraints + def create_table(self, table_name, field_defs): + # Tweak stuff as needed + for _, f in field_defs: + self._fix_field_definition(f) + + # Run + generic.DatabaseOperations.create_table(self, table_name, field_defs) + + def _find_referencing_fks(self, table_name): + "MSSQL does not support cascading FKs when dropping tables, we need to implement." 
+ + # FK -- Foreign Keys + # UCTU -- Unique Constraints Table Usage + # FKTU -- Foreign Key Table Usage + # (last two are both really CONSTRAINT_TABLE_USAGE, different join conditions) + sql = """ + SELECT FKTU.TABLE_SCHEMA as REFING_TABLE_SCHEMA, + FKTU.TABLE_NAME as REFING_TABLE_NAME, + FK.[CONSTRAINT_NAME] as FK_NAME + FROM [INFORMATION_SCHEMA].[REFERENTIAL_CONSTRAINTS] FK + JOIN [INFORMATION_SCHEMA].[CONSTRAINT_TABLE_USAGE] UCTU + ON FK.UNIQUE_CONSTRAINT_CATALOG = UCTU.CONSTRAINT_CATALOG and + FK.UNIQUE_CONSTRAINT_NAME = UCTU.CONSTRAINT_NAME and + FK.UNIQUE_CONSTRAINT_SCHEMA = UCTU.CONSTRAINT_SCHEMA + JOIN [INFORMATION_SCHEMA].[CONSTRAINT_TABLE_USAGE] FKTU + ON FK.CONSTRAINT_CATALOG = FKTU.CONSTRAINT_CATALOG and + FK.CONSTRAINT_NAME = FKTU.CONSTRAINT_NAME and + FK.CONSTRAINT_SCHEMA = FKTU.CONSTRAINT_SCHEMA + WHERE FK.CONSTRAINT_CATALOG = %s + AND UCTU.TABLE_SCHEMA = %s -- REFD_TABLE_SCHEMA + AND UCTU.TABLE_NAME = %s -- REFD_TABLE_NAME + """ + db_name = self._get_setting('name') + schema_name = self._get_schema_name() + return self.execute(sql, [db_name, schema_name, table_name]) + + @invalidate_table_constraints + def delete_table(self, table_name, cascade=True): + """ + Deletes the table 'table_name'. + """ + if cascade: + refing = self._find_referencing_fks(table_name) + for schmea, table, constraint in refing: + table = ".".join(map (self.quote_name, [schmea, table])) + params = dict(table_name = table, + constraint_name = self.quote_name(constraint)) + sql = self.drop_constraint_string % params + self.execute(sql, []) + cascade = False + super(DatabaseOperations, self).delete_table(table_name, cascade) + + @copy_column_constraints + @delete_column_constraints + def rename_column(self, table_name, old, new): + """ + Renames the column of 'table_name' from 'old' to 'new'. + WARNING - This isn't transactional on MSSQL! + """ + if old == new: + # No Operation + return + # Examples on the MS site show the table name not being quoted... 
+ params = (table_name, self.quote_name(old), self.quote_name(new)) + self.execute("EXEC sp_rename '%s.%s', %s, 'COLUMN'" % params) + + @invalidate_table_constraints + def rename_table(self, old_table_name, table_name): + """ + Renames the table 'old_table_name' to 'table_name'. + WARNING - This isn't transactional on MSSQL! + """ + if old_table_name == table_name: + # No Operation + return + params = (self.quote_name(old_table_name), self.quote_name(table_name)) + self.execute('EXEC sp_rename %s, %s' % params) + + _db_type_for_alter_column = generic.alias("_db_positive_type_for_alter_column") + _alter_add_column_mods = generic.alias("_alter_add_positive_check") + + @invalidate_table_constraints + def delete_foreign_key(self, table_name, column): + super(DatabaseOperations, self).delete_foreign_key(table_name, column) + # A FK also implies a non-unique index + find_index_sql = """ + SELECT i.name -- s.name, t.name, c.name + FROM sys.tables t + INNER JOIN sys.schemas s ON t.schema_id = s.schema_id + INNER JOIN sys.indexes i ON i.object_id = t.object_id + INNER JOIN sys.index_columns ic ON ic.object_id = t.object_id + INNER JOIN sys.columns c ON c.object_id = t.object_id + AND ic.column_id = c.column_id + WHERE i.is_unique=0 AND i.is_primary_key=0 AND i.is_unique_constraint=0 + AND s.name = %s + AND t.name = %s + AND c.name = %s + """ + schema = self._get_schema_name() + indexes = self.execute(find_index_sql, [schema, table_name, column]) + qn = self.quote_name + for index in (i[0] for i in indexes): + self.execute("DROP INDEX %s on %s.%s" % (qn(index), qn(schema), qn(table_name) )) + diff --git a/users/src/south/south/db/sqlite3.py b/users/src/south/south/db/sqlite3.py new file mode 100644 index 0000000..b7e9d51 --- /dev/null +++ b/users/src/south/south/db/sqlite3.py @@ -0,0 +1,240 @@ +import inspect +import re + +from django.db.models import ForeignKey + +from south.db import generic +from django.core.management.commands import inspectdb + +class 
DatabaseOperations(generic.DatabaseOperations): + + """ + SQLite3 implementation of database operations. + """ + + backend_name = "sqlite3" + + # SQLite ignores several constraints. I wish I could. + supports_foreign_keys = False + has_check_constraints = False + + def add_column(self, table_name, name, field, *args, **kwds): + """ + Adds a column. + """ + # If it's not nullable, and has no default, raise an error (SQLite is picky) + if (not field.null and + (not field.has_default() or field.get_default() is None) and + not field.empty_strings_allowed): + raise ValueError("You cannot add a null=False column without a default value.") + # Initialise the field. + field.set_attributes_from_name(name) + # We add columns by remaking the table; even though SQLite supports + # adding columns, it doesn't support adding PRIMARY KEY or UNIQUE cols. + self._remake_table(table_name, added={ + field.column: self._column_sql_for_create(table_name, name, field, False), + }) + + def _get_full_table_description(self, connection, cursor, table_name): + cursor.execute('PRAGMA table_info(%s)' % connection.ops.quote_name(table_name)) + # cid, name, type, notnull, dflt_value, pk + return [{'name': field[1], + 'type': field[2], + 'null_ok': not field[3], + 'dflt_value': field[4], + 'pk': field[5] # undocumented + } for field in cursor.fetchall()] + + @generic.invalidate_table_constraints + def _remake_table(self, table_name, added={}, renames={}, deleted=[], altered={}, primary_key_override=None, uniques_deleted=[]): + """ + Given a table and three sets of changes (renames, deletes, alters), + recreates it with the modified schema. 
+ """ + # Dry runs get skipped completely + if self.dry_run: + return + # Temporary table's name + temp_name = "_south_new_" + table_name + # Work out the (possibly new) definitions of each column + definitions = {} + cursor = self._get_connection().cursor() + # Get the index descriptions + indexes = self._get_connection().introspection.get_indexes(cursor, table_name) + multi_indexes = self._get_multi_indexes(table_name) + # Work out new column defs. + for column_info in self._get_full_table_description(self._get_connection(), cursor, table_name): + name = column_info['name'] + if name in deleted: + continue + # Get the type, ignoring PRIMARY KEY (we need to be consistent) + type = column_info['type'].replace("PRIMARY KEY", "") + # Add on primary key, not null or unique if needed. + if (primary_key_override and primary_key_override == name) or \ + (not primary_key_override and indexes[name]['primary_key']): + type += " PRIMARY KEY" + elif not column_info['null_ok']: + type += " NOT NULL" + if indexes[name]['unique'] and name not in uniques_deleted: + type += " UNIQUE" + + if column_info['dflt_value'] is not None: + type += " DEFAULT " + column_info['dflt_value'] + + # Deal with a rename + if name in renames: + name = renames[name] + # Add to the defs + definitions[name] = type + # Add on altered columns + definitions.update(altered) + # Add on the new columns + definitions.update(added) + # Alright, Make the table + self.execute("CREATE TABLE %s (%s)" % ( + self.quote_name(temp_name), + ", ".join(["%s %s" % (self.quote_name(cname), ctype) for cname, ctype in definitions.items()]), + )) + # Copy over the data + self._copy_data(table_name, temp_name, renames) + # Delete the old table, move our new one over it + self.delete_table(table_name) + self.rename_table(temp_name, table_name) + # Recreate multi-valued indexes + # We can't do that before since it's impossible to rename indexes + # and index name scope is global + self._make_multi_indexes(table_name, 
multi_indexes, renames=renames, deleted=deleted, uniques_deleted=uniques_deleted) + + + def _copy_data(self, src, dst, field_renames={}): + "Used to copy data into a new table" + # Make a list of all the fields to select + cursor = self._get_connection().cursor() + src_fields = [column_info[0] for column_info in self._get_connection().introspection.get_table_description(cursor, src)] + dst_fields = [column_info[0] for column_info in self._get_connection().introspection.get_table_description(cursor, dst)] + src_fields_new = [] + dst_fields_new = [] + for field in src_fields: + if field in field_renames: + dst_fields_new.append(self.quote_name(field_renames[field])) + elif field in dst_fields: + dst_fields_new.append(self.quote_name(field)) + else: + continue + src_fields_new.append(self.quote_name(field)) + # Copy over the data + self.execute("INSERT INTO %s (%s) SELECT %s FROM %s;" % ( + self.quote_name(dst), + ', '.join(dst_fields_new), + ', '.join(src_fields_new), + self.quote_name(src), + )) + + def _create_unique(self, table_name, columns): + self.execute("CREATE UNIQUE INDEX %s ON %s(%s);" % ( + self.quote_name('%s_%s' % (table_name, '__'.join(columns))), + self.quote_name(table_name), + ', '.join(self.quote_name(c) for c in columns), + )) + + def _get_multi_indexes(self, table_name): + indexes = [] + cursor = self._get_connection().cursor() + cursor.execute('PRAGMA index_list(%s)' % self.quote_name(table_name)) + # seq, name, unique + for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]: + if not unique: + continue + cursor.execute('PRAGMA index_info(%s)' % self.quote_name(index)) + info = cursor.fetchall() + if len(info) == 1: + continue + columns = [] + for field in info: + columns.append(field[2]) + indexes.append(columns) + return indexes + + def _make_multi_indexes(self, table_name, indexes, deleted=[], renames={}, uniques_deleted=[]): + for index in indexes: + columns = [] + + for name in index: + # Handle deletion + if name in 
deleted: + columns = [] + break + + # Handle renames + if name in renames: + name = renames[name] + columns.append(name) + + if columns and columns != uniques_deleted: + self._create_unique(table_name, columns) + + def _column_sql_for_create(self, table_name, name, field, explicit_name=True): + "Given a field and its name, returns the full type for the CREATE TABLE." + field.set_attributes_from_name(name) + if not explicit_name: + name = field.db_column + else: + field.column = name + sql = self.column_sql(table_name, name, field, with_name=False, field_prepared=True) + #if field.primary_key: + # sql += " PRIMARY KEY" + #if field.unique: + # sql += " UNIQUE" + return sql + + def alter_column(self, table_name, name, field, explicit_name=True, ignore_constraints=False): + """ + Changes a column's SQL definition. + + Note that this sqlite3 implementation ignores the ignore_constraints argument. + The argument is accepted for API compatibility with the generic + DatabaseOperations.alter_column() method. + """ + # Remake the table correctly + self._remake_table(table_name, altered={ + name: self._column_sql_for_create(table_name, name, field, explicit_name), + }) + + def delete_column(self, table_name, column_name): + """ + Deletes a column. + """ + self._remake_table(table_name, deleted=[column_name]) + + def rename_column(self, table_name, old, new): + """ + Renames a column from one name to another. 
+ """ + self._remake_table(table_name, renames={old: new}) + + def create_unique(self, table_name, columns): + """ + Create an unique index on columns + """ + self._create_unique(table_name, columns) + + def delete_unique(self, table_name, columns): + """ + Delete an unique index + """ + self._remake_table(table_name, uniques_deleted=columns) + + def create_primary_key(self, table_name, columns): + if not isinstance(columns, (list, tuple)): + columns = [columns] + assert len(columns) == 1, "SQLite backend does not support multi-column primary keys" + self._remake_table(table_name, primary_key_override=columns[0]) + + # Not implemented this yet. + def delete_primary_key(self, table_name): + # By passing True in, we make sure we wipe all existing PKs. + self._remake_table(table_name, primary_key_override=True) + + # No cascades on deletes + def delete_table(self, table_name, cascade=True): + generic.DatabaseOperations.delete_table(self, table_name, False) diff --git a/users/src/south/south/db/sqlite3.pyc b/users/src/south/south/db/sqlite3.pyc new file mode 100644 index 0000000..54a460e Binary files /dev/null and b/users/src/south/south/db/sqlite3.pyc differ diff --git a/users/src/south/south/exceptions.py b/users/src/south/south/exceptions.py new file mode 100644 index 0000000..dd50b9f --- /dev/null +++ b/users/src/south/south/exceptions.py @@ -0,0 +1,151 @@ +from traceback import format_exception + +class SouthError(RuntimeError): + pass + +class SouthWarning(RuntimeWarning): + pass + +class BrokenMigration(SouthError): + def __init__(self, migration, exc_info): + self.migration = migration + self.exc_info = exc_info + if self.exc_info: + self.traceback = ''.join(format_exception(*self.exc_info)) + + def __str__(self): + return ("While loading migration '%(migration)s':\n" + '%(traceback)s' % self.__dict__) + + +class UnknownMigration(BrokenMigration): + def __str__(self): + return ("Migration '%(migration)s' probably doesn't exist.\n" + '%(traceback)s' % 
self.__dict__) + + +class InvalidMigrationModule(SouthError): + def __init__(self, application, module): + self.application = application + self.module = module + + def __str__(self): + return ('The migration module specified for %(application)s, %(module)r, is invalid; the parent module does not exist.' % self.__dict__) + + +class NoMigrations(SouthError): + def __init__(self, application): + self.application = application + + def __str__(self): + return "Application '%(application)s' has no migrations." % self.__dict__ + + +class MultiplePrefixMatches(SouthError): + def __init__(self, prefix, matches): + self.prefix = prefix + self.matches = matches + + def __str__(self): + self.matches_list = "\n ".join([unicode(m) for m in self.matches]) + return ("Prefix '%(prefix)s' matches more than one migration:\n" + " %(matches_list)s") % self.__dict__ + + +class GhostMigrations(SouthError): + def __init__(self, ghosts): + self.ghosts = ghosts + + def __str__(self): + self.ghosts_list = "\n ".join([unicode(m) for m in self.ghosts]) + return ("\n\n ! These migrations are in the database but not on disk:\n" + " %(ghosts_list)s\n" + " ! I'm not trusting myself; either fix this yourself by fiddling\n" + " ! with the south_migrationhistory table, or pass --delete-ghost-migrations\n" + " ! 
to South to have it delete ALL of these records (this may not be good).") % self.__dict__ + + +class CircularDependency(SouthError): + def __init__(self, trace): + self.trace = trace + + def __str__(self): + trace = " -> ".join([unicode(s) for s in self.trace]) + return ("Found circular dependency:\n" + " %s") % trace + + +class InconsistentMigrationHistory(SouthError): + def __init__(self, problems): + self.problems = problems + + def __str__(self): + return ('Inconsistent migration history\n' + 'The following options are available:\n' + ' --merge: will just attempt the migration ignoring any potential dependency conflicts.') + + +class DependsOnHigherMigration(SouthError): + def __init__(self, migration, depends_on): + self.migration = migration + self.depends_on = depends_on + + def __str__(self): + return "Lower migration '%(migration)s' depends on a higher migration '%(depends_on)s' in the same app." % self.__dict__ + + +class DependsOnUnknownMigration(SouthError): + def __init__(self, migration, depends_on): + self.migration = migration + self.depends_on = depends_on + + def __str__(self): + print "Migration '%(migration)s' depends on unknown migration '%(depends_on)s'." % self.__dict__ + + +class DependsOnUnmigratedApplication(SouthError): + def __init__(self, migration, application): + self.migration = migration + self.application = application + + def __str__(self): + return "Migration '%(migration)s' depends on unmigrated application '%(application)s'." % self.__dict__ + + +class FailedDryRun(SouthError): + def __init__(self, migration, exc_info): + self.migration = migration + self.name = migration.name() + self.exc_info = exc_info + self.traceback = ''.join(format_exception(*self.exc_info)) + + def __str__(self): + return (" ! Error found during dry run of '%(name)s'! 
Aborting.\n" + "%(traceback)s") % self.__dict__ + + +class ORMBaseNotIncluded(SouthError): + """Raised when a frozen model has something in _ormbases which isn't frozen.""" + pass + + +class UnfreezeMeLater(Exception): + """An exception, which tells the ORM unfreezer to postpone this model.""" + pass + + +class ImpossibleORMUnfreeze(SouthError): + """Raised if the ORM can't manage to unfreeze all the models in a linear fashion.""" + pass + +class ConstraintDropped(SouthWarning): + def __init__(self, constraint, table, column=None): + self.table = table + if column: + self.column = ".%s" % column + else: + self.column = "" + self.constraint = constraint + + def __str__(self): + return "Constraint %(constraint)s was dropped from %(table)s%(column)s -- was this intended?" % self.__dict__ diff --git a/users/src/south/south/exceptions.pyc b/users/src/south/south/exceptions.pyc new file mode 100644 index 0000000..6bfc840 Binary files /dev/null and b/users/src/south/south/exceptions.pyc differ diff --git a/users/src/south/south/hacks/__init__.py b/users/src/south/south/hacks/__init__.py new file mode 100644 index 0000000..8f28503 --- /dev/null +++ b/users/src/south/south/hacks/__init__.py @@ -0,0 +1,10 @@ +""" +The hacks module encapsulates all the horrible things that play with Django +internals in one, evil place. +This top file will automagically expose the correct Hacks class. +""" + +# Currently, these work for 1.0 and 1.1. 
+from south.hacks.django_1_0 import Hacks + +hacks = Hacks() \ No newline at end of file diff --git a/users/src/south/south/hacks/__init__.pyc b/users/src/south/south/hacks/__init__.pyc new file mode 100644 index 0000000..3931413 Binary files /dev/null and b/users/src/south/south/hacks/__init__.pyc differ diff --git a/users/src/south/south/hacks/django_1_0.py b/users/src/south/south/hacks/django_1_0.py new file mode 100644 index 0000000..af4089b --- /dev/null +++ b/users/src/south/south/hacks/django_1_0.py @@ -0,0 +1,103 @@ +""" +Hacks for the Django 1.0/1.0.2 releases. +""" + +from django.conf import settings +from django.db import models +from django.db.backends.creation import BaseDatabaseCreation +from django.db.models.loading import AppCache, cache +from django.core import management +from django.core.management.commands.flush import Command as FlushCommand +from django.utils.datastructures import SortedDict + +class SkipFlushCommand(FlushCommand): + def handle_noargs(self, **options): + # no-op to avoid calling flush + return + +class Hacks: + + def set_installed_apps(self, apps): + """ + Sets Django's INSTALLED_APPS setting to be effectively the list passed in. + """ + + # Make sure it's a list. + apps = list(apps) + + # Make sure it contains strings + if apps: + assert isinstance(apps[0], basestring), "The argument to set_installed_apps must be a list of strings." + + # Monkeypatch in! + settings.INSTALLED_APPS, settings.OLD_INSTALLED_APPS = ( + apps, + settings.INSTALLED_APPS, + ) + self._redo_app_cache() + + + def reset_installed_apps(self): + """ + Undoes the effect of set_installed_apps. + """ + settings.INSTALLED_APPS = settings.OLD_INSTALLED_APPS + self._redo_app_cache() + + + def _redo_app_cache(self): + """ + Used to repopulate AppCache after fiddling with INSTALLED_APPS. 
+ """ + a = AppCache() + a.loaded = False + a.handled = {} + a.postponed = [] + a.app_store = SortedDict() + a.app_models = SortedDict() + a.app_errors = {} + a._populate() + + + def clear_app_cache(self): + """ + Clears the contents of AppCache to a blank state, so new models + from the ORM can be added. + """ + self.old_app_models, cache.app_models = cache.app_models, {} + + + def unclear_app_cache(self): + """ + Reversed the effects of clear_app_cache. + """ + cache.app_models = self.old_app_models + cache._get_models_cache = {} + + + def repopulate_app_cache(self): + """ + Rebuilds AppCache with the real model definitions. + """ + cache._populate() + + def patch_flush_during_test_db_creation(self): + """ + Patches BaseDatabaseCreation.create_test_db to not flush database + """ + + def patch(f): + def wrapper(*args, **kwargs): + # hold onto the original and replace flush command with a no-op + original_flush_command = management._commands['flush'] + try: + management._commands['flush'] = SkipFlushCommand() + # run create_test_db + f(*args, **kwargs) + finally: + # unpatch flush back to the original + management._commands['flush'] = original_flush_command + return wrapper + + BaseDatabaseCreation.create_test_db = patch(BaseDatabaseCreation.create_test_db) + diff --git a/users/src/south/south/hacks/django_1_0.pyc b/users/src/south/south/hacks/django_1_0.pyc new file mode 100644 index 0000000..5fad5aa Binary files /dev/null and b/users/src/south/south/hacks/django_1_0.pyc differ diff --git a/users/src/south/south/introspection_plugins/__init__.py b/users/src/south/south/introspection_plugins/__init__.py new file mode 100644 index 0000000..62688c3 --- /dev/null +++ b/users/src/south/south/introspection_plugins/__init__.py @@ -0,0 +1,10 @@ +# This module contains built-in introspector plugins for various common +# Django apps. 
+ +# These imports trigger the lower-down files +import south.introspection_plugins.geodjango +import south.introspection_plugins.django_tagging +import south.introspection_plugins.django_taggit +import south.introspection_plugins.django_objectpermissions +import south.introspection_plugins.annoying_autoonetoone + diff --git a/users/src/south/south/introspection_plugins/annoying_autoonetoone.py b/users/src/south/south/introspection_plugins/annoying_autoonetoone.py new file mode 100644 index 0000000..d61304f --- /dev/null +++ b/users/src/south/south/introspection_plugins/annoying_autoonetoone.py @@ -0,0 +1,11 @@ +from django.conf import settings +from south.modelsinspector import add_introspection_rules + +if 'annoying' in settings.INSTALLED_APPS: + try: + from annoying.fields import AutoOneToOneField + except ImportError: + pass + else: + #django-annoying's AutoOneToOneField is essentially a OneToOneField. + add_introspection_rules([], ["^annoying\.fields\.AutoOneToOneField"]) diff --git a/users/src/south/south/introspection_plugins/django_audit_log.py b/users/src/south/south/introspection_plugins/django_audit_log.py new file mode 100644 index 0000000..b874428 --- /dev/null +++ b/users/src/south/south/introspection_plugins/django_audit_log.py @@ -0,0 +1,30 @@ +""" +South introspection rules for django-audit-log +""" + +from django.contrib.auth.models import User +from django.conf import settings +from south.modelsinspector import add_introspection_rules + +if "audit_log" in settings.INSTALLED_APPS: + try: + # Try and import the field so we can see if audit_log is available + from audit_log.models import fields + + # Make sure the `to` and `null` parameters will be ignored + rules = [( + (fields.LastUserField,), + [], + { + 'to': ['rel.to', {'default': User}], + 'null': ['null', {'default': True}], + }, + )] + + # Add the rules for the `LastUserField` + add_introspection_rules( + rules, + ['^audit_log\.models\.fields\.LastUserField'], + ) + except ImportError: + 
pass diff --git a/users/src/south/south/introspection_plugins/django_objectpermissions.py b/users/src/south/south/introspection_plugins/django_objectpermissions.py new file mode 100644 index 0000000..42b353b --- /dev/null +++ b/users/src/south/south/introspection_plugins/django_objectpermissions.py @@ -0,0 +1,16 @@ +""" +South introspection rules for django-objectpermissions +""" + +from django.conf import settings +from south.modelsinspector import add_ignored_fields + +if 'objectpermissions' in settings.INSTALLED_APPS: + try: + from objectpermissions.models import UserPermissionRelation, GroupPermissionRelation + except ImportError: + pass + else: + add_ignored_fields(["^objectpermissions\.models\.UserPermissionRelation", + "^objectpermissions\.models\.GroupPermissionRelation"]) + diff --git a/users/src/south/south/introspection_plugins/django_tagging.py b/users/src/south/south/introspection_plugins/django_tagging.py new file mode 100644 index 0000000..c02e529 --- /dev/null +++ b/users/src/south/south/introspection_plugins/django_tagging.py @@ -0,0 +1,24 @@ +from south.modelsinspector import add_introspection_rules +from django.conf import settings + +if "tagging" in settings.INSTALLED_APPS: + try: + from tagging.fields import TagField + except ImportError: + pass + else: + rules = [ + ( + (TagField, ), + [], + { + "blank": ["blank", {"default": True}], + "max_length": ["max_length", {"default": 255}], + }, + ), + ] + add_introspection_rules(rules, ["^tagging\.fields",]) + +if "tagging_autocomplete" in settings.INSTALLED_APPS: + add_introspection_rules([], ["^tagging_autocomplete\.models\.TagAutocompleteField"]) + diff --git a/users/src/south/south/introspection_plugins/django_taggit.py b/users/src/south/south/introspection_plugins/django_taggit.py new file mode 100644 index 0000000..aded23f --- /dev/null +++ b/users/src/south/south/introspection_plugins/django_taggit.py @@ -0,0 +1,14 @@ +""" +South introspection rules for django-taggit +""" + +from django.conf 
import settings +from south.modelsinspector import add_ignored_fields + +if 'taggit' in settings.INSTALLED_APPS: + try: + from taggit.managers import TaggableManager + except ImportError: + pass + else: + add_ignored_fields(["^taggit\.managers"]) diff --git a/users/src/south/south/introspection_plugins/django_timezones.py b/users/src/south/south/introspection_plugins/django_timezones.py new file mode 100644 index 0000000..d4b573d --- /dev/null +++ b/users/src/south/south/introspection_plugins/django_timezones.py @@ -0,0 +1,21 @@ +from south.modelsinspector import add_introspection_rules +from django.conf import settings + +if "timezones" in settings.INSTALLED_APPS: + try: + from timezones.fields import TimeZoneField + except ImportError: + pass + else: + rules = [ + ( + (TimeZoneField, ), + [], + { + "blank": ["blank", {"default": True}], + "max_length": ["max_length", {"default": 100}], + }, + ), + ] + add_introspection_rules(rules, ["^timezones\.fields",]) + diff --git a/users/src/south/south/introspection_plugins/geodjango.py b/users/src/south/south/introspection_plugins/geodjango.py new file mode 100644 index 0000000..bece1c9 --- /dev/null +++ b/users/src/south/south/introspection_plugins/geodjango.py @@ -0,0 +1,45 @@ +""" +GeoDjango introspection rules +""" + +import django +from django.conf import settings + +from south.modelsinspector import add_introspection_rules + +has_gis = "django.contrib.gis" in settings.INSTALLED_APPS + +if has_gis: + # Alright,import the field + from django.contrib.gis.db.models.fields import GeometryField + + # Make some introspection rules + if django.VERSION[0] == 1 and django.VERSION[1] >= 1: + # Django 1.1's gis module renamed these. 
+ rules = [ + ( + (GeometryField, ), + [], + { + "srid": ["srid", {"default": 4326}], + "spatial_index": ["spatial_index", {"default": True}], + "dim": ["dim", {"default": 2}], + "geography": ["geography", {"default": False}], + }, + ), + ] + else: + rules = [ + ( + (GeometryField, ), + [], + { + "srid": ["_srid", {"default": 4326}], + "spatial_index": ["_index", {"default": True}], + "dim": ["_dim", {"default": 2}], + }, + ), + ] + + # Install them + add_introspection_rules(rules, ["^django\.contrib\.gis"]) \ No newline at end of file diff --git a/users/src/south/south/logger.py b/users/src/south/south/logger.py new file mode 100644 index 0000000..26b74d1 --- /dev/null +++ b/users/src/south/south/logger.py @@ -0,0 +1,38 @@ +import sys +import logging +from django.conf import settings + +# Create a dummy handler to use for now. +class NullHandler(logging.Handler): + def emit(self, record): + pass + +def get_logger(): + "Attach a file handler to the logger if there isn't one already." + debug_on = getattr(settings, "SOUTH_LOGGING_ON", False) + logging_file = getattr(settings, "SOUTH_LOGGING_FILE", False) + + if debug_on: + if logging_file: + if len(_logger.handlers) < 2: + _logger.addHandler(logging.FileHandler(logging_file)) + _logger.setLevel(logging.DEBUG) + else: + raise IOError, "SOUTH_LOGGING_ON is True. You also need a SOUTH_LOGGING_FILE setting." + + return _logger + +def close_logger(): + "Closes the logger handler for the file, so we can remove the file after a test." 
+ for handler in _logger.handlers: + _logger.removeHandler(handler) + if isinstance(handler, logging.FileHandler): + handler.close() + +def init_logger(): + "Initialize the south logger" + logger = logging.getLogger("south") + logger.addHandler(NullHandler()) + return logger + +_logger = init_logger() diff --git a/users/src/south/south/logger.pyc b/users/src/south/south/logger.pyc new file mode 100644 index 0000000..1aaec9d Binary files /dev/null and b/users/src/south/south/logger.pyc differ diff --git a/users/src/south/south/management/__init__.py b/users/src/south/south/management/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/south/management/__init__.pyc b/users/src/south/south/management/__init__.pyc new file mode 100644 index 0000000..de83e04 Binary files /dev/null and b/users/src/south/south/management/__init__.pyc differ diff --git a/users/src/south/south/management/commands/__init__.py b/users/src/south/south/management/commands/__init__.py new file mode 100644 index 0000000..da218eb --- /dev/null +++ b/users/src/south/south/management/commands/__init__.py @@ -0,0 +1,40 @@ + +# Common framework for syncdb actions + +import copy + +from django.core import management +from django.conf import settings + +# Make sure the template loader cache is fixed _now_ (#448) +import django.template.loaders.app_directories + +from south.hacks import hacks +from south.management.commands.syncdb import Command as SyncCommand + +class MigrateAndSyncCommand(SyncCommand): + """Used for situations where "syncdb" is called by test frameworks.""" + + option_list = copy.deepcopy(SyncCommand.option_list) + + for opt in option_list: + if "--migrate" == opt.get_opt_string(): + opt.default = True + break + +def patch_for_test_db_setup(): + # Load the commands cache + management.get_commands() + # Repoint to the correct version of syncdb + if hasattr(settings, "SOUTH_TESTS_MIGRATE") and not settings.SOUTH_TESTS_MIGRATE: + # point at the core syncdb 
command when creating tests + # tests should always be up to date with the most recent model structure + management._commands['syncdb'] = 'django.core' + else: + management._commands['syncdb'] = MigrateAndSyncCommand() + # Avoid flushing data migrations. + # http://code.djangoproject.com/ticket/14661 introduced change that flushed custom + # sql during the test database creation (thus flushing the data migrations). + # we patch flush to be no-op during create_test_db, but still allow flushing + # after each test for non-transactional backends. + hacks.patch_flush_during_test_db_creation() diff --git a/users/src/south/south/management/commands/__init__.pyc b/users/src/south/south/management/commands/__init__.pyc new file mode 100644 index 0000000..7ac0b49 Binary files /dev/null and b/users/src/south/south/management/commands/__init__.pyc differ diff --git a/users/src/south/south/management/commands/convert_to_south.py b/users/src/south/south/management/commands/convert_to_south.py new file mode 100644 index 0000000..6c03bf3 --- /dev/null +++ b/users/src/south/south/management/commands/convert_to_south.py @@ -0,0 +1,93 @@ +""" +Quick conversion command module. 
+""" + +from optparse import make_option +import sys + +from django.core.management.base import BaseCommand +from django.core.management.color import no_style +from django.conf import settings +from django.db import models +from django.core import management +from django.core.exceptions import ImproperlyConfigured + +from south.migration import Migrations +from south.hacks import hacks +from south.exceptions import NoMigrations + +class Command(BaseCommand): + + option_list = BaseCommand.option_list + if '--verbosity' not in [opt.get_opt_string() for opt in BaseCommand.option_list]: + option_list += ( + make_option('--verbosity', action='store', dest='verbosity', default='1', + type='choice', choices=['0', '1', '2'], + help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'), + ) + option_list += ( + make_option('--delete-ghost-migrations', action='store_true', dest='delete_ghosts', default=False, + help="Tells South to delete any 'ghost' migrations (ones in the database but not on disk)."), + make_option('--ignore-ghost-migrations', action='store_true', dest='ignore_ghosts', default=False, + help="Tells South to ignore any 'ghost' migrations (ones in the database but not on disk) and continue to apply new migrations."), + ) + + help = "Quickly converts the named application to use South if it is currently using syncdb." + + def handle(self, app=None, *args, **options): + + # Make sure we have an app + if not app: + print "Please specify an app to convert." + return + + # See if the app exists + app = app.split(".")[-1] + try: + app_module = models.get_app(app) + except ImproperlyConfigured: + print "There is no enabled application matching '%s'." % app + return + + # Try to get its list of models + model_list = models.get_models(app_module) + if not model_list: + print "This application has no models; this command is for applications that already have models syncdb'd." 
+ print "Make some models, and then use ./manage.py schemamigration %s --initial instead." % app + return + + # Ask South if it thinks it's already got migrations + try: + Migrations(app) + except NoMigrations: + pass + else: + print "This application is already managed by South." + return + + # Finally! It seems we've got a candidate, so do the two-command trick + verbosity = int(options.get('verbosity', 0)) + management.call_command("schemamigration", app, initial=True, verbosity=verbosity) + + # Now, we need to re-clean and sanitise appcache + hacks.clear_app_cache() + hacks.repopulate_app_cache() + + # And also clear our cached Migration classes + Migrations._clear_cache() + + # Now, migrate + management.call_command( + "migrate", + app, + "0001", + fake=True, + verbosity=verbosity, + ignore_ghosts=options.get("ignore_ghosts", False), + delete_ghosts=options.get("delete_ghosts", False), + ) + + print + print "App '%s' converted. Note that South assumed the application's models matched the database" % app + print "(i.e. you haven't changed it since last syncdb); if you have, you should delete the %s/migrations" % app + print "directory, revert models.py so it matches the database, and try again." 
diff --git a/users/src/south/south/management/commands/datamigration.py b/users/src/south/south/management/commands/datamigration.py new file mode 100644 index 0000000..d75be54 --- /dev/null +++ b/users/src/south/south/management/commands/datamigration.py @@ -0,0 +1,125 @@ +""" +Data migration creation command +""" + +import sys +import os +import re +from optparse import make_option + +try: + set +except NameError: + from sets import Set as set + +from django.core.management.base import BaseCommand +from django.core.management.color import no_style +from django.db import models +from django.conf import settings + +from south.migration import Migrations +from south.exceptions import NoMigrations +from south.creator import freezer + +class Command(BaseCommand): + option_list = BaseCommand.option_list + ( + make_option('--freeze', action='append', dest='freeze_list', type='string', + help='Freeze the specified app(s). Provide an app name with each; use the option multiple times for multiple apps'), + make_option('--stdout', action='store_true', dest='stdout', default=False, + help='Print the migration to stdout instead of writing it to a file.'), + ) + help = "Creates a new template data migration for the given app" + usage_str = "Usage: ./manage.py datamigration appname migrationname [--stdout] [--freeze appname]" + + def handle(self, app=None, name="", freeze_list=None, stdout=False, verbosity=1, **options): + + # Any supposed lists that are None become empty lists + freeze_list = freeze_list or [] + + # --stdout means name = - + if stdout: + name = "-" + + # Only allow valid names + if re.search('[^_\w]', name) and name != "-": + self.error("Migration names should contain only alphanumeric characters and underscores.") + + # if not name, there's an error + if not name: + self.error("You must provide a name for this migration\n" + self.usage_str) + + if not app: + self.error("You must provide an app to create a migration for.\n" + self.usage_str) + + # Get the 
Migrations for this app (creating the migrations dir if needed) + migrations = Migrations(app, force_creation=True, verbose_creation=verbosity > 0) + + # See what filename is next in line. We assume they use numbers. + new_filename = migrations.next_filename(name) + + # Work out which apps to freeze + apps_to_freeze = self.calc_frozen_apps(migrations, freeze_list) + + # So, what's in this file, then? + file_contents = MIGRATION_TEMPLATE % { + "frozen_models": freezer.freeze_apps_to_string(apps_to_freeze), + "complete_apps": apps_to_freeze and "complete_apps = [%s]" % (", ".join(map(repr, apps_to_freeze))) or "" + } + + # - is a special name which means 'print to stdout' + if name == "-": + print file_contents + # Write the migration file if the name isn't - + else: + fp = open(os.path.join(migrations.migrations_dir(), new_filename), "w") + fp.write(file_contents) + fp.close() + print >>sys.stderr, "Created %s." % new_filename + + def calc_frozen_apps(self, migrations, freeze_list): + """ + Works out, from the current app, settings, and the command line options, + which apps should be frozen. + """ + apps_to_freeze = [] + for to_freeze in freeze_list: + if "." in to_freeze: + self.error("You cannot freeze %r; you must provide an app label, like 'auth' or 'books'." % to_freeze) + # Make sure it's a real app + if not models.get_app(to_freeze): + self.error("You cannot freeze %r; it's not an installed app." % to_freeze) + # OK, it's fine + apps_to_freeze.append(to_freeze) + if getattr(settings, 'SOUTH_AUTO_FREEZE_APP', True): + apps_to_freeze.append(migrations.app_label()) + return apps_to_freeze + + def error(self, message, code=1): + """ + Prints the error, and exits with the given code. 
+ """ + print >>sys.stderr, message + sys.exit(code) + + +MIGRATION_TEMPLATE = """# -*- coding: utf-8 -*- +import datetime +from south.db import db +from south.v2 import DataMigration +from django.db import models + +class Migration(DataMigration): + + def forwards(self, orm): + "Write your forwards methods here." + + + def backwards(self, orm): + "Write your backwards methods here." + + + models = %(frozen_models)s + + %(complete_apps)s + symmetrical = True +""" diff --git a/users/src/south/south/management/commands/graphmigrations.py b/users/src/south/south/management/commands/graphmigrations.py new file mode 100644 index 0000000..eb98487 --- /dev/null +++ b/users/src/south/south/management/commands/graphmigrations.py @@ -0,0 +1,61 @@ +""" +Outputs a graphviz dot file of the dependencies. +""" + +from optparse import make_option +import re +import textwrap + +from django.core.management.base import BaseCommand +from django.core.management.color import no_style + +from south.migration import Migrations, all_migrations + +class Command(BaseCommand): + + help = "Outputs a GraphViz dot file of all migration dependencies to stdout." 
+ + def handle(self, **options): + + # Resolve dependencies + Migrations.calculate_dependencies() + + colors = [ 'crimson', 'darkgreen', 'darkgoldenrod', 'navy', + 'brown', 'darkorange', 'aquamarine' , 'blueviolet' ] + color_index = 0 + wrapper = textwrap.TextWrapper(width=40) + + print "digraph G {" + + # Group each app in a subgraph + for migrations in all_migrations(): + print " subgraph %s {" % migrations.app_label() + print " node [color=%s];" % colors[color_index] + for migration in migrations: + # Munge the label - text wrap and change _ to spaces + label = "%s - %s" % ( + migration.app_label(), migration.name()) + label = re.sub(r"_+", " ", label) + label= "\\n".join(wrapper.wrap(label)) + print ' "%s.%s" [label="%s"];' % ( + migration.app_label(), migration.name(), label) + print " }" + color_index = (color_index + 1) % len(colors) + + # For every migration, print its links. + for migrations in all_migrations(): + for migration in migrations: + for other in migration.dependencies: + # Added weight tends to keep migrations from the same app + # in vertical alignment + attrs = "[weight=2.0]" + # But the more interesting edges are those between apps + if other.app_label() != migration.app_label(): + attrs = "[style=bold]" + print ' "%s.%s" -> "%s.%s" %s;' % ( + other.app_label(), other.name(), + migration.app_label(), migration.name(), + attrs + ) + + print "}"; diff --git a/users/src/south/south/management/commands/migrate.py b/users/src/south/south/management/commands/migrate.py new file mode 100644 index 0000000..f3cbcc8 --- /dev/null +++ b/users/src/south/south/management/commands/migrate.py @@ -0,0 +1,259 @@ +""" +Migrate management command. 
+""" + +import os.path, re, sys +from optparse import make_option + +from django.core.management.base import BaseCommand +from django.conf import settings + +from south import migration +from south.migration import Migrations +from south.exceptions import NoMigrations +from south.db import DEFAULT_DB_ALIAS + +class Command(BaseCommand): + option_list = BaseCommand.option_list + ( + make_option('--all', action='store_true', dest='all_apps', default=False, + help='Run the specified migration for all apps.'), + make_option('--list', action='store_true', dest='show_list', default=False, + help='List migrations noting those that have been applied'), + make_option('--changes', action='store_true', dest='show_changes', default=False, + help='List changes for migrations'), + make_option('--skip', action='store_true', dest='skip', default=False, + help='Will skip over out-of-order missing migrations'), + make_option('--merge', action='store_true', dest='merge', default=False, + help='Will run out-of-order missing migrations as they are - no rollbacks.'), + make_option('--no-initial-data', action='store_true', dest='no_initial_data', default=False, + help='Skips loading initial data if specified.'), + make_option('--fake', action='store_true', dest='fake', default=False, + help="Pretends to do the migrations, but doesn't actually execute them."), + make_option('--db-dry-run', action='store_true', dest='db_dry_run', default=False, + help="Doesn't execute the SQL generated by the db methods, and doesn't store a record that the migration(s) occurred. 
Useful to test migrations before applying them."), + make_option('--delete-ghost-migrations', action='store_true', dest='delete_ghosts', default=False, + help="Tells South to delete any 'ghost' migrations (ones in the database but not on disk)."), + make_option('--ignore-ghost-migrations', action='store_true', dest='ignore_ghosts', default=False, + help="Tells South to ignore any 'ghost' migrations (ones in the database but not on disk) and continue to apply new migrations."), + make_option('--noinput', action='store_false', dest='interactive', default=True, + help='Tells Django to NOT prompt the user for input of any kind.'), + make_option('--database', action='store', dest='database', + default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. ' + 'Defaults to the "default" database.'), + ) + if '--verbosity' not in [opt.get_opt_string() for opt in BaseCommand.option_list]: + option_list += ( + make_option('--verbosity', action='store', dest='verbosity', default='1', + type='choice', choices=['0', '1', '2'], + help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'), + ) + help = "Runs migrations for all apps." + args = "[appname] [migrationname|zero] [--all] [--list] [--skip] [--merge] [--no-initial-data] [--fake] [--db-dry-run] [--database=dbalias]" + + def handle(self, app=None, target=None, skip=False, merge=False, backwards=False, fake=False, db_dry_run=False, show_list=False, show_changes=False, database=DEFAULT_DB_ALIAS, delete_ghosts=False, ignore_ghosts=False, **options): + + # NOTE: THIS IS DUPLICATED FROM django.core.management.commands.syncdb + # This code imports any module named 'management' in INSTALLED_APPS. + # The 'management' module is the preferred way of listening to post_syncdb + # signals, and since we're sending those out with create_table migrations, + # we need apps to behave correctly. 
+ for app_name in settings.INSTALLED_APPS: + try: + __import__(app_name + '.management', {}, {}, ['']) + except ImportError, exc: + msg = exc.args[0] + if not msg.startswith('No module named') or 'management' not in msg: + raise + # END DJANGO DUPE CODE + + # if all_apps flag is set, shift app over to target + if options.get('all_apps', False): + target = app + app = None + + # Migrate each app + if app: + try: + apps = [Migrations(app)] + except NoMigrations: + print "The app '%s' does not appear to use migrations." % app + print "./manage.py migrate " + self.args + return + else: + apps = list(migration.all_migrations()) + + # Do we need to show the list of migrations? + if show_list and apps: + list_migrations(apps, database, **options) + + if show_changes and apps: + show_migration_changes(apps) + + if not (show_list or show_changes): + + for app in apps: + result = migration.migrate_app( + app, + target_name = target, + fake = fake, + db_dry_run = db_dry_run, + verbosity = int(options.get('verbosity', 0)), + interactive = options.get('interactive', True), + load_initial_data = not options.get('no_initial_data', False), + merge = merge, + skip = skip, + database = database, + delete_ghosts = delete_ghosts, + ignore_ghosts = ignore_ghosts, + ) + if result is False: + sys.exit(1) # Migration failed, so the command fails. + + +def list_migrations(apps, database = DEFAULT_DB_ALIAS, **options): + """ + Prints a list of all available migrations, and which ones are currently applied. + Accepts a list of Migrations instances. 
+ """ + from south.models import MigrationHistory + applied_migrations = MigrationHistory.objects.filter(app_name__in=[app.app_label() for app in apps]) + if database != DEFAULT_DB_ALIAS: + applied_migrations = applied_migrations.using(database) + applied_migration_names = ['%s.%s' % (mi.app_name,mi.migration) for mi in applied_migrations] + + print + for app in apps: + print " " + app.app_label() + # Get the migrations object + for migration in app: + if migration.app_label() + "." + migration.name() in applied_migration_names: + applied_migration = applied_migrations.get(app_name=migration.app_label(), migration=migration.name()) + print format_migration_list_item(migration.name(), applied=applied_migration.applied, **options) + else: + print format_migration_list_item(migration.name(), applied=False, **options) + print + +def show_migration_changes(apps): + """ + Prints a list of all available migrations, and which ones are currently applied. + Accepts a list of Migrations instances. 
+ + Much simpler, less clear, and much less robust version: + grep "ing " migrations/*.py + """ + for app in apps: + print app.app_label() + # Get the migrations objects + migrations = [migration for migration in app] + # we use reduce to compare models in pairs, not to generate a value + reduce(diff_migrations, migrations) + +def format_migration_list_item(name, applied=True, **options): + if applied: + if int(options.get('verbosity')) >= 2: + return ' (*) %-80s (applied %s)' % (name, applied) + else: + return ' (*) %s' % name + else: + return ' ( ) %s' % name + +def diff_migrations(migration1, migration2): + + def model_name(models, model): + return models[model].get('Meta', {}).get('object_name', model) + + def field_name(models, model, field): + return '%s.%s' % (model_name(models, model), field) + + print " " + migration2.name() + + models1 = migration1.migration_class().models + models2 = migration2.migration_class().models + + # find new models + for model in models2.keys(): + if not model in models1.keys(): + print ' added model %s' % model_name(models2, model) + + # find removed models + for model in models1.keys(): + if not model in models2.keys(): + print ' removed model %s' % model_name(models1, model) + + # compare models + for model in models1: + if model in models2: + + # find added fields + for field in models2[model]: + if not field in models1[model]: + print ' added field %s' % field_name(models2, model, field) + + # find removed fields + for field in models1[model]: + if not field in models2[model]: + print ' removed field %s' % field_name(models1, model, field) + + # compare fields + for field in models1[model]: + if field in models2[model]: + + name = field_name(models1, model, field) + + # compare field attributes + field_value1 = models1[model][field] + field_value2 = models2[model][field] + + # if a field has become a class, or vice versa + if type(field_value1) != type(field_value2): + print ' type of %s changed from %s to %s' % ( + name, 
field_value1, field_value2) + + # if class + elif isinstance(field_value1, dict): + # print ' %s is a class' % name + pass + + # else regular field + else: + + type1, attr_list1, field_attrs1 = models1[model][field] + type2, attr_list2, field_attrs2 = models2[model][field] + + if type1 != type2: + print ' %s type changed from %s to %s' % ( + name, type1, type2) + + if attr_list1 != []: + print ' %s list %s is not []' % ( + name, attr_list1) + if attr_list2 != []: + print ' %s list %s is not []' % ( + name, attr_list2) + if attr_list1 != attr_list2: + print ' %s list changed from %s to %s' % ( + name, attr_list1, attr_list2) + + # find added field attributes + for attr in field_attrs2: + if not attr in field_attrs1: + print ' added %s attribute %s=%s' % ( + name, attr, field_attrs2[attr]) + + # find removed field attributes + for attr in field_attrs1: + if not attr in field_attrs2: + print ' removed attribute %s(%s=%s)' % ( + name, attr, field_attrs1[attr]) + + # compare field attributes + for attr in field_attrs1: + if attr in field_attrs2: + + value1 = field_attrs1[attr] + value2 = field_attrs2[attr] + if value1 != value2: + print ' %s attribute %s changed from %s to %s' % ( + name, attr, value1, value2) + + return migration2 diff --git a/users/src/south/south/management/commands/migrate.pyc b/users/src/south/south/management/commands/migrate.pyc new file mode 100644 index 0000000..9ebbbaa Binary files /dev/null and b/users/src/south/south/management/commands/migrate.pyc differ diff --git a/users/src/south/south/management/commands/migrationcheck.py b/users/src/south/south/management/commands/migrationcheck.py new file mode 100644 index 0000000..74eff58 --- /dev/null +++ b/users/src/south/south/management/commands/migrationcheck.py @@ -0,0 +1,67 @@ +from django.core.exceptions import ImproperlyConfigured +from django.core.management import call_command, CommandError +from django.core.management.base import BaseCommand +from django.conf import settings +from 
django.db.models import loading +from django.test import simple + +from south.migration import Migrations +from south.exceptions import NoMigrations +from south.hacks import hacks + +class Command(BaseCommand): + help = "Runs migrations for each app in turn, detecting missing depends_on values." + usage_str = "Usage: ./manage.py migrationcheck" + + def handle(self, check_app_name=None, **options): + runner = simple.DjangoTestSuiteRunner(verbosity=0) + err_msg = "Failed to migrate %s; see output for hints at missing dependencies:\n" + hacks.patch_flush_during_test_db_creation() + failures = 0 + if check_app_name is None: + app_names = settings.INSTALLED_APPS + else: + app_names = [check_app_name] + for app_name in app_names: + app_label = app_name.split(".")[-1] + if app_name == 'south': + continue + + try: + Migrations(app_name) + except (NoMigrations, ImproperlyConfigured): + continue + app = loading.get_app(app_label) + + verbosity = int(options.get('verbosity', 1)) + if verbosity >= 1: + self.stderr.write("processing %s\n" % app_name) + + old_config = runner.setup_databases() + try: + call_command('migrate', app_label, noinput=True, verbosity=verbosity) + for model in loading.get_models(app): + dummy = model._default_manager.exists() + except (KeyboardInterrupt, SystemExit): + raise + except Exception, e: + failures += 1 + if verbosity >= 1: + self.stderr.write(err_msg % app_name) + self.stderr.write("%s\n" % e) + finally: + runner.teardown_databases(old_config) + if failures > 0: + raise CommandError("Missing depends_on found in %s app(s)." % failures) + self.stderr.write("No missing depends_on found.\n") +# +#for each app: +# start with blank db. +# syncdb only south (and contrib?) +# +# migrate a single app all the way up. any errors is missing depends_on. 
+# for all models of that app, try the default manager: +# from django.db.models import loading +# for m in loading.get_models(loading.get_app('a')): +# m._default_manager.exists() +# Any error is also a missing depends on. diff --git a/users/src/south/south/management/commands/schemamigration.py b/users/src/south/south/management/commands/schemamigration.py new file mode 100644 index 0000000..91417f9 --- /dev/null +++ b/users/src/south/south/management/commands/schemamigration.py @@ -0,0 +1,190 @@ +""" +Startmigration command, version 2. +""" + +import sys +import os +import re +import string +import random +import inspect +from optparse import make_option + +try: + set +except NameError: + from sets import Set as set + +from django.core.management.base import BaseCommand +from django.core.management.color import no_style +from django.db import models +from django.conf import settings + +from south.migration import Migrations +from south.exceptions import NoMigrations +from south.creator import changes, actions, freezer +from south.management.commands.datamigration import Command as DataCommand + +class Command(DataCommand): + option_list = DataCommand.option_list + ( + make_option('--add-model', action='append', dest='added_model_list', type='string', + help='Generate a Create Table migration for the specified model. 
        Add multiple models to this migration with subsequent --model parameters.'),
        make_option('--add-field', action='append', dest='added_field_list', type='string',
            help='Generate an Add Column migration for the specified modelname.fieldname - you can use this multiple times to add more than one column.'),
        make_option('--add-index', action='append', dest='added_index_list', type='string',
            help='Generate an Add Index migration for the specified modelname.fieldname - you can use this multiple times to add more than one column.'),
        make_option('--initial', action='store_true', dest='initial', default=False,
            help='Generate the initial schema for the app.'),
        make_option('--auto', action='store_true', dest='auto', default=False,
            help='Attempt to automatically detect differences from the last migration.'),
        make_option('--empty', action='store_true', dest='empty', default=False,
            help='Make a blank migration.'),
    )
    help = "Creates a new template schema migration for the given app"
    usage_str = "Usage: ./manage.py schemamigration appname migrationname [--empty] [--initial] [--auto] [--add-model ModelName] [--add-field ModelName.field_name] [--stdout]"

    def handle(self, app=None, name="", added_model_list=None, added_field_list=None, freeze_list=None, initial=False, auto=False, stdout=False, added_index_list=None, verbosity=1, empty=False, **options):
        """
        Build a new schema migration for `app` and either write it to disk or
        print it to stdout.

        Exactly one change source is used, chosen by the mutually exclusive
        flags: --auto (diff against the last frozen migration), --initial
        (full schema), explicit --add-model/--add-field/--add-index lists,
        or --empty (blank migration skeleton).  `self.error(...)` aborts with
        a message for any invalid combination.
        """
        # Any supposed lists that are None become empty lists
        added_model_list = added_model_list or []
        added_field_list = added_field_list or []
        added_index_list = added_index_list or []
        freeze_list = freeze_list or []

        # --stdout means name = -  (the sentinel name handled at the bottom)
        if stdout:
            name = "-"

        # Only allow valid names (identifier characters), except the "-" sentinel
        if re.search('[^_\w]', name) and name != "-":
            self.error("Migration names should contain only alphanumeric characters and underscores.")

        # Make sure options are compatable: the change-source flags are exclusive
        if initial and (added_model_list or added_field_list or auto):
            self.error("You cannot use --initial and other options together\n" + self.usage_str)

        if auto and (added_model_list or added_field_list or initial):
            self.error("You cannot use --auto and other options together\n" + self.usage_str)

        if not app:
            self.error("You must provide an app to create a migration for.\n" + self.usage_str)

        # Get the Migrations for this app (creating the migrations dir if needed)
        migrations = Migrations(app, force_creation=True, verbose_creation=verbosity > 0)

        # What actions do we need to do?
        if auto:
            # Get the old migration; --auto needs at least one prior migration
            try:
                last_migration = migrations[-1]
            except IndexError:
                self.error("You cannot use --auto on an app with no migrations. Try --initial.")
            # Make sure it has stored models (the whole app must be frozen for diffing)
            if migrations.app_label() not in getattr(last_migration.migration_class(), "complete_apps", []):
                self.error("You cannot use automatic detection, since the previous migration does not have this whole app frozen.\nEither make migrations using '--freeze %s' or set 'SOUTH_AUTO_FREEZE_APP = True' in your settings.py." % migrations.app_label())
            # Alright, construct two model dicts to run the differ on.
            # Both are filtered down to just this app's models ("app.Model" keys).
            old_defs = dict(
                (k, v) for k, v in last_migration.migration_class().models.items()
                if k.split(".")[0] == migrations.app_label()
            )
            new_defs = dict(
                (k, v) for k, v in freezer.freeze_apps([migrations.app_label()]).items()
                if k.split(".")[0] == migrations.app_label()
            )
            change_source = changes.AutoChanges(
                migrations = migrations,
                old_defs = old_defs,
                old_orm = last_migration.orm(),
                new_defs = new_defs,
            )

        elif initial:
            # Do an initial migration
            change_source = changes.InitialChanges(migrations)

        else:
            # Read the commands manually off of the arguments
            if (added_model_list or added_field_list or added_index_list):
                change_source = changes.ManualChanges(
                    migrations,
                    added_model_list,
                    added_field_list,
                    added_index_list,
                )
            elif empty:
                change_source = None
            else:
                print >>sys.stderr, "You have not passed any of --initial, --auto, --empty, --add-model, --add-field or --add-index."
                sys.exit(1)

        # if not name, there's an error (try to derive one from the change source first)
        if not name:
            if change_source:
                name = change_source.suggest_name()
            if not name:
                self.error("You must provide a name for this migration\n" + self.usage_str)

        # See what filename is next in line. We assume they use numbers.
        new_filename = migrations.next_filename(name)

        # Get the actions, and then insert them into the actions lists.
        # Each action contributes both a forwards and a backwards operation.
        forwards_actions = []
        backwards_actions = []
        if change_source:
            for action_name, params in change_source.get_changes():
                # Run the correct Action class
                try:
                    action_class = getattr(actions, action_name)
                except AttributeError:
                    raise ValueError("Invalid action name from source: %s" % action_name)
                else:
                    action = action_class(**params)
                    action.add_forwards(forwards_actions)
                    action.add_backwards(backwards_actions)
                    print >>sys.stderr, action.console_line()

        # Nowt happen? That's not good for --auto.
        if auto and not forwards_actions:
            self.error("Nothing seems to have changed.")

        # Work out which apps to freeze
        apps_to_freeze = self.calc_frozen_apps(migrations, freeze_list)

        # So, what's in this file, then?  Fill in the module template below.
        file_contents = MIGRATION_TEMPLATE % {
            "forwards": "\n".join(forwards_actions or ["pass"]),
            "backwards": "\n".join(backwards_actions or ["pass"]),
            "frozen_models": freezer.freeze_apps_to_string(apps_to_freeze),
            "complete_apps": apps_to_freeze and "complete_apps = [%s]" % (", ".join(map(repr, apps_to_freeze))) or ""
        }

        # - is a special name which means 'print to stdout'
        if name == "-":
            print file_contents
        # Write the migration file if the name isn't -
        else:
            fp = open(os.path.join(migrations.migrations_dir(), new_filename), "w")
            fp.write(file_contents)
            fp.close()
            if empty:
                print >>sys.stderr, "Created %s. You must now edit this migration and add the code for each direction." % new_filename
            else:
                print >>sys.stderr, "Created %s. You can now apply this migration with: ./manage.py migrate %s" % (new_filename, app)


# Skeleton for every generated migration module; %(...)s slots are filled
# with the generated forwards/backwards bodies and the frozen model dict.
MIGRATION_TEMPLATE = """# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
%(forwards)s
    def backwards(self, orm):
%(backwards)s
    models = %(frozen_models)s

    %(complete_apps)s"""
# ==== users/src/south/south/management/commands/startmigration.py (new file in diff) ====
"""
Now-obsolete startmigration command.
+""" + +from optparse import make_option + +from django.core.management.base import BaseCommand +from django.core.management.color import no_style + +class Command(BaseCommand): + option_list = BaseCommand.option_list + ( + make_option('--model', action='append', dest='added_model_list', type='string', + help='Generate a Create Table migration for the specified model. Add multiple models to this migration with subsequent --model parameters.'), + make_option('--add-field', action='append', dest='added_field_list', type='string', + help='Generate an Add Column migration for the specified modelname.fieldname - you can use this multiple times to add more than one column.'), + make_option('--add-index', action='append', dest='added_index_list', type='string', + help='Generate an Add Index migration for the specified modelname.fieldname - you can use this multiple times to add more than one column.'), + make_option('--initial', action='store_true', dest='initial', default=False, + help='Generate the initial schema for the app.'), + make_option('--auto', action='store_true', dest='auto', default=False, + help='Attempt to automatically detect differences from the last migration.'), + make_option('--freeze', action='append', dest='freeze_list', type='string', + help='Freeze the specified model(s). Pass in either an app name (to freeze the whole app) or a single model, as appname.modelname.'), + make_option('--stdout', action='store_true', dest='stdout', default=False, + help='Print the migration to stdout instead of writing it to a file.'), + ) + help = "Deprecated command" + + def handle(self, app=None, name="", added_model_list=None, added_field_list=None, initial=False, freeze_list=None, auto=False, stdout=False, added_index_list=None, **options): + + print "The 'startmigration' command is now deprecated; please use the new 'schemamigration' and 'datamigration' commands." 
# ==== users/src/south/south/management/commands/syncdb.py (new file in diff) ====
"""
Overridden syncdb command
"""

import sys
from optparse import make_option

from django.core.management.base import NoArgsCommand, BaseCommand
from django.core.management.color import no_style
from django.utils.datastructures import SortedDict
from django.core.management.commands import syncdb
from django.conf import settings
from django.db import models
from django.db.models.loading import cache
from django.core import management

from south.db import dbs
from south import migration
from south.exceptions import NoMigrations

def get_app_label(app):
    # `app` is the app's models module; dropping the last dotted component
    # ('...something.models' -> '...something') yields the app path used
    # for comparisons against INSTALLED_APPS-style labels below.
    return '.'.join( app.__name__.split('.')[0:-1] )

class Command(NoArgsCommand):
    """
    Replacement for Django's syncdb that only creates tables for apps
    WITHOUT South migrations; migrated apps are handled by 'migrate'
    (optionally invoked afterwards via --migrate).
    """

    option_list = syncdb.Command.option_list + (
        make_option('--migrate', action='store_true', dest='migrate', default=False,
            help='Tells South to also perform migrations after the sync. Default for during testing, and other internal calls.'),
        make_option('--all', action='store_true', dest='migrate_all', default=False,
            help='Makes syncdb work on all apps, even migrated ones. Be careful!'),
    )
    # Older Django versions do not define --verbosity on syncdb themselves;
    # add it only if it is missing to avoid a duplicate-option error.
    if '--verbosity' not in [opt.get_opt_string() for opt in syncdb.Command.option_list]:
        option_list += (
            make_option('--verbosity', action='store', dest='verbosity', default='1',
                type='choice', choices=['0', '1', '2'],
                help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
        )
    help = "Create the database tables for all apps in INSTALLED_APPS whose tables haven't already been created, except those which use migrations."

    def handle_noargs(self, migrate_all=False, **options):
        """
        Sync only non-migrated apps, then (optionally) run 'migrate'.

        NOTE(review): this temporarily rebinds the global
        settings.INSTALLED_APPS and cache.app_store to restrict what the
        stock syncdb sees, and restores them afterwards; the restore is not
        in a try/finally, so an exception inside syncdb leaves them
        modified — preserved as-is here.
        """
        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        # This is copied from Django, to fix bug #511.
        try:
            from django.utils.importlib import import_module
        except ImportError:
            pass # TODO: Remove, only for Django1.0
        else:
            for app_name in settings.INSTALLED_APPS:
                try:
                    import_module('.management', app_name)
                except ImportError, exc:
                    # Only swallow a genuine "no management module" error;
                    # re-raise anything else (e.g. a broken management module).
                    msg = exc.args[0]
                    if not msg.startswith('No module named') or 'management' not in msg:
                        raise

        # Work out what uses migrations and so doesn't need syncing
        apps_needing_sync = []
        apps_migrated = []
        for app in models.get_apps():
            app_label = get_app_label(app)
            if migrate_all:
                apps_needing_sync.append(app_label)
            else:
                try:
                    migrations = migration.Migrations(app_label)
                except NoMigrations:
                    # It needs syncing
                    apps_needing_sync.append(app_label)
                else:
                    # This is a migrated app, leave it
                    apps_migrated.append(app_label)
        verbosity = int(options.get('verbosity', 0))

        # Run syncdb on only the ones needed
        if verbosity:
            print "Syncing..."

        # Temporarily hide migrated apps from Django's app registry so the
        # stock syncdb only touches the non-migrated ones.
        old_installed, settings.INSTALLED_APPS = settings.INSTALLED_APPS, apps_needing_sync
        old_app_store, cache.app_store = cache.app_store, SortedDict([
            (k, v) for (k, v) in cache.app_store.items()
            if get_app_label(k) in apps_needing_sync
        ])

        # This will allow the setting of the MySQL storage engine, for example.
        for db in dbs.values():
            db.connection_init()

        # OK, run the actual syncdb
        syncdb.Command().execute(**options)

        # Restore the global state we clobbered above.
        settings.INSTALLED_APPS = old_installed
        cache.app_store = old_app_store

        # Migrate if needed
        if options.get('migrate', True):
            if verbosity:
                print "Migrating..."
            management.call_command('migrate', **options)

        # Be obvious about what we did
        if verbosity:
            print "\nSynced:\n > %s" % "\n > ".join(apps_needing_sync)

        if options.get('migrate', True):
            if verbosity:
                print "\nMigrated:\n - %s" % "\n - ".join(apps_migrated)
        else:
            if verbosity:
                print "\nNot synced (use migrations):\n - %s" % "\n - ".join(apps_migrated)
                print "(use ./manage.py migrate to migrate these)"
# ==== users/src/south/south/management/commands/test.py (new file in diff) ====
from django.core.management.commands import test

from south.management.commands import patch_for_test_db_setup

class Command(test.Command):
    # Thin wrapper: patch the test-database setup for South, then delegate
    # to Django's stock 'test' command.
    def handle(self, *args, **kwargs):
        patch_for_test_db_setup()
        super(Command, self).handle(*args, **kwargs)
# ==== users/src/south/south/management/commands/testserver.py (new file in diff) ====
from django.core.management.commands import testserver

from south.management.commands import patch_for_test_db_setup

class Command(testserver.Command):
    # Same wrapper pattern as the 'test' override above, for 'testserver'.
    def handle(self, *args, **kwargs):
        patch_for_test_db_setup()
        super(Command, self).handle(*args, **kwargs)
# ==== users/src/south/south/migration/__init__.py (new file in diff) ====
"""
Main migration logic.
+""" + +import sys + +from django.core.exceptions import ImproperlyConfigured + +import south.db +from south import exceptions +from south.models import MigrationHistory +from south.db import db, DEFAULT_DB_ALIAS +from south.migration.migrators import (Backwards, Forwards, + DryRunMigrator, FakeMigrator, + LoadInitialDataMigrator) +from south.migration.base import Migration, Migrations +from south.migration.utils import SortedSet +from south.migration.base import all_migrations +from south.signals import pre_migrate, post_migrate + + +def to_apply(forwards, done): + return [m for m in forwards if m not in done] + +def to_unapply(backwards, done): + return [m for m in backwards if m in done] + +def problems(pending, done): + last = None + if not pending: + raise StopIteration() + for migration in pending: + if migration in done: + last = migration + continue + if last and migration not in done: + yield last, migration + +def forwards_problems(pending, done, verbosity): + """ + Takes the list of linearised pending migrations, and the set of done ones, + and returns the list of problems, if any. + """ + return inner_problem_check(problems(reversed(pending), done), done, verbosity) + +def backwards_problems(pending, done, verbosity): + return inner_problem_check(problems(pending, done), done, verbosity) + +def inner_problem_check(problems, done, verbosity): + "Takes a set of possible problems and gets the actual issues out of it." + result = [] + for last, migration in problems: + # 'Last' is the last applied migration. Step back from it until we + # either find nothing wrong, or we find something. + to_check = list(last.dependencies) + while to_check: + checking = to_check.pop() + if checking not in done: + # That's bad. Error. + if verbosity: + print (" ! Migration %s should not have been applied " + "before %s but was." 
% (last, checking)) + result.append((last, checking)) + else: + to_check.extend(checking.dependencies) + return result + +def check_migration_histories(histories, delete_ghosts=False, ignore_ghosts=False): + "Checks that there's no 'ghost' migrations in the database." + exists = SortedSet() + ghosts = [] + for h in histories: + try: + m = h.get_migration() + m.migration() + except exceptions.UnknownMigration: + ghosts.append(h) + except ImproperlyConfigured: + pass # Ignore missing applications + else: + exists.add(m) + if ghosts: + # They may want us to delete ghosts. + if delete_ghosts: + for h in ghosts: + h.delete() + elif not ignore_ghosts: + raise exceptions.GhostMigrations(ghosts) + return exists + +def get_dependencies(target, migrations): + forwards = list + backwards = list + if target is None: + backwards = migrations[0].backwards_plan + else: + forwards = target.forwards_plan + # When migrating backwards we want to remove up to and + # including the next migration up in this app (not the next + # one, that includes other apps) + migration_before_here = target.next() + if migration_before_here: + backwards = migration_before_here.backwards_plan + return forwards, backwards + +def get_direction(target, applied, migrations, verbosity, interactive): + # Get the forwards and reverse dependencies for this target + forwards, backwards = get_dependencies(target, migrations) + # Is the whole forward branch applied? + problems = None + forwards = forwards() + workplan = to_apply(forwards, applied) + if not workplan: + # If they're all applied, we only know it's not backwards + direction = None + else: + # If the remaining migrations are strictly a right segment of + # the forwards trace, we just need to go forwards to our + # target (and check for badness) + problems = forwards_problems(forwards, applied, verbosity) + direction = Forwards(verbosity=verbosity, interactive=interactive) + if not problems: + # What about the whole backward trace then? 
+ backwards = backwards() + missing_backwards = to_apply(backwards, applied) + if missing_backwards != backwards: + # If what's missing is a strict left segment of backwards (i.e. + # all the higher migrations) then we need to go backwards + workplan = to_unapply(backwards, applied) + problems = backwards_problems(backwards, applied, verbosity) + direction = Backwards(verbosity=verbosity, interactive=interactive) + return direction, problems, workplan + +def get_migrator(direction, db_dry_run, fake, load_initial_data): + if not direction: + return direction + if db_dry_run: + direction = DryRunMigrator(migrator=direction, ignore_fail=False) + elif fake: + direction = FakeMigrator(migrator=direction) + elif load_initial_data: + direction = LoadInitialDataMigrator(migrator=direction) + return direction + +def get_unapplied_migrations(migrations, applied_migrations): + applied_migration_names = ['%s.%s' % (mi.app_name,mi.migration) for mi in applied_migrations] + + for migration in migrations: + is_applied = '%s.%s' % (migration.app_label(), migration.name()) in applied_migration_names + if not is_applied: + yield migration + +def migrate_app(migrations, target_name=None, merge=False, fake=False, db_dry_run=False, yes=False, verbosity=0, load_initial_data=False, skip=False, database=DEFAULT_DB_ALIAS, delete_ghosts=False, ignore_ghosts=False, interactive=False): + app_label = migrations.app_label() + + verbosity = int(verbosity) + # Fire off the pre-migrate signal + pre_migrate.send(None, app=app_label) + + # If there aren't any, quit quizically + if not migrations: + print "? You have no migrations for the '%s' app. You might want some." 
% app_label + return + + # Load the entire dependency graph + Migrations.calculate_dependencies() + + # Check there's no strange ones in the database + applied_all = MigrationHistory.objects.filter(applied__isnull=False).order_by('applied') + applied = applied_all.filter(app_name=app_label) + # If we're using a different database, use that + if database != DEFAULT_DB_ALIAS: + applied_all = applied_all.using(database) + applied = applied.using(database) + south.db.db = south.db.dbs[database] + # We now have to make sure the migrations are all reloaded, as they'll + # have imported the old value of south.db.db. + Migrations.invalidate_all_modules() + + south.db.db.debug = (verbosity > 1) + + if target_name == 'current-1': + if applied.count() > 1: + previous_migration = applied[applied.count() - 2] + if verbosity: + print 'previous_migration: %s (applied: %s)' % (previous_migration.migration, previous_migration.applied) + target_name = previous_migration.migration + else: + if verbosity: + print 'previous_migration: zero' + target_name = 'zero' + elif target_name == 'current+1': + try: + first_unapplied_migration = get_unapplied_migrations(migrations, applied).next() + target_name = first_unapplied_migration.name() + except StopIteration: + target_name = None + + applied_all = check_migration_histories(applied_all, delete_ghosts, ignore_ghosts) + + # Guess the target_name + target = migrations.guess_migration(target_name) + if verbosity: + if target_name not in ('zero', None) and target.name() != target_name: + print " - Soft matched migration %s to %s." 
% (target_name, + target.name()) + print "Running migrations for %s:" % app_label + + # Get the forwards and reverse dependencies for this target + direction, problems, workplan = get_direction(target, applied_all, migrations, + verbosity, interactive) + if problems and not (merge or skip): + raise exceptions.InconsistentMigrationHistory(problems) + + # Perform the migration + migrator = get_migrator(direction, db_dry_run, fake, load_initial_data) + if migrator: + migrator.print_title(target) + success = migrator.migrate_many(target, workplan, database) + # Finally, fire off the post-migrate signal + if success: + post_migrate.send(None, app=app_label) + else: + if verbosity: + # Say there's nothing. + print '- Nothing to migrate.' + # If we have initial data enabled, and we're at the most recent + # migration, do initial data. + # Note: We use a fake Forwards() migrator here. It's never used really. + if load_initial_data: + migrator = LoadInitialDataMigrator(migrator=Forwards(verbosity=verbosity)) + migrator.load_initial_data(target, db=database) + # Send signal. 
+ post_migrate.send(None, app=app_label) diff --git a/users/src/south/south/migration/__init__.pyc b/users/src/south/south/migration/__init__.pyc new file mode 100644 index 0000000..c57dc67 Binary files /dev/null and b/users/src/south/south/migration/__init__.pyc differ diff --git a/users/src/south/south/migration/base.py b/users/src/south/south/migration/base.py new file mode 100644 index 0000000..5c8c6b3 --- /dev/null +++ b/users/src/south/south/migration/base.py @@ -0,0 +1,435 @@ +from collections import deque +import datetime +import os +import re +import sys + +from django.core.exceptions import ImproperlyConfigured +from django.db import models +from django.conf import settings + +from south import exceptions +from south.migration.utils import depends, dfs, flatten, get_app_label +from south.orm import FakeORM +from south.utils import memoize, ask_for_it_by_name +from south.migration.utils import app_label_to_app_module + + +def all_migrations(applications=None): + """ + Returns all Migrations for all `applications` that are migrated. + """ + if applications is None: + applications = models.get_apps() + for model_module in applications: + # The app they've passed is the models module - go up one level + app_path = ".".join(model_module.__name__.split(".")[:-1]) + app = ask_for_it_by_name(app_path) + try: + yield Migrations(app) + except exceptions.NoMigrations: + pass + + +def application_to_app_label(application): + "Works out the app label from either the app label, the app name, or the module" + if isinstance(application, basestring): + app_label = application.split('.')[-1] + else: + app_label = application.__name__.split('.')[-1] + return app_label + + +class MigrationsMetaclass(type): + + """ + Metaclass which ensures there is only one instance of a Migrations for + any given app. 
+ """ + + def __init__(self, name, bases, dict): + super(MigrationsMetaclass, self).__init__(name, bases, dict) + self.instances = {} + + def __call__(self, application, **kwds): + + app_label = application_to_app_label(application) + + # If we don't already have an instance, make one + if app_label not in self.instances: + self.instances[app_label] = super(MigrationsMetaclass, self).__call__(app_label_to_app_module(app_label), **kwds) + + return self.instances[app_label] + + def _clear_cache(self): + "Clears the cache of Migration objects." + self.instances = {} + + +class Migrations(list): + """ + Holds a list of Migration objects for a particular app. + """ + + __metaclass__ = MigrationsMetaclass + + if getattr(settings, "SOUTH_USE_PYC", False): + MIGRATION_FILENAME = re.compile(r'(?!__init__)' # Don't match __init__.py + r'[0-9a-zA-Z_]*' # Don't match dotfiles, or names with dots/invalid chars in them + r'(\.pyc?)?$') # Match .py or .pyc files, or module dirs + else: + MIGRATION_FILENAME = re.compile(r'(?!__init__)' # Don't match __init__.py + r'[0-9a-zA-Z_]*' # Don't match dotfiles, or names with dots/invalid chars in them + r'(\.py)?$') # Match only .py files, or module dirs + + def __init__(self, application, force_creation=False, verbose_creation=True): + "Constructor. Takes the module of the app, NOT its models (like get_app returns)" + self._cache = {} + self.set_application(application, force_creation, verbose_creation) + + def create_migrations_directory(self, verbose=True): + "Given an application, ensures that the migrations directory is ready." + migrations_dir = self.migrations_dir() + # Make the directory if it's not already there + if not os.path.isdir(migrations_dir): + if verbose: + print "Creating migrations directory at '%s'..." 
% migrations_dir + os.mkdir(migrations_dir) + # Same for __init__.py + init_path = os.path.join(migrations_dir, "__init__.py") + if not os.path.isfile(init_path): + # Touch the init py file + if verbose: + print "Creating __init__.py in '%s'..." % migrations_dir + open(init_path, "w").close() + + def migrations_dir(self): + """ + Returns the full path of the migrations directory. + If it doesn't exist yet, returns where it would exist, based on the + app's migrations module (defaults to app.migrations) + """ + module_path = self.migrations_module() + try: + module = __import__(module_path, {}, {}, ['']) + except ImportError: + # There's no migrations module made yet; guess! + try: + parent = __import__(".".join(module_path.split(".")[:-1]), {}, {}, ['']) + except ImportError: + # The parent doesn't even exist, that's an issue. + raise exceptions.InvalidMigrationModule( + application = self.application.__name__, + module = module_path, + ) + else: + # Good guess. + return os.path.join(os.path.dirname(parent.__file__), module_path.split(".")[-1]) + else: + # Get directory directly + return os.path.dirname(module.__file__) + + def migrations_module(self): + "Returns the module name of the migrations module for this" + app_label = application_to_app_label(self.application) + if hasattr(settings, "SOUTH_MIGRATION_MODULES"): + if app_label in settings.SOUTH_MIGRATION_MODULES: + # There's an override. + return settings.SOUTH_MIGRATION_MODULES[app_label] + return self._application.__name__ + '.migrations' + + def get_application(self): + return self._application + + def set_application(self, application, force_creation=False, verbose_creation=True): + """ + Called when the application for this Migrations is set. + Imports the migrations module object, and throws a paddy if it can't. 
+ """ + self._application = application + if not hasattr(application, 'migrations'): + try: + module = __import__(self.migrations_module(), {}, {}, ['']) + self._migrations = application.migrations = module + except ImportError: + if force_creation: + self.create_migrations_directory(verbose_creation) + module = __import__(self.migrations_module(), {}, {}, ['']) + self._migrations = application.migrations = module + else: + raise exceptions.NoMigrations(application) + self._load_migrations_module(application.migrations) + + application = property(get_application, set_application) + + def _load_migrations_module(self, module): + self._migrations = module + filenames = [] + dirname = self.migrations_dir() + for f in os.listdir(dirname): + if self.MIGRATION_FILENAME.match(os.path.basename(f)): + full_path = os.path.join(dirname, f) + # If it's a .pyc file, only append if the .py isn't already around + if f.endswith(".pyc") and (os.path.isfile(full_path[:-1])): + continue + # If it's a module directory, only append if it contains __init__.py[c]. 
+ if os.path.isdir(full_path): + if not (os.path.isfile(os.path.join(full_path, "__init__.py")) or \ + (getattr(settings, "SOUTH_USE_PYC", False) and \ + os.path.isfile(os.path.join(full_path, "__init__.pyc")))): + continue + filenames.append(f) + filenames.sort() + self.extend(self.migration(f) for f in filenames) + + def migration(self, filename): + name = Migration.strip_filename(filename) + if name not in self._cache: + self._cache[name] = Migration(self, name) + return self._cache[name] + + def __getitem__(self, value): + if isinstance(value, basestring): + return self.migration(value) + return super(Migrations, self).__getitem__(value) + + def _guess_migration(self, prefix): + prefix = Migration.strip_filename(prefix) + matches = [m for m in self if m.name().startswith(prefix)] + if len(matches) == 1: + return matches[0] + elif len(matches) > 1: + raise exceptions.MultiplePrefixMatches(prefix, matches) + else: + raise exceptions.UnknownMigration(prefix, None) + + def guess_migration(self, target_name): + if target_name == 'zero' or not self: + return + elif target_name is None: + return self[-1] + else: + return self._guess_migration(prefix=target_name) + + def app_label(self): + return self._application.__name__.split('.')[-1] + + def full_name(self): + return self._migrations.__name__ + + @classmethod + def calculate_dependencies(cls, force=False): + "Goes through all the migrations, and works out the dependencies." + if getattr(cls, "_dependencies_done", False) and not force: + return + for migrations in all_migrations(): + for migration in migrations: + migration.calculate_dependencies() + cls._dependencies_done = True + + @staticmethod + def invalidate_all_modules(): + "Goes through all the migrations, and invalidates all cached modules." 
+ for migrations in all_migrations(): + for migration in migrations: + migration.invalidate_module() + + def next_filename(self, name): + "Returns the fully-formatted filename of what a new migration 'name' would be" + highest_number = 0 + for migration in self: + try: + number = int(migration.name().split("_")[0]) + highest_number = max(highest_number, number) + except ValueError: + pass + # Work out the new filename + return "%04i_%s.py" % ( + highest_number + 1, + name, + ) + + +class Migration(object): + + """ + Class which represents a particular migration file on-disk. + """ + + def __init__(self, migrations, filename): + """ + Returns the migration class implied by 'filename'. + """ + self.migrations = migrations + self.filename = filename + self.dependencies = set() + self.dependents = set() + + def __str__(self): + return self.app_label() + ':' + self.name() + + def __repr__(self): + return u'' % unicode(self) + + def __eq__(self, other): + return self.app_label() == other.app_label() and self.name() == other.name() + + def app_label(self): + return self.migrations.app_label() + + @staticmethod + def strip_filename(filename): + return os.path.splitext(os.path.basename(filename))[0] + + def name(self): + return self.strip_filename(os.path.basename(self.filename)) + + def full_name(self): + return self.migrations.full_name() + '.' 
+ self.name() + + def migration(self): + "Tries to load the actual migration module" + full_name = self.full_name() + try: + migration = sys.modules[full_name] + except KeyError: + try: + migration = __import__(full_name, {}, {}, ['Migration']) + except ImportError, e: + raise exceptions.UnknownMigration(self, sys.exc_info()) + except Exception, e: + raise exceptions.BrokenMigration(self, sys.exc_info()) + # Override some imports + migration._ = lambda x: x # Fake i18n + migration.datetime = datetime + return migration + migration = memoize(migration) + + def migration_class(self): + "Returns the Migration class from the module" + return self.migration().Migration + + def migration_instance(self): + "Instantiates the migration_class" + return self.migration_class()() + migration_instance = memoize(migration_instance) + + def previous(self): + "Returns the migration that comes before this one in the sequence." + index = self.migrations.index(self) - 1 + if index < 0: + return None + return self.migrations[index] + previous = memoize(previous) + + def next(self): + "Returns the migration that comes after this one in the sequence." + index = self.migrations.index(self) + 1 + if index >= len(self.migrations): + return None + return self.migrations[index] + next = memoize(next) + + def _get_dependency_objects(self, attrname): + """ + Given the name of an attribute (depends_on or needed_by), either yields + a list of migration objects representing it, or errors out. 
+ """ + for app, name in getattr(self.migration_class(), attrname, []): + try: + migrations = Migrations(app) + except ImproperlyConfigured: + raise exceptions.DependsOnUnmigratedApplication(self, app) + migration = migrations.migration(name) + try: + migration.migration() + except exceptions.UnknownMigration: + raise exceptions.DependsOnUnknownMigration(self, migration) + if migration.is_before(self) == False: + raise exceptions.DependsOnHigherMigration(self, migration) + yield migration + + def calculate_dependencies(self): + """ + Loads dependency info for this migration, and stores it in itself + and any other relevant migrations. + """ + # Normal deps first + for migration in self._get_dependency_objects("depends_on"): + self.dependencies.add(migration) + migration.dependents.add(self) + # And reverse deps + for migration in self._get_dependency_objects("needed_by"): + self.dependents.add(migration) + migration.dependencies.add(self) + # And implicit ordering deps + previous = self.previous() + if previous: + self.dependencies.add(previous) + previous.dependents.add(self) + + def invalidate_module(self): + """ + Removes the cached version of this migration's module import, so we + have to re-import it. Used when south.db.db changes. + """ + reload(self.migration()) + self.migration._invalidate() + + def forwards(self): + return self.migration_instance().forwards + + def backwards(self): + return self.migration_instance().backwards + + def forwards_plan(self): + """ + Returns a list of Migration objects to be applied, in order. + + This list includes `self`, which will be applied last. + """ + return depends(self, lambda x: x.dependencies) + + def _backwards_plan(self): + return depends(self, lambda x: x.dependents) + + def backwards_plan(self): + """ + Returns a list of Migration objects to be unapplied, in order. + + This list includes `self`, which will be unapplied last. 
+ """ + return list(self._backwards_plan()) + + def is_before(self, other): + if self.migrations == other.migrations: + if self.filename < other.filename: + return True + return False + + def is_after(self, other): + if self.migrations == other.migrations: + if self.filename > other.filename: + return True + return False + + def prev_orm(self): + if getattr(self.migration_class(), 'symmetrical', False): + return self.orm() + previous = self.previous() + if previous is None: + # First migration? The 'previous ORM' is empty. + return FakeORM(None, self.app_label()) + return previous.orm() + prev_orm = memoize(prev_orm) + + def orm(self): + return FakeORM(self.migration_class(), self.app_label()) + orm = memoize(orm) + + def no_dry_run(self): + migration_class = self.migration_class() + try: + return migration_class.no_dry_run + except AttributeError: + return False diff --git a/users/src/south/south/migration/base.pyc b/users/src/south/south/migration/base.pyc new file mode 100644 index 0000000..78450bb Binary files /dev/null and b/users/src/south/south/migration/base.pyc differ diff --git a/users/src/south/south/migration/migrators.py b/users/src/south/south/migration/migrators.py new file mode 100644 index 0000000..fe61b7f --- /dev/null +++ b/users/src/south/south/migration/migrators.py @@ -0,0 +1,351 @@ +from copy import copy, deepcopy +from cStringIO import StringIO +import datetime +import inspect +import sys +import traceback + +from django.core.management import call_command +from django.core.management.commands import loaddata +from django.db import models + +import south.db +from south import exceptions +from south.db import DEFAULT_DB_ALIAS +from south.models import MigrationHistory +from south.signals import ran_migration + + +class Migrator(object): + def __init__(self, verbosity=0, interactive=False): + self.verbosity = int(verbosity) + self.interactive = bool(interactive) + + @staticmethod + def title(target): + raise NotImplementedError() + + def 
print_title(self, target): + if self.verbosity: + print self.title(target) + + @staticmethod + def status(target): + raise NotImplementedError() + + def print_status(self, migration): + status = self.status(migration) + if self.verbosity and status: + print status + + @staticmethod + def orm(migration): + raise NotImplementedError() + + def backwards(self, migration): + return self._wrap_direction(migration.backwards(), migration.prev_orm()) + + def direction(self, migration): + raise NotImplementedError() + + @staticmethod + def _wrap_direction(direction, orm): + args = inspect.getargspec(direction) + if len(args[0]) == 1: + # Old migration, no ORM should be passed in + return direction + return (lambda: direction(orm)) + + @staticmethod + def record(migration, database): + raise NotImplementedError() + + def run_migration_error(self, migration, extra_info=''): + return ( + ' ! Error found during real run of migration! Aborting.\n' + '\n' + ' ! Since you have a database that does not support running\n' + ' ! schema-altering statements in transactions, we have had \n' + ' ! to leave it in an interim state between migrations.\n' + '%s\n' + ' ! The South developers regret this has happened, and would\n' + ' ! like to gently persuade you to consider a slightly\n' + ' ! easier-to-deal-with DBMS (one that supports DDL transactions)\n' + ' ! NOTE: The error which caused the migration to fail is further up.' + ) % extra_info + + def run_migration(self, migration): + migration_function = self.direction(migration) + south.db.db.start_transaction() + try: + migration_function() + south.db.db.execute_deferred_sql() + except: + south.db.db.rollback_transaction() + if not south.db.db.has_ddl_transactions: + print self.run_migration_error(migration) + print "Error in migration: %s" % migration + raise + else: + south.db.db.commit_transaction() + + def run(self, migration): + # Get the correct ORM. 
+ south.db.db.current_orm = self.orm(migration) + # If we're not already in a dry run, and the database doesn't support + # running DDL inside a transaction, *cough*MySQL*cough* then do a dry + # run first. + if not isinstance(getattr(self, '_wrapper', self), DryRunMigrator): + if not south.db.db.has_ddl_transactions: + dry_run = DryRunMigrator(migrator=self, ignore_fail=False) + dry_run.run_migration(migration) + return self.run_migration(migration) + + def done_migrate(self, migration, database): + south.db.db.start_transaction() + try: + # Record us as having done this + self.record(migration, database) + except: + south.db.db.rollback_transaction() + raise + else: + south.db.db.commit_transaction() + + def send_ran_migration(self, migration): + ran_migration.send(None, + app=migration.app_label(), + migration=migration, + method=self.__class__.__name__.lower()) + + def migrate(self, migration, database): + """ + Runs the specified migration forwards/backwards, in order. + """ + app = migration.migrations._migrations + migration_name = migration.name() + self.print_status(migration) + result = self.run(migration) + self.done_migrate(migration, database) + self.send_ran_migration(migration) + return result + + def migrate_many(self, target, migrations, database): + raise NotImplementedError() + + +class MigratorWrapper(object): + def __init__(self, migrator, *args, **kwargs): + self._migrator = copy(migrator) + attributes = dict([(k, getattr(self, k)) + for k in self.__class__.__dict__.iterkeys() + if not k.startswith('__')]) + self._migrator.__dict__.update(attributes) + self._migrator.__dict__['_wrapper'] = self + + def __getattr__(self, name): + return getattr(self._migrator, name) + + +class DryRunMigrator(MigratorWrapper): + def __init__(self, ignore_fail=True, *args, **kwargs): + super(DryRunMigrator, self).__init__(*args, **kwargs) + self._ignore_fail = ignore_fail + + def _run_migration(self, migration): + if migration.no_dry_run(): + if self.verbosity: + 
print " - Migration '%s' is marked for no-dry-run." % migration + return + south.db.db.dry_run = True + # preserve the constraint cache as it can be mutated by the dry run + constraint_cache = deepcopy(south.db.db._constraint_cache) + if self._ignore_fail: + south.db.db.debug, old_debug = False, south.db.db.debug + pending_creates = south.db.db.get_pending_creates() + south.db.db.start_transaction() + migration_function = self.direction(migration) + try: + try: + migration_function() + south.db.db.execute_deferred_sql() + except: + raise exceptions.FailedDryRun(migration, sys.exc_info()) + finally: + south.db.db.rollback_transactions_dry_run() + if self._ignore_fail: + south.db.db.debug = old_debug + south.db.db.clear_run_data(pending_creates) + south.db.db.dry_run = False + # restore the preserved constraint cache from before dry run was + # executed + south.db.db._constraint_cache = constraint_cache + + def run_migration(self, migration): + try: + self._run_migration(migration) + except exceptions.FailedDryRun: + if self._ignore_fail: + return False + raise + + def done_migrate(self, *args, **kwargs): + pass + + def send_ran_migration(self, *args, **kwargs): + pass + + +class FakeMigrator(MigratorWrapper): + def run(self, migration): + if self.verbosity: + print ' (faked)' + + def send_ran_migration(self, *args, **kwargs): + pass + + +class LoadInitialDataMigrator(MigratorWrapper): + + def load_initial_data(self, target, db='default'): + if target is None or target != target.migrations[-1]: + return + # Load initial data, if we ended up at target + if self.verbosity: + print " - Loading initial data for %s." 
% target.app_label() + # Override Django's get_apps call temporarily to only load from the + # current app + old_get_apps = models.get_apps + new_get_apps = lambda: [models.get_app(target.app_label())] + models.get_apps = new_get_apps + loaddata.get_apps = new_get_apps + try: + call_command('loaddata', 'initial_data', verbosity=self.verbosity, database=db) + finally: + models.get_apps = old_get_apps + loaddata.get_apps = old_get_apps + + def migrate_many(self, target, migrations, database): + migrator = self._migrator + result = migrator.__class__.migrate_many(migrator, target, migrations, database) + if result: + self.load_initial_data(target, db=database) + return True + + +class Forwards(Migrator): + """ + Runs the specified migration forwards, in order. + """ + torun = 'forwards' + + @staticmethod + def title(target): + if target is not None: + return " - Migrating forwards to %s." % target.name() + else: + assert False, "You cannot migrate forwards to zero." + + @staticmethod + def status(migration): + return ' > %s' % migration + + @staticmethod + def orm(migration): + return migration.orm() + + def forwards(self, migration): + return self._wrap_direction(migration.forwards(), migration.orm()) + + direction = forwards + + @staticmethod + def record(migration, database): + # Record us as having done this + record = MigrationHistory.for_migration(migration, database) + record.applied = datetime.datetime.utcnow() + if database != DEFAULT_DB_ALIAS: + record.save(using=database) + else: + # Django 1.1 and below always go down this branch. 
+ record.save() + + def format_backwards(self, migration): + if migration.no_dry_run(): + return " (migration cannot be dry-run; cannot discover commands)" + old_debug, old_dry_run = south.db.db.debug, south.db.db.dry_run + south.db.db.debug = south.db.db.dry_run = True + stdout = sys.stdout + sys.stdout = StringIO() + try: + try: + self.backwards(migration)() + return sys.stdout.getvalue() + except: + raise + finally: + south.db.db.debug, south.db.db.dry_run = old_debug, old_dry_run + sys.stdout = stdout + + def run_migration_error(self, migration, extra_info=''): + extra_info = ('\n' + '! You *might* be able to recover with:' + '%s' + '%s' % + (self.format_backwards(migration), extra_info)) + return super(Forwards, self).run_migration_error(migration, extra_info) + + def migrate_many(self, target, migrations, database): + try: + for migration in migrations: + result = self.migrate(migration, database) + if result is False: # The migrations errored, but nicely. + return False + finally: + # Call any pending post_syncdb signals + south.db.db.send_pending_create_signals(verbosity=self.verbosity, + interactive=self.interactive) + return True + + +class Backwards(Migrator): + """ + Runs the specified migration backwards, in order. + """ + torun = 'backwards' + + @staticmethod + def title(target): + if target is None: + return " - Migrating backwards to zero state." + else: + return " - Migrating backwards to just after %s." 
% target.name() + + @staticmethod + def status(migration): + return ' < %s' % migration + + @staticmethod + def orm(migration): + return migration.prev_orm() + + direction = Migrator.backwards + + @staticmethod + def record(migration, database): + # Record us as having not done this + record = MigrationHistory.for_migration(migration, database) + if record.id is not None: + if database != DEFAULT_DB_ALIAS: + record.delete(using=database) + else: + # Django 1.1 always goes down here + record.delete() + + def migrate_many(self, target, migrations, database): + for migration in migrations: + self.migrate(migration, database) + return True + + + diff --git a/users/src/south/south/migration/migrators.pyc b/users/src/south/south/migration/migrators.pyc new file mode 100644 index 0000000..f65b27b Binary files /dev/null and b/users/src/south/south/migration/migrators.pyc differ diff --git a/users/src/south/south/migration/utils.py b/users/src/south/south/migration/utils.py new file mode 100644 index 0000000..da37397 --- /dev/null +++ b/users/src/south/south/migration/utils.py @@ -0,0 +1,83 @@ +import sys +from collections import deque + +from django.utils.datastructures import SortedDict +from django.db import models + +from south import exceptions + + +class SortedSet(SortedDict): + def __init__(self, data=tuple()): + self.extend(data) + + def __str__(self): + return "SortedSet(%s)" % list(self) + + def add(self, value): + self[value] = True + + def remove(self, value): + del self[value] + + def extend(self, iterable): + [self.add(k) for k in iterable] + + +def get_app_label(app): + """ + Returns the _internal_ app label for the given app module. + i.e. 
for will return 'auth' + """ + return app.__name__.split('.')[-2] + + +def app_label_to_app_module(app_label): + """ + Given the app label, returns the module of the app itself (unlike models.get_app, + which returns the models module) + """ + # Get the models module + app = models.get_app(app_label) + module_name = ".".join(app.__name__.split(".")[:-1]) + try: + module = sys.modules[module_name] + except KeyError: + __import__(module_name, {}, {}, ['']) + module = sys.modules[module_name] + return module + + +def flatten(*stack): + stack = deque(stack) + while stack: + try: + x = stack[0].next() + except AttributeError: + stack[0] = iter(stack[0]) + x = stack[0].next() + except StopIteration: + stack.popleft() + continue + if hasattr(x, '__iter__'): + stack.appendleft(x) + else: + yield x + +def _dfs(start, get_children, path): + if start in path: + raise exceptions.CircularDependency(path[path.index(start):] + [start]) + path.append(start) + yield start + children = sorted(get_children(start), key=lambda x: str(x)) + if children: + # We need to apply all the migrations this one depends on + yield (_dfs(n, get_children, path) for n in children) + path.pop() + +def dfs(start, get_children): + return flatten(_dfs(start, get_children, [])) + +def depends(start, get_children): + result = SortedSet(reversed(list(dfs(start, get_children)))) + return list(result) diff --git a/users/src/south/south/migration/utils.pyc b/users/src/south/south/migration/utils.pyc new file mode 100644 index 0000000..8c4fdfe Binary files /dev/null and b/users/src/south/south/migration/utils.pyc differ diff --git a/users/src/south/south/models.py b/users/src/south/south/models.py new file mode 100644 index 0000000..84b5517 --- /dev/null +++ b/users/src/south/south/models.py @@ -0,0 +1,37 @@ +from django.db import models +from south.db import DEFAULT_DB_ALIAS + +class MigrationHistory(models.Model): + app_name = models.CharField(max_length=255) + migration = models.CharField(max_length=255) + 
applied = models.DateTimeField(blank=True) + + @classmethod + def for_migration(cls, migration, database): + try: + # Switch on multi-db-ness + if database != DEFAULT_DB_ALIAS: + # Django 1.2 + objects = cls.objects.using(database) + else: + # Django <= 1.1 + objects = cls.objects + return objects.get( + app_name=migration.app_label(), + migration=migration.name(), + ) + except cls.DoesNotExist: + return cls( + app_name=migration.app_label(), + migration=migration.name(), + ) + + def get_migrations(self): + from south.migration.base import Migrations + return Migrations(self.app_name) + + def get_migration(self): + return self.get_migrations().migration(self.migration) + + def __unicode__(self): + return "<%s: %s>" % (self.app_name, self.migration) diff --git a/users/src/south/south/models.pyc b/users/src/south/south/models.pyc new file mode 100644 index 0000000..0b61d4f Binary files /dev/null and b/users/src/south/south/models.pyc differ diff --git a/users/src/south/south/modelsinspector.py b/users/src/south/south/modelsinspector.py new file mode 100644 index 0000000..460249c --- /dev/null +++ b/users/src/south/south/modelsinspector.py @@ -0,0 +1,395 @@ +""" +Like the old south.modelsparser, but using introspection where possible +rather than direct inspection of models.py. +""" + +import datetime +import re +import decimal + +from south.utils import get_attribute, auto_through + +from django.db import models +from django.db.models.base import ModelBase, Model +from django.db.models.fields import NOT_PROVIDED +from django.conf import settings +from django.utils.functional import Promise +from django.contrib.contenttypes import generic +from django.utils.datastructures import SortedDict +from django.utils import datetime_safe + +NOISY = False + +# Gives information about how to introspect certain fields. 
+# This is a list of triples; the first item is a list of fields it applies to, +# (note that isinstance is used, so superclasses are perfectly valid here) +# the second is a list of positional argument descriptors, and the third +# is a list of keyword argument descriptors. +# Descriptors are of the form: +# [attrname, options] +# Where attrname is the attribute on the field to get the value from, and options +# is an optional dict. +# +# The introspector uses the combination of all matching entries, in order. +introspection_details = [ + ( + (models.Field, ), + [], + { + "null": ["null", {"default": False}], + "blank": ["blank", {"default": False, "ignore_if":"primary_key"}], + "primary_key": ["primary_key", {"default": False}], + "max_length": ["max_length", {"default": None}], + "unique": ["_unique", {"default": False}], + "db_index": ["db_index", {"default": False}], + "default": ["default", {"default": NOT_PROVIDED, "ignore_dynamics": True}], + "db_column": ["db_column", {"default": None}], + "db_tablespace": ["db_tablespace", {"default": settings.DEFAULT_INDEX_TABLESPACE}], + }, + ), + ( + (models.ForeignKey, models.OneToOneField), + [], + { + "to": ["rel.to", {}], + "to_field": ["rel.field_name", {"default_attr": "rel.to._meta.pk.name"}], + "related_name": ["rel.related_name", {"default": None}], + "db_index": ["db_index", {"default": True}], + }, + ), + ( + (models.ManyToManyField,), + [], + { + "to": ["rel.to", {}], + "symmetrical": ["rel.symmetrical", {"default": True}], + "related_name": ["rel.related_name", {"default": None}], + "db_table": ["db_table", {"default": None}], + # TODO: Kind of ugly to add this one-time-only option + "through": ["rel.through", {"ignore_if_auto_through": True}], + }, + ), + ( + (models.DateField, models.TimeField), + [], + { + "auto_now": ["auto_now", {"default": False}], + "auto_now_add": ["auto_now_add", {"default": False}], + }, + ), + ( + (models.DecimalField, ), + [], + { + "max_digits": ["max_digits", {"default": 
None}], + "decimal_places": ["decimal_places", {"default": None}], + }, + ), + ( + (models.SlugField, ), + [], + { + "db_index": ["db_index", {"default": True}], + }, + ), + ( + (models.BooleanField, ), + [], + { + "default": ["default", {"default": NOT_PROVIDED, "converter": bool}], + "blank": ["blank", {"default": True, "ignore_if":"primary_key"}], + }, + ), + ( + (models.FilePathField, ), + [], + { + "path": ["path", {"default": ''}], + "match": ["match", {"default": None}], + "recursive": ["recursive", {"default": False}], + }, + ), + ( + (generic.GenericRelation, ), + [], + { + "to": ["rel.to", {}], + "symmetrical": ["rel.symmetrical", {"default": True}], + "object_id_field": ["object_id_field_name", {"default": "object_id"}], + "content_type_field": ["content_type_field_name", {"default": "content_type"}], + "blank": ["blank", {"default": True}], + }, + ), +] + +# Regexes of allowed field full paths +allowed_fields = [ + "^django\.db", + "^django\.contrib\.contenttypes\.generic", + "^django\.contrib\.localflavor", +] + +# Regexes of ignored fields (custom fields which look like fields, but have no column behind them) +ignored_fields = [ + "^django\.contrib\.contenttypes\.generic\.GenericRelation", + "^django\.contrib\.contenttypes\.generic\.GenericForeignKey", +] + +# Similar, but for Meta, so just the inner level (kwds). +meta_details = { + "db_table": ["db_table", {"default_attr_concat": ["%s_%s", "app_label", "module_name"]}], + "db_tablespace": ["db_tablespace", {"default": settings.DEFAULT_TABLESPACE}], + "unique_together": ["unique_together", {"default": []}], + "ordering": ["ordering", {"default": []}], + "proxy": ["proxy", {"default": False, "ignore_missing": True}], +} + +# 2.4 compatability +any = lambda x: reduce(lambda y, z: y or z, x, False) + + +def add_introspection_rules(rules=[], patterns=[]): + "Allows you to add some introspection rules at runtime, e.g. for 3rd party apps." 
+ assert isinstance(rules, (list, tuple)) + assert isinstance(patterns, (list, tuple)) + allowed_fields.extend(patterns) + introspection_details.extend(rules) + +def add_ignored_fields(patterns): + "Allows you to add some ignore field patterns." + assert isinstance(patterns, (list, tuple)) + ignored_fields.extend(patterns) + +def can_ignore(field): + """ + Returns True if we know for certain that we can ignore this field, False + otherwise. + """ + full_name = "%s.%s" % (field.__class__.__module__, field.__class__.__name__) + for regex in ignored_fields: + if re.match(regex, full_name): + return True + return False + +def can_introspect(field): + """ + Returns True if we are allowed to introspect this field, False otherwise. + ('allowed' means 'in core'. Custom fields can declare they are introspectable + by the default South rules by adding the attribute _south_introspects = True.) + """ + # Check for special attribute + if hasattr(field, "_south_introspects") and field._south_introspects: + return True + # Check it's an introspectable field + full_name = "%s.%s" % (field.__class__.__module__, field.__class__.__name__) + for regex in allowed_fields: + if re.match(regex, full_name): + return True + return False + + +def matching_details(field): + """ + Returns the union of all matching entries in introspection_details for the field. + """ + our_args = [] + our_kwargs = {} + for classes, args, kwargs in introspection_details: + if any([isinstance(field, x) for x in classes]): + our_args.extend(args) + our_kwargs.update(kwargs) + return our_args, our_kwargs + + +class IsDefault(Exception): + """ + Exception for when a field contains its default value. + """ + + +def get_value(field, descriptor): + """ + Gets an attribute value from a Field instance and formats it. + """ + attrname, options = descriptor + # If the options say it's not a attribute name but a real value, use that. 
+ if options.get('is_value', False): + value = attrname + else: + try: + value = get_attribute(field, attrname) + except AttributeError: + if options.get("ignore_missing", False): + raise IsDefault + else: + raise + # Lazy-eval functions get eval'd. + if isinstance(value, Promise): + value = unicode(value) + # If the value is the same as the default, omit it for clarity + if "default" in options and value == options['default']: + raise IsDefault + # If there's an ignore_if, use it + if "ignore_if" in options: + if get_attribute(field, options['ignore_if']): + raise IsDefault + # If there's an ignore_if_auto_through which is True, use it + if options.get("ignore_if_auto_through", False): + if auto_through(field): + raise IsDefault + # Some default values need to be gotten from an attribute too. + if "default_attr" in options: + default_value = get_attribute(field, options['default_attr']) + if value == default_value: + raise IsDefault + # Some are made from a formatting string and several attrs (e.g. db_table) + if "default_attr_concat" in options: + format, attrs = options['default_attr_concat'][0], options['default_attr_concat'][1:] + default_value = format % tuple(map(lambda x: get_attribute(field, x), attrs)) + if value == default_value: + raise IsDefault + # Callables get called. + if callable(value) and not isinstance(value, ModelBase): + # Datetime.datetime.now is special, as we can access it from the eval + # context (and because it changes all the time; people will file bugs otherwise). + if value == datetime.datetime.now: + return "datetime.datetime.now" + if value == datetime.datetime.utcnow: + return "datetime.datetime.utcnow" + if value == datetime.date.today: + return "datetime.date.today" + # All other callables get called. 
+ value = value() + # Models get their own special repr() + if isinstance(value, ModelBase): + # If it's a proxy model, follow it back to its non-proxy parent + if getattr(value._meta, "proxy", False): + value = value._meta.proxy_for_model + return "orm['%s.%s']" % (value._meta.app_label, value._meta.object_name) + # As do model instances + if isinstance(value, Model): + if options.get("ignore_dynamics", False): + raise IsDefault + return "orm['%s.%s'].objects.get(pk=%r)" % (value.__class__._meta.app_label, value.__class__._meta.object_name, value.pk) + # Make sure Decimal is converted down into a string + if isinstance(value, decimal.Decimal): + value = str(value) + # datetime_safe has an improper repr value + if isinstance(value, datetime_safe.datetime): + value = datetime.datetime(*value.utctimetuple()[:7]) + if isinstance(value, datetime_safe.date): + value = datetime.date(*value.timetuple()[:3]) + # Now, apply the converter func if there is one + if "converter" in options: + value = options['converter'](value) + # Return the final value + return repr(value) + + +def introspector(field): + """ + Given a field, introspects its definition triple. + """ + arg_defs, kwarg_defs = matching_details(field) + args = [] + kwargs = {} + # For each argument, use the descriptor to get the real value. + for defn in arg_defs: + try: + args.append(get_value(field, defn)) + except IsDefault: + pass + for kwd, defn in kwarg_defs.items(): + try: + kwargs[kwd] = get_value(field, defn) + except IsDefault: + pass + return args, kwargs + + +def get_model_fields(model, m2m=False): + """ + Given a model class, returns a dict of {field_name: field_triple} defs. + """ + + field_defs = SortedDict() + inherited_fields = {} + + # Go through all bases (that are themselves models, but not Model) + for base in model.__bases__: + if hasattr(base, '_meta') and issubclass(base, models.Model): + if not base._meta.abstract: + # Looks like we need their fields, Ma. 
+ inherited_fields.update(get_model_fields(base)) + + # Now, go through all the fields and try to get their definition + source = model._meta.local_fields[:] + if m2m: + source += model._meta.local_many_to_many + + for field in source: + # Can we ignore it completely? + if can_ignore(field): + continue + # Does it define a south_field_triple method? + if hasattr(field, "south_field_triple"): + if NOISY: + print " ( Nativing field: %s" % field.name + field_defs[field.name] = field.south_field_triple() + # Can we introspect it? + elif can_introspect(field): + # Get the full field class path. + field_class = field.__class__.__module__ + "." + field.__class__.__name__ + # Run this field through the introspector + args, kwargs = introspector(field) + # Workaround for Django bug #13987 + if model._meta.pk.column == field.column and 'primary_key' not in kwargs: + kwargs['primary_key'] = True + # That's our definition! + field_defs[field.name] = (field_class, args, kwargs) + # Shucks, no definition! + else: + if NOISY: + print " ( Nodefing field: %s" % field.name + field_defs[field.name] = None + + # If they've used the horrific hack that is order_with_respect_to, deal with + # it. + if model._meta.order_with_respect_to: + field_defs['_order'] = ("django.db.models.fields.IntegerField", [], {"default": "0"}) + + return field_defs + + +def get_model_meta(model): + """ + Given a model class, will return the dict representing the Meta class. + """ + + # Get the introspected attributes + meta_def = {} + for kwd, defn in meta_details.items(): + try: + meta_def[kwd] = get_value(model._meta, defn) + except IsDefault: + pass + + # Also, add on any non-abstract model base classes. + # This is called _ormbases as the _bases variable was previously used + # for a list of full class paths to bases, so we can't conflict. + for base in model.__bases__: + if hasattr(base, '_meta') and issubclass(base, models.Model): + if not base._meta.abstract: + # OK, that matches our terms. 
+ if "_ormbases" not in meta_def: + meta_def['_ormbases'] = [] + meta_def['_ormbases'].append("%s.%s" % ( + base._meta.app_label, + base._meta.object_name, + )) + + return meta_def + + +# Now, load the built-in South introspection plugins +import south.introspection_plugins diff --git a/users/src/south/south/orm.py b/users/src/south/south/orm.py new file mode 100644 index 0000000..cb585ac --- /dev/null +++ b/users/src/south/south/orm.py @@ -0,0 +1,401 @@ +""" +South's fake ORM; lets you not have to write SQL inside migrations. +Roughly emulates the real Django ORM, to a point. +""" + +import inspect +import datetime + +from django.db import models +from django.db.models.loading import cache +from django.core.exceptions import ImproperlyConfigured + +from south.db import db +from south.utils import ask_for_it_by_name +from south.hacks import hacks +from south.exceptions import UnfreezeMeLater, ORMBaseNotIncluded, ImpossibleORMUnfreeze + + +class ModelsLocals(object): + + """ + Custom dictionary-like class to be locals(); + falls back to lowercase search for items that don't exist + (because we store model names as lowercase). + """ + + def __init__(self, data): + self.data = data + + def __getitem__(self, key): + try: + return self.data[key] + except KeyError: + return self.data[key.lower()] + + +# Stores already-created ORMs. +_orm_cache = {} + +def FakeORM(*args): + """ + Creates a Fake Django ORM. + This is actually a memoised constructor; the real class is _FakeORM. + """ + if not args in _orm_cache: + _orm_cache[args] = _FakeORM(*args) + return _orm_cache[args] + + +class LazyFakeORM(object): + """ + In addition to memoising the ORM call, this function lazily generates them + for a Migration class. Assign the result of this to (for example) + .orm, and as soon as .orm is accessed the ORM will be created. 
+ """ + + def __init__(self, *args): + self._args = args + self.orm = None + + def __get__(self, obj, type=None): + if not self.orm: + self.orm = FakeORM(*self._args) + return self.orm + + +class _FakeORM(object): + + """ + Simulates the Django ORM at some point in time, + using a frozen definition on the Migration class. + """ + + def __init__(self, cls, app): + self.default_app = app + self.cls = cls + # Try loading the models off the migration class; default to no models. + self.models = {} + try: + self.models_source = cls.models + except AttributeError: + return + + # Start a 'new' AppCache + hacks.clear_app_cache() + + # Now, make each model's data into a FakeModel + # We first make entries for each model that are just its name + # This allows us to have circular model dependency loops + model_names = [] + for name, data in self.models_source.items(): + # Make sure there's some kind of Meta + if "Meta" not in data: + data['Meta'] = {} + try: + app_label, model_name = name.split(".", 1) + except ValueError: + app_label = self.default_app + model_name = name + + # If there's an object_name in the Meta, use it and remove it + if "object_name" in data['Meta']: + model_name = data['Meta']['object_name'] + del data['Meta']['object_name'] + + name = "%s.%s" % (app_label, model_name) + self.models[name.lower()] = name + model_names.append((name.lower(), app_label, model_name, data)) + + # Loop until model_names is entry, or hasn't shrunk in size since + # last iteration. + # The make_model method can ask to postpone a model; it's then pushed + # to the back of the queue. Because this is currently only used for + # inheritance, it should thus theoretically always decrease by one. + last_size = None + while model_names: + # First, make sure we've shrunk. 
+ if len(model_names) == last_size: + raise ImpossibleORMUnfreeze() + last_size = len(model_names) + # Make one run through + postponed_model_names = [] + for name, app_label, model_name, data in model_names: + try: + self.models[name] = self.make_model(app_label, model_name, data) + except UnfreezeMeLater: + postponed_model_names.append((name, app_label, model_name, data)) + # Reset + model_names = postponed_model_names + + # And perform the second run to iron out any circular/backwards depends. + self.retry_failed_fields() + + # Force evaluation of relations on the models now + for model in self.models.values(): + model._meta.get_all_field_names() + + # Reset AppCache + hacks.unclear_app_cache() + + + def __iter__(self): + return iter(self.models.values()) + + + def __getattr__(self, key): + fullname = (self.default_app+"."+key).lower() + try: + return self.models[fullname] + except KeyError: + raise AttributeError("The model '%s' from the app '%s' is not available in this migration. (Did you use orm.ModelName, not orm['app.ModelName']?)" % (key, self.default_app)) + + + def __getitem__(self, key): + # Detect if they asked for a field on a model or not. + if ":" in key: + key, fname = key.split(":") + else: + fname = None + # Now, try getting the model + key = key.lower() + try: + model = self.models[key] + except KeyError: + try: + app, model = key.split(".", 1) + except ValueError: + raise KeyError("The model '%s' is not in appname.modelname format." % key) + else: + raise KeyError("The model '%s' from the app '%s' is not available in this migration." % (model, app)) + # If they asked for a field, get it. + if fname: + return model._meta.get_field_by_name(fname)[0] + else: + return model + + + def eval_in_context(self, code, app, extra_imports={}): + "Evaluates the given code in the context of the migration file." 
+ + # Drag in the migration module's locals (hopefully including models.py) + fake_locals = dict(inspect.getmodule(self.cls).__dict__) + + # Remove all models from that (i.e. from modern models.py), to stop pollution + for key, value in fake_locals.items(): + if isinstance(value, type) and issubclass(value, models.Model) and hasattr(value, "_meta"): + del fake_locals[key] + + # We add our models into the locals for the eval + fake_locals.update(dict([ + (name.split(".")[-1], model) + for name, model in self.models.items() + ])) + + # Make sure the ones for this app override. + fake_locals.update(dict([ + (name.split(".")[-1], model) + for name, model in self.models.items() + if name.split(".")[0] == app + ])) + + # Ourselves as orm, to allow non-fail cross-app referencing + fake_locals['orm'] = self + + # And a fake _ function + fake_locals['_'] = lambda x: x + + # Datetime; there should be no datetime direct accesses + fake_locals['datetime'] = datetime + + # Now, go through the requested imports and import them. + for name, value in extra_imports.items(): + # First, try getting it out of locals. + parts = value.split(".") + try: + obj = fake_locals[parts[0]] + for part in parts[1:]: + obj = getattr(obj, part) + except (KeyError, AttributeError): + pass + else: + fake_locals[name] = obj + continue + # OK, try to import it directly + try: + fake_locals[name] = ask_for_it_by_name(value) + except ImportError: + if name == "SouthFieldClass": + raise ValueError("Cannot import the required field '%s'" % value) + else: + print "WARNING: Cannot import '%s'" % value + + # Use ModelsLocals to make lookups work right for CapitalisedModels + fake_locals = ModelsLocals(fake_locals) + + return eval(code, globals(), fake_locals) + + + def make_meta(self, app, model, data, stub=False): + "Makes a Meta class out of a dict of eval-able arguments." + results = {'app_label': app} + for key, code in data.items(): + # Some things we never want to use. 
+ if key in ["_bases", "_ormbases"]: + continue + # Some things we don't want with stubs. + if stub and key in ["order_with_respect_to"]: + continue + # OK, add it. + try: + results[key] = self.eval_in_context(code, app) + except (NameError, AttributeError), e: + raise ValueError("Cannot successfully create meta field '%s' for model '%s.%s': %s." % ( + key, app, model, e + )) + return type("Meta", tuple(), results) + + + def make_model(self, app, name, data): + "Makes a Model class out of the given app name, model name and pickled data." + + # Extract any bases out of Meta + if "_ormbases" in data['Meta']: + # Make sure everything we depend on is done already; otherwise, wait. + for key in data['Meta']['_ormbases']: + key = key.lower() + if key not in self.models: + raise ORMBaseNotIncluded("Cannot find ORM base %s" % key) + elif isinstance(self.models[key], basestring): + # Then the other model hasn't been unfrozen yet. + # We postpone ourselves; the situation will eventually resolve. + raise UnfreezeMeLater() + bases = [self.models[key.lower()] for key in data['Meta']['_ormbases']] + # Perhaps the old style? + elif "_bases" in data['Meta']: + bases = map(ask_for_it_by_name, data['Meta']['_bases']) + # Ah, bog standard, then. + else: + bases = [models.Model] + + # Turn the Meta dict into a basic class + meta = self.make_meta(app, name, data['Meta'], data.get("_stub", False)) + + failed_fields = {} + fields = {} + stub = False + + # Now, make some fields! + for fname, params in data.items(): + # If it's the stub marker, ignore it. + if fname == "_stub": + stub = bool(params) + continue + elif fname == "Meta": + continue + elif not params: + raise ValueError("Field '%s' on model '%s.%s' has no definition." % (fname, app, name)) + elif isinstance(params, (str, unicode)): + # It's a premade definition string! Let's hope it works... + code = params + extra_imports = {} + else: + # If there's only one parameter (backwards compat), make it 3. 
+ if len(params) == 1: + params = (params[0], [], {}) + # There should be 3 parameters. Code is a tuple of (code, what-to-import) + if len(params) == 3: + code = "SouthFieldClass(%s)" % ", ".join( + params[1] + + ["%s=%s" % (n, v) for n, v in params[2].items()] + ) + extra_imports = {"SouthFieldClass": params[0]} + else: + raise ValueError("Field '%s' on model '%s.%s' has a weird definition length (should be 1 or 3 items)." % (fname, app, name)) + + try: + # Execute it in a probably-correct context. + field = self.eval_in_context(code, app, extra_imports) + except (NameError, AttributeError, AssertionError, KeyError): + # It might rely on other models being around. Add it to the + # model for the second pass. + failed_fields[fname] = (code, extra_imports) + else: + fields[fname] = field + + # Find the app in the Django core, and get its module + more_kwds = {} + try: + app_module = models.get_app(app) + more_kwds['__module__'] = app_module.__name__ + except ImproperlyConfigured: + # The app this belonged to has vanished, but thankfully we can still + # make a mock model, so ignore the error. + more_kwds['__module__'] = '_south_mock' + + more_kwds['Meta'] = meta + + # Make our model + fields.update(more_kwds) + + model = type( + str(name), + tuple(bases), + fields, + ) + + # If this is a stub model, change Objects to a whiny class + if stub: + model.objects = WhinyManager() + # Also, make sure they can't instantiate it + model.__init__ = whiny_method + else: + model.objects = NoDryRunManager(model.objects) + + if failed_fields: + model._failed_fields = failed_fields + + return model + + def retry_failed_fields(self): + "Tries to re-evaluate the _failed_fields for each model." 
+ for modelkey, model in self.models.items(): + app, modelname = modelkey.split(".", 1) + if hasattr(model, "_failed_fields"): + for fname, (code, extra_imports) in model._failed_fields.items(): + try: + field = self.eval_in_context(code, app, extra_imports) + except (NameError, AttributeError, AssertionError, KeyError), e: + # It's failed again. Complain. + raise ValueError("Cannot successfully create field '%s' for model '%s': %s." % ( + fname, modelname, e + )) + else: + # Startup that field. + model.add_to_class(fname, field) + + +class WhinyManager(object): + "A fake manager that whines whenever you try to touch it. For stub models." + + def __getattr__(self, key): + raise AttributeError("You cannot use items from a stub model.") + + +class NoDryRunManager(object): + """ + A manager that always proxies through to the real manager, + unless a dry run is in progress. + """ + + def __init__(self, real): + self.real = real + + def __getattr__(self, name): + if db.dry_run: + raise AttributeError("You are in a dry run, and cannot access the ORM.\nWrap ORM sections in 'if not db.dry_run:', or if the whole migration is only a data migration, set no_dry_run = True on the Migration class.") + return getattr(self.real, name) + + +def whiny_method(*a, **kw): + raise ValueError("You cannot instantiate a stub model.") diff --git a/users/src/south/south/orm.pyc b/users/src/south/south/orm.pyc new file mode 100644 index 0000000..0e817f2 Binary files /dev/null and b/users/src/south/south/orm.pyc differ diff --git a/users/src/south/south/signals.py b/users/src/south/south/signals.py new file mode 100644 index 0000000..f2938d5 --- /dev/null +++ b/users/src/south/south/signals.py @@ -0,0 +1,24 @@ +""" +South-specific signals +""" + +from django.dispatch import Signal +from django.conf import settings + +# Sent at the start of the migration of an app +pre_migrate = Signal(providing_args=["app"]) + +# Sent after each successful migration of an app +post_migrate = 
class Monkeypatcher(unittest.TestCase):

    """
    Base test class for tests that play with the INSTALLED_APPS setting at runtime.

    Subclasses may set an `installed_apps` attribute; setUp/tearDown then swap
    it in and out of the Django app registry via south.hacks.
    """

    def create_fake_app(self, name):
        """
        Build a stand-in app object: a bare instance whose __name__ is `name`,
        with a `migrations` attribute attached when that submodule imports.
        """
        class Fake:
            pass

        shell = Fake()
        shell.__name__ = name
        try:
            shell.migrations = __import__(name + ".migrations", {}, {}, ['migrations'])
        except ImportError:
            # No migrations package for this fake app; that is fine.
            pass
        return shell

    def setUp(self):
        """
        Changes the Django environment so we can run tests against our test apps.
        """
        apps = getattr(self, 'installed_apps', None)
        if apps:
            hacks.set_installed_apps(apps)

    def tearDown(self):
        """
        Undoes what setUp did.
        """
        if getattr(self, 'installed_apps', None):
            hacks.reset_installed_apps()
+ """ + if getattr(self, 'installed_apps', None): + hacks.reset_installed_apps() + + +# Try importing all tests if asked for (then we can run 'em) +try: + skiptest = settings.SKIP_SOUTH_TESTS +except: + skiptest = True + +if not skiptest: + from south.tests.db import * + from south.tests.db_mysql import * + from south.tests.logic import * + from south.tests.autodetection import * + from south.tests.logger import * + from south.tests.inspector import * + from south.tests.freezer import * diff --git a/users/src/south/south/tests/autodetection.py b/users/src/south/south/tests/autodetection.py new file mode 100644 index 0000000..4e21f47 --- /dev/null +++ b/users/src/south/south/tests/autodetection.py @@ -0,0 +1,233 @@ +import unittest + +from south.creator.changes import AutoChanges + +class TestComparison(unittest.TestCase): + + """ + Tests the comparison methods of startmigration. + """ + + def test_no_change(self): + "Test with a completely unchanged definition." + + self.assertEqual( + AutoChanges.different_attributes( + ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['southdemo.Lizard']"}), + ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['southdemo.Lizard']"}), + ), + False, + ) + + self.assertEqual( + AutoChanges.different_attributes( + ('django.db.models.fields.related.ForeignKey', ['ohhai', 'there'], {'to': "somewhere", "from": "there"}), + ('django.db.models.fields.related.ForeignKey', ['ohhai', 'there'], {"from": "there", 'to': "somewhere"}), + ), + False, + ) + + + def test_pos_change(self): + "Test with a changed positional argument." 
+ + self.assertEqual( + AutoChanges.different_attributes( + ('django.db.models.fields.CharField', ['hi'], {'to': "foo"}), + ('django.db.models.fields.CharField', [], {'to': "foo"}), + ), + True, + ) + + self.assertEqual( + AutoChanges.different_attributes( + ('django.db.models.fields.CharField', [], {'to': "foo"}), + ('django.db.models.fields.CharField', ['bye'], {'to': "foo"}), + ), + True, + ) + + self.assertEqual( + AutoChanges.different_attributes( + ('django.db.models.fields.CharField', ['pi'], {'to': "foo"}), + ('django.db.models.fields.CharField', ['pi'], {'to': "foo"}), + ), + False, + ) + + self.assertEqual( + AutoChanges.different_attributes( + ('django.db.models.fields.CharField', ['pisdadad'], {'to': "foo"}), + ('django.db.models.fields.CharField', ['pi'], {'to': "foo"}), + ), + True, + ) + + self.assertEqual( + AutoChanges.different_attributes( + ('django.db.models.fields.CharField', ['hi'], {}), + ('django.db.models.fields.CharField', [], {}), + ), + True, + ) + + self.assertEqual( + AutoChanges.different_attributes( + ('django.db.models.fields.CharField', [], {}), + ('django.db.models.fields.CharField', ['bye'], {}), + ), + True, + ) + + self.assertEqual( + AutoChanges.different_attributes( + ('django.db.models.fields.CharField', ['pi'], {}), + ('django.db.models.fields.CharField', ['pi'], {}), + ), + False, + ) + + self.assertEqual( + AutoChanges.different_attributes( + ('django.db.models.fields.CharField', ['pi'], {}), + ('django.db.models.fields.CharField', ['45fdfdf'], {}), + ), + True, + ) + + + def test_kwd_change(self): + "Test a changed keyword argument" + + self.assertEqual( + AutoChanges.different_attributes( + ('django.db.models.fields.CharField', ['pi'], {'to': "foo"}), + ('django.db.models.fields.CharField', ['pi'], {'to': "blue"}), + ), + True, + ) + + self.assertEqual( + AutoChanges.different_attributes( + ('django.db.models.fields.CharField', [], {'to': "foo"}), + ('django.db.models.fields.CharField', [], {'to': "blue"}), + ), + True, 
+ ) + + self.assertEqual( + AutoChanges.different_attributes( + ('django.db.models.fields.CharField', ['b'], {'to': "foo"}), + ('django.db.models.fields.CharField', ['b'], {'to': "blue"}), + ), + True, + ) + + self.assertEqual( + AutoChanges.different_attributes( + ('django.db.models.fields.CharField', [], {'to': "foo"}), + ('django.db.models.fields.CharField', [], {}), + ), + True, + ) + + self.assertEqual( + AutoChanges.different_attributes( + ('django.db.models.fields.CharField', ['a'], {'to': "foo"}), + ('django.db.models.fields.CharField', ['a'], {}), + ), + True, + ) + + self.assertEqual( + AutoChanges.different_attributes( + ('django.db.models.fields.CharField', [], {}), + ('django.db.models.fields.CharField', [], {'to': "foo"}), + ), + True, + ) + + self.assertEqual( + AutoChanges.different_attributes( + ('django.db.models.fields.CharField', ['a'], {}), + ('django.db.models.fields.CharField', ['a'], {'to': "foo"}), + ), + True, + ) + + + + def test_backcompat_nochange(self): + "Test that the backwards-compatable comparison is working" + + self.assertEqual( + AutoChanges.different_attributes( + ('models.CharField', [], {}), + ('django.db.models.fields.CharField', [], {}), + ), + False, + ) + + self.assertEqual( + AutoChanges.different_attributes( + ('models.CharField', ['ack'], {}), + ('django.db.models.fields.CharField', ['ack'], {}), + ), + False, + ) + + self.assertEqual( + AutoChanges.different_attributes( + ('models.CharField', [], {'to':'b'}), + ('django.db.models.fields.CharField', [], {'to':'b'}), + ), + False, + ) + + self.assertEqual( + AutoChanges.different_attributes( + ('models.CharField', ['hah'], {'to':'you'}), + ('django.db.models.fields.CharField', ['hah'], {'to':'you'}), + ), + False, + ) + + self.assertEqual( + AutoChanges.different_attributes( + ('models.CharField', ['hah'], {'to':'you'}), + ('django.db.models.fields.CharField', ['hah'], {'to':'heh'}), + ), + True, + ) + + self.assertEqual( + AutoChanges.different_attributes( + 
('models.CharField', ['hah'], {}), + ('django.db.models.fields.CharField', [], {'to':"orm['appname.hah']"}), + ), + False, + ) + + self.assertEqual( + AutoChanges.different_attributes( + ('models.CharField', ['hah'], {}), + ('django.db.models.fields.CharField', [], {'to':'hah'}), + ), + True, + ) + + self.assertEqual( + AutoChanges.different_attributes( + ('models.CharField', ['hah'], {}), + ('django.db.models.fields.CharField', [], {'to':'rrr'}), + ), + True, + ) + + self.assertEqual( + AutoChanges.different_attributes( + ('models.CharField', ['hah'], {}), + ('django.db.models.fields.IntField', [], {'to':'hah'}), + ), + True, + ) \ No newline at end of file diff --git a/users/src/south/south/tests/brokenapp/__init__.py b/users/src/south/south/tests/brokenapp/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/south/tests/brokenapp/migrations/0001_depends_on_unmigrated.py b/users/src/south/south/tests/brokenapp/migrations/0001_depends_on_unmigrated.py new file mode 100644 index 0000000..d53f836 --- /dev/null +++ b/users/src/south/south/tests/brokenapp/migrations/0001_depends_on_unmigrated.py @@ -0,0 +1,13 @@ +from south.db import db +from django.db import models + +class Migration: + + depends_on = [('unknown', '0001_initial')] + + def forwards(self): + pass + + def backwards(self): + pass + diff --git a/users/src/south/south/tests/brokenapp/migrations/0002_depends_on_unknown.py b/users/src/south/south/tests/brokenapp/migrations/0002_depends_on_unknown.py new file mode 100644 index 0000000..389af80 --- /dev/null +++ b/users/src/south/south/tests/brokenapp/migrations/0002_depends_on_unknown.py @@ -0,0 +1,13 @@ +from south.db import db +from django.db import models + +class Migration: + + depends_on = [('fakeapp', '9999_unknown')] + + def forwards(self): + pass + + def backwards(self): + pass + diff --git a/users/src/south/south/tests/brokenapp/migrations/0003_depends_on_higher.py 
# -*- coding: UTF-8 -*-

# Fixture models for South's model-parsing tests: every definition below is a
# deliberate edge case for the freezer/inspector.  Do not "clean up" the odd
# formatting -- the weird line breaks are part of what is being tested.

from django.db import models
from django.contrib.auth.models import User as UserAlias

def default_func():
    # A callable default, to exercise freezing of function defaults.
    return "yays"

# An empty case.
class Other1(models.Model): pass

# Nastiness.
class HorribleModel(models.Model):
    "A model to test the edge cases of model parsing"

    ZERO, ONE = range(2)

    # First, some nice fields
    name = models.CharField(max_length=255)
    short_name = models.CharField(max_length=50)
    slug = models.SlugField(unique=True)

    # A ForeignKey, to a model above, and then below
    o1 = models.ForeignKey(Other1)
    o2 = models.ForeignKey('Other2')

    # Now to something outside
    user = models.ForeignKey(UserAlias, related_name="horribles")

    # Unicode!
    code = models.CharField(max_length=25, default="↑↑↓↓←→←→BA")

    # Odd defaults!
    class_attr = models.IntegerField(default=ZERO)
    func = models.CharField(max_length=25, default=default_func)

    # Time to get nasty. Define a non-field choices, and use it
    choices = [('hello', '1'), ('world', '2')]
    choiced = models.CharField(max_length=20, choices=choices)

    class Meta:
        db_table = "my_fave"
        # String concatenation split across a line continuation -- another
        # parsing edge case.
        verbose_name = "Dr. Strangelove," + \
            """or how I learned to stop worrying
and love the bomb"""

    # Now spread over multiple lines
    multiline = \
        models.TextField(
        )

# Special case.
class Other2(models.Model):
    # Try loading a field without a newline after it (inspect hates this)
    close_but_no_cigar = models.PositiveIntegerField(primary_key=True)
# Create a list of error classes from the various database libraries
errors = []
try:
    from psycopg2 import ProgrammingError
    errors.append(ProgrammingError)
except ImportError:
    # psycopg2 not installed; Postgres errors simply won't be in the tuple.
    pass
errors = tuple(errors)

try:
    from south.db import mysql
except ImportError:
    mysql = None

class TestOperations(unittest.TestCase):

    """
    Tests if the various DB abstraction calls work.
    Can only test a limited amount due to DB differences.
    """

    def setUp(self):
        # Quiet SQL logging and run each test inside its own transaction.
        db.debug = False
        db.clear_deferred_sql()
        db.start_transaction()

    def tearDown(self):
        # Undo whatever the test did to the schema/data.
        db.rollback_transaction()

    def test_create(self):
        """
        Test creation of tables.
        """
        cursor = connection.cursor()
        # It needs to take at least 2 args
        self.assertRaises(TypeError, db.create_table)
        self.assertRaises(TypeError, db.create_table, "test1")
        # Empty tables (i.e. no columns) are not fine, so make at least 1
        db.create_table("test1", [('email_confirmed', models.BooleanField(default=False))])
        # And should exist
        cursor.execute("SELECT * FROM test1")
        # Make sure we can't do the same query on an empty table
        # (bare except is deliberate: the error class differs per DB driver)
        try:
            cursor.execute("SELECT * FROM nottheretest1")
        except:
            pass
        else:
            self.fail("Non-existent table could be selected!")
    def test_delete(self):
        """
        Test deletion of tables.
        """
        cursor = connection.cursor()
        db.create_table("test_deltable", [('email_confirmed', models.BooleanField(default=False))])
        db.delete_table("test_deltable")
        # Make sure it went
        # (bare except is deliberate: the error class differs per DB driver)
        try:
            cursor.execute("SELECT * FROM test_deltable")
        except:
            pass
        else:
            self.fail("Just-deleted table could be selected!")

    def test_nonexistent_delete(self):
        """
        Test deletion of nonexistent tables.
        """
        try:
            db.delete_table("test_nonexistdeltable")
        except:
            pass
        else:
            self.fail("Non-existent table could be deleted!")

    def test_foreign_keys(self):
        """
        Tests foreign key creation, especially uppercase (see #61)
        """
        # Mixed-case table/column names exercise the quoting paths.
        Test = db.mock_model(model_name='Test', db_table='test5a',
                             db_tablespace='', pk_field_name='ID',
                             pk_field_type=models.AutoField, pk_field_args=[])
        db.create_table("test5a", [('ID', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True))])
        db.create_table("test5b", [
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('UNIQUE', models.ForeignKey(Test)),
        ])
        # FK constraints are emitted as deferred SQL; force them now.
        db.execute_deferred_sql()

    def test_rename(self):
        """
        Test column renaming
        """
        cursor = connection.cursor()
        db.create_table("test_rn", [('spam', models.BooleanField(default=False))])
        # Make sure we can select the column
        cursor.execute("SELECT spam FROM test_rn")
        # Rename it
        db.rename_column("test_rn", "spam", "eggs")
        cursor.execute("SELECT eggs FROM test_rn")
        db.commit_transaction()
        db.start_transaction()
        try:
            cursor.execute("SELECT spam FROM test_rn")
        except:
            pass
        else:
            self.fail("Just-renamed column could be selected!")
        db.rollback_transaction()
        db.delete_table("test_rn")
        db.start_transaction()
test_rn") + except: + pass + else: + self.fail("Just-renamed column could be selected!") + db.rollback_transaction() + db.delete_table("test_rn") + db.start_transaction() + + def test_dry_rename(self): + """ + Test column renaming while --dry-run is turned on (should do nothing) + See ticket #65 + """ + cursor = connection.cursor() + db.create_table("test_drn", [('spam', models.BooleanField(default=False))]) + # Make sure we can select the column + cursor.execute("SELECT spam FROM test_drn") + # Rename it + db.dry_run = True + db.rename_column("test_drn", "spam", "eggs") + db.dry_run = False + cursor.execute("SELECT spam FROM test_drn") + db.commit_transaction() + db.start_transaction() + try: + cursor.execute("SELECT eggs FROM test_drn") + except: + pass + else: + self.fail("Dry-renamed new column could be selected!") + db.rollback_transaction() + db.delete_table("test_drn") + db.start_transaction() + + def test_table_rename(self): + """ + Test column renaming + """ + cursor = connection.cursor() + db.create_table("testtr", [('spam', models.BooleanField(default=False))]) + # Make sure we can select the column + cursor.execute("SELECT spam FROM testtr") + # Rename it + db.rename_table("testtr", "testtr2") + cursor.execute("SELECT spam FROM testtr2") + db.commit_transaction() + db.start_transaction() + try: + cursor.execute("SELECT spam FROM testtr") + except: + pass + else: + self.fail("Just-renamed column could be selected!") + db.rollback_transaction() + db.delete_table("testtr2") + db.start_transaction() + + def test_percents_in_defaults(self): + """ + Test that % in a default gets escaped to %%. 
+ """ + cursor = connection.cursor() + try: + db.create_table("testpind", [('cf', models.CharField(max_length=255, default="It should be 2%!"))]) + except IndexError: + self.fail("% was not properly escaped in column SQL.") + db.delete_table("testpind") + + def test_index(self): + """ + Test the index operations + """ + db.create_table("test3", [ + ('SELECT', models.BooleanField(default=False)), + ('eggs', models.IntegerField(unique=True)), + ]) + db.execute_deferred_sql() + # Add an index on that column + db.create_index("test3", ["SELECT"]) + # Add another index on two columns + db.create_index("test3", ["SELECT", "eggs"]) + # Delete them both + db.delete_index("test3", ["SELECT"]) + db.delete_index("test3", ["SELECT", "eggs"]) + # Delete the unique index/constraint + if db.backend_name != "sqlite3": + db.delete_unique("test3", ["eggs"]) + db.delete_table("test3") + + def test_primary_key(self): + """ + Test the primary key operations + """ + + db.create_table("test_pk", [ + ('id', models.IntegerField(primary_key=True)), + ('new_pkey', models.IntegerField()), + ('eggs', models.IntegerField(unique=True)), + ]) + db.execute_deferred_sql() + # Remove the default primary key, and make eggs it + db.delete_primary_key("test_pk") + db.create_primary_key("test_pk", "new_pkey") + # Try inserting a now-valid row pair + db.execute("INSERT INTO test_pk (id, new_pkey, eggs) VALUES (1, 2, 3)") + db.execute("INSERT INTO test_pk (id, new_pkey, eggs) VALUES (1, 3, 4)") + db.delete_table("test_pk") + + def test_primary_key_implicit(self): + """ + Tests changing primary key implicitly. + """ + + # This is ONLY important for SQLite. It's not a feature we support, but + # not implementing it means SQLite fails (due to the table-copying weirdness). 
    def test_primary_key_implicit(self):
        """
        Tests changing primary key implicitly.
        """

        # This is ONLY important for SQLite. It's not a feature we support, but
        # not implementing it means SQLite fails (due to the table-copying weirdness).
        if db.backend_name != "sqlite3":
            return

        db.create_table("test_pki", [
            ('id', models.IntegerField(primary_key=True)),
            ('new_pkey', models.IntegerField()),
            ('eggs', models.IntegerField(unique=True)),
        ])
        db.execute_deferred_sql()
        # Remove the default primary key, and make new_pkey the primary key
        db.alter_column("test_pki", "id", models.IntegerField())
        db.alter_column("test_pki", "new_pkey", models.IntegerField(primary_key=True))
        # Try inserting a now-valid row pair (duplicate id is fine once id is no longer the PK)
        db.execute("INSERT INTO test_pki (id, new_pkey, eggs) VALUES (1, 2, 3)")
        db.execute("INSERT INTO test_pki (id, new_pkey, eggs) VALUES (1, 3, 4)")
        db.delete_table("test_pki")

    def test_add_columns(self):
        """
        Test adding columns
        """
        db.create_table("test_addc", [
            ('spam', models.BooleanField(default=False)),
            ('eggs', models.IntegerField()),
        ])
        # Add a column
        db.add_column("test_addc", "add1", models.IntegerField(default=3), keep_default=False)
        # Add a FK with keep_default=False (#69)
        User = db.mock_model(model_name='User', db_table='auth_user', db_tablespace='', pk_field_name='id', pk_field_type=models.AutoField, pk_field_args=[], pk_field_kwargs={})
        # insert some data so we can test the default value of the added fkey
        db.execute("INSERT INTO test_addc (eggs, add1) VALUES (1, 2)")
        db.add_column("test_addc", "user", models.ForeignKey(User, null=True), keep_default=False)
        # try selecting from the user_id column to make sure it was actually created
        val = db.execute("SELECT user_id FROM test_addc")[0][0]
        self.assertEquals(val, None)
        db.delete_column("test_addc", "add1")
        db.delete_table("test_addc")
    def test_add_nullbool_column(self):
        """
        Test adding NullBoolean columns
        """
        db.create_table("test_addnbc", [
            ('spam', models.BooleanField(default=False)),
            ('eggs', models.IntegerField()),
        ])
        # Add a column
        db.add_column("test_addnbc", "add1", models.NullBooleanField())
        # Add a column with a default
        db.add_column("test_addnbc", "add2", models.NullBooleanField(default=True))
        # insert some data so we can test the default values of the added column
        db.execute("INSERT INTO test_addnbc (eggs) VALUES (1)")
        # try selecting from the new columns to make sure they were properly created
        false, null, true = db.execute("SELECT spam,add1,add2 FROM test_addnbc")[0][0:3]
        self.assertTrue(true)
        self.assertEquals(null, None)
        self.assertEquals(false, False)
        db.delete_table("test_addnbc")

    def test_alter_columns(self):
        """
        Test altering columns
        """
        db.create_table("test_alterc", [
            ('spam', models.BooleanField(default=False)),
            ('eggs', models.IntegerField()),
        ])
        # Change eggs to be a FloatField
        db.alter_column("test_alterc", "eggs", models.FloatField())
        db.delete_table("test_alterc")

    def test_mysql_defaults(self):
        """
        Test MySQL default handling for BLOB and TEXT.
        """
        db.create_table("test_altermyd", [
            ('spam', models.BooleanField(default=False)),
            ('eggs', models.TextField()),
        ])
        # Make eggs nullable (TEXT/BLOB columns cannot carry defaults on MySQL)
        # -- the previous comment here was copy-pasted from test_alter_columns.
        db.alter_column("test_altermyd", "eggs", models.TextField(null=True))
        db.delete_table("test_altermyd")
    def test_alter_column_postgres_multiword(self):
        """
        Tests altering columns with multiple words in Postgres types (issue #125)
        e.g. 'datetime with time zone', look at django/db/backends/postgresql/creation.py
        """
        db.create_table("test_multiword", [
            ('col_datetime', models.DateTimeField(null=True)),
            ('col_integer', models.PositiveIntegerField(null=True)),
            ('col_smallint', models.PositiveSmallIntegerField(null=True)),
            ('col_float', models.FloatField(null=True)),
        ])

        # test if 'double precision' is preserved
        db.alter_column('test_multiword', 'col_float', models.FloatField('float', null=True))

        # test if 'CHECK ("%(column)s" >= 0)' is stripped
        db.alter_column('test_multiword', 'col_integer', models.PositiveIntegerField(null=True))
        db.alter_column('test_multiword', 'col_smallint', models.PositiveSmallIntegerField(null=True))

        # test if 'with timezone' is preserved
        if db.backend_name == "postgres":
            db.execute("INSERT INTO test_multiword (col_datetime) VALUES ('2009-04-24 14:20:55+02')")
            db.alter_column('test_multiword', 'col_datetime', models.DateTimeField(auto_now=True))
            assert db.execute("SELECT col_datetime = '2009-04-24 14:20:55+02' FROM test_multiword")[0][0]

        db.delete_table("test_multiword")

    def test_alter_constraints(self):
        """
        Tests that going from a PositiveIntegerField to an IntegerField drops
        the constraint on the database.
        """
        # Only applies to databases that support CHECK constraints
        if not db.has_check_constraints:
            return
        # Make the test table
        db.create_table("test_alterc", [
            ('num', models.PositiveIntegerField()),
        ])
        # Add in some test values
        db.execute("INSERT INTO test_alterc (num) VALUES (1)")
        db.execute("INSERT INTO test_alterc (num) VALUES (2)")
        # Ensure that adding a negative number is bad
        db.commit_transaction()
        db.start_transaction()
        try:
            db.execute("INSERT INTO test_alterc (num) VALUES (-3)")
        except:
            db.rollback_transaction()
        else:
            self.fail("Could insert a negative integer into a PositiveIntegerField.")
        # Alter it to a normal IntegerField
        db.alter_column("test_alterc", "num", models.IntegerField())
        # It should now work
        db.execute("INSERT INTO test_alterc (num) VALUES (-3)")
        db.delete_table("test_alterc")
        # We need to match up for tearDown
        db.start_transaction()
+ if db.backend_name == "sqlite3": + return + + db.create_table("test_unique2", [ + ('id', models.AutoField(primary_key=True)), + ]) + db.create_table("test_unique", [ + ('spam', models.BooleanField(default=False)), + ('eggs', models.IntegerField()), + ('ham', models.ForeignKey(db.mock_model('Unique2', 'test_unique2'))), + ]) + # Add a constraint + db.create_unique("test_unique", ["spam"]) + # Shouldn't do anything during dry-run + db.dry_run = True + db.delete_unique("test_unique", ["spam"]) + db.dry_run = False + db.delete_unique("test_unique", ["spam"]) + db.create_unique("test_unique", ["spam"]) + db.commit_transaction() + db.start_transaction() + + # Special preparations for Sql Server + if db.backend_name == "pyodbc": + db.execute("SET IDENTITY_INSERT test_unique2 ON;") + + # Test it works + TRUE = (True,) + FALSE = (False,) + db.execute("INSERT INTO test_unique2 (id) VALUES (1)") + db.execute("INSERT INTO test_unique2 (id) VALUES (2)") + db.execute("INSERT INTO test_unique (spam, eggs, ham_id) VALUES (%s, 0, 1)", TRUE) + db.execute("INSERT INTO test_unique (spam, eggs, ham_id) VALUES (%s, 1, 2)", FALSE) + try: + db.execute("INSERT INTO test_unique (spam, eggs, ham_id) VALUES (%s, 2, 1)", FALSE) + except: + db.rollback_transaction() + else: + self.fail("Could insert non-unique item.") + + # Drop that, add one only on eggs + db.delete_unique("test_unique", ["spam"]) + db.execute("DELETE FROM test_unique") + db.create_unique("test_unique", ["eggs"]) + db.start_transaction() + + # Test similarly + db.execute("INSERT INTO test_unique (spam, eggs, ham_id) VALUES (%s, 0, 1)", TRUE) + db.execute("INSERT INTO test_unique (spam, eggs, ham_id) VALUES (%s, 1, 2)", FALSE) + try: + db.execute("INSERT INTO test_unique (spam, eggs, ham_id) VALUES (%s, 1, 1)", TRUE) + except: + db.rollback_transaction() + else: + self.fail("Could insert non-unique item.") + + # Drop those, test combined constraints + db.delete_unique("test_unique", ["eggs"]) + db.execute("DELETE FROM 
test_unique") + db.create_unique("test_unique", ["spam", "eggs", "ham_id"]) + db.start_transaction() + # Test similarly + db.execute("INSERT INTO test_unique (spam, eggs, ham_id) VALUES (%s, 0, 1)", TRUE) + db.execute("INSERT INTO test_unique (spam, eggs, ham_id) VALUES (%s, 1, 1)", FALSE) + try: + db.execute("INSERT INTO test_unique (spam, eggs, ham_id) VALUES (%s, 0, 1)", TRUE) + except: + db.rollback_transaction() + else: + self.fail("Could insert non-unique pair.") + db.delete_unique("test_unique", ["spam", "eggs", "ham_id"]) + db.start_transaction() + + def test_alter_unique(self): + """ + Tests that unique constraints are properly created and deleted when + altering columns. + """ + db.create_table("test_alter_unique", [ + ('spam', models.IntegerField()), + ('eggs', models.IntegerField(unique=True)), + ]) + db.execute_deferred_sql() + + # Make sure the unique constraint is created + db.execute('INSERT INTO test_alter_unique VALUES (0, 42)') + db.commit_transaction() + db.start_transaction() + try: + db.execute("INSERT INTO test_alter_unique VALUES (1, 42)") + except: + pass + else: + self.fail("Could insert the same integer twice into a field with unique=True.") + db.rollback_transaction() + + # remove constraint + db.alter_column("test_alter_unique", "eggs", models.IntegerField()) + # make sure the insertion works now + db.execute('INSERT INTO test_alter_unique VALUES (1, 42)') + + # add it back again + db.execute('DELETE FROM test_alter_unique WHERE spam=1') + db.alter_column("test_alter_unique", "eggs", models.IntegerField(unique=True)) + # it should fail again + db.start_transaction() + try: + db.execute("INSERT INTO test_alter_unique VALUES (1, 42)") + except: + pass + else: + self.fail("Unique constraint not created during alter_column()") + db.rollback_transaction() + + # Delete the unique index/constraint + if db.backend_name != "sqlite3": + db.delete_unique("test_alter_unique", ["eggs"]) + db.delete_table("test_alter_unique") + db.start_transaction() 
+ + def test_capitalised_constraints(self): + """ + Under PostgreSQL at least, capitalised constraints must be quoted. + """ + db.create_table("test_capconst", [ + ('SOMECOL', models.PositiveIntegerField(primary_key=True)), + ]) + # Alter it so it's not got the check constraint + db.alter_column("test_capconst", "SOMECOL", models.IntegerField()) + + def test_text_default(self): + """ + MySQL cannot have blank defaults on TEXT columns. + """ + db.create_table("test_textdef", [ + ('textcol', models.TextField(blank=True)), + ]) + + def test_add_unique_fk(self): + """ + Test adding a ForeignKey with unique=True or a OneToOneField + """ + db.create_table("test_add_unique_fk", [ + ('spam', models.BooleanField(default=False)) + ]) + + db.add_column("test_add_unique_fk", "mock1", models.ForeignKey(db.mock_model('Mock', 'mock'), null=True, unique=True)) + db.add_column("test_add_unique_fk", "mock2", models.OneToOneField(db.mock_model('Mock', 'mock'), null=True)) + + db.delete_table("test_add_unique_fk") + + def test_column_constraint(self): + """ + Tests that the value constraint of PositiveIntegerField is enforced on + the database level. 
+ """ + db.create_table("test_column_constraint", [ + ('spam', models.PositiveIntegerField()), + ]) + db.execute_deferred_sql() + + # Make sure we can't insert negative values + db.commit_transaction() + db.start_transaction() + try: + db.execute("INSERT INTO test_column_constraint VALUES (-42)") + except: + pass + else: + self.fail("Could insert a negative value into a PositiveIntegerField.") + db.rollback_transaction() + + # remove constraint + db.alter_column("test_column_constraint", "spam", models.IntegerField()) + # make sure the insertion works now + db.execute('INSERT INTO test_column_constraint VALUES (-42)') + db.execute('DELETE FROM test_column_constraint') + + # add it back again + db.alter_column("test_column_constraint", "spam", models.PositiveIntegerField()) + # it should fail again + db.start_transaction() + try: + db.execute("INSERT INTO test_column_constraint VALUES (-42)") + except: + pass + else: + self.fail("Could insert a negative value after changing an IntegerField to a PositiveIntegerField.") + db.rollback_transaction() + + db.delete_table("test_column_constraint") + db.start_transaction() + + + def test_sql_defaults(self): + """ + Test that sql default value is correct for non-string field types. 
+ """ + from datetime import datetime + + class CustomField(models.CharField): + __metaclass__ = models.SubfieldBase + description = 'CustomField' + def get_default(self): + if self.has_default(): + if callable(self.default): + return self.default() + return self.default + return super(CustomField, self).get_default() + def get_prep_value(self, value): + if not value: + return value + return ','.join(map(str, value)) + def to_python(self, value): + if not value or isinstance(value, list): + return value + return map(int, value.split(',')) + + defaults = ( + (models.DateTimeField(default=datetime(2012, 12, 21, 0, 0, 1)), 'DEFAULT \'2012-12-21 00:00:01'), + (models.CharField(default='sukasuka'), 'DEFAULT \'sukasuka'), + (models.BooleanField(default=False), 'DEFAULT False'), + (models.IntegerField(default=42), 'DEFAULT 42'), + (CustomField(default=[2012,2018,2021,2036]), 'DEFAULT \'2012,2018,2021,2036') + ) + for field, sql_test_str in defaults: + sql = db.column_sql('fish', 'YAAAAAAZ', field) + if sql_test_str not in sql: + self.fail("default sql value was not properly generated for field %r." 
% field) + + + +class TestCacheGeneric(unittest.TestCase): + base_ops_cls = generic.DatabaseOperations + def setUp(self): + class CacheOps(self.base_ops_cls): + def __init__(self): + self._constraint_cache = {} + self.cache_filled = 0 + self.settings = {'NAME' : 'db'} + + def _fill_constraint_cache(self, db, table): + self.cache_filled += 1 + self._constraint_cache.setdefault(db, {}) + self._constraint_cache[db].setdefault(table, {}) + + @generic.invalidate_table_constraints + def clear_con(self, table): + pass + + @generic.copy_column_constraints + def cp_column(self, table, column_old, column_new): + pass + + @generic.delete_column_constraints + def rm_column(self, table, column): + pass + + @generic.copy_column_constraints + @generic.delete_column_constraints + def mv_column(self, table, column_old, column_new): + pass + + def _get_setting(self, attr): + return self.settings[attr] + self.CacheOps = CacheOps + + def test_cache(self): + ops = self.CacheOps() + self.assertEqual(0, ops.cache_filled) + self.assertFalse(ops.lookup_constraint('db', 'table')) + self.assertEqual(1, ops.cache_filled) + self.assertFalse(ops.lookup_constraint('db', 'table')) + self.assertEqual(1, ops.cache_filled) + ops.clear_con('table') + self.assertEqual(1, ops.cache_filled) + self.assertFalse(ops.lookup_constraint('db', 'table')) + self.assertEqual(2, ops.cache_filled) + self.assertFalse(ops.lookup_constraint('db', 'table', 'column')) + self.assertEqual(2, ops.cache_filled) + + cache = ops._constraint_cache + cache['db']['table']['column'] = 'constraint' + self.assertEqual('constraint', ops.lookup_constraint('db', 'table', 'column')) + self.assertEqual([('column', 'constraint')], ops.lookup_constraint('db', 'table')) + self.assertEqual(2, ops.cache_filled) + + # invalidate_table_constraints + ops.clear_con('new_table') + self.assertEqual('constraint', ops.lookup_constraint('db', 'table', 'column')) + self.assertEqual(2, ops.cache_filled) + + self.assertFalse(ops.lookup_constraint('db', 
'new_table')) + self.assertEqual(3, ops.cache_filled) + + # delete_column_constraints + cache['db']['table']['column'] = 'constraint' + self.assertEqual('constraint', ops.lookup_constraint('db', 'table', 'column')) + ops.rm_column('table', 'column') + self.assertEqual([], ops.lookup_constraint('db', 'table', 'column')) + self.assertEqual([], ops.lookup_constraint('db', 'table', 'noexist_column')) + + # copy_column_constraints + cache['db']['table']['column'] = 'constraint' + self.assertEqual('constraint', ops.lookup_constraint('db', 'table', 'column')) + import sys + ops.cp_column('table', 'column', 'column_new') + self.assertEqual('constraint', ops.lookup_constraint('db', 'table', 'column_new')) + self.assertEqual('constraint', ops.lookup_constraint('db', 'table', 'column')) + + # copy + delete + cache['db']['table']['column'] = 'constraint' + self.assertEqual('constraint', ops.lookup_constraint('db', 'table', 'column')) + ops.mv_column('table', 'column', 'column_new') + self.assertEqual('constraint', ops.lookup_constraint('db', 'table', 'column_new')) + self.assertEqual([], ops.lookup_constraint('db', 'table', 'column')) + return + + def test_valid(self): + ops = self.CacheOps() + # none of these should vivify a table into a valid state + self.assertFalse(ops._is_valid_cache('db', 'table')) + self.assertFalse(ops._is_valid_cache('db', 'table')) + ops.clear_con('table') + self.assertFalse(ops._is_valid_cache('db', 'table')) + ops.rm_column('table', 'column') + self.assertFalse(ops._is_valid_cache('db', 'table')) + + # these should change the cache state + ops.lookup_constraint('db', 'table') + self.assertTrue(ops._is_valid_cache('db', 'table')) + ops.lookup_constraint('db', 'table', 'column') + self.assertTrue(ops._is_valid_cache('db', 'table')) + ops.clear_con('table') + self.assertFalse(ops._is_valid_cache('db', 'table')) + + def test_valid_implementation(self): + # generic fills the cache on a per-table basis + ops = self.CacheOps() + 
self.assertFalse(ops._is_valid_cache('db', 'table')) + self.assertFalse(ops._is_valid_cache('db', 'other_table')) + ops.lookup_constraint('db', 'table') + self.assertTrue(ops._is_valid_cache('db', 'table')) + self.assertFalse(ops._is_valid_cache('db', 'other_table')) + ops.lookup_constraint('db', 'other_table') + self.assertTrue(ops._is_valid_cache('db', 'table')) + self.assertTrue(ops._is_valid_cache('db', 'other_table')) + ops.clear_con('table') + self.assertFalse(ops._is_valid_cache('db', 'table')) + self.assertTrue(ops._is_valid_cache('db', 'other_table')) + +if mysql: + class TestCacheMysql(TestCacheGeneric): + base_ops_cls = mysql.DatabaseOperations + + def test_valid_implementation(self): + # mysql fills the cache on a per-db basis + ops = self.CacheOps() + self.assertFalse(ops._is_valid_cache('db', 'table')) + self.assertFalse(ops._is_valid_cache('db', 'other_table')) + ops.lookup_constraint('db', 'table') + cache = ops._constraint_cache + self.assertTrue(ops._is_valid_cache('db', 'table')) + self.assertTrue(ops._is_valid_cache('db', 'other_table')) + ops.lookup_constraint('db', 'other_table') + self.assertTrue(ops._is_valid_cache('db', 'table')) + self.assertTrue(ops._is_valid_cache('db', 'other_table')) + ops.clear_con('table') + self.assertFalse(ops._is_valid_cache('db', 'table')) + self.assertTrue(ops._is_valid_cache('db', 'other_table')) diff --git a/users/src/south/south/tests/db_mysql.py b/users/src/south/south/tests/db_mysql.py new file mode 100644 index 0000000..2a3b889 --- /dev/null +++ b/users/src/south/south/tests/db_mysql.py @@ -0,0 +1,150 @@ +# Additional MySQL-specific tests +# Written by: F. 
Gabriel Gosselin +# Based on tests by: aarranz +import unittest + +from south.db import db, generic, mysql +from django.db import connection, models + + +class TestMySQLOperations(unittest.TestCase): + """MySQL-specific tests""" + def setUp(self): + db.debug = False + db.clear_deferred_sql() + + def tearDown(self): + pass + + def _create_foreign_tables(self, main_name, reference_name): + # Create foreign table and model + Foreign = db.mock_model(model_name='Foreign', db_table=reference_name, + db_tablespace='', pk_field_name='id', + pk_field_type=models.AutoField, + pk_field_args=[]) + db.create_table(reference_name, [ + ('id', models.AutoField(primary_key=True)), + ]) + # Create table with foreign key + db.create_table(main_name, [ + ('id', models.AutoField(primary_key=True)), + ('foreign', models.ForeignKey(Foreign)), + ]) + return Foreign + + def test_constraint_references(self): + """Tests that referred table is reported accurately""" + main_table = 'test_cns_ref' + reference_table = 'test_cr_foreign' + db.start_transaction() + self._create_foreign_tables(main_table, reference_table) + db.execute_deferred_sql() + constraint = db._find_foreign_constraints(main_table, 'foreign_id')[0] + constraint_name = 'foreign_id_refs_id_%x' % (abs(hash((main_table, + reference_table)))) + self.assertEquals(constraint_name, constraint) + references = db._lookup_constraint_references(main_table, constraint) + self.assertEquals((reference_table, 'id'), references) + db.delete_table(main_table) + db.delete_table(reference_table) + + def test_reverse_column_constraint(self): + """Tests that referred column in a foreign key (ex. 
id) is found""" + main_table = 'test_reverse_ref' + reference_table = 'test_rr_foreign' + db.start_transaction() + self._create_foreign_tables(main_table, reference_table) + db.execute_deferred_sql() + inverse = db._lookup_reverse_constraint(reference_table, 'id') + (cname, rev_table, rev_column) = inverse[0] + self.assertEquals(main_table, rev_table) + self.assertEquals('foreign_id', rev_column) + db.delete_table(main_table) + db.delete_table(reference_table) + + def test_delete_fk_column(self): + main_table = 'test_drop_foreign' + ref_table = 'test_df_ref' + self._create_foreign_tables(main_table, ref_table) + db.execute_deferred_sql() + constraints = db._find_foreign_constraints(main_table, 'foreign_id') + self.assertEquals(len(constraints), 1) + db.delete_column(main_table, 'foreign_id') + constraints = db._find_foreign_constraints(main_table, 'foreign_id') + self.assertEquals(len(constraints), 0) + db.delete_table(main_table) + db.delete_table(ref_table) + + def test_rename_fk_column(self): + main_table = 'test_rename_foreign' + ref_table = 'test_rf_ref' + self._create_foreign_tables(main_table, ref_table) + db.execute_deferred_sql() + constraints = db._find_foreign_constraints(main_table, 'foreign_id') + self.assertEquals(len(constraints), 1) + db.rename_column(main_table, 'foreign_id', 'reference_id') + db.execute_deferred_sql() #Create constraints + constraints = db._find_foreign_constraints(main_table, 'reference_id') + self.assertEquals(len(constraints), 1) + db.delete_table(main_table) + db.delete_table(ref_table) + + def test_rename_fk_inbound(self): + """ + Tests that the column referred to by an external column can be renamed. + Edge case, but also useful as stepping stone to renaming tables. 
+ """ + main_table = 'test_rename_fk_inbound' + ref_table = 'test_rfi_ref' + self._create_foreign_tables(main_table, ref_table) + db.execute_deferred_sql() + constraints = db._lookup_reverse_constraint(ref_table, 'id') + self.assertEquals(len(constraints), 1) + db.rename_column(ref_table, 'id', 'rfi_id') + db.execute_deferred_sql() #Create constraints + constraints = db._lookup_reverse_constraint(ref_table, 'rfi_id') + self.assertEquals(len(constraints), 1) + cname = db._find_foreign_constraints(main_table, 'foreign_id')[0] + (rtable, rcolumn) = db._lookup_constraint_references(main_table, cname) + self.assertEquals(rcolumn, 'rfi_id') + db.delete_table(main_table) + db.delete_table(ref_table) + + def test_rename_constrained_table(self): + """Renames a table with a foreign key column (towards another table)""" + main_table = 'test_rn_table' + ref_table = 'test_rt_ref' + renamed_table = 'test_renamed_table' + self._create_foreign_tables(main_table, ref_table) + db.execute_deferred_sql() + constraints = db._find_foreign_constraints(main_table, 'foreign_id') + self.assertEquals(len(constraints), 1) + db.rename_table(main_table, renamed_table) + db.execute_deferred_sql() #Create constraints + constraints = db._find_foreign_constraints(renamed_table, 'foreign_id') + self.assertEquals(len(constraints), 1) + (rtable, rcolumn) = db._lookup_constraint_references( + renamed_table, constraints[0]) + self.assertEquals(rcolumn, 'id') + db.delete_table(renamed_table) + db.delete_table(ref_table) + + def test_renamed_referenced_table(self): + """Rename a table referred to in a foreign key""" + main_table = 'test_rn_refd_table' + ref_table = 'test_rrt_ref' + renamed_table = 'test_renamed_ref' + self._create_foreign_tables(main_table, ref_table) + db.execute_deferred_sql() + constraints = db._lookup_reverse_constraint(ref_table) + self.assertEquals(len(constraints), 1) + db.rename_table(ref_table, renamed_table) + db.execute_deferred_sql() #Create constraints + constraints = 
db._find_foreign_constraints(main_table, 'foreign_id') + self.assertEquals(len(constraints), 1) + (rtable, rcolumn) = db._lookup_constraint_references( + main_table, constraints[0]) + self.assertEquals(renamed_table, rtable) + db.delete_table(main_table) + db.delete_table(renamed_table) + diff --git a/users/src/south/south/tests/deps_a/__init__.py b/users/src/south/south/tests/deps_a/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/south/tests/deps_a/migrations/0001_a.py b/users/src/south/south/tests/deps_a/migrations/0001_a.py new file mode 100644 index 0000000..d27ed3a --- /dev/null +++ b/users/src/south/south/tests/deps_a/migrations/0001_a.py @@ -0,0 +1,11 @@ +from south.db import db +from django.db import models + +class Migration: + + def forwards(self): + pass + + def backwards(self): + pass + diff --git a/users/src/south/south/tests/deps_a/migrations/0002_a.py b/users/src/south/south/tests/deps_a/migrations/0002_a.py new file mode 100644 index 0000000..d27ed3a --- /dev/null +++ b/users/src/south/south/tests/deps_a/migrations/0002_a.py @@ -0,0 +1,11 @@ +from south.db import db +from django.db import models + +class Migration: + + def forwards(self): + pass + + def backwards(self): + pass + diff --git a/users/src/south/south/tests/deps_a/migrations/0003_a.py b/users/src/south/south/tests/deps_a/migrations/0003_a.py new file mode 100644 index 0000000..d27ed3a --- /dev/null +++ b/users/src/south/south/tests/deps_a/migrations/0003_a.py @@ -0,0 +1,11 @@ +from south.db import db +from django.db import models + +class Migration: + + def forwards(self): + pass + + def backwards(self): + pass + diff --git a/users/src/south/south/tests/deps_a/migrations/0004_a.py b/users/src/south/south/tests/deps_a/migrations/0004_a.py new file mode 100644 index 0000000..e5c2977 --- /dev/null +++ b/users/src/south/south/tests/deps_a/migrations/0004_a.py @@ -0,0 +1,13 @@ +from south.db import db +from django.db import models + +class Migration: + + 
depends_on = [('deps_b', '0003_b')] + + def forwards(self): + pass + + def backwards(self): + pass + diff --git a/users/src/south/south/tests/deps_a/migrations/0005_a.py b/users/src/south/south/tests/deps_a/migrations/0005_a.py new file mode 100644 index 0000000..d27ed3a --- /dev/null +++ b/users/src/south/south/tests/deps_a/migrations/0005_a.py @@ -0,0 +1,11 @@ +from south.db import db +from django.db import models + +class Migration: + + def forwards(self): + pass + + def backwards(self): + pass + diff --git a/users/src/south/south/tests/deps_a/migrations/__init__.py b/users/src/south/south/tests/deps_a/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/south/tests/deps_a/models.py b/users/src/south/south/tests/deps_a/models.py new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/south/tests/deps_b/__init__.py b/users/src/south/south/tests/deps_b/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/south/tests/deps_b/migrations/0001_b.py b/users/src/south/south/tests/deps_b/migrations/0001_b.py new file mode 100644 index 0000000..d27ed3a --- /dev/null +++ b/users/src/south/south/tests/deps_b/migrations/0001_b.py @@ -0,0 +1,11 @@ +from south.db import db +from django.db import models + +class Migration: + + def forwards(self): + pass + + def backwards(self): + pass + diff --git a/users/src/south/south/tests/deps_b/migrations/0002_b.py b/users/src/south/south/tests/deps_b/migrations/0002_b.py new file mode 100644 index 0000000..459ea5d --- /dev/null +++ b/users/src/south/south/tests/deps_b/migrations/0002_b.py @@ -0,0 +1,13 @@ +from south.db import db +from django.db import models + +class Migration: + + depends_on = [('deps_a', '0002_a')] + + def forwards(self): + pass + + def backwards(self): + pass + diff --git a/users/src/south/south/tests/deps_b/migrations/0003_b.py b/users/src/south/south/tests/deps_b/migrations/0003_b.py new file mode 100644 index 
0000000..1692888 --- /dev/null +++ b/users/src/south/south/tests/deps_b/migrations/0003_b.py @@ -0,0 +1,13 @@ +from south.db import db +from django.db import models + +class Migration: + + depends_on = [('deps_a', '0003_a')] + + def forwards(self): + pass + + def backwards(self): + pass + diff --git a/users/src/south/south/tests/deps_b/migrations/0004_b.py b/users/src/south/south/tests/deps_b/migrations/0004_b.py new file mode 100644 index 0000000..d27ed3a --- /dev/null +++ b/users/src/south/south/tests/deps_b/migrations/0004_b.py @@ -0,0 +1,11 @@ +from south.db import db +from django.db import models + +class Migration: + + def forwards(self): + pass + + def backwards(self): + pass + diff --git a/users/src/south/south/tests/deps_b/migrations/0005_b.py b/users/src/south/south/tests/deps_b/migrations/0005_b.py new file mode 100644 index 0000000..d27ed3a --- /dev/null +++ b/users/src/south/south/tests/deps_b/migrations/0005_b.py @@ -0,0 +1,11 @@ +from south.db import db +from django.db import models + +class Migration: + + def forwards(self): + pass + + def backwards(self): + pass + diff --git a/users/src/south/south/tests/deps_b/migrations/__init__.py b/users/src/south/south/tests/deps_b/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/south/tests/deps_b/models.py b/users/src/south/south/tests/deps_b/models.py new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/south/tests/deps_c/__init__.py b/users/src/south/south/tests/deps_c/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/south/tests/deps_c/migrations/0001_c.py b/users/src/south/south/tests/deps_c/migrations/0001_c.py new file mode 100644 index 0000000..d27ed3a --- /dev/null +++ b/users/src/south/south/tests/deps_c/migrations/0001_c.py @@ -0,0 +1,11 @@ +from south.db import db +from django.db import models + +class Migration: + + def forwards(self): + pass + + def backwards(self): + pass + diff --git 
a/users/src/south/south/tests/deps_c/migrations/0002_c.py b/users/src/south/south/tests/deps_c/migrations/0002_c.py new file mode 100644 index 0000000..d27ed3a --- /dev/null +++ b/users/src/south/south/tests/deps_c/migrations/0002_c.py @@ -0,0 +1,11 @@ +from south.db import db +from django.db import models + +class Migration: + + def forwards(self): + pass + + def backwards(self): + pass + diff --git a/users/src/south/south/tests/deps_c/migrations/0003_c.py b/users/src/south/south/tests/deps_c/migrations/0003_c.py new file mode 100644 index 0000000..d27ed3a --- /dev/null +++ b/users/src/south/south/tests/deps_c/migrations/0003_c.py @@ -0,0 +1,11 @@ +from south.db import db +from django.db import models + +class Migration: + + def forwards(self): + pass + + def backwards(self): + pass + diff --git a/users/src/south/south/tests/deps_c/migrations/0004_c.py b/users/src/south/south/tests/deps_c/migrations/0004_c.py new file mode 100644 index 0000000..d27ed3a --- /dev/null +++ b/users/src/south/south/tests/deps_c/migrations/0004_c.py @@ -0,0 +1,11 @@ +from south.db import db +from django.db import models + +class Migration: + + def forwards(self): + pass + + def backwards(self): + pass + diff --git a/users/src/south/south/tests/deps_c/migrations/0005_c.py b/users/src/south/south/tests/deps_c/migrations/0005_c.py new file mode 100644 index 0000000..459ea5d --- /dev/null +++ b/users/src/south/south/tests/deps_c/migrations/0005_c.py @@ -0,0 +1,13 @@ +from south.db import db +from django.db import models + +class Migration: + + depends_on = [('deps_a', '0002_a')] + + def forwards(self): + pass + + def backwards(self): + pass + diff --git a/users/src/south/south/tests/deps_c/migrations/__init__.py b/users/src/south/south/tests/deps_c/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/south/tests/deps_c/models.py b/users/src/south/south/tests/deps_c/models.py new file mode 100644 index 0000000..e69de29 diff --git 
a/users/src/south/south/tests/emptyapp/__init__.py b/users/src/south/south/tests/emptyapp/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/south/tests/emptyapp/migrations/__init__.py b/users/src/south/south/tests/emptyapp/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/south/tests/emptyapp/models.py b/users/src/south/south/tests/emptyapp/models.py new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/south/tests/fakeapp/__init__.py b/users/src/south/south/tests/fakeapp/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/south/tests/fakeapp/migrations/0001_spam.py b/users/src/south/south/tests/fakeapp/migrations/0001_spam.py new file mode 100644 index 0000000..9739648 --- /dev/null +++ b/users/src/south/south/tests/fakeapp/migrations/0001_spam.py @@ -0,0 +1,17 @@ +from south.db import db +from django.db import models + +class Migration: + + def forwards(self): + # Model 'Spam' + db.create_table("southtest_spam", ( + ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), + ('weight', models.FloatField()), + ('expires', models.DateTimeField()), + ('name', models.CharField(max_length=255)) + )) + + def backwards(self): + db.delete_table("southtest_spam") + diff --git a/users/src/south/south/tests/fakeapp/migrations/0002_eggs.py b/users/src/south/south/tests/fakeapp/migrations/0002_eggs.py new file mode 100644 index 0000000..3ec8399 --- /dev/null +++ b/users/src/south/south/tests/fakeapp/migrations/0002_eggs.py @@ -0,0 +1,20 @@ +from south.db import db +from django.db import models + +class Migration: + + def forwards(self): + + Spam = db.mock_model(model_name='Spam', db_table='southtest_spam', db_tablespace='', pk_field_name='id', pk_field_type=models.AutoField) + + db.create_table("southtest_eggs", ( + ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), + ('size', 
models.FloatField()), + ('quantity', models.IntegerField()), + ('spam', models.ForeignKey(Spam)), + )) + + def backwards(self): + + db.delete_table("southtest_eggs") + diff --git a/users/src/south/south/tests/fakeapp/migrations/0003_alter_spam.py b/users/src/south/south/tests/fakeapp/migrations/0003_alter_spam.py new file mode 100644 index 0000000..39126c2 --- /dev/null +++ b/users/src/south/south/tests/fakeapp/migrations/0003_alter_spam.py @@ -0,0 +1,18 @@ +from south.db import db +from django.db import models + +class Migration: + + def forwards(self): + + db.alter_column("southtest_spam", 'weight', models.FloatField(null=True)) + + def backwards(self): + + db.alter_column("southtest_spam", 'weight', models.FloatField()) + + models = { + "fakeapp.bug135": { + 'date': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 5, 6, 15, 33, 15, 780013)'}), + } + } diff --git a/users/src/south/south/tests/fakeapp/migrations/__init__.py b/users/src/south/south/tests/fakeapp/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/south/tests/fakeapp/models.py b/users/src/south/south/tests/fakeapp/models.py new file mode 100644 index 0000000..54e9192 --- /dev/null +++ b/users/src/south/south/tests/fakeapp/models.py @@ -0,0 +1,89 @@ +# -*- coding: UTF-8 -*- + +from django.db import models +from django.contrib.auth.models import User as UserAlias + +from south.modelsinspector import add_introspection_rules + +def default_func(): + return "yays" + +# An empty case. +class Other1(models.Model): pass + +# Nastiness. 
+class HorribleModel(models.Model): + "A model to test the edge cases of model parsing" + + ZERO, ONE = range(2) + + # First, some nice fields + name = models.CharField(max_length=255) + short_name = models.CharField(max_length=50) + slug = models.SlugField(unique=True) + + # A ForeignKey, to a model above, and then below + o1 = models.ForeignKey(Other1) + o2 = models.ForeignKey('Other2') + + # Now to something outside + user = models.ForeignKey(UserAlias, related_name="horribles") + + # Unicode! + code = models.CharField(max_length=25, default="↑↑↓↓←→←→BA") + + # Odd defaults! + class_attr = models.IntegerField(default=ZERO) + func = models.CharField(max_length=25, default=default_func) + + # Time to get nasty. Define a non-field choices, and use it + choices = [('hello', '1'), ('world', '2')] + choiced = models.CharField(max_length=20, choices=choices) + + class Meta: + db_table = "my_fave" + verbose_name = "Dr. Strangelove," + \ + """or how I learned to stop worrying +and love the bomb""" + + # Now spread over multiple lines + multiline = \ + models.TextField( + ) + +# Special case. 
+class Other2(models.Model): + # Try loading a field without a newline after it (inspect hates this) + close_but_no_cigar = models.PositiveIntegerField(primary_key=True) + +class CustomField(models.IntegerField): + def __init__(self, an_other_model, **kwargs): + super(CustomField, self).__init__(**kwargs) + self.an_other_model = an_other_model + +add_introspection_rules([ + ( + [CustomField], + [], + {'an_other_model': ('an_other_model', {})}, + ), +], ['^south\.tests\.fakeapp\.models\.CustomField']) + +class BaseModel(models.Model): + pass + +class SubModel(BaseModel): + others = models.ManyToManyField(Other1) + custom = CustomField(Other2) + +class CircularA(models.Model): + c = models.ForeignKey('CircularC') + +class CircularB(models.Model): + a = models.ForeignKey(CircularA) + +class CircularC(models.Model): + b = models.ForeignKey(CircularB) + +class Recursive(models.Model): + self = models.ForeignKey('self') diff --git a/users/src/south/south/tests/freezer.py b/users/src/south/south/tests/freezer.py new file mode 100644 index 0000000..b0be54d --- /dev/null +++ b/users/src/south/south/tests/freezer.py @@ -0,0 +1,15 @@ +import unittest + +from south.creator.freezer import model_dependencies +from south.tests.fakeapp import models + +class TestFreezer(unittest.TestCase): + def test_dependencies(self): + self.assertEqual(set(model_dependencies(models.SubModel)), + set([models.BaseModel, models.Other1, models.Other2])) + + self.assertEqual(set(model_dependencies(models.CircularA)), + set([models.CircularA, models.CircularB, models.CircularC])) + + self.assertEqual(set(model_dependencies(models.Recursive)), + set([models.Recursive])) diff --git a/users/src/south/south/tests/inspector.py b/users/src/south/south/tests/inspector.py new file mode 100644 index 0000000..818ffef --- /dev/null +++ b/users/src/south/south/tests/inspector.py @@ -0,0 +1,49 @@ +import unittest + +from south.tests import Monkeypatcher +from south.modelsinspector import * +from fakeapp.models 
import HorribleModel + +class TestModelInspector(Monkeypatcher): + + """ + Tests if the various parts of the modelinspector work. + """ + + def test_get_value(self): + + # Let's start nicely. + name = HorribleModel._meta.get_field_by_name("name")[0] + slug = HorribleModel._meta.get_field_by_name("slug")[0] + user = HorribleModel._meta.get_field_by_name("user")[0] + + # Simple int retrieval + self.assertEqual( + get_value(name, ["max_length", {}]), + "255", + ) + + # Bool retrieval + self.assertEqual( + get_value(slug, ["unique", {}]), + "True", + ) + + # String retrieval + self.assertEqual( + get_value(user, ["rel.related_name", {}]), + "'horribles'", + ) + + # Default triggering + self.assertEqual( + get_value(slug, ["unique", {"default": False}]), + "True", + ) + self.assertRaises( + IsDefault, + get_value, + slug, + ["unique", {"default": True}], + ) + \ No newline at end of file diff --git a/users/src/south/south/tests/logger.py b/users/src/south/south/tests/logger.py new file mode 100644 index 0000000..e82d1eb --- /dev/null +++ b/users/src/south/south/tests/logger.py @@ -0,0 +1,84 @@ +import logging +import os +import tempfile +import unittest +import StringIO +import sys + +from django.conf import settings +from django.db import connection, models + +from south.db import db +from south.logger import close_logger + +class TestLogger(unittest.TestCase): + + """ + Tests if the logging is working reasonably. Some tests ignored if you don't + have write permission to the disk. + """ + + def setUp(self): + db.debug = False + self.test_path = tempfile.mkstemp(suffix=".south.log")[1] + + def test_db_execute_logging_nofile(self): + "Does logging degrade nicely if SOUTH_LOGGING_ON not set?" + settings.SOUTH_LOGGING_ON = False # this needs to be set to False + # to avoid issues where other tests + # set this to True. settings is shared + # between these tests. 
+ db.create_table("test9", [('email_confirmed', models.BooleanField(default=False))]) + + def test_db_execute_logging_off_with_basic_config(self): + """ + Does the south logger avoid outputing debug information with + south logging turned off and python logging configured with + a basic config?" + """ + settings.SOUTH_LOGGING_ON = False + + # Set root logger to capture WARNING and worse + logging_stream = StringIO.StringIO() + logging.basicConfig(stream=logging_stream, level=logging.WARNING) + + db.create_table("test12", [('email_confirmed', models.BooleanField(default=False))]) + + # since south logging is off, and our root logger is at WARNING + # we should not find DEBUG info in the log + self.assertIs(logging_stream.getvalue(), '') + + def test_db_execute_logging_validfile(self): + "Does logging work when passing in a valid file?" + settings.SOUTH_LOGGING_ON = True + settings.SOUTH_LOGGING_FILE = self.test_path + # Check to see if we can make the logfile + try: + fh = open(self.test_path, "w") + except IOError: + # Permission was denied, ignore the test. + return + else: + fh.close() + # Do an action which logs + db.create_table("test10", [('email_confirmed', models.BooleanField(default=False))]) + # Close the logged file + close_logger() + try: + os.remove(self.test_path) + except: + # It's a tempfile, it's not vital we remove it. + pass + + def test_db_execute_logging_missingfilename(self): + "Does logging raise an error if there is a missing filename?" 
+ settings.SOUTH_LOGGING_ON = True + settings.SOUTH_LOGGING_FILE = None + self.assertRaises( + IOError, + db.create_table, + "test11", + [('email_confirmed', models.BooleanField(default=False))], + ) + + diff --git a/users/src/south/south/tests/logic.py b/users/src/south/south/tests/logic.py new file mode 100644 index 0000000..7be9530 --- /dev/null +++ b/users/src/south/south/tests/logic.py @@ -0,0 +1,898 @@ +import unittest + +import datetime +import sys + +from south import exceptions +from south.migration import migrate_app +from south.migration.base import all_migrations, Migrations +from south.creator.changes import ManualChanges +from south.migration.utils import depends, flatten, get_app_label +from south.models import MigrationHistory +from south.tests import Monkeypatcher +from south.db import db + + + +class TestBrokenMigration(Monkeypatcher): + installed_apps = ["fakeapp", "otherfakeapp", "brokenapp"] + + def test_broken_dependencies(self): + self.assertRaises( + exceptions.DependsOnUnmigratedApplication, + Migrations.calculate_dependencies, + force=True, + ) + #depends_on_unknown = self.brokenapp['0002_depends_on_unknown'] + #self.assertRaises(exceptions.DependsOnUnknownMigration, + # depends_on_unknown.dependencies) + #depends_on_higher = self.brokenapp['0003_depends_on_higher'] + #self.assertRaises(exceptions.DependsOnHigherMigration, + # depends_on_higher.dependencies) + + +class TestMigration(Monkeypatcher): + installed_apps = ["fakeapp", "otherfakeapp"] + + def setUp(self): + super(TestMigration, self).setUp() + self.fakeapp = Migrations('fakeapp') + self.otherfakeapp = Migrations('otherfakeapp') + Migrations.calculate_dependencies(force=True) + + def test_str(self): + migrations = [str(m) for m in self.fakeapp] + self.assertEqual(['fakeapp:0001_spam', + 'fakeapp:0002_eggs', + 'fakeapp:0003_alter_spam'], + migrations) + + def test_repr(self): + migrations = [repr(m) for m in self.fakeapp] + self.assertEqual(['', + '', + ''], + migrations) + + def 
test_app_label(self): + self.assertEqual(['fakeapp', 'fakeapp', 'fakeapp'], + [m.app_label() for m in self.fakeapp]) + + def test_name(self): + self.assertEqual(['0001_spam', '0002_eggs', '0003_alter_spam'], + [m.name() for m in self.fakeapp]) + + def test_full_name(self): + self.assertEqual(['fakeapp.migrations.0001_spam', + 'fakeapp.migrations.0002_eggs', + 'fakeapp.migrations.0003_alter_spam'], + [m.full_name() for m in self.fakeapp]) + + def test_migration(self): + # Can't use vanilla import, modules beginning with numbers aren't in grammar + M1 = __import__("fakeapp.migrations.0001_spam", {}, {}, ['Migration']).Migration + M2 = __import__("fakeapp.migrations.0002_eggs", {}, {}, ['Migration']).Migration + M3 = __import__("fakeapp.migrations.0003_alter_spam", {}, {}, ['Migration']).Migration + self.assertEqual([M1, M2, M3], + [m.migration().Migration for m in self.fakeapp]) + self.assertRaises(exceptions.UnknownMigration, + self.fakeapp['9999_unknown'].migration) + + def test_previous(self): + self.assertEqual([None, + self.fakeapp['0001_spam'], + self.fakeapp['0002_eggs']], + [m.previous() for m in self.fakeapp]) + + def test_dependencies(self): + "Test that the dependency detection works." 
+ self.assertEqual([ + set([]), + set([self.fakeapp['0001_spam']]), + set([self.fakeapp['0002_eggs']]) + ], + [m.dependencies for m in self.fakeapp], + ) + self.assertEqual([ + set([self.fakeapp['0001_spam']]), + set([self.otherfakeapp['0001_first']]), + set([ + self.otherfakeapp['0002_second'], + self.fakeapp['0003_alter_spam'], + ]) + ], + [m.dependencies for m in self.otherfakeapp], + ) + + def test_forwards_plan(self): + self.assertEqual([ + [self.fakeapp['0001_spam']], + [ + self.fakeapp['0001_spam'], + self.fakeapp['0002_eggs'] + ], + [ + self.fakeapp['0001_spam'], + self.fakeapp['0002_eggs'], + self.fakeapp['0003_alter_spam'], + ] + ], + [m.forwards_plan() for m in self.fakeapp], + ) + self.assertEqual([ + [ + self.fakeapp['0001_spam'], + self.otherfakeapp['0001_first'] + ], + [ + self.fakeapp['0001_spam'], + self.otherfakeapp['0001_first'], + self.otherfakeapp['0002_second'] + ], + [ + self.fakeapp['0001_spam'], + self.otherfakeapp['0001_first'], + self.otherfakeapp['0002_second'], + self.fakeapp['0002_eggs'], + self.fakeapp['0003_alter_spam'], + self.otherfakeapp['0003_third'], + ] + ], + [m.forwards_plan() for m in self.otherfakeapp], + ) + + def test_is_before(self): + F1 = self.fakeapp['0001_spam'] + F2 = self.fakeapp['0002_eggs'] + F3 = self.fakeapp['0003_alter_spam'] + O1 = self.otherfakeapp['0001_first'] + O2 = self.otherfakeapp['0002_second'] + O3 = self.otherfakeapp['0003_third'] + self.assertTrue(F1.is_before(F2)) + self.assertTrue(F1.is_before(F3)) + self.assertTrue(F2.is_before(F3)) + self.assertEqual(O3.is_before(O1), False) + self.assertEqual(O3.is_before(O2), False) + self.assertEqual(O2.is_before(O2), False) + self.assertEqual(O2.is_before(O1), False) + self.assertEqual(F2.is_before(O1), None) + self.assertEqual(F2.is_before(O2), None) + self.assertEqual(F2.is_before(O3), None) + + +class TestMigrationDependencies(Monkeypatcher): + installed_apps = ['deps_a', 'deps_b', 'deps_c'] + + def setUp(self): + super(TestMigrationDependencies, 
self).setUp() + self.deps_a = Migrations('deps_a') + self.deps_b = Migrations('deps_b') + self.deps_c = Migrations('deps_c') + Migrations.calculate_dependencies(force=True) + + def test_dependencies(self): + self.assertEqual( + [ + set([]), + set([self.deps_a['0001_a']]), + set([self.deps_a['0002_a']]), + set([ + self.deps_a['0003_a'], + self.deps_b['0003_b'], + ]), + set([self.deps_a['0004_a']]), + ], + [m.dependencies for m in self.deps_a], + ) + self.assertEqual( + [ + set([]), + set([ + self.deps_b['0001_b'], + self.deps_a['0002_a'] + ]), + set([ + self.deps_b['0002_b'], + self.deps_a['0003_a'] + ]), + set([self.deps_b['0003_b']]), + set([self.deps_b['0004_b']]), + ], + [m.dependencies for m in self.deps_b], + ) + self.assertEqual( + [ + set([]), + set([self.deps_c['0001_c']]), + set([self.deps_c['0002_c']]), + set([self.deps_c['0003_c']]), + set([ + self.deps_c['0004_c'], + self.deps_a['0002_a'] + ]), + ], + [m.dependencies for m in self.deps_c], + ) + + def test_dependents(self): + self.assertEqual([set([self.deps_a['0002_a']]), + set([self.deps_c['0005_c'], + self.deps_b['0002_b'], + self.deps_a['0003_a']]), + set([self.deps_b['0003_b'], + self.deps_a['0004_a']]), + set([self.deps_a['0005_a']]), + set([])], + [m.dependents for m in self.deps_a]) + self.assertEqual([set([self.deps_b['0002_b']]), + set([self.deps_b['0003_b']]), + set([self.deps_b['0004_b'], + self.deps_a['0004_a']]), + set([self.deps_b['0005_b']]), + set([])], + [m.dependents for m in self.deps_b]) + self.assertEqual([set([self.deps_c['0002_c']]), + set([self.deps_c['0003_c']]), + set([self.deps_c['0004_c']]), + set([self.deps_c['0005_c']]), + set([])], + [m.dependents for m in self.deps_c]) + + def test_forwards_plan(self): + self.assertEqual([[self.deps_a['0001_a']], + [self.deps_a['0001_a'], + self.deps_a['0002_a']], + [self.deps_a['0001_a'], + self.deps_a['0002_a'], + self.deps_a['0003_a']], + [self.deps_b['0001_b'], + self.deps_a['0001_a'], + self.deps_a['0002_a'], + 
self.deps_b['0002_b'], + self.deps_a['0003_a'], + self.deps_b['0003_b'], + self.deps_a['0004_a']], + [self.deps_b['0001_b'], + self.deps_a['0001_a'], + self.deps_a['0002_a'], + self.deps_b['0002_b'], + self.deps_a['0003_a'], + self.deps_b['0003_b'], + self.deps_a['0004_a'], + self.deps_a['0005_a']]], + [m.forwards_plan() for m in self.deps_a]) + self.assertEqual([[self.deps_b['0001_b']], + [self.deps_b['0001_b'], + self.deps_a['0001_a'], + self.deps_a['0002_a'], + self.deps_b['0002_b']], + [self.deps_b['0001_b'], + self.deps_a['0001_a'], + self.deps_a['0002_a'], + self.deps_b['0002_b'], + self.deps_a['0003_a'], + self.deps_b['0003_b']], + [self.deps_b['0001_b'], + self.deps_a['0001_a'], + self.deps_a['0002_a'], + self.deps_b['0002_b'], + self.deps_a['0003_a'], + self.deps_b['0003_b'], + self.deps_b['0004_b']], + [self.deps_b['0001_b'], + self.deps_a['0001_a'], + self.deps_a['0002_a'], + self.deps_b['0002_b'], + self.deps_a['0003_a'], + self.deps_b['0003_b'], + self.deps_b['0004_b'], + self.deps_b['0005_b']]], + [m.forwards_plan() for m in self.deps_b]) + self.assertEqual([[self.deps_c['0001_c']], + [self.deps_c['0001_c'], + self.deps_c['0002_c']], + [self.deps_c['0001_c'], + self.deps_c['0002_c'], + self.deps_c['0003_c']], + [self.deps_c['0001_c'], + self.deps_c['0002_c'], + self.deps_c['0003_c'], + self.deps_c['0004_c']], + [self.deps_c['0001_c'], + self.deps_c['0002_c'], + self.deps_c['0003_c'], + self.deps_c['0004_c'], + self.deps_a['0001_a'], + self.deps_a['0002_a'], + self.deps_c['0005_c']]], + [m.forwards_plan() for m in self.deps_c]) + + def test_backwards_plan(self): + self.assertEqual([ + [ + self.deps_c['0005_c'], + self.deps_b['0005_b'], + self.deps_b['0004_b'], + self.deps_a['0005_a'], + self.deps_a['0004_a'], + self.deps_b['0003_b'], + self.deps_b['0002_b'], + self.deps_a['0003_a'], + self.deps_a['0002_a'], + self.deps_a['0001_a'], + ], + [ + self.deps_c['0005_c'], + self.deps_b['0005_b'], + self.deps_b['0004_b'], + self.deps_a['0005_a'], + 
self.deps_a['0004_a'], + self.deps_b['0003_b'], + self.deps_b['0002_b'], + self.deps_a['0003_a'], + self.deps_a['0002_a'], + ], + [ + self.deps_b['0005_b'], + self.deps_b['0004_b'], + self.deps_a['0005_a'], + self.deps_a['0004_a'], + self.deps_b['0003_b'], + self.deps_a['0003_a'], + ], + [ + self.deps_a['0005_a'], + self.deps_a['0004_a'], + ], + [ + self.deps_a['0005_a'], + ] + ], [m.backwards_plan() for m in self.deps_a]) + self.assertEqual([ + [ + self.deps_b['0005_b'], + self.deps_b['0004_b'], + self.deps_a['0005_a'], + self.deps_a['0004_a'], + self.deps_b['0003_b'], + self.deps_b['0002_b'], + self.deps_b['0001_b'], + ], + [ + self.deps_b['0005_b'], + self.deps_b['0004_b'], + self.deps_a['0005_a'], + self.deps_a['0004_a'], + self.deps_b['0003_b'], + self.deps_b['0002_b'], + ], + [ + self.deps_b['0005_b'], + self.deps_b['0004_b'], + self.deps_a['0005_a'], + self.deps_a['0004_a'], + self.deps_b['0003_b'], + ], + [ + self.deps_b['0005_b'], + self.deps_b['0004_b'], + ], + [ + self.deps_b['0005_b'], + ], + ], [m.backwards_plan() for m in self.deps_b]) + self.assertEqual([ + [ + self.deps_c['0005_c'], + self.deps_c['0004_c'], + self.deps_c['0003_c'], + self.deps_c['0002_c'], + self.deps_c['0001_c'], + ], + [ + self.deps_c['0005_c'], + self.deps_c['0004_c'], + self.deps_c['0003_c'], + self.deps_c['0002_c'], + ], + [ + self.deps_c['0005_c'], + self.deps_c['0004_c'], + self.deps_c['0003_c'], + ], + [ + self.deps_c['0005_c'], + self.deps_c['0004_c'], + ], + [self.deps_c['0005_c']] + ], [m.backwards_plan() for m in self.deps_c]) + + +class TestCircularDependencies(Monkeypatcher): + installed_apps = ["circular_a", "circular_b"] + + def test_plans(self): + Migrations.calculate_dependencies(force=True) + circular_a = Migrations('circular_a') + circular_b = Migrations('circular_b') + self.assertRaises( + exceptions.CircularDependency, + circular_a[-1].forwards_plan, + ) + self.assertRaises( + exceptions.CircularDependency, + circular_b[-1].forwards_plan, + ) + 
self.assertRaises( + exceptions.CircularDependency, + circular_a[-1].backwards_plan, + ) + self.assertRaises( + exceptions.CircularDependency, + circular_b[-1].backwards_plan, + ) + + +class TestMigrations(Monkeypatcher): + installed_apps = ["fakeapp", "otherfakeapp"] + + def test_all(self): + + M1 = Migrations(__import__("fakeapp", {}, {}, [''])) + M2 = Migrations(__import__("otherfakeapp", {}, {}, [''])) + + self.assertEqual( + [M1, M2], + list(all_migrations()), + ) + + def test(self): + + M1 = Migrations(__import__("fakeapp", {}, {}, [''])) + + self.assertEqual(M1, Migrations("fakeapp")) + self.assertEqual(M1, Migrations(self.create_fake_app("fakeapp"))) + + def test_application(self): + fakeapp = Migrations("fakeapp") + application = __import__("fakeapp", {}, {}, ['']) + self.assertEqual(application, fakeapp.application) + + def test_migration(self): + # Can't use vanilla import, modules beginning with numbers aren't in grammar + M1 = __import__("fakeapp.migrations.0001_spam", {}, {}, ['Migration']).Migration + M2 = __import__("fakeapp.migrations.0002_eggs", {}, {}, ['Migration']).Migration + migration = Migrations('fakeapp') + self.assertEqual(M1, migration['0001_spam'].migration().Migration) + self.assertEqual(M2, migration['0002_eggs'].migration().Migration) + self.assertRaises(exceptions.UnknownMigration, + migration['0001_jam'].migration) + + def test_guess_migration(self): + # Can't use vanilla import, modules beginning with numbers aren't in grammar + M1 = __import__("fakeapp.migrations.0001_spam", {}, {}, ['Migration']).Migration + migration = Migrations('fakeapp') + self.assertEqual(M1, migration.guess_migration("0001_spam").migration().Migration) + self.assertEqual(M1, migration.guess_migration("0001_spa").migration().Migration) + self.assertEqual(M1, migration.guess_migration("0001_sp").migration().Migration) + self.assertEqual(M1, migration.guess_migration("0001_s").migration().Migration) + self.assertEqual(M1, 
migration.guess_migration("0001_").migration().Migration) + self.assertEqual(M1, migration.guess_migration("0001").migration().Migration) + self.assertRaises(exceptions.UnknownMigration, + migration.guess_migration, "0001-spam") + self.assertRaises(exceptions.MultiplePrefixMatches, + migration.guess_migration, "000") + self.assertRaises(exceptions.MultiplePrefixMatches, + migration.guess_migration, "") + self.assertRaises(exceptions.UnknownMigration, + migration.guess_migration, "0001_spams") + self.assertRaises(exceptions.UnknownMigration, + migration.guess_migration, "0001_jam") + + def test_app_label(self): + names = ['fakeapp', 'otherfakeapp'] + self.assertEqual(names, + [Migrations(n).app_label() for n in names]) + + def test_full_name(self): + names = ['fakeapp', 'otherfakeapp'] + self.assertEqual([n + '.migrations' for n in names], + [Migrations(n).full_name() for n in names]) + + +class TestMigrationLogic(Monkeypatcher): + + """ + Tests if the various logic functions in migration actually work. 
+ """ + + installed_apps = ["fakeapp", "otherfakeapp"] + + def assertListEqual(self, list1, list2, msg=None): + list1 = list(list1) + list2 = list(list2) + list1.sort() + list2.sort() + return self.assert_(list1 == list2, "%s is not equal to %s" % (list1, list2)) + + def test_find_ghost_migrations(self): + pass + + def test_apply_migrations(self): + MigrationHistory.objects.all().delete() + migrations = Migrations("fakeapp") + + # We should start with no migrations + self.assertEqual(list(MigrationHistory.objects.all()), []) + + # Apply them normally + migrate_app(migrations, target_name=None, fake=False, + load_initial_data=True) + + # We should finish with all migrations + self.assertListEqual( + ((u"fakeapp", u"0001_spam"), + (u"fakeapp", u"0002_eggs"), + (u"fakeapp", u"0003_alter_spam"),), + MigrationHistory.objects.values_list("app_name", "migration"), + ) + + # Now roll them backwards + migrate_app(migrations, target_name="zero", fake=False) + + # Finish with none + self.assertEqual(list(MigrationHistory.objects.all()), []) + + + def test_migration_merge_forwards(self): + MigrationHistory.objects.all().delete() + migrations = Migrations("fakeapp") + + # We should start with no migrations + self.assertEqual(list(MigrationHistory.objects.all()), []) + + # Insert one in the wrong order + MigrationHistory.objects.create(app_name = "fakeapp", + migration = "0002_eggs", + applied = datetime.datetime.now()) + + # Did it go in? 
+ self.assertListEqual( + ((u"fakeapp", u"0002_eggs"),), + MigrationHistory.objects.values_list("app_name", "migration"), + ) + + # Apply them normally + self.assertRaises(exceptions.InconsistentMigrationHistory, + migrate_app, + migrations, target_name=None, fake=False) + self.assertRaises(exceptions.InconsistentMigrationHistory, + migrate_app, + migrations, target_name='zero', fake=False) + try: + migrate_app(migrations, target_name=None, fake=False) + except exceptions.InconsistentMigrationHistory, e: + self.assertEqual( + [ + ( + migrations['0002_eggs'], + migrations['0001_spam'], + ) + ], + e.problems, + ) + try: + migrate_app(migrations, target_name="zero", fake=False) + except exceptions.InconsistentMigrationHistory, e: + self.assertEqual( + [ + ( + migrations['0002_eggs'], + migrations['0001_spam'], + ) + ], + e.problems, + ) + + # Nothing should have changed (no merge mode!) + self.assertListEqual( + ((u"fakeapp", u"0002_eggs"),), + MigrationHistory.objects.values_list("app_name", "migration"), + ) + + # Apply with merge + migrate_app(migrations, target_name=None, merge=True, fake=False) + + # We should finish with all migrations + self.assertListEqual( + ((u"fakeapp", u"0001_spam"), + (u"fakeapp", u"0002_eggs"), + (u"fakeapp", u"0003_alter_spam"),), + MigrationHistory.objects.values_list("app_name", "migration"), + ) + + # Now roll them backwards + migrate_app(migrations, target_name="0002", fake=False) + migrate_app(migrations, target_name="0001", fake=True) + migrate_app(migrations, target_name="zero", fake=False) + + # Finish with none + self.assertEqual(list(MigrationHistory.objects.all()), []) + + def test_alter_column_null(self): + + def null_ok(eat_exception=True): + from django.db import connection, transaction + # the DBAPI introspection module fails on postgres NULLs. + cursor = connection.cursor() + + # SQLite has weird now() + if db.backend_name == "sqlite3": + now_func = "DATETIME('NOW')" + # So does SQLServer... 
should we be using a backend attribute? + elif db.backend_name == "pyodbc": + now_func = "GETDATE()" + elif db.backend_name == "oracle": + now_func = "SYSDATE" + else: + now_func = "NOW()" + + try: + if db.backend_name == "pyodbc": + cursor.execute("SET IDENTITY_INSERT southtest_spam ON;") + cursor.execute("INSERT INTO southtest_spam (id, weight, expires, name) VALUES (100, NULL, %s, 'whatever');" % now_func) + except: + if eat_exception: + transaction.rollback() + return False + else: + raise + else: + cursor.execute("DELETE FROM southtest_spam") + transaction.commit() + return True + + MigrationHistory.objects.all().delete() + migrations = Migrations("fakeapp") + + # by default name is NOT NULL + migrate_app(migrations, target_name="0002", fake=False) + self.failIf(null_ok()) + self.assertListEqual( + ((u"fakeapp", u"0001_spam"), + (u"fakeapp", u"0002_eggs"),), + MigrationHistory.objects.values_list("app_name", "migration"), + ) + + # after 0003, it should be NULL + migrate_app(migrations, target_name="0003", fake=False) + self.assert_(null_ok(False)) + self.assertListEqual( + ((u"fakeapp", u"0001_spam"), + (u"fakeapp", u"0002_eggs"), + (u"fakeapp", u"0003_alter_spam"),), + MigrationHistory.objects.values_list("app_name", "migration"), + ) + + # make sure it is NOT NULL again + migrate_app(migrations, target_name="0002", fake=False) + self.failIf(null_ok(), 'weight not null after migration') + self.assertListEqual( + ((u"fakeapp", u"0001_spam"), + (u"fakeapp", u"0002_eggs"),), + MigrationHistory.objects.values_list("app_name", "migration"), + ) + + # finish with no migrations, otherwise other tests fail... 
+ migrate_app(migrations, target_name="zero", fake=False) + self.assertEqual(list(MigrationHistory.objects.all()), []) + + def test_dependencies(self): + + fakeapp = Migrations("fakeapp") + otherfakeapp = Migrations("otherfakeapp") + + # Test a simple path + self.assertEqual([fakeapp['0001_spam'], + fakeapp['0002_eggs'], + fakeapp['0003_alter_spam']], + fakeapp['0003_alter_spam'].forwards_plan()) + + # And a complex one. + self.assertEqual( + [ + fakeapp['0001_spam'], + otherfakeapp['0001_first'], + otherfakeapp['0002_second'], + fakeapp['0002_eggs'], + fakeapp['0003_alter_spam'], + otherfakeapp['0003_third'] + ], + otherfakeapp['0003_third'].forwards_plan(), + ) + + +class TestMigrationUtils(Monkeypatcher): + installed_apps = ["fakeapp", "otherfakeapp"] + + def test_get_app_label(self): + self.assertEqual( + "southtest", + get_app_label(self.create_fake_app("southtest.models")), + ) + self.assertEqual( + "baz", + get_app_label(self.create_fake_app("foo.bar.baz.models")), + ) + +class TestUtils(unittest.TestCase): + + def test_flatten(self): + self.assertEqual([], list(flatten(iter([])))) + self.assertEqual([], list(flatten(iter([iter([]), ])))) + self.assertEqual([1], list(flatten(iter([1])))) + self.assertEqual([1, 2], list(flatten(iter([1, 2])))) + self.assertEqual([1, 2], list(flatten(iter([iter([1]), 2])))) + self.assertEqual([1, 2], list(flatten(iter([iter([1, 2])])))) + self.assertEqual([1, 2, 3], list(flatten(iter([iter([1, 2]), 3])))) + self.assertEqual([1, 2, 3], + list(flatten(iter([iter([1]), iter([2]), 3])))) + self.assertEqual([1, 2, 3], + list(flatten([[1], [2], 3]))) + + def test_depends(self): + graph = {'A1': []} + self.assertEqual(['A1'], + depends('A1', lambda n: graph[n])) + graph = {'A1': [], + 'A2': ['A1'], + 'A3': ['A2']} + self.assertEqual(['A1', 'A2', 'A3'], + depends('A3', lambda n: graph[n])) + graph = {'A1': [], + 'A2': ['A1'], + 'A3': ['A2', 'A1']} + self.assertEqual(['A1', 'A2', 'A3'], + depends('A3', lambda n: graph[n])) + graph = 
{'A1': [], + 'A2': ['A1'], + 'A3': ['A2', 'A1', 'B1'], + 'B1': []} + self.assertEqual( + ['B1', 'A1', 'A2', 'A3'], + depends('A3', lambda n: graph[n]), + ) + graph = {'A1': [], + 'A2': ['A1'], + 'A3': ['A2', 'A1', 'B2'], + 'B1': [], + 'B2': ['B1']} + self.assertEqual( + ['B1', 'B2', 'A1', 'A2', 'A3'], + depends('A3', lambda n: graph[n]), + ) + graph = {'A1': [], + 'A2': ['A1', 'B1'], + 'A3': ['A2'], + 'B1': ['A1']} + self.assertEqual(['A1', 'B1', 'A2', 'A3'], + depends('A3', lambda n: graph[n])) + graph = {'A1': [], + 'A2': ['A1'], + 'A3': ['A2', 'A1', 'B2'], + 'B1': [], + 'B2': ['B1', 'C1'], + 'C1': ['B1']} + self.assertEqual( + ['B1', 'C1', 'B2', 'A1', 'A2', 'A3'], + depends('A3', lambda n: graph[n]), + ) + graph = {'A1': [], + 'A2': ['A1'], + 'A3': ['A2', 'B2', 'A1', 'C1'], + 'B1': ['A1'], + 'B2': ['B1', 'C2', 'A1'], + 'C1': ['B1'], + 'C2': ['C1', 'A1'], + 'C3': ['C2']} + self.assertEqual( + ['A1', 'B1', 'C1', 'C2', 'B2', 'A2', 'A3'], + depends('A3', lambda n: graph[n]), + ) + + def assertCircularDependency(self, trace, target, graph): + "Custom assertion that checks a circular dependency is detected correctly." 
+ self.assertRaises( + exceptions.CircularDependency, + depends, + target, + lambda n: graph[n], + ) + try: + depends(target, lambda n: graph[n]) + except exceptions.CircularDependency, e: + self.assertEqual(trace, e.trace) + + def test_depends_cycle(self): + graph = {'A1': ['A1']} + self.assertCircularDependency( + ['A1', 'A1'], + 'A1', + graph, + ) + graph = {'A1': [], + 'A2': ['A1', 'A2'], + 'A3': ['A2']} + self.assertCircularDependency( + ['A2', 'A2'], + 'A3', + graph, + ) + graph = {'A1': [], + 'A2': ['A1'], + 'A3': ['A2', 'A3'], + 'A4': ['A3']} + self.assertCircularDependency( + ['A3', 'A3'], + 'A4', + graph, + ) + graph = {'A1': ['B1'], + 'B1': ['A1']} + self.assertCircularDependency( + ['A1', 'B1', 'A1'], + 'A1', + graph, + ) + graph = {'A1': [], + 'A2': ['A1', 'B2'], + 'A3': ['A2'], + 'B1': [], + 'B2': ['B1', 'A2'], + 'B3': ['B2']} + self.assertCircularDependency( + ['A2', 'B2', 'A2'], + 'A3', + graph, + ) + graph = {'A1': [], + 'A2': ['A1', 'B3'], + 'A3': ['A2'], + 'B1': [], + 'B2': ['B1', 'A2'], + 'B3': ['B2']} + self.assertCircularDependency( + ['A2', 'B3', 'B2', 'A2'], + 'A3', + graph, + ) + graph = {'A1': [], + 'A2': ['A1'], + 'A3': ['A2', 'B2'], + 'A4': ['A3'], + 'B1': ['A3'], + 'B2': ['B1']} + self.assertCircularDependency( + ['A3', 'B2', 'B1', 'A3'], + 'A4', + graph, + ) + +class TestManualChanges(Monkeypatcher): + installed_apps = ["fakeapp", "otherfakeapp"] + + def test_suggest_name(self): + migrations = Migrations('fakeapp') + change = ManualChanges(migrations, + [], + ['fakeapp.slug'], + []) + self.assertEquals(change.suggest_name(), + 'add_field_fakeapp_slug') + + change = ManualChanges(migrations, + [], + [], + ['fakeapp.slug']) + self.assertEquals(change.suggest_name(), + 'add_index_fakeapp_slug') diff --git a/users/src/south/south/tests/otherfakeapp/__init__.py b/users/src/south/south/tests/otherfakeapp/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/users/src/south/south/tests/otherfakeapp/migrations/0001_first.py 
# --- users/src/south/south/tests/otherfakeapp/migrations/0001_first.py ---
from south.db import db
from django.db import models

class Migration:
    """First otherfakeapp fixture migration (no schema changes).

    Exists purely so the dependency-resolution tests have a cross-app
    edge to follow.
    """

    # Cross-app dependency: must be applied after fakeapp's 0001_spam.
    depends_on = (
        ("fakeapp", "0001_spam"),
    )

    def forwards(self):
        # Intentionally empty: this migration only contributes ordering.
        pass

    def backwards(self):
        # Intentionally empty: nothing to undo.
        pass

# --- users/src/south/south/tests/otherfakeapp/migrations/0002_second.py ---
from south.db import db
from django.db import models

class Migration:
    """Second otherfakeapp fixture migration; no explicit dependencies."""

    def forwards(self):
        # Intentionally empty fixture.
        pass

    def backwards(self):
        # Intentionally empty fixture.
        pass

# --- users/src/south/south/tests/otherfakeapp/migrations/0003_third.py ---
from south.db import db
from django.db import models

class Migration:
    """Third otherfakeapp fixture migration.

    Depends on a *later* fakeapp migration, giving the planner an
    interleaved cross-app ordering to resolve.
    """

    depends_on = (
        ("fakeapp", "0003_alter_spam"),
    )

    def forwards(self):
        # Intentionally empty fixture.
        pass

    def backwards(self):
        # Intentionally empty fixture.
        pass

# --- users/src/south/south/tests/otherfakeapp/models.py ---
# This file left intentionally blank.
"""
Generally helpful utility functions.
"""


def _ask_for_it_by_name(name):
    """Return the object referenced by the absolute dotted path *name*.

    A bare module name (e.g. ``"os"``) returns the module object itself;
    a dotted path (e.g. ``"os.path.join"``) imports the containing module
    and returns its final attribute.
    """
    bits = name.split(".")

    # If there is no dot, the whole name is the module reference.
    if len(bits) > 1:
        modulename = ".".join(bits[:-1])
    else:
        modulename = bits[0]

    # fromlist must be a non-empty sequence so __import__ returns the
    # *leaf* module of a dotted path rather than the top-level package.
    # (The original passed a plain string, which only works because a
    # string happens to be iterable -- a list matches the documented API.)
    module = __import__(modulename, {}, {}, [bits[-1]])

    if len(bits) == 1:
        return module
    return getattr(module, bits[-1])


def ask_for_it_by_name(name):
    "Returns an object referenced by absolute path. (Memoised outer wrapper)"
    # Cache hit avoids re-running the import machinery for repeated lookups.
    if name not in ask_for_it_by_name.cache:
        ask_for_it_by_name.cache[name] = _ask_for_it_by_name(name)
    return ask_for_it_by_name.cache[name]
ask_for_it_by_name.cache = {}


def get_attribute(item, attribute):
    """
    Like getattr, but recursive (i.e. you can ask for 'foo.bar.yay'.)
    """
    value = item
    for part in attribute.split("."):
        value = getattr(value, part)
    return value


def auto_through(field):
    "Returns if the M2M class passed in has an autogenerated through table or not."
    return (
        # Django 1.0/1.1: an absent through model means it is auto-generated.
        (not field.rel.through)
        or
        # Django 1.2+: the through model's _meta records auto-creation.
        getattr(getattr(field.rel.through, "_meta", None), "auto_created", False)
    )


def auto_model(model):
    "Returns if the given model was automatically generated."
    return getattr(model._meta, "auto_created", False)


def memoize(function):
    """Memoisation decorator for zero-argument methods.

    The result is cached as ``self._<name>`` on the instance, so each
    instance caches independently and the cache lives exactly as long as
    the instance does.
    """
    name = function.__name__
    _name = '_' + name

    def method(self):
        # Compute once, then serve the cached attribute on every later call.
        if not hasattr(self, _name):
            setattr(self, _name, function(self))
        return getattr(self, _name)

    def invalidate():
        # NOTE(review): this inspects/deletes the attribute on the *wrapper
        # function* object, not on any instance, so it is effectively a
        # no-op -- the cached value lives on self, which is unreachable
        # here. Preserved unchanged for interface compatibility; a real
        # fix would need the instance. TODO confirm no caller relies on it.
        if hasattr(method, _name):
            delattr(method, _name)

    method.__name__ = function.__name__
    method.__doc__ = function.__doc__
    method._invalidate = invalidate
    return method
+ return ask_for_it_by_name(field_name) + +class SchemaMigration(BaseMigration): + pass + +class DataMigration(BaseMigration): + # Data migrations shouldn't be dry-run + no_dry_run = True diff --git a/users/src/south/south/v2.pyc b/users/src/south/south/v2.pyc new file mode 100644 index 0000000..09b8b8c Binary files /dev/null and b/users/src/south/south/v2.pyc differ diff --git a/users/static/admin/css/base.css b/users/static/admin/css/base.css new file mode 100644 index 0000000..c5e385d --- /dev/null +++ b/users/static/admin/css/base.css @@ -0,0 +1,772 @@ +/* + DJANGO Admin styles +*/ + +body { + margin: 0; + padding: 0; + font-size: 12px; + font-family: "Lucida Grande","DejaVu Sans","Bitstream Vera Sans",Verdana,Arial,sans-serif; + color: #333; + background: #fff; +} + +/* LINKS */ + +a:link, a:visited { + color: #5b80b2; + text-decoration: none; +} + +a:hover { + color: #036; +} + +a img { + border: none; +} + +a.section:link, a.section:visited { + color: white; + text-decoration: none; +} + +/* GLOBAL DEFAULTS */ + +p, ol, ul, dl { + margin: .2em 0 .8em 0; +} + +p { + padding: 0; + line-height: 140%; +} + +h1,h2,h3,h4,h5 { + font-weight: bold; +} + +h1 { + font-size: 18px; + color: #666; + padding: 0 6px 0 0; + margin: 0 0 .2em 0; +} + +h2 { + font-size: 16px; + margin: 1em 0 .5em 0; +} + +h2.subhead { + font-weight: normal; + margin-top: 0; +} + +h3 { + font-size: 14px; + margin: .8em 0 .3em 0; + color: #666; + font-weight: bold; +} + +h4 { + font-size: 12px; + margin: 1em 0 .8em 0; + padding-bottom: 3px; +} + +h5 { + font-size: 10px; + margin: 1.5em 0 .5em 0; + color: #666; + text-transform: uppercase; + letter-spacing: 1px; +} + +ul li { + list-style-type: square; + padding: 1px 0; +} + +ul.plainlist { + margin-left: 0 !important; +} + +ul.plainlist li { + list-style-type: none; +} + +li ul { + margin-bottom: 0; +} + +li, dt, dd { + font-size: 11px; + line-height: 14px; +} + +dt { + font-weight: bold; + margin-top: 4px; +} + +dd { + margin-left: 0; +} + 
+form { + margin: 0; + padding: 0; +} + +fieldset { + margin: 0; + padding: 0; +} + +blockquote { + font-size: 11px; + color: #777; + margin-left: 2px; + padding-left: 10px; + border-left: 5px solid #ddd; +} + +code, pre { + font-family: "Bitstream Vera Sans Mono", Monaco, "Courier New", Courier, monospace; + background: inherit; + color: #666; + font-size: 11px; +} + +pre.literal-block { + margin: 10px; + background: #eee; + padding: 6px 8px; +} + +code strong { + color: #930; +} + +hr { + clear: both; + color: #eee; + background-color: #eee; + height: 1px; + border: none; + margin: 0; + padding: 0; + font-size: 1px; + line-height: 1px; +} + +/* TEXT STYLES & MODIFIERS */ + +.small { + font-size: 11px; +} + +.tiny { + font-size: 10px; +} + +p.tiny { + margin-top: -2px; +} + +.mini { + font-size: 9px; +} + +p.mini { + margin-top: -3px; +} + +.help, p.help { + font-size: 10px !important; + color: #999; +} + +p img, h1 img, h2 img, h3 img, h4 img, td img { + vertical-align: middle; +} + +.quiet, a.quiet:link, a.quiet:visited { + color: #999 !important; + font-weight: normal !important; +} + +.quiet strong { + font-weight: bold !important; +} + +.float-right { + float: right; +} + +.float-left { + float: left; +} + +.clear { + clear: both; +} + +.align-left { + text-align: left; +} + +.align-right { + text-align: right; +} + +.example { + margin: 10px 0; + padding: 5px 10px; + background: #efefef; +} + +.nowrap { + white-space: nowrap; +} + +/* TABLES */ + +table { + border-collapse: collapse; + border-color: #ccc; +} + +td, th { + font-size: 11px; + line-height: 13px; + border-bottom: 1px solid #eee; + vertical-align: top; + padding: 5px; + font-family: "Lucida Grande", Verdana, Arial, sans-serif; +} + +th { + text-align: left; + font-size: 12px; + font-weight: bold; +} + +thead th, +tfoot td { + color: #666; + padding: 2px 5px; + font-size: 11px; + background: #e1e1e1 url(../img/admin/nav-bg.gif) top left repeat-x; + border-left: 1px solid #ddd; + border-bottom: 1px 
solid #ddd; +} + +tfoot td { + border-bottom: none; + border-top: 1px solid #ddd; +} + +thead th:first-child, +tfoot td:first-child { + border-left: none !important; +} + +thead th.optional { + font-weight: normal !important; +} + +fieldset table { + border-right: 1px solid #eee; +} + +tr.row-label td { + font-size: 9px; + padding-top: 2px; + padding-bottom: 0; + border-bottom: none; + color: #666; + margin-top: -1px; +} + +tr.alt { + background: #f6f6f6; +} + +.row1 { + background: #EDF3FE; +} + +.row2 { + background: white; +} + +/* SORTABLE TABLES */ + +thead th a:link, thead th a:visited { + color: #666; + display: block; +} + +table thead th.sorted { + background-position: bottom left !important; +} + +table thead th.sorted a { + padding-right: 13px; +} + +table thead th.ascending a { + background: url(../img/admin/arrow-up.gif) right .4em no-repeat; +} + +table thead th.descending a { + background: url(../img/admin/arrow-down.gif) right .4em no-repeat; +} + +/* ORDERABLE TABLES */ + +table.orderable tbody tr td:hover { + cursor: move; +} + +table.orderable tbody tr td:first-child { + padding-left: 14px; + background-image: url(../img/admin/nav-bg-grabber.gif); + background-repeat: repeat-y; +} + +table.orderable-initalized .order-cell, body>tr>td.order-cell { + display: none; +} + +/* FORM DEFAULTS */ + +input, textarea, select, .form-row p { + margin: 2px 0; + padding: 2px 3px; + vertical-align: middle; + font-family: "Lucida Grande", Verdana, Arial, sans-serif; + font-weight: normal; + font-size: 11px; +} + +textarea { + vertical-align: top !important; +} + +input[type=text], input[type=password], textarea, select, .vTextField { + border: 1px solid #ccc; +} + +/* FORM BUTTONS */ + +.button, input[type=submit], input[type=button], .submit-row input { + background: white url(../img/admin/nav-bg.gif) bottom repeat-x; + padding: 3px 5px; + color: black; + border: 1px solid #bbb; + border-color: #ddd #aaa #aaa #ddd; +} + +.button:active, 
input[type=submit]:active, input[type=button]:active { + background-image: url(../img/admin/nav-bg-reverse.gif); + background-position: top; +} + +.button[disabled], input[type=submit][disabled], input[type=button][disabled] { + background-image: url(../img/admin/nav-bg.gif); + background-position: bottom; + opacity: 0.4; +} + +.button.default, input[type=submit].default, .submit-row input.default { + border: 2px solid #5b80b2; + background: #7CA0C7 url(../img/admin/default-bg.gif) bottom repeat-x; + font-weight: bold; + color: white; + float: right; +} + +.button.default:active, input[type=submit].default:active { + background-image: url(../img/admin/default-bg-reverse.gif); + background-position: top; +} + +.button[disabled].default, input[type=submit][disabled].default, input[type=button][disabled].default { + background-image: url(../img/admin/default-bg.gif); + background-position: bottom; + opacity: 0.4; +} + + +/* MODULES */ + +.module { + border: 1px solid #ccc; + margin-bottom: 5px; + background: white; +} + +.module p, .module ul, .module h3, .module h4, .module dl, .module pre { + padding-left: 10px; + padding-right: 10px; +} + +.module blockquote { + margin-left: 12px; +} + +.module ul, .module ol { + margin-left: 1.5em; +} + +.module h3 { + margin-top: .6em; +} + +.module h2, .module caption, .inline-group h2 { + margin: 0; + padding: 2px 5px 3px 5px; + font-size: 11px; + text-align: left; + font-weight: bold; + background: #7CA0C7 url(../img/admin/default-bg.gif) top left repeat-x; + color: white; +} + +.module table { + border-collapse: collapse; +} + +/* MESSAGES & ERRORS */ + +ul.messagelist { + padding: 0 0 5px 0; + margin: 0; +} + +ul.messagelist li { + font-size: 12px; + display: block; + padding: 4px 5px 4px 25px; + margin: 0 0 3px 0; + border-bottom: 1px solid #ddd; + color: #666; + background: #ffc url(../img/admin/icon_success.gif) 5px .3em no-repeat; +} + +ul.messagelist li.warning{ + background-image: url(../img/admin/icon_alert.gif); +} + 
+ul.messagelist li.error{ + background-image: url(../img/admin/icon_error.gif); +} + +.errornote { + font-size: 12px !important; + display: block; + padding: 4px 5px 4px 25px; + margin: 0 0 3px 0; + border: 1px solid red; + color: red; + background: #ffc url(../img/admin/icon_error.gif) 5px .3em no-repeat; +} + +ul.errorlist { + margin: 0 !important; + padding: 0 !important; +} + +.errorlist li { + font-size: 12px !important; + display: block; + padding: 4px 5px 4px 25px; + margin: 0 0 3px 0; + border: 1px solid red; + color: white; + background: red url(../img/admin/icon_alert.gif) 5px .3em no-repeat; +} + +.errorlist li a { + color: white; + text-decoration: underline; +} + +td ul.errorlist { + margin: 0 !important; + padding: 0 !important; +} + +td ul.errorlist li { + margin: 0 !important; +} + +.errors { + background: #ffc; +} + +.errors input, .errors select, .errors textarea { + border: 1px solid red; +} + +div.system-message { + background: #ffc; + margin: 10px; + padding: 6px 8px; + font-size: .8em; +} + +div.system-message p.system-message-title { + padding: 4px 5px 4px 25px; + margin: 0; + color: red; + background: #ffc url(../img/admin/icon_error.gif) 5px .3em no-repeat; +} + +.description { + font-size: 12px; + padding: 5px 0 0 12px; +} + +/* BREADCRUMBS */ + +div.breadcrumbs { + background: white url(../img/admin/nav-bg-reverse.gif) 0 -10px repeat-x; + padding: 2px 8px 3px 8px; + font-size: 11px; + color: #999; + border-top: 1px solid white; + border-bottom: 1px solid #ccc; + text-align: left; +} + +/* ACTION ICONS */ + +.addlink { + padding-left: 12px; + background: url(../img/admin/icon_addlink.gif) 0 .2em no-repeat; +} + +.changelink { + padding-left: 12px; + background: url(../img/admin/icon_changelink.gif) 0 .2em no-repeat; +} + +.deletelink { + padding-left: 12px; + background: url(../img/admin/icon_deletelink.gif) 0 .25em no-repeat; +} + +a.deletelink:link, a.deletelink:visited { + color: #CC3434; +} + +a.deletelink:hover { + color: #993333; +} 
+ +/* OBJECT TOOLS */ + +.object-tools { + font-size: 10px; + font-weight: bold; + font-family: Arial,Helvetica,sans-serif; + padding-left: 0; + float: right; + position: relative; + margin-top: -2.4em; + margin-bottom: -2em; +} + +.form-row .object-tools { + margin-top: 5px; + margin-bottom: 5px; + float: none; + height: 2em; + padding-left: 3.5em; +} + +.object-tools li { + display: block; + float: left; + background: url(../img/admin/tool-left.gif) 0 0 no-repeat; + padding: 0 0 0 8px; + margin-left: 2px; + height: 16px; +} + +.object-tools li:hover { + background: url(../img/admin/tool-left_over.gif) 0 0 no-repeat; +} + +.object-tools a:link, .object-tools a:visited { + display: block; + float: left; + color: white; + padding: .1em 14px .1em 8px; + height: 14px; + background: #999 url(../img/admin/tool-right.gif) 100% 0 no-repeat; +} + +.object-tools a:hover, .object-tools li:hover a { + background: #5b80b2 url(../img/admin/tool-right_over.gif) 100% 0 no-repeat; +} + +.object-tools a.viewsitelink, .object-tools a.golink { + background: #999 url(../img/admin/tooltag-arrowright.gif) top right no-repeat; + padding-right: 28px; +} + +.object-tools a.viewsitelink:hover, .object-tools a.golink:hover { + background: #5b80b2 url(../img/admin/tooltag-arrowright_over.gif) top right no-repeat; +} + +.object-tools a.addlink { + background: #999 url(../img/admin/tooltag-add.gif) top right no-repeat; + padding-right: 28px; +} + +.object-tools a.addlink:hover { + background: #5b80b2 url(../img/admin/tooltag-add_over.gif) top right no-repeat; +} + +/* OBJECT HISTORY */ + +table#change-history { + width: 100%; +} + +table#change-history tbody th { + width: 16em; +} + +/* PAGE STRUCTURE */ + +#container { + position: relative; + width: 100%; + min-width: 760px; + padding: 0; +} + +#content { + margin: 10px 15px; +} + +#header { + width: 100%; +} + +#content-main { + float: left; + width: 100%; +} + +#content-related { + float: right; + width: 18em; + position: relative; + 
margin-right: -19em; +} + +#footer { + clear: both; + padding: 10px; +} + +/* COLUMN TYPES */ + +.colMS { + margin-right: 20em !important; +} + +.colSM { + margin-left: 20em !important; +} + +.colSM #content-related { + float: left; + margin-right: 0; + margin-left: -19em; +} + +.colSM #content-main { + float: right; +} + +.popup .colM { + width: 95%; +} + +.subcol { + float: left; + width: 46%; + margin-right: 15px; +} + +.dashboard #content { + width: 500px; +} + +/* HEADER */ + +#header { + background: #417690; + color: #ffc; + overflow: hidden; +} + +#header a:link, #header a:visited { + color: white; +} + +#header a:hover { + text-decoration: underline; +} + +#branding h1 { + padding: 0 10px; + font-size: 18px; + margin: 8px 0; + font-weight: normal; + color: #f4f379; +} + +#branding h2 { + padding: 0 10px; + font-size: 14px; + margin: -8px 0 8px 0; + font-weight: normal; + color: #ffc; +} + +#user-tools { + position: absolute; + top: 0; + right: 0; + padding: 1.2em 10px; + font-size: 11px; + text-align: right; +} + +/* SIDEBAR */ + +#content-related h3 { + font-size: 12px; + color: #666; + margin-bottom: 3px; +} + +#content-related h4 { + font-size: 11px; +} + +#content-related .module h2 { + background: #eee url(../img/admin/nav-bg.gif) bottom left repeat-x; + color: #666; +} + diff --git a/users/static/admin/css/changelists.css b/users/static/admin/css/changelists.css new file mode 100644 index 0000000..315b8c7 --- /dev/null +++ b/users/static/admin/css/changelists.css @@ -0,0 +1,289 @@ +/* CHANGELISTS */ + +#changelist { + position: relative; + width: 100%; +} + +#changelist table { + width: 100%; +} + +.change-list .hiddenfields { display:none; } + +.change-list .filtered table { + border-right: 1px solid #ddd; +} + +.change-list .filtered { + min-height: 400px; +} + +.change-list .filtered { + background: white url(../img/admin/changelist-bg.gif) top right repeat-y !important; +} + +.change-list .filtered .results, .change-list .filtered .paginator, 
.filtered #toolbar, .filtered div.xfull { + margin-right: 160px !important; + width: auto !important; +} + +.change-list .filtered table tbody th { + padding-right: 1em; +} + +#changelist .toplinks { + border-bottom: 1px solid #ccc !important; +} + +#changelist .paginator { + color: #666; + border-top: 1px solid #eee; + border-bottom: 1px solid #eee; + background: white url(../img/admin/nav-bg.gif) 0 180% repeat-x; + overflow: hidden; +} + +.change-list .filtered .paginator { + border-right: 1px solid #ddd; +} + +/* CHANGELIST TABLES */ + +#changelist table thead th { + white-space: nowrap; + vertical-align: middle; +} + +#changelist table thead th.action-checkbox-column { + width: 1.5em; + text-align: center; +} + +#changelist table tbody td, #changelist table tbody th { + border-left: 1px solid #ddd; +} + +#changelist table tbody td:first-child, #changelist table tbody th:first-child { + border-left: 0; + border-right: 1px solid #ddd; +} + +#changelist table tbody td.action-checkbox { + text-align:center; +} + +#changelist table tfoot { + color: #666; +} + +/* TOOLBAR */ + +#changelist #toolbar { + padding: 3px; + border-bottom: 1px solid #ddd; + background: #e1e1e1 url(../img/admin/nav-bg.gif) top left repeat-x; + color: #666; +} + +#changelist #toolbar form input { + font-size: 11px; + padding: 1px 2px; +} + +#changelist #toolbar form #searchbar { + padding: 2px; +} + +#changelist #changelist-search img { + vertical-align: middle; +} + +/* FILTER COLUMN */ + +#changelist-filter { + position: absolute; + top: 0; + right: 0; + z-index: 1000; + width: 160px; + border-left: 1px solid #ddd; + background: #efefef; + margin: 0; +} + +#changelist-filter h2 { + font-size: 11px; + padding: 2px 5px; + border-bottom: 1px solid #ddd; +} + +#changelist-filter h3 { + font-size: 12px; + margin-bottom: 0; +} + +#changelist-filter ul { + padding-left: 0; + margin-left: 10px; +} + +#changelist-filter li { + list-style-type: none; + margin-left: 0; + padding-left: 0; +} + 
+#changelist-filter a { + color: #999; +} + +#changelist-filter a:hover { + color: #036; +} + +#changelist-filter li.selected { + border-left: 5px solid #ccc; + padding-left: 5px; + margin-left: -10px; +} + +#changelist-filter li.selected a { + color: #5b80b2 !important; +} + +/* DATE DRILLDOWN */ + +.change-list ul.toplinks { + display: block; + background: white url(../img/admin/nav-bg-reverse.gif) 0 -10px repeat-x; + border-top: 1px solid white; + float: left; + padding: 0 !important; + margin: 0 !important; + width: 100%; +} + +.change-list ul.toplinks li { + float: left; + width: 9em; + padding: 3px 6px; + font-weight: bold; + list-style-type: none; +} + +.change-list ul.toplinks .date-back a { + color: #999; +} + +.change-list ul.toplinks .date-back a:hover { + color: #036; +} + +/* PAGINATOR */ + +.paginator { + font-size: 11px; + padding-top: 10px; + padding-bottom: 10px; + line-height: 22px; + margin: 0; + border-top: 1px solid #ddd; +} + +.paginator a:link, .paginator a:visited { + padding: 2px 6px; + border: solid 1px #ccc; + background: white; + text-decoration: none; +} + +.paginator a.showall { + padding: 0 !important; + border: none !important; +} + +.paginator a.showall:hover { + color: #036 !important; + background: transparent !important; +} + +.paginator .end { + border-width: 2px !important; + margin-right: 6px; +} + +.paginator .this-page { + padding: 2px 6px; + font-weight: bold; + font-size: 13px; + vertical-align: top; +} + +.paginator a:hover { + color: white; + background: #5b80b2; + border-color: #036; +} + +/* ACTIONS */ + +.filtered .actions { + margin-right: 160px !important; + border-right: 1px solid #ddd; +} + +#changelist table input { + margin: 0; +} + +#changelist table tbody tr.selected { + background-color: #FFFFCC; +} + +#changelist .actions { + color: #999; + padding: 3px; + border-top: 1px solid #fff; + border-bottom: 1px solid #ddd; + background: white url(../img/admin/nav-bg-reverse.gif) 0 -10px repeat-x; +} + +#changelist 
.actions.selected { + background: #fffccf; + border-top: 1px solid #fffee8; + border-bottom: 1px solid #edecd6; +} + +#changelist .actions span.all, +#changelist .actions span.action-counter, +#changelist .actions span.clear, +#changelist .actions span.question { + font-size: 11px; + margin: 0 0.5em; + display: none; +} + +#changelist .actions:last-child { + border-bottom: none; +} + +#changelist .actions select { + border: 1px solid #aaa; + margin-left: 0.5em; + padding: 1px 2px; +} + +#changelist .actions label { + font-size: 11px; + margin-left: 0.5em; +} + +#changelist #action-toggle { + display: none; +} + +#changelist .actions .button { + font-size: 11px; + padding: 1px 2px; +} diff --git a/users/static/admin/css/dashboard.css b/users/static/admin/css/dashboard.css new file mode 100644 index 0000000..ceefe15 --- /dev/null +++ b/users/static/admin/css/dashboard.css @@ -0,0 +1,30 @@ +/* DASHBOARD */ + +.dashboard .module table th { + width: 100%; +} + +.dashboard .module table td { + white-space: nowrap; +} + +.dashboard .module table td a { + display: block; + padding-right: .6em; +} + +/* RECENT ACTIONS MODULE */ + +.module ul.actionlist { + margin-left: 0; +} + +ul.actionlist li { + list-style-type: none; +} + +ul.actionlist li.changelink { + overflow: hidden; + text-overflow: ellipsis; + -o-text-overflow: ellipsis; +} \ No newline at end of file diff --git a/users/static/admin/css/forms.css b/users/static/admin/css/forms.css new file mode 100644 index 0000000..1cedf24 --- /dev/null +++ b/users/static/admin/css/forms.css @@ -0,0 +1,360 @@ +@import url('widgets.css'); + +/* FORM ROWS */ + +.form-row { + overflow: hidden; + padding: 8px 12px; + font-size: 11px; + border-bottom: 1px solid #eee; +} + +.form-row img, .form-row input { + vertical-align: middle; +} + +form .form-row p { + padding-left: 0; + font-size: 11px; +} + +/* FORM LABELS */ + +form h4 { + margin: 0 !important; + padding: 0 !important; + border: none !important; +} + +label { + font-weight: 
normal !important; + color: #666; + font-size: 12px; +} + +.required label, label.required { + font-weight: bold !important; + color: #333 !important; +} + +/* RADIO BUTTONS */ + +form ul.radiolist li { + list-style-type: none; +} + +form ul.radiolist label { + float: none; + display: inline; +} + +form ul.inline { + margin-left: 0; + padding: 0; +} + +form ul.inline li { + float: left; + padding-right: 7px; +} + +/* ALIGNED FIELDSETS */ + +.aligned label { + display: block; + padding: 3px 10px 0 0; + float: left; + width: 8em; +} + +.aligned ul label { + display: inline; + float: none; + width: auto; +} + +.colMS .aligned .vLargeTextField, .colMS .aligned .vXMLLargeTextField { + width: 350px; +} + +form .aligned p, form .aligned ul { + margin-left: 7em; + padding-left: 30px; +} + +form .aligned table p { + margin-left: 0; + padding-left: 0; +} + +form .aligned p.help { + padding-left: 38px; +} + +.aligned .vCheckboxLabel { + float: none !important; + display: inline; + padding-left: 4px; +} + +.colM .aligned .vLargeTextField, .colM .aligned .vXMLLargeTextField { + width: 610px; +} + +.checkbox-row p.help { + margin-left: 0; + padding-left: 0 !important; +} + +fieldset .field-box { + float: left; + margin-right: 20px; +} + +/* WIDE FIELDSETS */ + +.wide label { + width: 15em !important; +} + +form .wide p { + margin-left: 15em; +} + +form .wide p.help { + padding-left: 38px; +} + +.colM fieldset.wide .vLargeTextField, .colM fieldset.wide .vXMLLargeTextField { + width: 450px; +} + +/* COLLAPSED FIELDSETS */ + +fieldset.collapsed * { + display: none; +} + +fieldset.collapsed h2, fieldset.collapsed { + display: block !important; +} + +fieldset.collapsed h2 { + background-image: url(../img/admin/nav-bg.gif); + background-position: bottom left; + color: #999; +} + +fieldset.collapsed .collapse-toggle { + background: transparent; + display: inline !important; +} + +/* MONOSPACE TEXTAREAS */ + +fieldset.monospace textarea { + font-family: "Bitstream Vera Sans 
Mono",Monaco,"Courier New",Courier,monospace; +} + +/* SUBMIT ROW */ + +.submit-row { + padding: 5px 7px; + text-align: right; + background: white url(../img/admin/nav-bg.gif) 0 100% repeat-x; + border: 1px solid #ccc; + margin: 5px 0; + overflow: hidden; +} + +.submit-row input { + margin: 0 0 0 5px; +} + +.submit-row p { + margin: 0.3em; +} + +.submit-row p.deletelink-box { + float: left; +} + +.submit-row .deletelink { + background: url(../img/admin/icon_deletelink.gif) 0 50% no-repeat; + padding-left: 14px; +} + +/* CUSTOM FORM FIELDS */ + +.vSelectMultipleField { + vertical-align: top !important; +} + +.vCheckboxField { + border: none; +} + +.vDateField, .vTimeField { + margin-right: 2px; +} + +.vURLField { + width: 30em; +} + +.vLargeTextField, .vXMLLargeTextField { + width: 48em; +} + +.flatpages-flatpage #id_content { + height: 40.2em; +} + +.module table .vPositiveSmallIntegerField { + width: 2.2em; +} + +.vTextField { + width: 20em; +} + +.vIntegerField { + width: 5em; +} + +.vForeignKeyRawIdAdminField { + width: 5em; +} + +/* INLINES */ + +.inline-group { + padding: 0; + border: 1px solid #ccc; + margin: 10px 0; +} + +.inline-group .aligned label { + width: 8em; +} + +.inline-related { + position: relative; +} + +.inline-related h3 { + margin: 0; + color: #666; + padding: 3px 5px; + font-size: 11px; + background: #e1e1e1 url(../img/admin/nav-bg.gif) top left repeat-x; + border-bottom: 1px solid #ddd; +} + +.inline-related h3 span.delete { + float: right; +} + +.inline-related h3 span.delete label { + margin-left: 2px; + font-size: 11px; +} + +.inline-related fieldset { + margin: 0; + background: #fff; + border: none; +} + +.inline-related fieldset.module h3 { + margin: 0; + padding: 2px 5px 3px 5px; + font-size: 11px; + text-align: left; + font-weight: bold; + background: #bcd; + color: #fff; +} + +.inline-group .tabular fieldset.module { + border: none; + border-bottom: 1px solid #ddd; +} + +.inline-related.tabular fieldset.module table { + width: 100%; 
+} + +.last-related fieldset { + border: none; +} + +.inline-group .tabular tr.has_original td { + padding-top: 2em; +} + +.inline-group .tabular tr td.original { + padding: 2px 0 0 0; + width: 0; + _position: relative; +} + +.inline-group .tabular th.original { + width: 0px; + padding: 0; +} + +.inline-group .tabular td.original p { + position: absolute; + left: 0; + height: 1.1em; + padding: 2px 7px; + overflow: hidden; + font-size: 9px; + font-weight: bold; + color: #666; + _width: 700px; +} + +.inline-group ul.tools { + padding: 0; + margin: 0; + list-style: none; +} + +.inline-group ul.tools li { + display: inline; + padding: 0 5px; +} + +.inline-group div.add-row, +.inline-group .tabular tr.add-row td { + color: #666; + padding: 3px 5px; + border-bottom: 1px solid #ddd; + background: #e1e1e1 url(../img/admin/nav-bg.gif) top left repeat-x; +} + +.inline-group .tabular tr.add-row td { + padding: 4px 5px 3px; + border-bottom: none; +} + +.inline-group ul.tools a.add, +.inline-group div.add-row a, +.inline-group .tabular tr.add-row td a { + background: url(../img/admin/icon_addlink.gif) 0 50% no-repeat; + padding-left: 14px; + font-size: 11px; + outline: 0; /* Remove dotted border around link */ +} + +.empty-form { + display: none; +} + +/* IE7 specific bug fixes */ + +.submit-row input { + float: right; +} \ No newline at end of file diff --git a/users/static/admin/css/ie.css b/users/static/admin/css/ie.css new file mode 100644 index 0000000..5fd1ce3 --- /dev/null +++ b/users/static/admin/css/ie.css @@ -0,0 +1,57 @@ +/* IE 6 & 7 */ + +/* Proper fixed width for dashboard in IE6 */ + +.dashboard #content { + *width: 768px; +} + +.dashboard #content-main { + *width: 535px; +} + +/* IE 6 ONLY */ + +/* Keep header from flowing off the page */ + +#container { + _position: static; +} + +/* Put the right sidebars back on the page */ + +.colMS #content-related { + _margin-right: 0; + _margin-left: 10px; + _position: static; +} + +/* Put the left sidebars back on the page 
*/ + +.colSM #content-related { + _margin-right: 10px; + _margin-left: -115px; + _position: static; +} + +.form-row { + _height: 1%; +} + +/* Fix right margin for changelist filters in IE6 */ + +#changelist-filter ul { + _margin-right: -10px; +} + +/* IE ignores min-height, but treats height as if it were min-height */ + +.change-list .filtered { + _height: 400px; +} + +/* IE doesn't know alpha transparency in PNGs */ + +.inline-deletelink { + background: transparent url(../img/admin/inline-delete-8bit.png) no-repeat; +} \ No newline at end of file diff --git a/users/static/admin/css/login.css b/users/static/admin/css/login.css new file mode 100644 index 0000000..8d90d12 --- /dev/null +++ b/users/static/admin/css/login.css @@ -0,0 +1,54 @@ +/* LOGIN FORM */ + +body.login { + background: #eee; +} + +.login #container { + background: white; + border: 1px solid #ccc; + width: 28em; + min-width: 300px; + margin-left: auto; + margin-right: auto; + margin-top: 100px; +} + +.login #content-main { + width: 100%; +} + +.login form { + margin-top: 1em; +} + +.login .form-row { + padding: 4px 0; + float: left; + width: 100%; +} + +.login .form-row label { + float: left; + width: 9em; + padding-right: 0.5em; + line-height: 2em; + text-align: right; + font-size: 1em; + color: #333; +} + +.login .form-row #id_username, .login .form-row #id_password { + width: 14em; +} + +.login span.help { + font-size: 10px; + display: block; +} + +.login .submit-row { + clear: both; + padding: 1em 0 0 9.4em; +} + diff --git a/users/static/admin/css/rtl.css b/users/static/admin/css/rtl.css new file mode 100644 index 0000000..c02241d --- /dev/null +++ b/users/static/admin/css/rtl.css @@ -0,0 +1,244 @@ +body { + direction: rtl; +} + +/* LOGIN */ + +.login .form-row { + float: right; +} + +.login .form-row label { + float: right; + padding-left: 0.5em; + padding-right: 0; + text-align: left; +} + +.login .submit-row { + clear: both; + padding: 1em 9.4em 0 0; +} + +/* GLOBAL */ + +th { + text-align: 
right; +} + +.module h2, .module caption { + text-align: right; +} + +.addlink, .changelink { + padding-left: 0px; + padding-right: 12px; + background-position: 100% 0.2em; +} + +.deletelink { + padding-left: 0px; + padding-right: 12px; + background-position: 100% 0.25em; +} + +.object-tools { + float: left; +} + +thead th:first-child, +tfoot td:first-child { + border-left: 1px solid #ddd !important; +} + +/* LAYOUT */ + +#user-tools { + right: auto; + left: 0; + text-align: left; +} + +div.breadcrumbs { + text-align: right; +} + +#content-main { + float: right; +} + +#content-related { + float: left; + margin-left: -19em; + margin-right: auto; +} + +.colMS { + margin-left: 20em !important; + margin-right: 10px !important; +} + +/* SORTABLE TABLES */ + + +table thead th.sorted a { + padding-left: 13px; + padding-right: 0px; +} + +table thead th.ascending a, +table thead th.descending a { + background-position: left; +} + +/* dashboard styles */ + +.dashboard .module table td a { + padding-left: .6em; + padding-right: 12px; +} + +/* changelists styles */ + +.change-list ul.toplinks li { + float: right; +} + +.change-list .filtered { + background: white url(../img/admin/changelist-bg_rtl.gif) top left repeat-y !important; +} + +.change-list .filtered table { + border-left: 1px solid #ddd; + border-right: 0px none; +} + +#changelist-filter { + right: auto; + left: 0; + border-left: 0px none; + border-right: 1px solid #ddd; +} + +.change-list .filtered .results, .change-list .filtered .paginator, .filtered #toolbar, .filtered div.xfull { + margin-right: 0px !important; + margin-left: 160px !important; +} + +#changelist-filter li.selected { + border-left: 0px none; + padding-left: 0px; + margin-left: 0; + border-right: 5px solid #ccc; + padding-right: 5px; + margin-right: -10px; +} + +.filtered .actions { + border-left:1px solid #DDDDDD; + margin-left:160px !important; + border-right: 0 none; + margin-right:0 !important; +} + +#changelist table tbody td:first-child, 
#changelist table tbody th:first-child { + border-right: 0; + border-left: 1px solid #ddd; +} + +/* FORMS */ + +.aligned label { + padding: 0 0 3px 1em; + float: right; +} + +.submit-row { + text-align: left +} + +.submit-row p.deletelink-box { + float: right; +} + +.submit-row .deletelink { + background: url(../img/admin/icon_deletelink.gif) 0 50% no-repeat; + padding-right: 14px; +} + +.vDateField, .vTimeField { + margin-left: 2px; +} + +form ul.inline li { + float: right; + padding-right: 0; + padding-left: 7px; +} + +input[type=submit].default, .submit-row input.default { + float: left; +} + +fieldset .field-box { + float: right; + margin-left: 20px; +} + +.errorlist li { + background-position: 100% .3em; + padding: 4px 25px 4px 5px; +} + +.errornote { + background-position: 100% .3em; + padding: 4px 25px 4px 5px; +} + +/* WIDGETS */ + +.calendarnav-previous { + top: 0; + left: auto; + right: 0; +} + +.calendarnav-next { + top: 0; + right: auto; + left: 0; +} + +.calendar caption, .calendarbox h2 { + text-align: center; +} + +.selector { + float: right; +} + +.selector .selector-filter { + text-align: right; +} + +.inline-deletelink { + float: left; +} + +/* MISC */ + +.inline-related h2, .inline-group h2 { + text-align: right +} + +.inline-related h3 span.delete { + padding-right: 20px; + padding-left: inherit; + left: 10px; + right: inherit; +} + +.inline-related h3 span.delete label { + margin-left: inherit; + margin-right: 2px; +} diff --git a/users/static/admin/css/widgets.css b/users/static/admin/css/widgets.css new file mode 100644 index 0000000..26400fa --- /dev/null +++ b/users/static/admin/css/widgets.css @@ -0,0 +1,514 @@ +/* SELECTOR (FILTER INTERFACE) */ + +.selector { + width: 580px; + float: left; +} + +.selector select { + width: 270px; + height: 17.2em; +} + +.selector-available, .selector-chosen { + float: left; + width: 270px; + text-align: center; + margin-bottom: 5px; +} + +.selector-available h2, .selector-chosen h2 { + border: 1px solid 
#ccc; +} + +.selector .selector-available h2 { + background: white url(../img/admin/nav-bg.gif) bottom left repeat-x; + color: #666; +} + +.selector .selector-filter { + background: white; + border: 1px solid #ccc; + border-width: 0 1px; + padding: 3px; + color: #999; + font-size: 10px; + margin: 0; + text-align: left; +} + +.selector .selector-chosen .selector-filter { + padding: 4px 5px; +} + +.selector .selector-available input { + width: 230px; +} + +.selector ul.selector-chooser { + float: left; + width: 22px; + height: 50px; + background: url(../img/admin/chooser-bg.gif) top center no-repeat; + margin: 8em 3px 0 3px; + padding: 0; +} + +.selector-chooser li { + margin: 0; + padding: 3px; + list-style-type: none; +} + +.selector select { + margin-bottom: 5px; + margin-top: 0; +} + +.selector-add, .selector-remove { + width: 16px; + height: 16px; + display: block; + text-indent: -3000px; + overflow: hidden; +} + +.selector-add { + background: url(../img/admin/selector-add.gif) top center no-repeat; + margin-bottom: 2px; +} + +.selector-remove { + background: url(../img/admin/selector-remove.gif) top center no-repeat; +} + +a.selector-chooseall, a.selector-clearall { + display: block; + width: 6em; + text-align: left; + margin-left: auto; + margin-right: auto; + font-weight: bold; + color: #666; + padding: 3px 0 3px 18px; +} + +a.selector-chooseall:hover, a.selector-clearall:hover { + color: #036; +} + +a.selector-chooseall { + width: 7em; + background: url(../img/admin/selector-addall.gif) left center no-repeat; +} + +a.selector-clearall { + background: url(../img/admin/selector-removeall.gif) left center no-repeat; +} + + +/* STACKED SELECTORS */ + +.stacked { + float: left; + width: 500px; +} + +.stacked select { + width: 480px; + height: 10.1em; +} + +.stacked .selector-available, .stacked .selector-chosen { + width: 480px; +} + +.stacked .selector-available { + margin-bottom: 0; +} + +.stacked .selector-available input { + width: 442px; +} + +.stacked 
ul.selector-chooser { + height: 22px; + width: 50px; + margin: 0 0 3px 40%; + background: url(../img/admin/chooser_stacked-bg.gif) top center no-repeat; +} + +.stacked .selector-chooser li { + float: left; + padding: 3px 3px 3px 5px; +} + +.stacked .selector-chooseall, .stacked .selector-clearall { + display: none; +} + +.stacked .selector-add { + background-image: url(../img/admin/selector_stacked-add.gif); +} + +.stacked .selector-remove { + background-image: url(../img/admin/selector_stacked-remove.gif); +} + + +/* DATE AND TIME */ + +p.datetime { + line-height: 20px; + margin: 0; + padding: 0; + color: #666; + font-size: 11px; + font-weight: bold; +} + +.datetime span { + font-size: 11px; + color: #ccc; + font-weight: normal; + white-space: nowrap; +} + +table p.datetime { + font-size: 10px; + margin-left: 0; + padding-left: 0; +} + +/* FILE UPLOADS */ + +p.file-upload { + line-height: 20px; + margin: 0; + padding: 0; + color: #666; + font-size: 11px; + font-weight: bold; +} + +.file-upload a { + font-weight: normal; +} + +.file-upload .deletelink { + margin-left: 5px; +} + +span.clearable-file-input label { + color: #333; + font-size: 11px; + display: inline; + float: none; +} + +/* CALENDARS & CLOCKS */ + +.calendarbox, .clockbox { + margin: 5px auto; + font-size: 11px; + width: 16em; + text-align: center; + background: white; + position: relative; +} + +.clockbox { + width: auto; +} + +.calendar { + margin: 0; + padding: 0; +} + +.calendar table { + margin: 0; + padding: 0; + border-collapse: collapse; + background: white; + width: 99%; +} + +.calendar caption, .calendarbox h2 { + margin: 0; + font-size: 11px; + text-align: center; + border-top: none; +} + +.calendar th { + font-size: 10px; + color: #666; + padding: 2px 3px; + text-align: center; + background: #e1e1e1 url(../img/admin/nav-bg.gif) 0 50% repeat-x; + border-bottom: 1px solid #ddd; +} + +.calendar td { + font-size: 11px; + text-align: center; + padding: 0; + border-top: 1px solid #eee; + 
border-bottom: none; +} + +.calendar td.selected a { + background: #C9DBED; +} + +.calendar td.nonday { + background: #efefef; +} + +.calendar td.today a { + background: #ffc; +} + +.calendar td a, .timelist a { + display: block; + font-weight: bold; + padding: 4px; + text-decoration: none; + color: #444; +} + +.calendar td a:hover, .timelist a:hover { + background: #5b80b2; + color: white; +} + +.calendar td a:active, .timelist a:active { + background: #036; + color: white; +} + +.calendarnav { + font-size: 10px; + text-align: center; + color: #ccc; + margin: 0; + padding: 1px 3px; +} + +.calendarnav a:link, #calendarnav a:visited, #calendarnav a:hover { + color: #999; +} + +.calendar-shortcuts { + background: white; + font-size: 10px; + line-height: 11px; + border-top: 1px solid #eee; + padding: 3px 0 4px; + color: #ccc; +} + +.calendarbox .calendarnav-previous, .calendarbox .calendarnav-next { + display: block; + position: absolute; + font-weight: bold; + font-size: 12px; + background: #C9DBED url(../img/admin/default-bg.gif) bottom left repeat-x; + padding: 1px 4px 2px 4px; + color: white; +} + +.calendarnav-previous:hover, .calendarnav-next:hover { + background: #036; +} + +.calendarnav-previous { + top: 0; + left: 0; +} + +.calendarnav-next { + top: 0; + right: 0; +} + +.calendar-cancel { + margin: 0 !important; + padding: 0; + font-size: 10px; + background: #e1e1e1 url(../img/admin/nav-bg.gif) 0 50% repeat-x; + border-top: 1px solid #ddd; +} + +.calendar-cancel a { + padding: 2px; + color: #999; +} + +ul.timelist, .timelist li { + list-style-type: none; + margin: 0; + padding: 0; +} + +.timelist a { + padding: 2px; +} + +/* INLINE ORDERER */ + +ul.orderer { + position: relative; + padding: 0 !important; + margin: 0 !important; + list-style-type: none; +} + +ul.orderer li { + list-style-type: none; + display: block; + padding: 0; + margin: 0; + border: 1px solid #bbb; + border-width: 0 1px 1px 0; + white-space: nowrap; + overflow: hidden; + background: 
#e2e2e2 url(../img/admin/nav-bg-grabber.gif) repeat-y; +} + +ul.orderer li:hover { + cursor: move; + background-color: #ddd; +} + +ul.orderer li a.selector { + margin-left: 12px; + overflow: hidden; + width: 83%; + font-size: 10px !important; + padding: 0.6em 0; +} + +ul.orderer li a:link, ul.orderer li a:visited { + color: #333; +} + +ul.orderer li .inline-deletelink { + position: absolute; + right: 4px; + margin-top: 0.6em; +} + +ul.orderer li.selected { + background-color: #f8f8f8; + border-right-color: #f8f8f8; +} + +ul.orderer li.deleted { + background: #bbb url(../img/admin/deleted-overlay.gif); +} + +ul.orderer li.deleted a:link, ul.orderer li.deleted a:visited { + color: #888; +} + +ul.orderer li.deleted .inline-deletelink { + background-image: url(../img/admin/inline-restore.png); +} + +ul.orderer li.deleted:hover, ul.orderer li.deleted a.selector:hover { + cursor: default; +} + +/* EDIT INLINE */ + +.inline-deletelink { + float: right; + text-indent: -9999px; + background: transparent url(../img/admin/inline-delete.png) no-repeat; + width: 15px; + height: 15px; + border: 0px none; + outline: 0; /* Remove dotted border around link */ +} + +.inline-deletelink:hover { + background-position: -15px 0; + cursor: pointer; +} + +.editinline button.addlink { + border: 0px none; + color: #5b80b2; + font-size: 100%; + cursor: pointer; +} + +.editinline button.addlink:hover { + color: #036; + cursor: pointer; +} + +.editinline table .help { + text-align: right; + float: right; + padding-left: 2em; +} + +.editinline tfoot .addlink { + white-space: nowrap; +} + +.editinline table thead th:last-child { + border-left: none; +} + +.editinline tr.deleted { + background: #ddd url(../img/admin/deleted-overlay.gif); +} + +.editinline tr.deleted .inline-deletelink { + background-image: url(../img/admin/inline-restore.png); +} + +.editinline tr.deleted td:hover { + cursor: default; +} + +.editinline tr.deleted td:first-child { + background-image: none !important; +} + +/* EDIT 
INLINE - STACKED */ + +.editinline-stacked { + min-width: 758px; +} + +.editinline-stacked .inline-object { + margin-left: 210px; + background: white; +} + +.editinline-stacked .inline-source { + float: left; + width: 200px; + background: #f8f8f8; +} + +.editinline-stacked .inline-splitter { + float: left; + width: 9px; + background: #f8f8f8 url(../img/admin/inline-splitter-bg.gif) 50% 50% no-repeat; + border-right: 1px solid #ccc; +} + +.editinline-stacked .controls { + clear: both; + background: #e1e1e1 url(../img/admin/nav-bg.gif) top left repeat-x; + padding: 3px 4px; + font-size: 11px; + border-top: 1px solid #ddd; +} + diff --git a/users/static/admin/img/admin/arrow-down.gif b/users/static/admin/img/admin/arrow-down.gif new file mode 100644 index 0000000..a967b9f Binary files /dev/null and b/users/static/admin/img/admin/arrow-down.gif differ diff --git a/users/static/admin/img/admin/arrow-up.gif b/users/static/admin/img/admin/arrow-up.gif new file mode 100644 index 0000000..3fe4851 Binary files /dev/null and b/users/static/admin/img/admin/arrow-up.gif differ diff --git a/users/static/admin/img/admin/changelist-bg.gif b/users/static/admin/img/admin/changelist-bg.gif new file mode 100644 index 0000000..7f46994 Binary files /dev/null and b/users/static/admin/img/admin/changelist-bg.gif differ diff --git a/users/static/admin/img/admin/changelist-bg_rtl.gif b/users/static/admin/img/admin/changelist-bg_rtl.gif new file mode 100644 index 0000000..2379712 Binary files /dev/null and b/users/static/admin/img/admin/changelist-bg_rtl.gif differ diff --git a/users/static/admin/img/admin/chooser-bg.gif b/users/static/admin/img/admin/chooser-bg.gif new file mode 100644 index 0000000..30e83c2 Binary files /dev/null and b/users/static/admin/img/admin/chooser-bg.gif differ diff --git a/users/static/admin/img/admin/chooser_stacked-bg.gif b/users/static/admin/img/admin/chooser_stacked-bg.gif new file mode 100644 index 0000000..5d104b6 Binary files /dev/null and 
b/users/static/admin/img/admin/chooser_stacked-bg.gif differ diff --git a/users/static/admin/img/admin/default-bg-reverse.gif b/users/static/admin/img/admin/default-bg-reverse.gif new file mode 100644 index 0000000..0873281 Binary files /dev/null and b/users/static/admin/img/admin/default-bg-reverse.gif differ diff --git a/users/static/admin/img/admin/default-bg.gif b/users/static/admin/img/admin/default-bg.gif new file mode 100644 index 0000000..003aeca Binary files /dev/null and b/users/static/admin/img/admin/default-bg.gif differ diff --git a/users/static/admin/img/admin/deleted-overlay.gif b/users/static/admin/img/admin/deleted-overlay.gif new file mode 100644 index 0000000..dc3828f Binary files /dev/null and b/users/static/admin/img/admin/deleted-overlay.gif differ diff --git a/users/static/admin/img/admin/icon-no.gif b/users/static/admin/img/admin/icon-no.gif new file mode 100644 index 0000000..1b4ee58 Binary files /dev/null and b/users/static/admin/img/admin/icon-no.gif differ diff --git a/users/static/admin/img/admin/icon-unknown.gif b/users/static/admin/img/admin/icon-unknown.gif new file mode 100644 index 0000000..cfd2b02 Binary files /dev/null and b/users/static/admin/img/admin/icon-unknown.gif differ diff --git a/users/static/admin/img/admin/icon-yes.gif b/users/static/admin/img/admin/icon-yes.gif new file mode 100644 index 0000000..7399282 Binary files /dev/null and b/users/static/admin/img/admin/icon-yes.gif differ diff --git a/users/static/admin/img/admin/icon_addlink.gif b/users/static/admin/img/admin/icon_addlink.gif new file mode 100644 index 0000000..ee70e1a Binary files /dev/null and b/users/static/admin/img/admin/icon_addlink.gif differ diff --git a/users/static/admin/img/admin/icon_alert.gif b/users/static/admin/img/admin/icon_alert.gif new file mode 100644 index 0000000..a1dde26 Binary files /dev/null and b/users/static/admin/img/admin/icon_alert.gif differ diff --git a/users/static/admin/img/admin/icon_calendar.gif 
b/users/static/admin/img/admin/icon_calendar.gif new file mode 100644 index 0000000..7587b30 Binary files /dev/null and b/users/static/admin/img/admin/icon_calendar.gif differ diff --git a/users/static/admin/img/admin/icon_changelink.gif b/users/static/admin/img/admin/icon_changelink.gif new file mode 100644 index 0000000..e1b9afd Binary files /dev/null and b/users/static/admin/img/admin/icon_changelink.gif differ diff --git a/users/static/admin/img/admin/icon_clock.gif b/users/static/admin/img/admin/icon_clock.gif new file mode 100644 index 0000000..ff2d57e Binary files /dev/null and b/users/static/admin/img/admin/icon_clock.gif differ diff --git a/users/static/admin/img/admin/icon_deletelink.gif b/users/static/admin/img/admin/icon_deletelink.gif new file mode 100644 index 0000000..72523e3 Binary files /dev/null and b/users/static/admin/img/admin/icon_deletelink.gif differ diff --git a/users/static/admin/img/admin/icon_error.gif b/users/static/admin/img/admin/icon_error.gif new file mode 100644 index 0000000..3730a00 Binary files /dev/null and b/users/static/admin/img/admin/icon_error.gif differ diff --git a/users/static/admin/img/admin/icon_searchbox.png b/users/static/admin/img/admin/icon_searchbox.png new file mode 100644 index 0000000..8ab579e Binary files /dev/null and b/users/static/admin/img/admin/icon_searchbox.png differ diff --git a/users/static/admin/img/admin/icon_success.gif b/users/static/admin/img/admin/icon_success.gif new file mode 100644 index 0000000..5cf90a1 Binary files /dev/null and b/users/static/admin/img/admin/icon_success.gif differ diff --git a/users/static/admin/img/admin/inline-delete-8bit.png b/users/static/admin/img/admin/inline-delete-8bit.png new file mode 100644 index 0000000..95caf59 Binary files /dev/null and b/users/static/admin/img/admin/inline-delete-8bit.png differ diff --git a/users/static/admin/img/admin/inline-delete.png b/users/static/admin/img/admin/inline-delete.png new file mode 100644 index 0000000..d59bcd2 Binary 
files /dev/null and b/users/static/admin/img/admin/inline-delete.png differ diff --git a/users/static/admin/img/admin/inline-restore-8bit.png b/users/static/admin/img/admin/inline-restore-8bit.png new file mode 100644 index 0000000..e087c8e Binary files /dev/null and b/users/static/admin/img/admin/inline-restore-8bit.png differ diff --git a/users/static/admin/img/admin/inline-restore.png b/users/static/admin/img/admin/inline-restore.png new file mode 100644 index 0000000..efdd92a Binary files /dev/null and b/users/static/admin/img/admin/inline-restore.png differ diff --git a/users/static/admin/img/admin/inline-splitter-bg.gif b/users/static/admin/img/admin/inline-splitter-bg.gif new file mode 100644 index 0000000..32ac5b3 Binary files /dev/null and b/users/static/admin/img/admin/inline-splitter-bg.gif differ diff --git a/users/static/admin/img/admin/nav-bg-grabber.gif b/users/static/admin/img/admin/nav-bg-grabber.gif new file mode 100644 index 0000000..0a784fa Binary files /dev/null and b/users/static/admin/img/admin/nav-bg-grabber.gif differ diff --git a/users/static/admin/img/admin/nav-bg-reverse.gif b/users/static/admin/img/admin/nav-bg-reverse.gif new file mode 100644 index 0000000..f11029f Binary files /dev/null and b/users/static/admin/img/admin/nav-bg-reverse.gif differ diff --git a/users/static/admin/img/admin/nav-bg.gif b/users/static/admin/img/admin/nav-bg.gif new file mode 100644 index 0000000..f8402b8 Binary files /dev/null and b/users/static/admin/img/admin/nav-bg.gif differ diff --git a/users/static/admin/img/admin/selector-add.gif b/users/static/admin/img/admin/selector-add.gif new file mode 100644 index 0000000..50132d1 Binary files /dev/null and b/users/static/admin/img/admin/selector-add.gif differ diff --git a/users/static/admin/img/admin/selector-addall.gif b/users/static/admin/img/admin/selector-addall.gif new file mode 100644 index 0000000..d6e7c63 Binary files /dev/null and b/users/static/admin/img/admin/selector-addall.gif differ diff --git 
a/users/static/admin/img/admin/selector-remove.gif b/users/static/admin/img/admin/selector-remove.gif new file mode 100644 index 0000000..2b9b0a2 Binary files /dev/null and b/users/static/admin/img/admin/selector-remove.gif differ diff --git a/users/static/admin/img/admin/selector-removeall.gif b/users/static/admin/img/admin/selector-removeall.gif new file mode 100644 index 0000000..5a44219 Binary files /dev/null and b/users/static/admin/img/admin/selector-removeall.gif differ diff --git a/users/static/admin/img/admin/selector-search.gif b/users/static/admin/img/admin/selector-search.gif new file mode 100644 index 0000000..6d5f4c7 Binary files /dev/null and b/users/static/admin/img/admin/selector-search.gif differ diff --git a/users/static/admin/img/admin/selector_stacked-add.gif b/users/static/admin/img/admin/selector_stacked-add.gif new file mode 100644 index 0000000..7426169 Binary files /dev/null and b/users/static/admin/img/admin/selector_stacked-add.gif differ diff --git a/users/static/admin/img/admin/selector_stacked-remove.gif b/users/static/admin/img/admin/selector_stacked-remove.gif new file mode 100644 index 0000000..60412ce Binary files /dev/null and b/users/static/admin/img/admin/selector_stacked-remove.gif differ diff --git a/users/static/admin/img/admin/tool-left.gif b/users/static/admin/img/admin/tool-left.gif new file mode 100644 index 0000000..011490f Binary files /dev/null and b/users/static/admin/img/admin/tool-left.gif differ diff --git a/users/static/admin/img/admin/tool-left_over.gif b/users/static/admin/img/admin/tool-left_over.gif new file mode 100644 index 0000000..937e07b Binary files /dev/null and b/users/static/admin/img/admin/tool-left_over.gif differ diff --git a/users/static/admin/img/admin/tool-right.gif b/users/static/admin/img/admin/tool-right.gif new file mode 100644 index 0000000..cdc140c Binary files /dev/null and b/users/static/admin/img/admin/tool-right.gif differ diff --git a/users/static/admin/img/admin/tool-right_over.gif 
b/users/static/admin/img/admin/tool-right_over.gif new file mode 100644 index 0000000..4db977e Binary files /dev/null and b/users/static/admin/img/admin/tool-right_over.gif differ diff --git a/users/static/admin/img/admin/tooltag-add.gif b/users/static/admin/img/admin/tooltag-add.gif new file mode 100644 index 0000000..8b53d49 Binary files /dev/null and b/users/static/admin/img/admin/tooltag-add.gif differ diff --git a/users/static/admin/img/admin/tooltag-add_over.gif b/users/static/admin/img/admin/tooltag-add_over.gif new file mode 100644 index 0000000..bfc52f1 Binary files /dev/null and b/users/static/admin/img/admin/tooltag-add_over.gif differ diff --git a/users/static/admin/img/admin/tooltag-arrowright.gif b/users/static/admin/img/admin/tooltag-arrowright.gif new file mode 100644 index 0000000..cdaaae7 Binary files /dev/null and b/users/static/admin/img/admin/tooltag-arrowright.gif differ diff --git a/users/static/admin/img/admin/tooltag-arrowright_over.gif b/users/static/admin/img/admin/tooltag-arrowright_over.gif new file mode 100644 index 0000000..7163189 Binary files /dev/null and b/users/static/admin/img/admin/tooltag-arrowright_over.gif differ diff --git a/users/static/admin/img/gis/move_vertex_off.png b/users/static/admin/img/gis/move_vertex_off.png new file mode 100644 index 0000000..296b2e2 Binary files /dev/null and b/users/static/admin/img/gis/move_vertex_off.png differ diff --git a/users/static/admin/img/gis/move_vertex_on.png b/users/static/admin/img/gis/move_vertex_on.png new file mode 100644 index 0000000..21f4758 Binary files /dev/null and b/users/static/admin/img/gis/move_vertex_on.png differ diff --git a/users/static/admin/js/LICENSE-JQUERY.txt b/users/static/admin/js/LICENSE-JQUERY.txt new file mode 100644 index 0000000..a4c5bd7 --- /dev/null +++ b/users/static/admin/js/LICENSE-JQUERY.txt @@ -0,0 +1,20 @@ +Copyright (c) 2010 John Resig, http://jquery.com/ + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this 
software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/users/static/admin/js/SelectBox.js b/users/static/admin/js/SelectBox.js new file mode 100644 index 0000000..f28c861 --- /dev/null +++ b/users/static/admin/js/SelectBox.js @@ -0,0 +1,111 @@ +var SelectBox = { + cache: new Object(), + init: function(id) { + var box = document.getElementById(id); + var node; + SelectBox.cache[id] = new Array(); + var cache = SelectBox.cache[id]; + for (var i = 0; (node = box.options[i]); i++) { + cache.push({value: node.value, text: node.text, displayed: 1}); + } + }, + redisplay: function(id) { + // Repopulate HTML select box from cache + var box = document.getElementById(id); + box.options.length = 0; // clear all options + for (var i = 0, j = SelectBox.cache[id].length; i < j; i++) { + var node = SelectBox.cache[id][i]; + if (node.displayed) { + box.options[box.options.length] = new Option(node.text, node.value, false, false); + } + } + }, + filter: function(id, text) { + // Redisplay the HTML select box, displaying only the choices containing ALL + // the 
words in text. (It's an AND search.) + var tokens = text.toLowerCase().split(/\s+/); + var node, token; + for (var i = 0; (node = SelectBox.cache[id][i]); i++) { + node.displayed = 1; + for (var j = 0; (token = tokens[j]); j++) { + if (node.text.toLowerCase().indexOf(token) == -1) { + node.displayed = 0; + } + } + } + SelectBox.redisplay(id); + }, + delete_from_cache: function(id, value) { + var node, delete_index = null; + for (var i = 0; (node = SelectBox.cache[id][i]); i++) { + if (node.value == value) { + delete_index = i; + break; + } + } + var j = SelectBox.cache[id].length - 1; + for (var i = delete_index; i < j; i++) { + SelectBox.cache[id][i] = SelectBox.cache[id][i+1]; + } + SelectBox.cache[id].length--; + }, + add_to_cache: function(id, option) { + SelectBox.cache[id].push({value: option.value, text: option.text, displayed: 1}); + }, + cache_contains: function(id, value) { + // Check if an item is contained in the cache + var node; + for (var i = 0; (node = SelectBox.cache[id][i]); i++) { + if (node.value == value) { + return true; + } + } + return false; + }, + move: function(from, to) { + var from_box = document.getElementById(from); + var to_box = document.getElementById(to); + var option; + for (var i = 0; (option = from_box.options[i]); i++) { + if (option.selected && SelectBox.cache_contains(from, option.value)) { + SelectBox.add_to_cache(to, {value: option.value, text: option.text, displayed: 1}); + SelectBox.delete_from_cache(from, option.value); + } + } + SelectBox.redisplay(from); + SelectBox.redisplay(to); + }, + move_all: function(from, to) { + var from_box = document.getElementById(from); + var to_box = document.getElementById(to); + var option; + for (var i = 0; (option = from_box.options[i]); i++) { + if (SelectBox.cache_contains(from, option.value)) { + SelectBox.add_to_cache(to, {value: option.value, text: option.text, displayed: 1}); + SelectBox.delete_from_cache(from, option.value); + } + } + SelectBox.redisplay(from); + 
SelectBox.redisplay(to); + }, + sort: function(id) { + SelectBox.cache[id].sort( function(a, b) { + a = a.text.toLowerCase(); + b = b.text.toLowerCase(); + try { + if (a > b) return 1; + if (a < b) return -1; + } + catch (e) { + // silently fail on IE 'unknown' exception + } + return 0; + } ); + }, + select_all: function(id) { + var box = document.getElementById(id); + for (var i = 0; i < box.options.length; i++) { + box.options[i].selected = 'selected'; + } + } +} diff --git a/users/static/admin/js/SelectFilter2.js b/users/static/admin/js/SelectFilter2.js new file mode 100644 index 0000000..92eff2e --- /dev/null +++ b/users/static/admin/js/SelectFilter2.js @@ -0,0 +1,130 @@ +/* +SelectFilter2 - Turns a multiple-select box into a filter interface. + +Different than SelectFilter because this is coupled to the admin framework. + +Requires core.js, SelectBox.js and addevent.js. +*/ + +function findForm(node) { + // returns the node of the form containing the given node + if (node.tagName.toLowerCase() != 'form') { + return findForm(node.parentNode); + } + return node; +} + +var SelectFilter = { + init: function(field_id, field_name, is_stacked, admin_media_prefix) { + if (field_id.match(/__prefix__/)){ + // Don't intialize on empty forms. + return; + } + var from_box = document.getElementById(field_id); + from_box.id += '_from'; // change its ID + from_box.className = 'filtered'; + + var ps = from_box.parentNode.getElementsByTagName('p'); + for (var i=0; i, because it just gets in the way. + from_box.parentNode.removeChild(ps[i]); + } else if (ps[i].className.indexOf("help") != -1) { + // Move help text up to the top so it isn't below the select + // boxes or wrapped off on the side to the right of the add + // button: + from_box.parentNode.insertBefore(ps[i], from_box.parentNode.firstChild); + } + } + + //
    or
    + var selector_div = quickElement('div', from_box.parentNode); + selector_div.className = is_stacked ? 'selector stacked' : 'selector'; + + //
    + var selector_available = quickElement('div', selector_div, ''); + selector_available.className = 'selector-available'; + quickElement('h2', selector_available, interpolate(gettext('Available %s'), [field_name])); + var filter_p = quickElement('p', selector_available, ''); + filter_p.className = 'selector-filter'; + + var search_filter_label = quickElement('label', filter_p, '', 'for', field_id + "_input", 'style', 'width:16px;padding:2px'); + + var search_selector_img = quickElement('img', search_filter_label, '', 'src', admin_media_prefix + 'img/admin/selector-search.gif'); + search_selector_img.alt = gettext("Filter"); + + filter_p.appendChild(document.createTextNode(' ')); + + var filter_input = quickElement('input', filter_p, '', 'type', 'text'); + filter_input.id = field_id + '_input'; + selector_available.appendChild(from_box); + var choose_all = quickElement('a', selector_available, gettext('Choose all'), 'href', 'javascript: (function(){ SelectBox.move_all("' + field_id + '_from", "' + field_id + '_to"); })()'); + choose_all.className = 'selector-chooseall'; + + //
      + var selector_chooser = quickElement('ul', selector_div, ''); + selector_chooser.className = 'selector-chooser'; + var add_link = quickElement('a', quickElement('li', selector_chooser, ''), gettext('Add'), 'href', 'javascript: (function(){ SelectBox.move("' + field_id + '_from","' + field_id + '_to");})()'); + add_link.className = 'selector-add'; + var remove_link = quickElement('a', quickElement('li', selector_chooser, ''), gettext('Remove'), 'href', 'javascript: (function(){ SelectBox.move("' + field_id + '_to","' + field_id + '_from");})()'); + remove_link.className = 'selector-remove'; + + //
      + var selector_chosen = quickElement('div', selector_div, ''); + selector_chosen.className = 'selector-chosen'; + quickElement('h2', selector_chosen, interpolate(gettext('Chosen %s'), [field_name])); + var selector_filter = quickElement('p', selector_chosen, gettext('Select your choice(s) and click ')); + selector_filter.className = 'selector-filter'; + quickElement('img', selector_filter, '', 'src', admin_media_prefix + (is_stacked ? 'img/admin/selector_stacked-add.gif':'img/admin/selector-add.gif'), 'alt', 'Add'); + var to_box = quickElement('select', selector_chosen, '', 'id', field_id + '_to', 'multiple', 'multiple', 'size', from_box.size, 'name', from_box.getAttribute('name')); + to_box.className = 'filtered'; + var clear_all = quickElement('a', selector_chosen, gettext('Clear all'), 'href', 'javascript: (function() { SelectBox.move_all("' + field_id + '_to", "' + field_id + '_from");})()'); + clear_all.className = 'selector-clearall'; + + from_box.setAttribute('name', from_box.getAttribute('name') + '_old'); + + // Set up the JavaScript event handlers for the select box filter interface + addEvent(filter_input, 'keyup', function(e) { SelectFilter.filter_key_up(e, field_id); }); + addEvent(filter_input, 'keydown', function(e) { SelectFilter.filter_key_down(e, field_id); }); + addEvent(from_box, 'dblclick', function() { SelectBox.move(field_id + '_from', field_id + '_to'); }); + addEvent(to_box, 'dblclick', function() { SelectBox.move(field_id + '_to', field_id + '_from'); }); + addEvent(findForm(from_box), 'submit', function() { SelectBox.select_all(field_id + '_to'); }); + SelectBox.init(field_id + '_from'); + SelectBox.init(field_id + '_to'); + // Move selected from_box options to to_box + SelectBox.move(field_id + '_from', field_id + '_to'); + }, + filter_key_up: function(event, field_id) { + from = document.getElementById(field_id + '_from'); + // don't submit form if user pressed Enter + if ((event.which && event.which == 13) || (event.keyCode && 
event.keyCode == 13)) { + from.selectedIndex = 0; + SelectBox.move(field_id + '_from', field_id + '_to'); + from.selectedIndex = 0; + return false; + } + var temp = from.selectedIndex; + SelectBox.filter(field_id + '_from', document.getElementById(field_id + '_input').value); + from.selectedIndex = temp; + return true; + }, + filter_key_down: function(event, field_id) { + from = document.getElementById(field_id + '_from'); + // right arrow -- move across + if ((event.which && event.which == 39) || (event.keyCode && event.keyCode == 39)) { + var old_index = from.selectedIndex; + SelectBox.move(field_id + '_from', field_id + '_to'); + from.selectedIndex = (old_index == from.length) ? from.length - 1 : old_index; + return false; + } + // down arrow -- wrap around + if ((event.which && event.which == 40) || (event.keyCode && event.keyCode == 40)) { + from.selectedIndex = (from.length == from.selectedIndex + 1) ? 0 : from.selectedIndex + 1; + } + // up arrow -- wrap around + if ((event.which && event.which == 38) || (event.keyCode && event.keyCode == 38)) { + from.selectedIndex = (from.selectedIndex == 0) ? 
from.length - 1 : from.selectedIndex - 1; + } + return true; + } +} diff --git a/users/static/admin/js/actions.js b/users/static/admin/js/actions.js new file mode 100644 index 0000000..94aa6db --- /dev/null +++ b/users/static/admin/js/actions.js @@ -0,0 +1,139 @@ +(function($) { + $.fn.actions = function(opts) { + var options = $.extend({}, $.fn.actions.defaults, opts); + var actionCheckboxes = $(this); + var list_editable_changed = false; + checker = function(checked) { + if (checked) { + showQuestion(); + } else { + reset(); + } + $(actionCheckboxes).attr("checked", checked) + .parent().parent().toggleClass(options.selectedClass, checked); + } + updateCounter = function() { + var sel = $(actionCheckboxes).filter(":checked").length; + $(options.counterContainer).html(interpolate( + ngettext('%(sel)s of %(cnt)s selected', '%(sel)s of %(cnt)s selected', sel), { + sel: sel, + cnt: _actions_icnt + }, true)); + $(options.allToggle).attr("checked", function() { + if (sel == actionCheckboxes.length) { + value = true; + showQuestion(); + } else { + value = false; + clearAcross(); + } + return value; + }); + } + showQuestion = function() { + $(options.acrossClears).hide(); + $(options.acrossQuestions).show(); + $(options.allContainer).hide(); + } + showClear = function() { + $(options.acrossClears).show(); + $(options.acrossQuestions).hide(); + $(options.actionContainer).toggleClass(options.selectedClass); + $(options.allContainer).show(); + $(options.counterContainer).hide(); + } + reset = function() { + $(options.acrossClears).hide(); + $(options.acrossQuestions).hide(); + $(options.allContainer).hide(); + $(options.counterContainer).show(); + } + clearAcross = function() { + reset(); + $(options.acrossInput).val(0); + $(options.actionContainer).removeClass(options.selectedClass); + } + // Show counter by default + $(options.counterContainer).show(); + // Check state of checkboxes and reinit state if needed + $(this).filter(":checked").each(function(i) { + 
$(this).parent().parent().toggleClass(options.selectedClass); + updateCounter(); + if ($(options.acrossInput).val() == 1) { + showClear(); + } + }); + $(options.allToggle).show().click(function() { + checker($(this).attr("checked")); + updateCounter(); + }); + $("div.actions span.question a").click(function(event) { + event.preventDefault(); + $(options.acrossInput).val(1); + showClear(); + }); + $("div.actions span.clear a").click(function(event) { + event.preventDefault(); + $(options.allToggle).attr("checked", false); + clearAcross(); + checker(0); + updateCounter(); + }); + lastChecked = null; + $(actionCheckboxes).click(function(event) { + if (!event) { var event = window.event; } + var target = event.target ? event.target : event.srcElement; + if (lastChecked && $.data(lastChecked) != $.data(target) && event.shiftKey == true) { + var inrange = false; + $(lastChecked).attr("checked", target.checked) + .parent().parent().toggleClass(options.selectedClass, target.checked); + $(actionCheckboxes).each(function() { + if ($.data(this) == $.data(lastChecked) || $.data(this) == $.data(target)) { + inrange = (inrange) ? false : true; + } + if (inrange) { + $(this).attr("checked", target.checked) + .parent().parent().toggleClass(options.selectedClass, target.checked); + } + }); + } + $(target).parent().parent().toggleClass(options.selectedClass, target.checked); + lastChecked = target; + updateCounter(); + }); + $('form#changelist-form table#result_list tr').find('td:gt(0) :input').change(function() { + list_editable_changed = true; + }); + $('form#changelist-form button[name="index"]').click(function(event) { + if (list_editable_changed) { + return confirm(gettext("You have unsaved changes on individual editable fields. 
If you run an action, your unsaved changes will be lost.")); + } + }); + $('form#changelist-form input[name="_save"]').click(function(event) { + var action_changed = false; + $('div.actions select option:selected').each(function() { + if ($(this).val()) { + action_changed = true; + } + }); + if (action_changed) { + if (list_editable_changed) { + return confirm(gettext("You have selected an action, but you haven't saved your changes to individual fields yet. Please click OK to save. You'll need to re-run the action.")); + } else { + return confirm(gettext("You have selected an action, and you haven't made any changes on individual fields. You're probably looking for the Go button rather than the Save button.")); + } + } + }); + } + /* Setup plugin defaults */ + $.fn.actions.defaults = { + actionContainer: "div.actions", + counterContainer: "span.action-counter", + allContainer: "div.actions span.all", + acrossInput: "div.actions input.select-across", + acrossQuestions: "div.actions span.question", + acrossClears: "div.actions span.clear", + allToggle: "#action-toggle", + selectedClass: "selected" + } +})(django.jQuery); diff --git a/users/static/admin/js/actions.min.js b/users/static/admin/js/actions.min.js new file mode 100644 index 0000000..21f00cd --- /dev/null +++ b/users/static/admin/js/actions.min.js @@ -0,0 +1,7 @@ +(function(a){a.fn.actions=function(h){var b=a.extend({},a.fn.actions.defaults,h),e=a(this),f=false;checker=function(c){c?showQuestion():reset();a(e).attr("checked",c).parent().parent().toggleClass(b.selectedClass,c)};updateCounter=function(){var c=a(e).filter(":checked").length;a(b.counterContainer).html(interpolate(ngettext("%(sel)s of %(cnt)s selected","%(sel)s of %(cnt)s selected",c),{sel:c,cnt:_actions_icnt},true));a(b.allToggle).attr("checked",function(){if(c==e.length){value=true;showQuestion()}else{value= +false;clearAcross()}return 
value})};showQuestion=function(){a(b.acrossClears).hide();a(b.acrossQuestions).show();a(b.allContainer).hide()};showClear=function(){a(b.acrossClears).show();a(b.acrossQuestions).hide();a(b.actionContainer).toggleClass(b.selectedClass);a(b.allContainer).show();a(b.counterContainer).hide()};reset=function(){a(b.acrossClears).hide();a(b.acrossQuestions).hide();a(b.allContainer).hide();a(b.counterContainer).show()};clearAcross=function(){reset();a(b.acrossInput).val(0);a(b.actionContainer).removeClass(b.selectedClass)}; +a(b.counterContainer).show();a(this).filter(":checked").each(function(){a(this).parent().parent().toggleClass(b.selectedClass);updateCounter();a(b.acrossInput).val()==1&&showClear()});a(b.allToggle).show().click(function(){checker(a(this).attr("checked"));updateCounter()});a("div.actions span.question a").click(function(c){c.preventDefault();a(b.acrossInput).val(1);showClear()});a("div.actions span.clear a").click(function(c){c.preventDefault();a(b.allToggle).attr("checked",false);clearAcross();checker(0); +updateCounter()});lastChecked=null;a(e).click(function(c){if(!c)c=window.event;var d=c.target?c.target:c.srcElement;if(lastChecked&&a.data(lastChecked)!=a.data(d)&&c.shiftKey==true){var g=false;a(lastChecked).attr("checked",d.checked).parent().parent().toggleClass(b.selectedClass,d.checked);a(e).each(function(){if(a.data(this)==a.data(lastChecked)||a.data(this)==a.data(d))g=g?false:true;g&&a(this).attr("checked",d.checked).parent().parent().toggleClass(b.selectedClass,d.checked)})}a(d).parent().parent().toggleClass(b.selectedClass, +d.checked);lastChecked=d;updateCounter()});a("form#changelist-form table#result_list tr").find("td:gt(0) :input").change(function(){f=true});a('form#changelist-form button[name="index"]').click(function(){if(f)return confirm(gettext("You have unsaved changes on individual editable fields. 
If you run an action, your unsaved changes will be lost."))});a('form#changelist-form input[name="_save"]').click(function(){var c=false;a("div.actions select option:selected").each(function(){if(a(this).val())c= +true});if(c)return f?confirm(gettext("You have selected an action, but you haven't saved your changes to individual fields yet. Please click OK to save. You'll need to re-run the action.")):confirm(gettext("You have selected an action, and you haven't made any changes on individual fields. You're probably looking for the Go button rather than the Save button."))})};a.fn.actions.defaults={actionContainer:"div.actions",counterContainer:"span.action-counter",allContainer:"div.actions span.all",acrossInput:"div.actions input.select-across", +acrossQuestions:"div.actions span.question",acrossClears:"div.actions span.clear",allToggle:"#action-toggle",selectedClass:"selected"}})(django.jQuery); diff --git a/users/static/admin/js/admin/DateTimeShortcuts.js b/users/static/admin/js/admin/DateTimeShortcuts.js new file mode 100644 index 0000000..a4293b3 --- /dev/null +++ b/users/static/admin/js/admin/DateTimeShortcuts.js @@ -0,0 +1,274 @@ +// Inserts shortcut buttons after all of the following: +// +// + +var DateTimeShortcuts = { + calendars: [], + calendarInputs: [], + clockInputs: [], + calendarDivName1: 'calendarbox', // name of calendar
      that gets toggled + calendarDivName2: 'calendarin', // name of
      that contains calendar + calendarLinkName: 'calendarlink',// name of the link that is used to toggle + clockDivName: 'clockbox', // name of clock
      that gets toggled + clockLinkName: 'clocklink', // name of the link that is used to toggle + shortCutsClass: 'datetimeshortcuts', // class of the clock and cal shortcuts + admin_media_prefix: '', + init: function() { + // Get admin_media_prefix by grabbing it off the window object. It's + // set in the admin/base.html template, so if it's not there, someone's + // overridden the template. In that case, we'll set a clearly-invalid + // value in the hopes that someone will examine HTTP requests and see it. + if (window.__admin_media_prefix__ != undefined) { + DateTimeShortcuts.admin_media_prefix = window.__admin_media_prefix__; + } else { + DateTimeShortcuts.admin_media_prefix = '/missing-admin-media-prefix/'; + } + + var inputs = document.getElementsByTagName('input'); + for (i=0; i + //

      Choose a time

      + // + //

      Cancel

      + //
      + + var clock_box = document.createElement('div'); + clock_box.style.display = 'none'; + clock_box.style.position = 'absolute'; + clock_box.className = 'clockbox module'; + clock_box.setAttribute('id', DateTimeShortcuts.clockDivName + num); + document.body.appendChild(clock_box); + addEvent(clock_box, 'click', DateTimeShortcuts.cancelEventPropagation); + + quickElement('h2', clock_box, gettext('Choose a time')); + time_list = quickElement('ul', clock_box, ''); + time_list.className = 'timelist'; + time_format = get_format('TIME_INPUT_FORMATS')[0]; + quickElement("a", quickElement("li", time_list, ""), gettext("Now"), "href", "javascript:DateTimeShortcuts.handleClockQuicklink(" + num + ", new Date().strftime('" + time_format + "'));"); + quickElement("a", quickElement("li", time_list, ""), gettext("Midnight"), "href", "javascript:DateTimeShortcuts.handleClockQuicklink(" + num + ", new Date(1970,1,1,0,0,0,0).strftime('" + time_format + "'));"); + quickElement("a", quickElement("li", time_list, ""), gettext("6 a.m."), "href", "javascript:DateTimeShortcuts.handleClockQuicklink(" + num + ", new Date(1970,1,1,6,0,0,0).strftime('" + time_format + "'));"); + quickElement("a", quickElement("li", time_list, ""), gettext("Noon"), "href", "javascript:DateTimeShortcuts.handleClockQuicklink(" + num + ", new Date(1970,1,1,12,0,0,0).strftime('" + time_format + "'));"); + + cancel_p = quickElement('p', clock_box, ''); + cancel_p.className = 'calendar-cancel'; + quickElement('a', cancel_p, gettext('Cancel'), 'href', 'javascript:DateTimeShortcuts.dismissClock(' + num + ');'); + }, + openClock: function(num) { + var clock_box = document.getElementById(DateTimeShortcuts.clockDivName+num) + var clock_link = document.getElementById(DateTimeShortcuts.clockLinkName+num) + + // Recalculate the clockbox position + // is it left-to-right or right-to-left layout ? 
+ if (getStyle(document.body,'direction')!='rtl') { + clock_box.style.left = findPosX(clock_link) + 17 + 'px'; + } + else { + // since style's width is in em, it'd be tough to calculate + // px value of it. let's use an estimated px for now + // TODO: IE returns wrong value for findPosX when in rtl mode + // (it returns as it was left aligned), needs to be fixed. + clock_box.style.left = findPosX(clock_link) - 110 + 'px'; + } + clock_box.style.top = Math.max(0, findPosY(clock_link) - 30) + 'px'; + + // Show the clock box + clock_box.style.display = 'block'; + addEvent(window.document, 'click', function() { DateTimeShortcuts.dismissClock(num); return true; }); + }, + dismissClock: function(num) { + document.getElementById(DateTimeShortcuts.clockDivName + num).style.display = 'none'; + window.document.onclick = null; + }, + handleClockQuicklink: function(num, val) { + DateTimeShortcuts.clockInputs[num].value = val; + DateTimeShortcuts.clockInputs[num].focus(); + DateTimeShortcuts.dismissClock(num); + }, + // Add calendar widget to a given field. 
+ addCalendar: function(inp) { + var num = DateTimeShortcuts.calendars.length; + + DateTimeShortcuts.calendarInputs[num] = inp; + + // Shortcut links (calendar icon and "Today" link) + var shortcuts_span = document.createElement('span'); + shortcuts_span.className = DateTimeShortcuts.shortCutsClass; + inp.parentNode.insertBefore(shortcuts_span, inp.nextSibling); + var today_link = document.createElement('a'); + today_link.setAttribute('href', 'javascript:DateTimeShortcuts.handleCalendarQuickLink(' + num + ', 0);'); + today_link.appendChild(document.createTextNode(gettext('Today'))); + var cal_link = document.createElement('a'); + cal_link.setAttribute('href', 'javascript:DateTimeShortcuts.openCalendar(' + num + ');'); + cal_link.id = DateTimeShortcuts.calendarLinkName + num; + quickElement('img', cal_link, '', 'src', DateTimeShortcuts.admin_media_prefix + 'img/admin/icon_calendar.gif', 'alt', gettext('Calendar')); + shortcuts_span.appendChild(document.createTextNode('\240')); + shortcuts_span.appendChild(today_link); + shortcuts_span.appendChild(document.createTextNode('\240|\240')); + shortcuts_span.appendChild(cal_link); + + // Create calendarbox div. + // + // Markup looks like: + // + //
      + //

      + // + // February 2003 + //

      + //
      + // + //
      + //
      + // Yesterday | Today | Tomorrow + //
      + //

      Cancel

      + //
      + var cal_box = document.createElement('div'); + cal_box.style.display = 'none'; + cal_box.style.position = 'absolute'; + cal_box.className = 'calendarbox module'; + cal_box.setAttribute('id', DateTimeShortcuts.calendarDivName1 + num); + document.body.appendChild(cal_box); + addEvent(cal_box, 'click', DateTimeShortcuts.cancelEventPropagation); + + // next-prev links + var cal_nav = quickElement('div', cal_box, ''); + var cal_nav_prev = quickElement('a', cal_nav, '<', 'href', 'javascript:DateTimeShortcuts.drawPrev('+num+');'); + cal_nav_prev.className = 'calendarnav-previous'; + var cal_nav_next = quickElement('a', cal_nav, '>', 'href', 'javascript:DateTimeShortcuts.drawNext('+num+');'); + cal_nav_next.className = 'calendarnav-next'; + + // main box + var cal_main = quickElement('div', cal_box, '', 'id', DateTimeShortcuts.calendarDivName2 + num); + cal_main.className = 'calendar'; + DateTimeShortcuts.calendars[num] = new Calendar(DateTimeShortcuts.calendarDivName2 + num, DateTimeShortcuts.handleCalendarCallback(num)); + DateTimeShortcuts.calendars[num].drawCurrent(); + + // calendar shortcuts + var shortcuts = quickElement('div', cal_box, ''); + shortcuts.className = 'calendar-shortcuts'; + quickElement('a', shortcuts, gettext('Yesterday'), 'href', 'javascript:DateTimeShortcuts.handleCalendarQuickLink(' + num + ', -1);'); + shortcuts.appendChild(document.createTextNode('\240|\240')); + quickElement('a', shortcuts, gettext('Today'), 'href', 'javascript:DateTimeShortcuts.handleCalendarQuickLink(' + num + ', 0);'); + shortcuts.appendChild(document.createTextNode('\240|\240')); + quickElement('a', shortcuts, gettext('Tomorrow'), 'href', 'javascript:DateTimeShortcuts.handleCalendarQuickLink(' + num + ', +1);'); + + // cancel bar + var cancel_p = quickElement('p', cal_box, ''); + cancel_p.className = 'calendar-cancel'; + quickElement('a', cancel_p, gettext('Cancel'), 'href', 'javascript:DateTimeShortcuts.dismissCalendar(' + num + ');'); + }, + openCalendar: 
function(num) { + var cal_box = document.getElementById(DateTimeShortcuts.calendarDivName1+num) + var cal_link = document.getElementById(DateTimeShortcuts.calendarLinkName+num) + var inp = DateTimeShortcuts.calendarInputs[num]; + + // Determine if the current value in the input has a valid date. + // If so, draw the calendar with that date's year and month. + if (inp.value) { + var date_parts = inp.value.split('-'); + var year = date_parts[0]; + var month = parseFloat(date_parts[1]); + if (year.match(/\d\d\d\d/) && month >= 1 && month <= 12) { + DateTimeShortcuts.calendars[num].drawDate(month, year); + } + } + + // Recalculate the clockbox position + // is it left-to-right or right-to-left layout ? + if (getStyle(document.body,'direction')!='rtl') { + cal_box.style.left = findPosX(cal_link) + 17 + 'px'; + } + else { + // since style's width is in em, it'd be tough to calculate + // px value of it. let's use an estimated px for now + // TODO: IE returns wrong value for findPosX when in rtl mode + // (it returns as it was left aligned), needs to be fixed. 
+ cal_box.style.left = findPosX(cal_link) - 180 + 'px'; + } + cal_box.style.top = Math.max(0, findPosY(cal_link) - 75) + 'px'; + + cal_box.style.display = 'block'; + addEvent(window.document, 'click', function() { DateTimeShortcuts.dismissCalendar(num); return true; }); + }, + dismissCalendar: function(num) { + document.getElementById(DateTimeShortcuts.calendarDivName1+num).style.display = 'none'; + window.document.onclick = null; + }, + drawPrev: function(num) { + DateTimeShortcuts.calendars[num].drawPreviousMonth(); + }, + drawNext: function(num) { + DateTimeShortcuts.calendars[num].drawNextMonth(); + }, + handleCalendarCallback: function(num) { + format = get_format('DATE_INPUT_FORMATS')[0]; + // the format needs to be escaped a little + format = format.replace('\\', '\\\\'); + format = format.replace('\r', '\\r'); + format = format.replace('\n', '\\n'); + format = format.replace('\t', '\\t'); + format = format.replace("'", "\\'"); + return ["function(y, m, d) { DateTimeShortcuts.calendarInputs[", + num, + "].value = new Date(y, m-1, d).strftime('", + format, + "');DateTimeShortcuts.calendarInputs[", + num, + "].focus();document.getElementById(DateTimeShortcuts.calendarDivName1+", + num, + ").style.display='none';}"].join(''); + }, + handleCalendarQuickLink: function(num, offset) { + var d = new Date(); + d.setDate(d.getDate() + offset) + DateTimeShortcuts.calendarInputs[num].value = d.strftime(get_format('DATE_INPUT_FORMATS')[0]); + DateTimeShortcuts.calendarInputs[num].focus(); + DateTimeShortcuts.dismissCalendar(num); + }, + cancelEventPropagation: function(e) { + if (!e) e = window.event; + e.cancelBubble = true; + if (e.stopPropagation) e.stopPropagation(); + } +} + +addEvent(window, 'load', DateTimeShortcuts.init); diff --git a/users/static/admin/js/admin/RelatedObjectLookups.js b/users/static/admin/js/admin/RelatedObjectLookups.js new file mode 100644 index 0000000..1bc78f8 --- /dev/null +++ b/users/static/admin/js/admin/RelatedObjectLookups.js @@ -0,0 
+1,96 @@ +// Handles related-objects functionality: lookup link for raw_id_fields +// and Add Another links. + +function html_unescape(text) { + // Unescape a string that was escaped using django.utils.html.escape. + text = text.replace(/</g, '<'); + text = text.replace(/>/g, '>'); + text = text.replace(/"/g, '"'); + text = text.replace(/'/g, "'"); + text = text.replace(/&/g, '&'); + return text; +} + +// IE doesn't accept periods or dashes in the window name, but the element IDs +// we use to generate popup window names may contain them, therefore we map them +// to allowed characters in a reversible way so that we can locate the correct +// element when the popup window is dismissed. +function id_to_windowname(text) { + text = text.replace(/\./g, '__dot__'); + text = text.replace(/\-/g, '__dash__'); + return text; +} + +function windowname_to_id(text) { + text = text.replace(/__dot__/g, '.'); + text = text.replace(/__dash__/g, '-'); + return text; +} + +function showRelatedObjectLookupPopup(triggeringLink) { + var name = triggeringLink.id.replace(/^lookup_/, ''); + name = id_to_windowname(name); + var href; + if (triggeringLink.href.search(/\?/) >= 0) { + href = triggeringLink.href + '&pop=1'; + } else { + href = triggeringLink.href + '?pop=1'; + } + var win = window.open(href, name, 'height=500,width=800,resizable=yes,scrollbars=yes'); + win.focus(); + return false; +} + +function dismissRelatedLookupPopup(win, chosenId) { + var name = windowname_to_id(win.name); + var elem = document.getElementById(name); + if (elem.className.indexOf('vManyToManyRawIdAdminField') != -1 && elem.value) { + elem.value += ',' + chosenId; + } else { + document.getElementById(name).value = chosenId; + } + win.close(); +} + +function showAddAnotherPopup(triggeringLink) { + var name = triggeringLink.id.replace(/^add_/, ''); + name = id_to_windowname(name); + href = triggeringLink.href + if (href.indexOf('?') == -1) { + href += '?_popup=1'; + } else { + href += '&_popup=1'; + } + var 
win = window.open(href, name, 'height=500,width=800,resizable=yes,scrollbars=yes'); + win.focus(); + return false; +} + +function dismissAddAnotherPopup(win, newId, newRepr) { + // newId and newRepr are expected to have previously been escaped by + // django.utils.html.escape. + newId = html_unescape(newId); + newRepr = html_unescape(newRepr); + var name = windowname_to_id(win.name); + var elem = document.getElementById(name); + if (elem) { + if (elem.nodeName == 'SELECT') { + var o = new Option(newRepr, newId); + elem.options[elem.options.length] = o; + o.selected = true; + } else if (elem.nodeName == 'INPUT') { + if (elem.className.indexOf('vManyToManyRawIdAdminField') != -1 && elem.value) { + elem.value += ',' + newId; + } else { + elem.value = newId; + } + } + } else { + var toId = name + "_to"; + elem = document.getElementById(toId); + var o = new Option(newRepr, newId); + SelectBox.add_to_cache(toId, o); + SelectBox.redisplay(toId); + } + win.close(); +} diff --git a/users/static/admin/js/admin/ordering.js b/users/static/admin/js/admin/ordering.js new file mode 100644 index 0000000..53c42f3 --- /dev/null +++ b/users/static/admin/js/admin/ordering.js @@ -0,0 +1,137 @@ +addEvent(window, 'load', reorder_init); + +var lis; +var top = 0; +var left = 0; +var height = 30; + +function reorder_init() { + lis = document.getElementsBySelector('ul#orderthese li'); + var input = document.getElementsBySelector('input[name=order_]')[0]; + setOrder(input.value.split(',')); + input.disabled = true; + draw(); + // Now initialise the dragging behaviour + var limit = (lis.length - 1) * height; + for (var i = 0; i < lis.length; i++) { + var li = lis[i]; + var img = document.getElementById('handle'+li.id); + li.style.zIndex = 1; + Drag.init(img, li, left + 10, left + 10, top + 10, top + 10 + limit); + li.onDragStart = startDrag; + li.onDragEnd = endDrag; + img.style.cursor = 'move'; + } +} + +function submitOrderForm() { + var inputOrder = 
document.getElementsBySelector('input[name=order_]')[0]; + inputOrder.value = getOrder(); + inputOrder.disabled=false; +} + +function startDrag() { + this.style.zIndex = '10'; + this.className = 'dragging'; +} + +function endDrag(x, y) { + this.style.zIndex = '1'; + this.className = ''; + // Work out how far along it has been dropped, using x co-ordinate + var oldIndex = this.index; + var newIndex = Math.round((y - 10 - top) / height); + // 'Snap' to the correct position + this.style.top = (10 + top + newIndex * height) + 'px'; + this.index = newIndex; + moveItem(oldIndex, newIndex); +} + +function moveItem(oldIndex, newIndex) { + // Swaps two items, adjusts the index and left co-ord for all others + if (oldIndex == newIndex) { + return; // Nothing to swap; + } + var direction, lo, hi; + if (newIndex > oldIndex) { + lo = oldIndex; + hi = newIndex; + direction = -1; + } else { + direction = 1; + hi = oldIndex; + lo = newIndex; + } + var lis2 = new Array(); // We will build the new order in this array + for (var i = 0; i < lis.length; i++) { + if (i < lo || i > hi) { + // Position of items not between the indexes is unaffected + lis2[i] = lis[i]; + continue; + } else if (i == newIndex) { + lis2[i] = lis[oldIndex]; + continue; + } else { + // Item is between the two indexes - move it along 1 + lis2[i] = lis[i - direction]; + } + } + // Re-index everything + reIndex(lis2); + lis = lis2; + draw(); +// document.getElementById('hiddenOrder').value = getOrder(); + document.getElementsBySelector('input[name=order_]')[0].value = getOrder(); +} + +function reIndex(lis) { + for (var i = 0; i < lis.length; i++) { + lis[i].index = i; + } +} + +function draw() { + for (var i = 0; i < lis.length; i++) { + var li = lis[i]; + li.index = i; + li.style.position = 'absolute'; + li.style.left = (10 + left) + 'px'; + li.style.top = (10 + top + (i * height)) + 'px'; + } +} + +function getOrder() { + var order = new Array(lis.length); + for (var i = 0; i < lis.length; i++) { + order[i] = 
lis[i].id.substring(1, 100); + } + return order.join(','); +} + +function setOrder(id_list) { + /* Set the current order to match the lsit of IDs */ + var temp_lis = new Array(); + for (var i = 0; i < id_list.length; i++) { + var id = 'p' + id_list[i]; + temp_lis[temp_lis.length] = document.getElementById(id); + } + reIndex(temp_lis); + lis = temp_lis; + draw(); +} + +function addEvent(elm, evType, fn, useCapture) +// addEvent and removeEvent +// cross-browser event handling for IE5+, NS6 and Mozilla +// By Scott Andrew +{ + if (elm.addEventListener){ + elm.addEventListener(evType, fn, useCapture); + return true; + } else if (elm.attachEvent){ + var r = elm.attachEvent("on"+evType, fn); + return r; + } else { + elm['on'+evType] = fn; + } +} diff --git a/users/static/admin/js/calendar.js b/users/static/admin/js/calendar.js new file mode 100644 index 0000000..c95a95d --- /dev/null +++ b/users/static/admin/js/calendar.js @@ -0,0 +1,156 @@ +/* +calendar.js - Calendar functions by Adrian Holovaty +*/ + +function removeChildren(a) { // "a" is reference to an object + while (a.hasChildNodes()) a.removeChild(a.lastChild); +} + +// quickElement(tagType, parentReference, textInChildNode, [, attribute, attributeValue ...]); +function quickElement() { + var obj = document.createElement(arguments[0]); + if (arguments[2] != '' && arguments[2] != null) { + var textNode = document.createTextNode(arguments[2]); + obj.appendChild(textNode); + } + var len = arguments.length; + for (var i = 3; i < len; i += 2) { + obj.setAttribute(arguments[i], arguments[i+1]); + } + arguments[1].appendChild(obj); + return obj; +} + +// CalendarNamespace -- Provides a collection of HTML calendar-related helper functions +var CalendarNamespace = { + monthsOfYear: gettext('January February March April May June July August September October November December').split(' '), + daysOfWeek: gettext('S M T W T F S').split(' '), + firstDayOfWeek: parseInt(get_format('FIRST_DAY_OF_WEEK')), + isLeapYear: 
function(year) { + return (((year % 4)==0) && ((year % 100)!=0) || ((year % 400)==0)); + }, + getDaysInMonth: function(month,year) { + var days; + if (month==1 || month==3 || month==5 || month==7 || month==8 || month==10 || month==12) { + days = 31; + } + else if (month==4 || month==6 || month==9 || month==11) { + days = 30; + } + else if (month==2 && CalendarNamespace.isLeapYear(year)) { + days = 29; + } + else { + days = 28; + } + return days; + }, + draw: function(month, year, div_id, callback) { // month = 1-12, year = 1-9999 + var today = new Date(); + var todayDay = today.getDate(); + var todayMonth = today.getMonth()+1; + var todayYear = today.getFullYear(); + var todayClass = ''; + + month = parseInt(month); + year = parseInt(year); + var calDiv = document.getElementById(div_id); + removeChildren(calDiv); + var calTable = document.createElement('table'); + quickElement('caption', calTable, CalendarNamespace.monthsOfYear[month-1] + ' ' + year); + var tableBody = quickElement('tbody', calTable); + + // Draw days-of-week header + var tableRow = quickElement('tr', tableBody); + for (var i = 0; i < 7; i++) { + quickElement('th', tableRow, CalendarNamespace.daysOfWeek[(i + CalendarNamespace.firstDayOfWeek) % 7]); + } + + var startingPos = new Date(year, month-1, 1 - CalendarNamespace.firstDayOfWeek).getDay(); + var days = CalendarNamespace.getDaysInMonth(month, year); + + // Draw blanks before first of month + tableRow = quickElement('tr', tableBody); + for (var i = 0; i < startingPos; i++) { + var _cell = quickElement('td', tableRow, ' '); + _cell.style.backgroundColor = '#f3f3f3'; + } + + // Draw days of month + var currentDay = 1; + for (var i = startingPos; currentDay <= days; i++) { + if (i%7 == 0 && currentDay != 1) { + tableRow = quickElement('tr', tableBody); + } + if ((currentDay==todayDay) && (month==todayMonth) && (year==todayYear)) { + todayClass='today'; + } else { + todayClass=''; + } + var cell = quickElement('td', tableRow, '', 'class', 
todayClass); + + quickElement('a', cell, currentDay, 'href', 'javascript:void(' + callback + '('+year+','+month+','+currentDay+'));'); + currentDay++; + } + + // Draw blanks after end of month (optional, but makes for valid code) + while (tableRow.childNodes.length < 7) { + var _cell = quickElement('td', tableRow, ' '); + _cell.style.backgroundColor = '#f3f3f3'; + } + + calDiv.appendChild(calTable); + } +} + +// Calendar -- A calendar instance +function Calendar(div_id, callback) { + // div_id (string) is the ID of the element in which the calendar will + // be displayed + // callback (string) is the name of a JavaScript function that will be + // called with the parameters (year, month, day) when a day in the + // calendar is clicked + this.div_id = div_id; + this.callback = callback; + this.today = new Date(); + this.currentMonth = this.today.getMonth() + 1; + this.currentYear = this.today.getFullYear(); +} +Calendar.prototype = { + drawCurrent: function() { + CalendarNamespace.draw(this.currentMonth, this.currentYear, this.div_id, this.callback); + }, + drawDate: function(month, year) { + this.currentMonth = month; + this.currentYear = year; + this.drawCurrent(); + }, + drawPreviousMonth: function() { + if (this.currentMonth == 1) { + this.currentMonth = 12; + this.currentYear--; + } + else { + this.currentMonth--; + } + this.drawCurrent(); + }, + drawNextMonth: function() { + if (this.currentMonth == 12) { + this.currentMonth = 1; + this.currentYear++; + } + else { + this.currentMonth++; + } + this.drawCurrent(); + }, + drawPreviousYear: function() { + this.currentYear--; + this.drawCurrent(); + }, + drawNextYear: function() { + this.currentYear++; + this.drawCurrent(); + } +} diff --git a/users/static/admin/js/collapse.js b/users/static/admin/js/collapse.js new file mode 100644 index 0000000..0a1e2d8 --- /dev/null +++ b/users/static/admin/js/collapse.js @@ -0,0 +1,27 @@ +(function($) { + $(document).ready(function() { + // Add anchor tag for Show/Hide link + 
$("fieldset.collapse").each(function(i, elem) { + // Don't hide if fields in this fieldset have errors + if ( $(elem).find("div.errors").length == 0 ) { + $(elem).addClass("collapsed"); + $(elem).find("h2").first().append(' (' + gettext("Show") + + ')'); + } + }); + // Add toggle to anchor tag + $("fieldset.collapse a.collapse-toggle").toggle( + function() { // Show + $(this).text(gettext("Hide")); + $(this).closest("fieldset").removeClass("collapsed"); + return false; + }, + function() { // Hide + $(this).text(gettext("Show")); + $(this).closest("fieldset").addClass("collapsed"); + return false; + } + ); + }); +})(django.jQuery); diff --git a/users/static/admin/js/collapse.min.js b/users/static/admin/js/collapse.min.js new file mode 100644 index 0000000..428984e --- /dev/null +++ b/users/static/admin/js/collapse.min.js @@ -0,0 +1,2 @@ +(function(a){a(document).ready(function(){a("fieldset.collapse").each(function(c,b){if(a(b).find("div.errors").length==0){a(b).addClass("collapsed");a(b).find("h2").first().append(' ('+gettext("Show")+")")}});a("fieldset.collapse a.collapse-toggle").toggle(function(){a(this).text(gettext("Hide"));a(this).closest("fieldset").removeClass("collapsed");return false},function(){a(this).text(gettext("Show"));a(this).closest("fieldset").addClass("collapsed"); +return false})})})(django.jQuery); diff --git a/users/static/admin/js/compress.py b/users/static/admin/js/compress.py new file mode 100644 index 0000000..8d2caa2 --- /dev/null +++ b/users/static/admin/js/compress.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python +import os +import optparse +import subprocess +import sys + +here = os.path.dirname(__file__) + +def main(): + usage = "usage: %prog [file1..fileN]" + description = """With no file paths given this script will automatically +compress all jQuery-based files of the admin app. 
Requires the Google Closure +Compiler library and Java version 6 or later.""" + parser = optparse.OptionParser(usage, description=description) + parser.add_option("-c", dest="compiler", default="~/bin/compiler.jar", + help="path to Closure Compiler jar file") + parser.add_option("-v", "--verbose", + action="store_true", dest="verbose") + parser.add_option("-q", "--quiet", + action="store_false", dest="verbose") + (options, args) = parser.parse_args() + + compiler = os.path.expanduser(options.compiler) + if not os.path.exists(compiler): + sys.exit("Google Closure compiler jar file %s not found. Please use the -c option to specify the path." % compiler) + + if not args: + if options.verbose: + sys.stdout.write("No filenames given; defaulting to admin scripts\n") + args = [os.path.join(here, f) for f in [ + "actions.js", "collapse.js", "inlines.js", "prepopulate.js"]] + + for arg in args: + if not arg.endswith(".js"): + arg = arg + ".js" + to_compress = os.path.expanduser(arg) + if os.path.exists(to_compress): + to_compress_min = "%s.min.js" % "".join(arg.rsplit(".js")) + cmd = "java -jar %s --js %s --js_output_file %s" % (compiler, to_compress, to_compress_min) + if options.verbose: + sys.stdout.write("Running: %s\n" % cmd) + subprocess.call(cmd.split()) + else: + sys.stdout.write("File %s not found. Sure it exists?\n" % to_compress) + +if __name__ == '__main__': + main() diff --git a/users/static/admin/js/core.js b/users/static/admin/js/core.js new file mode 100644 index 0000000..3ca8ad0 --- /dev/null +++ b/users/static/admin/js/core.js @@ -0,0 +1,221 @@ +// Core javascript helper functions + +// basic browser identification & version +var isOpera = (navigator.userAgent.indexOf("Opera")>=0) && parseFloat(navigator.appVersion); +var isIE = ((document.all) && (!isOpera)) && parseFloat(navigator.appVersion.split("MSIE ")[1].split(";")[0]); + +// Cross-browser event handlers. 
+function addEvent(obj, evType, fn) { + if (obj.addEventListener) { + obj.addEventListener(evType, fn, false); + return true; + } else if (obj.attachEvent) { + var r = obj.attachEvent("on" + evType, fn); + return r; + } else { + return false; + } +} + +function removeEvent(obj, evType, fn) { + if (obj.removeEventListener) { + obj.removeEventListener(evType, fn, false); + return true; + } else if (obj.detachEvent) { + obj.detachEvent("on" + evType, fn); + return true; + } else { + return false; + } +} + +// quickElement(tagType, parentReference, textInChildNode, [, attribute, attributeValue ...]); +function quickElement() { + var obj = document.createElement(arguments[0]); + if (arguments[2] != '' && arguments[2] != null) { + var textNode = document.createTextNode(arguments[2]); + obj.appendChild(textNode); + } + var len = arguments.length; + for (var i = 3; i < len; i += 2) { + obj.setAttribute(arguments[i], arguments[i+1]); + } + arguments[1].appendChild(obj); + return obj; +} + +// ---------------------------------------------------------------------------- +// Cross-browser xmlhttp object +// from http://jibbering.com/2002/4/httprequest.html +// ---------------------------------------------------------------------------- +var xmlhttp; +/*@cc_on @*/ +/*@if (@_jscript_version >= 5) + try { + xmlhttp = new ActiveXObject("Msxml2.XMLHTTP"); + } catch (e) { + try { + xmlhttp = new ActiveXObject("Microsoft.XMLHTTP"); + } catch (E) { + xmlhttp = false; + } + } +@else + xmlhttp = false; +@end @*/ +if (!xmlhttp && typeof XMLHttpRequest != 'undefined') { + xmlhttp = new XMLHttpRequest(); +} + +// ---------------------------------------------------------------------------- +// Find-position functions by PPK +// See http://www.quirksmode.org/js/findpos.html +// ---------------------------------------------------------------------------- +function findPosX(obj) { + var curleft = 0; + if (obj.offsetParent) { + while (obj.offsetParent) { + curleft += obj.offsetLeft - ((isOpera) 
? 0 : obj.scrollLeft); + obj = obj.offsetParent; + } + // IE offsetParent does not include the top-level + if (isIE && obj.parentElement){ + curleft += obj.offsetLeft - obj.scrollLeft; + } + } else if (obj.x) { + curleft += obj.x; + } + return curleft; +} + +function findPosY(obj) { + var curtop = 0; + if (obj.offsetParent) { + while (obj.offsetParent) { + curtop += obj.offsetTop - ((isOpera) ? 0 : obj.scrollTop); + obj = obj.offsetParent; + } + // IE offsetParent does not include the top-level + if (isIE && obj.parentElement){ + curtop += obj.offsetTop - obj.scrollTop; + } + } else if (obj.y) { + curtop += obj.y; + } + return curtop; +} + +//----------------------------------------------------------------------------- +// Date object extensions +// ---------------------------------------------------------------------------- +Date.prototype.getCorrectYear = function() { + // Date.getYear() is unreliable -- + // see http://www.quirksmode.org/js/introdate.html#year + var y = this.getYear() % 100; + return (y < 38) ? y + 2000 : y + 1900; +} + +Date.prototype.getTwelveHours = function() { + hours = this.getHours(); + if (hours == 0) { + return 12; + } + else { + return hours <= 12 ? hours : hours-12 + } +} + +Date.prototype.getTwoDigitMonth = function() { + return (this.getMonth() < 9) ? '0' + (this.getMonth()+1) : (this.getMonth()+1); +} + +Date.prototype.getTwoDigitDate = function() { + return (this.getDate() < 10) ? '0' + this.getDate() : this.getDate(); +} + +Date.prototype.getTwoDigitTwelveHour = function() { + return (this.getTwelveHours() < 10) ? '0' + this.getTwelveHours() : this.getTwelveHours(); +} + +Date.prototype.getTwoDigitHour = function() { + return (this.getHours() < 10) ? '0' + this.getHours() : this.getHours(); +} + +Date.prototype.getTwoDigitMinute = function() { + return (this.getMinutes() < 10) ? '0' + this.getMinutes() : this.getMinutes(); +} + +Date.prototype.getTwoDigitSecond = function() { + return (this.getSeconds() < 10) ? 
'0' + this.getSeconds() : this.getSeconds(); +} + +Date.prototype.getISODate = function() { + return this.getCorrectYear() + '-' + this.getTwoDigitMonth() + '-' + this.getTwoDigitDate(); +} + +Date.prototype.getHourMinute = function() { + return this.getTwoDigitHour() + ':' + this.getTwoDigitMinute(); +} + +Date.prototype.getHourMinuteSecond = function() { + return this.getTwoDigitHour() + ':' + this.getTwoDigitMinute() + ':' + this.getTwoDigitSecond(); +} + +Date.prototype.strftime = function(format) { + var fields = { + c: this.toString(), + d: this.getTwoDigitDate(), + H: this.getTwoDigitHour(), + I: this.getTwoDigitTwelveHour(), + m: this.getTwoDigitMonth(), + M: this.getTwoDigitMinute(), + p: (this.getHours() >= 12) ? 'PM' : 'AM', + S: this.getTwoDigitSecond(), + w: '0' + this.getDay(), + x: this.toLocaleDateString(), + X: this.toLocaleTimeString(), + y: ('' + this.getFullYear()).substr(2, 4), + Y: '' + this.getFullYear(), + '%' : '%' + }; + var result = '', i = 0; + while (i < format.length) { + if (format.charAt(i) === '%') { + result = result + fields[format.charAt(i + 1)]; + ++i; + } + else { + result = result + format.charAt(i); + } + ++i; + } + return result; +} + +// ---------------------------------------------------------------------------- +// String object extensions +// ---------------------------------------------------------------------------- +String.prototype.pad_left = function(pad_length, pad_string) { + var new_string = this; + for (var i = 0; new_string.length < pad_length; i++) { + new_string = pad_string + new_string; + } + return new_string; +} + +// ---------------------------------------------------------------------------- +// Get the computed style for and element +// ---------------------------------------------------------------------------- +function getStyle(oElm, strCssRule){ + var strValue = ""; + if(document.defaultView && document.defaultView.getComputedStyle){ + strValue = document.defaultView.getComputedStyle(oElm, 
"").getPropertyValue(strCssRule); + } + else if(oElm.currentStyle){ + strCssRule = strCssRule.replace(/\-(\w)/g, function (strMatch, p1){ + return p1.toUpperCase(); + }); + strValue = oElm.currentStyle[strCssRule]; + } + return strValue; +} diff --git a/users/static/admin/js/dateparse.js b/users/static/admin/js/dateparse.js new file mode 100644 index 0000000..3cb82de --- /dev/null +++ b/users/static/admin/js/dateparse.js @@ -0,0 +1,239 @@ +/* 'Magic' date parsing, by Simon Willison (6th October 2003) + http://simon.incutio.com/archive/2003/10/06/betterDateInput + Adapted for 6newslawrence.com, 28th January 2004 +*/ + +/* Finds the index of the first occurence of item in the array, or -1 if not found */ +if (typeof Array.prototype.indexOf == 'undefined') { + Array.prototype.indexOf = function(item) { + var len = this.length; + for (var i = 0; i < len; i++) { + if (this[i] == item) { + return i; + } + } + return -1; + }; +} +/* Returns an array of items judged 'true' by the passed in test function */ +if (typeof Array.prototype.filter == 'undefined') { + Array.prototype.filter = function(test) { + var matches = []; + var len = this.length; + for (var i = 0; i < len; i++) { + if (test(this[i])) { + matches[matches.length] = this[i]; + } + } + return matches; + }; +} + +var monthNames = gettext("January February March April May June July August September October November December").split(" "); +var weekdayNames = gettext("Sunday Monday Tuesday Wednesday Thursday Friday Saturday").split(" "); + +/* Takes a string, returns the index of the month matching that string, throws + an error if 0 or more than 1 matches +*/ +function parseMonth(month) { + var matches = monthNames.filter(function(item) { + return new RegExp("^" + month, "i").test(item); + }); + if (matches.length == 0) { + throw new Error("Invalid month string"); + } + if (matches.length > 1) { + throw new Error("Ambiguous month"); + } + return monthNames.indexOf(matches[0]); +} +/* Same as parseMonth but for 
days of the week */ +function parseWeekday(weekday) { + var matches = weekdayNames.filter(function(item) { + return new RegExp("^" + weekday, "i").test(item); + }); + if (matches.length == 0) { + throw new Error("Invalid day string"); + } + if (matches.length > 1) { + throw new Error("Ambiguous weekday"); + } + return weekdayNames.indexOf(matches[0]); +} + +/* Array of objects, each has 're', a regular expression and 'handler', a + function for creating a date from something that matches the regular + expression. Handlers may throw errors if string is unparseable. +*/ +var dateParsePatterns = [ + // Today + { re: /^tod/i, + handler: function() { + return new Date(); + } + }, + // Tomorrow + { re: /^tom/i, + handler: function() { + var d = new Date(); + d.setDate(d.getDate() + 1); + return d; + } + }, + // Yesterday + { re: /^yes/i, + handler: function() { + var d = new Date(); + d.setDate(d.getDate() - 1); + return d; + } + }, + // 4th + { re: /^(\d{1,2})(st|nd|rd|th)?$/i, + handler: function(bits) { + var d = new Date(); + d.setDate(parseInt(bits[1], 10)); + return d; + } + }, + // 4th Jan + { re: /^(\d{1,2})(?:st|nd|rd|th)? (\w+)$/i, + handler: function(bits) { + var d = new Date(); + d.setDate(1); + d.setMonth(parseMonth(bits[2])); + d.setDate(parseInt(bits[1], 10)); + return d; + } + }, + // 4th Jan 2003 + { re: /^(\d{1,2})(?:st|nd|rd|th)? (\w+),? (\d{4})$/i, + handler: function(bits) { + var d = new Date(); + d.setDate(1); + d.setYear(bits[3]); + d.setMonth(parseMonth(bits[2])); + d.setDate(parseInt(bits[1], 10)); + return d; + } + }, + // Jan 4th + { re: /^(\w+) (\d{1,2})(?:st|nd|rd|th)?$/i, + handler: function(bits) { + var d = new Date(); + d.setDate(1); + d.setMonth(parseMonth(bits[1])); + d.setDate(parseInt(bits[2], 10)); + return d; + } + }, + // Jan 4th 2003 + { re: /^(\w+) (\d{1,2})(?:st|nd|rd|th)?,? 
(\d{4})$/i, + handler: function(bits) { + var d = new Date(); + d.setDate(1); + d.setYear(bits[3]); + d.setMonth(parseMonth(bits[1])); + d.setDate(parseInt(bits[2], 10)); + return d; + } + }, + // next Tuesday - this is suspect due to weird meaning of "next" + { re: /^next (\w+)$/i, + handler: function(bits) { + var d = new Date(); + var day = d.getDay(); + var newDay = parseWeekday(bits[1]); + var addDays = newDay - day; + if (newDay <= day) { + addDays += 7; + } + d.setDate(d.getDate() + addDays); + return d; + } + }, + // last Tuesday + { re: /^last (\w+)$/i, + handler: function(bits) { + throw new Error("Not yet implemented"); + } + }, + // mm/dd/yyyy (American style) + { re: /(\d{1,2})\/(\d{1,2})\/(\d{4})/, + handler: function(bits) { + var d = new Date(); + d.setDate(1); + d.setYear(bits[3]); + d.setMonth(parseInt(bits[1], 10) - 1); // Because months indexed from 0 + d.setDate(parseInt(bits[2], 10)); + return d; + } + }, + // yyyy-mm-dd (ISO style) + { re: /(\d{4})-(\d{1,2})-(\d{1,2})/, + handler: function(bits) { + var d = new Date(); + d.setDate(1); + d.setYear(parseInt(bits[1])); + d.setMonth(parseInt(bits[2], 10) - 1); + d.setDate(parseInt(bits[3], 10)); + return d; + } + }, +]; + +function parseDateString(s) { + for (var i = 0; i < dateParsePatterns.length; i++) { + var re = dateParsePatterns[i].re; + var handler = dateParsePatterns[i].handler; + var bits = re.exec(s); + if (bits) { + return handler(bits); + } + } + throw new Error("Invalid date string"); +} + +function fmt00(x) { + // fmt00: Tags leading zero onto numbers 0 - 9. + // Particularly useful for displaying results from Date methods. 
+ // + if (Math.abs(parseInt(x)) < 10){ + x = "0"+ Math.abs(x); + } + return x; +} + +function parseDateStringISO(s) { + try { + var d = parseDateString(s); + return d.getFullYear() + '-' + (fmt00(d.getMonth() + 1)) + '-' + fmt00(d.getDate()) + } + catch (e) { return s; } +} +function magicDate(input) { + var messagespan = input.id + 'Msg'; + try { + var d = parseDateString(input.value); + input.value = d.getFullYear() + '-' + (fmt00(d.getMonth() + 1)) + '-' + + fmt00(d.getDate()); + input.className = ''; + // Human readable date + if (document.getElementById(messagespan)) { + document.getElementById(messagespan).firstChild.nodeValue = d.toDateString(); + document.getElementById(messagespan).className = 'normal'; + } + } + catch (e) { + input.className = 'error'; + var message = e.message; + // Fix for IE6 bug + if (message.indexOf('is null or not an object') > -1) { + message = 'Invalid date string'; + } + if (document.getElementById(messagespan)) { + document.getElementById(messagespan).firstChild.nodeValue = message; + document.getElementById(messagespan).className = 'error'; + } + } +} diff --git a/users/static/admin/js/getElementsBySelector.js b/users/static/admin/js/getElementsBySelector.js new file mode 100644 index 0000000..15b57a1 --- /dev/null +++ b/users/static/admin/js/getElementsBySelector.js @@ -0,0 +1,167 @@ +/* document.getElementsBySelector(selector) + - returns an array of element objects from the current document + matching the CSS selector. Selectors can contain element names, + class names and ids and can be nested. 
For example: + + elements = document.getElementsBySelect('div#main p a.external') + + Will return an array of all 'a' elements with 'external' in their + class attribute that are contained inside 'p' elements that are + contained inside the 'div' element which has id="main" + + New in version 0.4: Support for CSS2 and CSS3 attribute selectors: + See http://www.w3.org/TR/css3-selectors/#attribute-selectors + + Version 0.4 - Simon Willison, March 25th 2003 + -- Works in Phoenix 0.5, Mozilla 1.3, Opera 7, Internet Explorer 6, Internet Explorer 5 on Windows + -- Opera 7 fails +*/ + +function getAllChildren(e) { + // Returns all children of element. Workaround required for IE5/Windows. Ugh. + return e.all ? e.all : e.getElementsByTagName('*'); +} + +document.getElementsBySelector = function(selector) { + // Attempt to fail gracefully in lesser browsers + if (!document.getElementsByTagName) { + return new Array(); + } + // Split selector in to tokens + var tokens = selector.split(' '); + var currentContext = new Array(document); + for (var i = 0; i < tokens.length; i++) { + token = tokens[i].replace(/^\s+/,'').replace(/\s+$/,'');; + if (token.indexOf('#') > -1) { + // Token is an ID selector + var bits = token.split('#'); + var tagName = bits[0]; + var id = bits[1]; + var element = document.getElementById(id); + if (!element || (tagName && element.nodeName.toLowerCase() != tagName)) { + // ID not found or tag with that ID not found, return false. 
+ return new Array(); + } + // Set currentContext to contain just this element + currentContext = new Array(element); + continue; // Skip to next token + } + if (token.indexOf('.') > -1) { + // Token contains a class selector + var bits = token.split('.'); + var tagName = bits[0]; + var className = bits[1]; + if (!tagName) { + tagName = '*'; + } + // Get elements matching tag, filter them for class selector + var found = new Array; + var foundCount = 0; + for (var h = 0; h < currentContext.length; h++) { + var elements; + if (tagName == '*') { + elements = getAllChildren(currentContext[h]); + } else { + try { + elements = currentContext[h].getElementsByTagName(tagName); + } + catch(e) { + elements = []; + } + } + for (var j = 0; j < elements.length; j++) { + found[foundCount++] = elements[j]; + } + } + currentContext = new Array; + var currentContextIndex = 0; + for (var k = 0; k < found.length; k++) { + if (found[k].className && found[k].className.match(new RegExp('\\b'+className+'\\b'))) { + currentContext[currentContextIndex++] = found[k]; + } + } + continue; // Skip to next token + } + // Code to deal with attribute selectors + if (token.match(/^(\w*)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/)) { + var tagName = RegExp.$1; + var attrName = RegExp.$2; + var attrOperator = RegExp.$3; + var attrValue = RegExp.$4; + if (!tagName) { + tagName = '*'; + } + // Grab all of the tagName elements within current context + var found = new Array; + var foundCount = 0; + for (var h = 0; h < currentContext.length; h++) { + var elements; + if (tagName == '*') { + elements = getAllChildren(currentContext[h]); + } else { + elements = currentContext[h].getElementsByTagName(tagName); + } + for (var j = 0; j < elements.length; j++) { + found[foundCount++] = elements[j]; + } + } + currentContext = new Array; + var currentContextIndex = 0; + var checkFunction; // This function will be used to filter the elements + switch (attrOperator) { + case '=': // Equality + checkFunction = 
function(e) { return (e.getAttribute(attrName) == attrValue); }; + break; + case '~': // Match one of space seperated words + checkFunction = function(e) { return (e.getAttribute(attrName).match(new RegExp('\\b'+attrValue+'\\b'))); }; + break; + case '|': // Match start with value followed by optional hyphen + checkFunction = function(e) { return (e.getAttribute(attrName).match(new RegExp('^'+attrValue+'-?'))); }; + break; + case '^': // Match starts with value + checkFunction = function(e) { return (e.getAttribute(attrName).indexOf(attrValue) == 0); }; + break; + case '$': // Match ends with value - fails with "Warning" in Opera 7 + checkFunction = function(e) { return (e.getAttribute(attrName).lastIndexOf(attrValue) == e.getAttribute(attrName).length - attrValue.length); }; + break; + case '*': // Match ends with value + checkFunction = function(e) { return (e.getAttribute(attrName).indexOf(attrValue) > -1); }; + break; + default : + // Just test for existence of attribute + checkFunction = function(e) { return e.getAttribute(attrName); }; + } + currentContext = new Array; + var currentContextIndex = 0; + for (var k = 0; k < found.length; k++) { + if (checkFunction(found[k])) { + currentContext[currentContextIndex++] = found[k]; + } + } + // alert('Attribute Selector: '+tagName+' '+attrName+' '+attrOperator+' '+attrValue); + continue; // Skip to next token + } + // If we get here, token is JUST an element (not a class or ID selector) + tagName = token; + var found = new Array; + var foundCount = 0; + for (var h = 0; h < currentContext.length; h++) { + var elements = currentContext[h].getElementsByTagName(tagName); + for (var j = 0; j < elements.length; j++) { + found[foundCount++] = elements[j]; + } + } + currentContext = found; + } + return currentContext; +} + +/* That revolting regular expression explained +/^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/ + \---/ \---/\-------------/ \-------/ + | | | | + | | | The value + | | ~,|,^,$,* or = + | Attribute + 
Tag +*/ diff --git a/users/static/admin/js/inlines.js b/users/static/admin/js/inlines.js new file mode 100644 index 0000000..bddd6f7 --- /dev/null +++ b/users/static/admin/js/inlines.js @@ -0,0 +1,136 @@ +/** + * Django admin inlines + * + * Based on jQuery Formset 1.1 + * @author Stanislaus Madueke (stan DOT madueke AT gmail DOT com) + * @requires jQuery 1.2.6 or later + * + * Copyright (c) 2009, Stanislaus Madueke + * All rights reserved. + * + * Spiced up with Code from Zain Memon's GSoC project 2009 + * and modified for Django by Jannis Leidel + * + * Licensed under the New BSD License + * See: http://www.opensource.org/licenses/bsd-license.php + */ +(function($) { + $.fn.formset = function(opts) { + var options = $.extend({}, $.fn.formset.defaults, opts); + var updateElementIndex = function(el, prefix, ndx) { + var id_regex = new RegExp("(" + prefix + "-(\\d+|__prefix__))"); + var replacement = prefix + "-" + ndx; + if ($(el).attr("for")) { + $(el).attr("for", $(el).attr("for").replace(id_regex, replacement)); + } + if (el.id) { + el.id = el.id.replace(id_regex, replacement); + } + if (el.name) { + el.name = el.name.replace(id_regex, replacement); + } + }; + var totalForms = $("#id_" + options.prefix + "-TOTAL_FORMS").attr("autocomplete", "off"); + var nextIndex = parseInt(totalForms.val()); + var maxForms = $("#id_" + options.prefix + "-MAX_NUM_FORMS").attr("autocomplete", "off"); + // only show the add button if we are allowed to add more items, + // note that max_num = None translates to a blank string. + var showAddButton = maxForms.val() == '' || (maxForms.val()-totalForms.val()) > 0; + $(this).each(function(i) { + $(this).not("." 
+ options.emptyCssClass).addClass(options.formCssClass); + }); + if ($(this).length && showAddButton) { + var addButton; + if ($(this).attr("tagName") == "TR") { + // If forms are laid out as table rows, insert the + // "add" button in a new table row: + var numCols = this.eq(0).children().length; + $(this).parent().append('' + options.addText + ""); + addButton = $(this).parent().find("tr:last a"); + } else { + // Otherwise, insert it immediately after the last form: + $(this).filter(":last").after('"); + addButton = $(this).filter(":last").next().find("a"); + } + addButton.click(function() { + var totalForms = $("#id_" + options.prefix + "-TOTAL_FORMS"); + var template = $("#" + options.prefix + "-empty"); + var row = template.clone(true); + row.removeClass(options.emptyCssClass) + .addClass(options.formCssClass) + .attr("id", options.prefix + "-" + nextIndex); + if (row.is("tr")) { + // If the forms are laid out in table rows, insert + // the remove button into the last table cell: + row.children(":last").append('"); + } else if (row.is("ul") || row.is("ol")) { + // If they're laid out as an ordered/unordered list, + // insert an
    • after the last list item: + row.append('
    • ' + options.deleteText + "
    • "); + } else { + // Otherwise, just insert the remove button as the + // last child element of the form's container: + row.children(":first").append('' + options.deleteText + ""); + } + row.find("*").each(function() { + updateElementIndex(this, options.prefix, totalForms.val()); + }); + // Insert the new form when it has been fully edited + row.insertBefore($(template)); + // Update number of total forms + $(totalForms).val(parseInt(totalForms.val()) + 1); + nextIndex += 1; + // Hide add button in case we've hit the max, except we want to add infinitely + if ((maxForms.val() != '') && (maxForms.val()-totalForms.val()) <= 0) { + addButton.parent().hide(); + } + // The delete button of each row triggers a bunch of other things + row.find("a." + options.deleteCssClass).click(function() { + // Remove the parent form containing this button: + var row = $(this).parents("." + options.formCssClass); + row.remove(); + nextIndex -= 1; + // If a post-delete callback was provided, call it with the deleted form: + if (options.removed) { + options.removed(row); + } + // Update the TOTAL_FORMS form count. + var forms = $("." 
+ options.formCssClass); + $("#id_" + options.prefix + "-TOTAL_FORMS").val(forms.length); + // Show add button again once we drop below max + if ((maxForms.val() == '') || (maxForms.val()-forms.length) > 0) { + addButton.parent().show(); + } + // Also, update names and ids for all remaining form controls + // so they remain in sequence: + for (var i=0, formCount=forms.length; i0;b(this).each(function(){b(this).not("."+ +a.emptyCssClass).addClass(a.formCssClass)});if(b(this).length&&g){var j;if(b(this).attr("tagName")=="TR"){g=this.eq(0).children().length;b(this).parent().append(''+a.addText+"");j=b(this).parent().find("tr:last a")}else{b(this).filter(":last").after('");j=b(this).filter(":last").next().find("a")}j.click(function(){var c=b("#id_"+ +a.prefix+"-TOTAL_FORMS"),f=b("#"+a.prefix+"-empty"),d=f.clone(true);d.removeClass(a.emptyCssClass).addClass(a.formCssClass).attr("id",a.prefix+"-"+l);if(d.is("tr"))d.children(":last").append('");else d.is("ul")||d.is("ol")?d.append('
    • '+a.deleteText+"
    • "):d.children(":first").append(''+ +a.deleteText+"");d.find("*").each(function(){k(this,a.prefix,c.val())});d.insertBefore(b(f));b(c).val(parseInt(c.val())+1);l+=1;h.val()!=""&&h.val()-c.val()<=0&&j.parent().hide();d.find("a."+a.deleteCssClass).click(function(){var e=b(this).parents("."+a.formCssClass);e.remove();l-=1;a.removed&&a.removed(e);e=b("."+a.formCssClass);b("#id_"+a.prefix+"-TOTAL_FORMS").val(e.length);if(h.val()==""||h.val()-e.length>0)j.parent().show();for(var i=0,m=e.length;i)[^>]*$|^#([\w-]+)$/, + + // Is it a simple selector + isSimple = /^.[^:#\[\.,]*$/, + + // Check if a string has a non-whitespace character in it + rnotwhite = /\S/, + + // Used for trimming whitespace + rtrim = /^(\s|\u00A0)+|(\s|\u00A0)+$/g, + + // Match a standalone tag + rsingleTag = /^<(\w+)\s*\/?>(?:<\/\1>)?$/, + + // Keep a UserAgent string for use with jQuery.browser + userAgent = navigator.userAgent, + + // For matching the engine and version of the browser + browserMatch, + + // Has the ready events already been bound? 
+ readyBound = false, + + // The functions to execute on DOM ready + readyList = [], + + // The ready event handler + DOMContentLoaded, + + // Save a reference to some core methods + toString = Object.prototype.toString, + hasOwnProperty = Object.prototype.hasOwnProperty, + push = Array.prototype.push, + slice = Array.prototype.slice, + indexOf = Array.prototype.indexOf; + +jQuery.fn = jQuery.prototype = { + init: function( selector, context ) { + var match, elem, ret, doc; + + // Handle $(""), $(null), or $(undefined) + if ( !selector ) { + return this; + } + + // Handle $(DOMElement) + if ( selector.nodeType ) { + this.context = this[0] = selector; + this.length = 1; + return this; + } + + // The body element only exists once, optimize finding it + if ( selector === "body" && !context ) { + this.context = document; + this[0] = document.body; + this.selector = "body"; + this.length = 1; + return this; + } + + // Handle HTML strings + if ( typeof selector === "string" ) { + // Are we dealing with HTML string or an ID? + match = quickExpr.exec( selector ); + + // Verify a match, and that no context was specified for #id + if ( match && (match[1] || !context) ) { + + // HANDLE: $(html) -> $(array) + if ( match[1] ) { + doc = (context ? context.ownerDocument || context : document); + + // If a single string is passed in and it's a single tag + // just do a createElement and skip the rest + ret = rsingleTag.exec( selector ); + + if ( ret ) { + if ( jQuery.isPlainObject( context ) ) { + selector = [ document.createElement( ret[1] ) ]; + jQuery.fn.attr.call( selector, context, true ); + + } else { + selector = [ doc.createElement( ret[1] ) ]; + } + + } else { + ret = buildFragment( [ match[1] ], [ doc ] ); + selector = (ret.cacheable ? 
ret.fragment.cloneNode(true) : ret.fragment).childNodes; + } + + return jQuery.merge( this, selector ); + + // HANDLE: $("#id") + } else { + elem = document.getElementById( match[2] ); + + if ( elem ) { + // Handle the case where IE and Opera return items + // by name instead of ID + if ( elem.id !== match[2] ) { + return rootjQuery.find( selector ); + } + + // Otherwise, we inject the element directly into the jQuery object + this.length = 1; + this[0] = elem; + } + + this.context = document; + this.selector = selector; + return this; + } + + // HANDLE: $("TAG") + } else if ( !context && /^\w+$/.test( selector ) ) { + this.selector = selector; + this.context = document; + selector = document.getElementsByTagName( selector ); + return jQuery.merge( this, selector ); + + // HANDLE: $(expr, $(...)) + } else if ( !context || context.jquery ) { + return (context || rootjQuery).find( selector ); + + // HANDLE: $(expr, context) + // (which is just equivalent to: $(context).find(expr) + } else { + return jQuery( context ).find( selector ); + } + + // HANDLE: $(function) + // Shortcut for document ready + } else if ( jQuery.isFunction( selector ) ) { + return rootjQuery.ready( selector ); + } + + if (selector.selector !== undefined) { + this.selector = selector.selector; + this.context = selector.context; + } + + return jQuery.makeArray( selector, this ); + }, + + // Start with an empty selector + selector: "", + + // The current version of jQuery being used + jquery: "1.4.2", + + // The default length of a jQuery object is 0 + length: 0, + + // The number of elements contained in the matched element set + size: function() { + return this.length; + }, + + toArray: function() { + return slice.call( this, 0 ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + return num == null ? + + // Return a 'clean' array + this.toArray() : + + // Return just the object + ( num < 0 ? 
this.slice(num)[ 0 ] : this[ num ] ); + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems, name, selector ) { + // Build a new jQuery matched element set + var ret = jQuery(); + + if ( jQuery.isArray( elems ) ) { + push.apply( ret, elems ); + + } else { + jQuery.merge( ret, elems ); + } + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + ret.context = this.context; + + if ( name === "find" ) { + ret.selector = this.selector + (this.selector ? " " : "") + selector; + } else if ( name ) { + ret.selector = this.selector + "." + name + "(" + selector + ")"; + } + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + // (You can seed the arguments with an array of args, but this is + // only used internally.) + each: function( callback, args ) { + return jQuery.each( this, callback, args ); + }, + + ready: function( fn ) { + // Attach the listeners + jQuery.bindReady(); + + // If the DOM is already ready + if ( jQuery.isReady ) { + // Execute the function immediately + fn.call( document, jQuery ); + + // Otherwise, remember the function for later + } else if ( readyList ) { + // Add the function to the wait list + readyList.push( fn ); + } + + return this; + }, + + eq: function( i ) { + return i === -1 ? + this.slice( i ) : + this.slice( i, +i + 1 ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ), + "slice", slice.call(arguments).join(",") ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map(this, function( elem, i ) { + return callback.call( elem, i, elem ); + })); + }, + + end: function() { + return this.prevObject || jQuery(null); + }, + + // For internal use only. 
+ // Behaves like an Array's method, not like a jQuery method. + push: push, + sort: [].sort, + splice: [].splice +}; + +// Give the init function the jQuery prototype for later instantiation +jQuery.fn.init.prototype = jQuery.fn; + +jQuery.extend = jQuery.fn.extend = function() { + // copy reference to target object + var target = arguments[0] || {}, i = 1, length = arguments.length, deep = false, options, name, src, copy; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + target = arguments[1] || {}; + // skip the boolean and the target + i = 2; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !jQuery.isFunction(target) ) { + target = {}; + } + + // extend jQuery itself if only one argument is passed + if ( length === i ) { + target = this; + --i; + } + + for ( ; i < length; i++ ) { + // Only deal with non-null/undefined values + if ( (options = arguments[ i ]) != null ) { + // Extend the base object + for ( name in options ) { + src = target[ name ]; + copy = options[ name ]; + + // Prevent never-ending loop + if ( target === copy ) { + continue; + } + + // Recurse if we're merging object literal values or arrays + if ( deep && copy && ( jQuery.isPlainObject(copy) || jQuery.isArray(copy) ) ) { + var clone = src && ( jQuery.isPlainObject(src) || jQuery.isArray(src) ) ? src + : jQuery.isArray(copy) ? [] : {}; + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend({ + noConflict: function( deep ) { + window.$ = _$; + + if ( deep ) { + window.jQuery = _jQuery; + } + + return jQuery; + }, + + // Is the DOM ready to be used? Set to true once it occurs. 
+ isReady: false, + + // Handle when the DOM is ready + ready: function() { + // Make sure that the DOM is not already loaded + if ( !jQuery.isReady ) { + // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443). + if ( !document.body ) { + return setTimeout( jQuery.ready, 13 ); + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If there are functions bound, to execute + if ( readyList ) { + // Execute all of them + var fn, i = 0; + while ( (fn = readyList[ i++ ]) ) { + fn.call( document, jQuery ); + } + + // Reset the list of functions + readyList = null; + } + + // Trigger any bound ready events + if ( jQuery.fn.triggerHandler ) { + jQuery( document ).triggerHandler( "ready" ); + } + } + }, + + bindReady: function() { + if ( readyBound ) { + return; + } + + readyBound = true; + + // Catch cases where $(document).ready() is called after the + // browser event has already occurred. + if ( document.readyState === "complete" ) { + return jQuery.ready(); + } + + // Mozilla, Opera and webkit nightlies currently support this event + if ( document.addEventListener ) { + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", DOMContentLoaded, false ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", jQuery.ready, false ); + + // If IE event model is used + } else if ( document.attachEvent ) { + // ensure firing before onload, + // maybe late but safe also for iframes + document.attachEvent("onreadystatechange", DOMContentLoaded); + + // A fallback to window.onload, that will always work + window.attachEvent( "onload", jQuery.ready ); + + // If IE and not a frame + // continually check to see if the document is ready + var toplevel = false; + + try { + toplevel = window.frameElement == null; + } catch(e) {} + + if ( document.documentElement.doScroll && toplevel ) { + doScrollCheck(); + } + } + }, + + // See test/unit/core.js for details concerning 
isFunction. + // Since version 1.3, DOM methods and functions like alert + // aren't supported. They return false on IE (#2968). + isFunction: function( obj ) { + return toString.call(obj) === "[object Function]"; + }, + + isArray: function( obj ) { + return toString.call(obj) === "[object Array]"; + }, + + isPlainObject: function( obj ) { + // Must be an Object. + // Because of IE, we also have to check the presence of the constructor property. + // Make sure that DOM nodes and window objects don't pass through, as well + if ( !obj || toString.call(obj) !== "[object Object]" || obj.nodeType || obj.setInterval ) { + return false; + } + + // Not own constructor property must be Object + if ( obj.constructor + && !hasOwnProperty.call(obj, "constructor") + && !hasOwnProperty.call(obj.constructor.prototype, "isPrototypeOf") ) { + return false; + } + + // Own properties are enumerated firstly, so to speed up, + // if last one is own, then all properties are own. + + var key; + for ( key in obj ) {} + + return key === undefined || hasOwnProperty.call( obj, key ); + }, + + isEmptyObject: function( obj ) { + for ( var name in obj ) { + return false; + } + return true; + }, + + error: function( msg ) { + throw msg; + }, + + parseJSON: function( data ) { + if ( typeof data !== "string" || !data ) { + return null; + } + + // Make sure leading/trailing whitespace is removed (IE can't handle it) + data = jQuery.trim( data ); + + // Make sure the incoming data is actual JSON + // Logic borrowed from http://json.org/json2.js + if ( /^[\],:{}\s]*$/.test(data.replace(/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g, "@") + .replace(/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g, "]") + .replace(/(?:^|:|,)(?:\s*\[)+/g, "")) ) { + + // Try to use the native JSON parser first + return window.JSON && window.JSON.parse ? 
+ window.JSON.parse( data ) : + (new Function("return " + data))(); + + } else { + jQuery.error( "Invalid JSON: " + data ); + } + }, + + noop: function() {}, + + // Evalulates a script in a global context + globalEval: function( data ) { + if ( data && rnotwhite.test(data) ) { + // Inspired by code by Andrea Giammarchi + // http://webreflection.blogspot.com/2007/08/global-scope-evaluation-and-dom.html + var head = document.getElementsByTagName("head")[0] || document.documentElement, + script = document.createElement("script"); + + script.type = "text/javascript"; + + if ( jQuery.support.scriptEval ) { + script.appendChild( document.createTextNode( data ) ); + } else { + script.text = data; + } + + // Use insertBefore instead of appendChild to circumvent an IE6 bug. + // This arises when a base node is used (#2709). + head.insertBefore( script, head.firstChild ); + head.removeChild( script ); + } + }, + + nodeName: function( elem, name ) { + return elem.nodeName && elem.nodeName.toUpperCase() === name.toUpperCase(); + }, + + // args is for internal usage only + each: function( object, callback, args ) { + var name, i = 0, + length = object.length, + isObj = length === undefined || jQuery.isFunction(object); + + if ( args ) { + if ( isObj ) { + for ( name in object ) { + if ( callback.apply( object[ name ], args ) === false ) { + break; + } + } + } else { + for ( ; i < length; ) { + if ( callback.apply( object[ i++ ], args ) === false ) { + break; + } + } + } + + // A special, fast, case for the most common use of each + } else { + if ( isObj ) { + for ( name in object ) { + if ( callback.call( object[ name ], name, object[ name ] ) === false ) { + break; + } + } + } else { + for ( var value = object[0]; + i < length && callback.call( value, i, value ) !== false; value = object[++i] ) {} + } + } + + return object; + }, + + trim: function( text ) { + return (text || "").replace( rtrim, "" ); + }, + + // results is for internal usage only + makeArray: function( array, 
results ) { + var ret = results || []; + + if ( array != null ) { + // The window, strings (and functions) also have 'length' + // The extra typeof function check is to prevent crashes + // in Safari 2 (See: #3039) + if ( array.length == null || typeof array === "string" || jQuery.isFunction(array) || (typeof array !== "function" && array.setInterval) ) { + push.call( ret, array ); + } else { + jQuery.merge( ret, array ); + } + } + + return ret; + }, + + inArray: function( elem, array ) { + if ( array.indexOf ) { + return array.indexOf( elem ); + } + + for ( var i = 0, length = array.length; i < length; i++ ) { + if ( array[ i ] === elem ) { + return i; + } + } + + return -1; + }, + + merge: function( first, second ) { + var i = first.length, j = 0; + + if ( typeof second.length === "number" ) { + for ( var l = second.length; j < l; j++ ) { + first[ i++ ] = second[ j ]; + } + + } else { + while ( second[j] !== undefined ) { + first[ i++ ] = second[ j++ ]; + } + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, inv ) { + var ret = []; + + // Go through the array, only saving the items + // that pass the validator function + for ( var i = 0, length = elems.length; i < length; i++ ) { + if ( !inv !== !callback( elems[ i ], i ) ) { + ret.push( elems[ i ] ); + } + } + + return ret; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var ret = [], value; + + // Go through the array, translating each of the items to their + // new value (or values). 
+ for ( var i = 0, length = elems.length; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret[ ret.length ] = value; + } + } + + return ret.concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + proxy: function( fn, proxy, thisObject ) { + if ( arguments.length === 2 ) { + if ( typeof proxy === "string" ) { + thisObject = fn; + fn = thisObject[ proxy ]; + proxy = undefined; + + } else if ( proxy && !jQuery.isFunction( proxy ) ) { + thisObject = proxy; + proxy = undefined; + } + } + + if ( !proxy && fn ) { + proxy = function() { + return fn.apply( thisObject || this, arguments ); + }; + } + + // Set the guid of unique handler to the same of original handler, so it can be removed + if ( fn ) { + proxy.guid = fn.guid = fn.guid || proxy.guid || jQuery.guid++; + } + + // So proxy can be declared as an argument + return proxy; + }, + + // Use of jQuery.browser is frowned upon. + // More details: http://docs.jquery.com/Utilities/jQuery.browser + uaMatch: function( ua ) { + ua = ua.toLowerCase(); + + var match = /(webkit)[ \/]([\w.]+)/.exec( ua ) || + /(opera)(?:.*version)?[ \/]([\w.]+)/.exec( ua ) || + /(msie) ([\w.]+)/.exec( ua ) || + !/compatible/.test( ua ) && /(mozilla)(?:.*? 
rv:([\w.]+))?/.exec( ua ) || + []; + + return { browser: match[1] || "", version: match[2] || "0" }; + }, + + browser: {} +}); + +browserMatch = jQuery.uaMatch( userAgent ); +if ( browserMatch.browser ) { + jQuery.browser[ browserMatch.browser ] = true; + jQuery.browser.version = browserMatch.version; +} + +// Deprecated, use jQuery.browser.webkit instead +if ( jQuery.browser.webkit ) { + jQuery.browser.safari = true; +} + +if ( indexOf ) { + jQuery.inArray = function( elem, array ) { + return indexOf.call( array, elem ); + }; +} + +// All jQuery objects should point back to these +rootjQuery = jQuery(document); + +// Cleanup functions for the document ready method +if ( document.addEventListener ) { + DOMContentLoaded = function() { + document.removeEventListener( "DOMContentLoaded", DOMContentLoaded, false ); + jQuery.ready(); + }; + +} else if ( document.attachEvent ) { + DOMContentLoaded = function() { + // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443). 
+ if ( document.readyState === "complete" ) { + document.detachEvent( "onreadystatechange", DOMContentLoaded ); + jQuery.ready(); + } + }; +} + +// The DOM ready check for Internet Explorer +function doScrollCheck() { + if ( jQuery.isReady ) { + return; + } + + try { + // If IE is used, use the trick by Diego Perini + // http://javascript.nwbox.com/IEContentLoaded/ + document.documentElement.doScroll("left"); + } catch( error ) { + setTimeout( doScrollCheck, 1 ); + return; + } + + // and execute any waiting functions + jQuery.ready(); +} + +function evalScript( i, elem ) { + if ( elem.src ) { + jQuery.ajax({ + url: elem.src, + async: false, + dataType: "script" + }); + } else { + jQuery.globalEval( elem.text || elem.textContent || elem.innerHTML || "" ); + } + + if ( elem.parentNode ) { + elem.parentNode.removeChild( elem ); + } +} + +// Mutifunctional method to get and set values to a collection +// The value/s can be optionally by executed if its a function +function access( elems, key, value, exec, fn, pass ) { + var length = elems.length; + + // Setting many attributes + if ( typeof key === "object" ) { + for ( var k in key ) { + access( elems, k, key[k], exec, fn, value ); + } + return elems; + } + + // Setting one attribute + if ( value !== undefined ) { + // Optionally, function values get executed if exec is true + exec = !pass && exec && jQuery.isFunction(value); + + for ( var i = 0; i < length; i++ ) { + fn( elems[i], key, exec ? value.call( elems[i], i, fn( elems[i], key ) ) : value, pass ); + } + + return elems; + } + + // Getting an attribute + return length ? fn( elems[0], key ) : undefined; +} + +function now() { + return (new Date).getTime(); +} +(function() { + + jQuery.support = {}; + + var root = document.documentElement, + script = document.createElement("script"), + div = document.createElement("div"), + id = "script" + now(); + + div.style.display = "none"; + div.innerHTML = "
      a"; + + var all = div.getElementsByTagName("*"), + a = div.getElementsByTagName("a")[0]; + + // Can't get basic test support + if ( !all || !all.length || !a ) { + return; + } + + jQuery.support = { + // IE strips leading whitespace when .innerHTML is used + leadingWhitespace: div.firstChild.nodeType === 3, + + // Make sure that tbody elements aren't automatically inserted + // IE will insert them into empty tables + tbody: !div.getElementsByTagName("tbody").length, + + // Make sure that link elements get serialized correctly by innerHTML + // This requires a wrapper element in IE + htmlSerialize: !!div.getElementsByTagName("link").length, + + // Get the style information from getAttribute + // (IE uses .cssText insted) + style: /red/.test( a.getAttribute("style") ), + + // Make sure that URLs aren't manipulated + // (IE normalizes it by default) + hrefNormalized: a.getAttribute("href") === "/a", + + // Make sure that element opacity exists + // (IE uses filter instead) + // Use a regex to work around a WebKit issue. See #5145 + opacity: /^0.55$/.test( a.style.opacity ), + + // Verify style float existence + // (IE uses styleFloat instead of cssFloat) + cssFloat: !!a.style.cssFloat, + + // Make sure that if no value is specified for a checkbox + // that it defaults to "on". + // (WebKit defaults to "" instead) + checkOn: div.getElementsByTagName("input")[0].value === "on", + + // Make sure that a selected-by-default option has a working selected property. 
+ // (WebKit defaults to false instead of true, IE too, if it's in an optgroup) + optSelected: document.createElement("select").appendChild( document.createElement("option") ).selected, + + parentNode: div.removeChild( div.appendChild( document.createElement("div") ) ).parentNode === null, + + // Will be defined later + deleteExpando: true, + checkClone: false, + scriptEval: false, + noCloneEvent: true, + boxModel: null + }; + + script.type = "text/javascript"; + try { + script.appendChild( document.createTextNode( "window." + id + "=1;" ) ); + } catch(e) {} + + root.insertBefore( script, root.firstChild ); + + // Make sure that the execution of code works by injecting a script + // tag with appendChild/createTextNode + // (IE doesn't support this, fails, and uses .text instead) + if ( window[ id ] ) { + jQuery.support.scriptEval = true; + delete window[ id ]; + } + + // Test to see if it's possible to delete an expando from an element + // Fails in Internet Explorer + try { + delete script.test; + + } catch(e) { + jQuery.support.deleteExpando = false; + } + + root.removeChild( script ); + + if ( div.attachEvent && div.fireEvent ) { + div.attachEvent("onclick", function click() { + // Cloning a node shouldn't copy over any + // bound event handlers (IE does this) + jQuery.support.noCloneEvent = false; + div.detachEvent("onclick", click); + }); + div.cloneNode(true).fireEvent("onclick"); + } + + div = document.createElement("div"); + div.innerHTML = ""; + + var fragment = document.createDocumentFragment(); + fragment.appendChild( div.firstChild ); + + // WebKit doesn't clone checked state correctly in fragments + jQuery.support.checkClone = fragment.cloneNode(true).cloneNode(true).lastChild.checked; + + // Figure out if the W3C box model works as expected + // document.body must exist before we can do this + jQuery(function() { + var div = document.createElement("div"); + div.style.width = div.style.paddingLeft = "1px"; + + document.body.appendChild( div ); + 
jQuery.boxModel = jQuery.support.boxModel = div.offsetWidth === 2; + document.body.removeChild( div ).style.display = 'none'; + + div = null; + }); + + // Technique from Juriy Zaytsev + // http://thinkweb2.com/projects/prototype/detecting-event-support-without-browser-sniffing/ + var eventSupported = function( eventName ) { + var el = document.createElement("div"); + eventName = "on" + eventName; + + var isSupported = (eventName in el); + if ( !isSupported ) { + el.setAttribute(eventName, "return;"); + isSupported = typeof el[eventName] === "function"; + } + el = null; + + return isSupported; + }; + + jQuery.support.submitBubbles = eventSupported("submit"); + jQuery.support.changeBubbles = eventSupported("change"); + + // release memory in IE + root = script = div = all = a = null; +})(); + +jQuery.props = { + "for": "htmlFor", + "class": "className", + readonly: "readOnly", + maxlength: "maxLength", + cellspacing: "cellSpacing", + rowspan: "rowSpan", + colspan: "colSpan", + tabindex: "tabIndex", + usemap: "useMap", + frameborder: "frameBorder" +}; +var expando = "jQuery" + now(), uuid = 0, windowData = {}; + +jQuery.extend({ + cache: {}, + + expando:expando, + + // The following elements throw uncatchable exceptions if you + // attempt to add expando properties to them. + noData: { + "embed": true, + "object": true, + "applet": true + }, + + data: function( elem, name, data ) { + if ( elem.nodeName && jQuery.noData[elem.nodeName.toLowerCase()] ) { + return; + } + + elem = elem == window ? + windowData : + elem; + + var id = elem[ expando ], cache = jQuery.cache, thisCache; + + if ( !id && typeof name === "string" && data === undefined ) { + return null; + } + + // Compute a unique ID for the element + if ( !id ) { + id = ++uuid; + } + + // Avoid generating a new cache unless none exists and we + // want to manipulate it. 
+ if ( typeof name === "object" ) { + elem[ expando ] = id; + thisCache = cache[ id ] = jQuery.extend(true, {}, name); + + } else if ( !cache[ id ] ) { + elem[ expando ] = id; + cache[ id ] = {}; + } + + thisCache = cache[ id ]; + + // Prevent overriding the named cache with undefined values + if ( data !== undefined ) { + thisCache[ name ] = data; + } + + return typeof name === "string" ? thisCache[ name ] : thisCache; + }, + + removeData: function( elem, name ) { + if ( elem.nodeName && jQuery.noData[elem.nodeName.toLowerCase()] ) { + return; + } + + elem = elem == window ? + windowData : + elem; + + var id = elem[ expando ], cache = jQuery.cache, thisCache = cache[ id ]; + + // If we want to remove a specific section of the element's data + if ( name ) { + if ( thisCache ) { + // Remove the section of cache data + delete thisCache[ name ]; + + // If we've removed all the data, remove the element's cache + if ( jQuery.isEmptyObject(thisCache) ) { + jQuery.removeData( elem ); + } + } + + // Otherwise, we want to remove all of the element's data + } else { + if ( jQuery.support.deleteExpando ) { + delete elem[ jQuery.expando ]; + + } else if ( elem.removeAttribute ) { + elem.removeAttribute( jQuery.expando ); + } + + // Completely remove the data cache + delete cache[ id ]; + } + } +}); + +jQuery.fn.extend({ + data: function( key, value ) { + if ( typeof key === "undefined" && this.length ) { + return jQuery.data( this[0] ); + + } else if ( typeof key === "object" ) { + return this.each(function() { + jQuery.data( this, key ); + }); + } + + var parts = key.split("."); + parts[1] = parts[1] ? "." + parts[1] : ""; + + if ( value === undefined ) { + var data = this.triggerHandler("getData" + parts[1] + "!", [parts[0]]); + + if ( data === undefined && this.length ) { + data = jQuery.data( this[0], key ); + } + return data === undefined && parts[1] ? 
+ this.data( parts[0] ) : + data; + } else { + return this.trigger("setData" + parts[1] + "!", [parts[0], value]).each(function() { + jQuery.data( this, key, value ); + }); + } + }, + + removeData: function( key ) { + return this.each(function() { + jQuery.removeData( this, key ); + }); + } +}); +jQuery.extend({ + queue: function( elem, type, data ) { + if ( !elem ) { + return; + } + + type = (type || "fx") + "queue"; + var q = jQuery.data( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( !data ) { + return q || []; + } + + if ( !q || jQuery.isArray(data) ) { + q = jQuery.data( elem, type, jQuery.makeArray(data) ); + + } else { + q.push( data ); + } + + return q; + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), fn = queue.shift(); + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + } + + if ( fn ) { + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift("inprogress"); + } + + fn.call(elem, function() { + jQuery.dequeue(elem, type); + }); + } + } +}); + +jQuery.fn.extend({ + queue: function( type, data ) { + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + } + + if ( data === undefined ) { + return jQuery.queue( this[0], type ); + } + return this.each(function( i, elem ) { + var queue = jQuery.queue( this, type, data ); + + if ( type === "fx" && queue[0] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + }); + }, + dequeue: function( type ) { + return this.each(function() { + jQuery.dequeue( this, type ); + }); + }, + + // Based off of the plugin by Clint Helfers, with permission. + // http://blindsignals.com/index.php/2009/07/jquery-delay/ + delay: function( time, type ) { + time = jQuery.fx ? 
jQuery.fx.speeds[time] || time : time; + type = type || "fx"; + + return this.queue( type, function() { + var elem = this; + setTimeout(function() { + jQuery.dequeue( elem, type ); + }, time ); + }); + }, + + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + } +}); +var rclass = /[\n\t]/g, + rspace = /\s+/, + rreturn = /\r/g, + rspecialurl = /href|src|style/, + rtype = /(button|input)/i, + rfocusable = /(button|input|object|select|textarea)/i, + rclickable = /^(a|area)$/i, + rradiocheck = /radio|checkbox/; + +jQuery.fn.extend({ + attr: function( name, value ) { + return access( this, name, value, true, jQuery.attr ); + }, + + removeAttr: function( name, fn ) { + return this.each(function(){ + jQuery.attr( this, name, "" ); + if ( this.nodeType === 1 ) { + this.removeAttribute( name ); + } + }); + }, + + addClass: function( value ) { + if ( jQuery.isFunction(value) ) { + return this.each(function(i) { + var self = jQuery(this); + self.addClass( value.call(this, i, self.attr("class")) ); + }); + } + + if ( value && typeof value === "string" ) { + var classNames = (value || "").split( rspace ); + + for ( var i = 0, l = this.length; i < l; i++ ) { + var elem = this[i]; + + if ( elem.nodeType === 1 ) { + if ( !elem.className ) { + elem.className = value; + + } else { + var className = " " + elem.className + " ", setClass = elem.className; + for ( var c = 0, cl = classNames.length; c < cl; c++ ) { + if ( className.indexOf( " " + classNames[c] + " " ) < 0 ) { + setClass += " " + classNames[c]; + } + } + elem.className = jQuery.trim( setClass ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + if ( jQuery.isFunction(value) ) { + return this.each(function(i) { + var self = jQuery(this); + self.removeClass( value.call(this, i, self.attr("class")) ); + }); + } + + if ( (value && typeof value === "string") || value === undefined ) { + var classNames = (value || "").split(rspace); + + for ( var i = 0, l = this.length; i < l; 
i++ ) { + var elem = this[i]; + + if ( elem.nodeType === 1 && elem.className ) { + if ( value ) { + var className = (" " + elem.className + " ").replace(rclass, " "); + for ( var c = 0, cl = classNames.length; c < cl; c++ ) { + className = className.replace(" " + classNames[c] + " ", " "); + } + elem.className = jQuery.trim( className ); + + } else { + elem.className = ""; + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value, isBool = typeof stateVal === "boolean"; + + if ( jQuery.isFunction( value ) ) { + return this.each(function(i) { + var self = jQuery(this); + self.toggleClass( value.call(this, i, self.attr("class"), stateVal), stateVal ); + }); + } + + return this.each(function() { + if ( type === "string" ) { + // toggle individual class names + var className, i = 0, self = jQuery(this), + state = stateVal, + classNames = value.split( rspace ); + + while ( (className = classNames[ i++ ]) ) { + // check each className given, space seperated list + state = isBool ? state : !self.hasClass( className ); + self[ state ? "addClass" : "removeClass" ]( className ); + } + + } else if ( type === "undefined" || type === "boolean" ) { + if ( this.className ) { + // store className if set + jQuery.data( this, "__className__", this.className ); + } + + // toggle whole className + this.className = this.className || value === false ? "" : jQuery.data( this, "__className__" ) || ""; + } + }); + }, + + hasClass: function( selector ) { + var className = " " + selector + " "; + for ( var i = 0, l = this.length; i < l; i++ ) { + if ( (" " + this[i].className + " ").replace(rclass, " ").indexOf( className ) > -1 ) { + return true; + } + } + + return false; + }, + + val: function( value ) { + if ( value === undefined ) { + var elem = this[0]; + + if ( elem ) { + if ( jQuery.nodeName( elem, "option" ) ) { + return (elem.attributes.value || {}).specified ? 
elem.value : elem.text; + } + + // We need to handle select boxes special + if ( jQuery.nodeName( elem, "select" ) ) { + var index = elem.selectedIndex, + values = [], + options = elem.options, + one = elem.type === "select-one"; + + // Nothing was selected + if ( index < 0 ) { + return null; + } + + // Loop through all the selected options + for ( var i = one ? index : 0, max = one ? index + 1 : options.length; i < max; i++ ) { + var option = options[ i ]; + + if ( option.selected ) { + // Get the specifc value for the option + value = jQuery(option).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + } + + // Handle the case where in Webkit "" is returned instead of "on" if a value isn't specified + if ( rradiocheck.test( elem.type ) && !jQuery.support.checkOn ) { + return elem.getAttribute("value") === null ? "on" : elem.value; + } + + + // Everything else, we just grab the value + return (elem.value || "").replace(rreturn, ""); + + } + + return undefined; + } + + var isFunction = jQuery.isFunction(value); + + return this.each(function(i) { + var self = jQuery(this), val = value; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( isFunction ) { + val = value.call(this, i, self.val()); + } + + // Typecast each time if the value is a Function and the appended + // value is therefore different each time. 
+ if ( typeof val === "number" ) { + val += ""; + } + + if ( jQuery.isArray(val) && rradiocheck.test( this.type ) ) { + this.checked = jQuery.inArray( self.val(), val ) >= 0; + + } else if ( jQuery.nodeName( this, "select" ) ) { + var values = jQuery.makeArray(val); + + jQuery( "option", this ).each(function() { + this.selected = jQuery.inArray( jQuery(this).val(), values ) >= 0; + }); + + if ( !values.length ) { + this.selectedIndex = -1; + } + + } else { + this.value = val; + } + }); + } +}); + +jQuery.extend({ + attrFn: { + val: true, + css: true, + html: true, + text: true, + data: true, + width: true, + height: true, + offset: true + }, + + attr: function( elem, name, value, pass ) { + // don't set attributes on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 ) { + return undefined; + } + + if ( pass && name in jQuery.attrFn ) { + return jQuery(elem)[name](value); + } + + var notxml = elem.nodeType !== 1 || !jQuery.isXMLDoc( elem ), + // Whether we are setting (or getting) + set = value !== undefined; + + // Try to normalize/fix the name + name = notxml && jQuery.props[ name ] || name; + + // Only do all the following if this is a node (faster for style) + if ( elem.nodeType === 1 ) { + // These attributes require special treatment + var special = rspecialurl.test( name ); + + // Safari mis-reports the default selected property of an option + // Accessing the parent's selectedIndex property fixes it + if ( name === "selected" && !jQuery.support.optSelected ) { + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + // Make sure that it also works with optgroups, see #5701 + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + + // If applicable, access the attribute via the DOM 0 way + if ( name in elem && notxml && !special ) { + if ( set ) { + // We can't allow the type property to be changed (since it causes problems in IE) + if ( name === "type" && rtype.test( elem.nodeName ) && 
elem.parentNode ) { + jQuery.error( "type property can't be changed" ); + } + + elem[ name ] = value; + } + + // browsers index elements by id/name on forms, give priority to attributes. + if ( jQuery.nodeName( elem, "form" ) && elem.getAttributeNode(name) ) { + return elem.getAttributeNode( name ).nodeValue; + } + + // elem.tabIndex doesn't always return the correct value when it hasn't been explicitly set + // http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + if ( name === "tabIndex" ) { + var attributeNode = elem.getAttributeNode( "tabIndex" ); + + return attributeNode && attributeNode.specified ? + attributeNode.value : + rfocusable.test( elem.nodeName ) || rclickable.test( elem.nodeName ) && elem.href ? + 0 : + undefined; + } + + return elem[ name ]; + } + + if ( !jQuery.support.style && notxml && name === "style" ) { + if ( set ) { + elem.style.cssText = "" + value; + } + + return elem.style.cssText; + } + + if ( set ) { + // convert the value to a string (all browsers do this but IE) see #1070 + elem.setAttribute( name, "" + value ); + } + + var attr = !jQuery.support.hrefNormalized && notxml && special ? + // Some attributes require a special call on IE + elem.getAttribute( name, 2 ) : + elem.getAttribute( name ); + + // Non-existent attributes return null, we normalize to undefined + return attr === null ? undefined : attr; + } + + // elem is actually elem.style ... set the style + // Using attr for specific style information is now deprecated. Use style instead. + return jQuery.style( elem, name, value ); + } +}); +var rnamespaces = /\.(.*)$/, + fcleanup = function( nm ) { + return nm.replace(/[^\w\s\.\|`]/g, function( ch ) { + return "\\" + ch; + }); + }; + +/* + * A number of helper functions used for managing events. + * Many of the ideas behind this code originated from + * Dean Edwards' addEvent library. 
+ */ +jQuery.event = { + + // Bind an event to an element + // Original by Dean Edwards + add: function( elem, types, handler, data ) { + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // For whatever reason, IE has trouble passing the window object + // around, causing it to be cloned in the process + if ( elem.setInterval && ( elem !== window && !elem.frameElement ) ) { + elem = window; + } + + var handleObjIn, handleObj; + + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + } + + // Make sure that the function being executed has a unique ID + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure + var elemData = jQuery.data( elem ); + + // If no elemData is found then we must be trying to bind to one of the + // banned noData elements + if ( !elemData ) { + return; + } + + var events = elemData.events = elemData.events || {}, + eventHandle = elemData.handle, eventHandle; + + if ( !eventHandle ) { + elemData.handle = eventHandle = function() { + // Handle the second event of a trigger and when + // an event is called after a page has unloaded + return typeof jQuery !== "undefined" && !jQuery.event.triggered ? + jQuery.event.handle.apply( eventHandle.elem, arguments ) : + undefined; + }; + } + + // Add elem as a property of the handle function + // This is to prevent a memory leak with non-native events in IE. + eventHandle.elem = elem; + + // Handle multiple events separated by a space + // jQuery(...).bind("mouseover mouseout", fn); + types = types.split(" "); + + var type, i = 0, namespaces; + + while ( (type = types[ i++ ]) ) { + handleObj = handleObjIn ? 
+ jQuery.extend({}, handleObjIn) : + { handler: handler, data: data }; + + // Namespaced event handlers + if ( type.indexOf(".") > -1 ) { + namespaces = type.split("."); + type = namespaces.shift(); + handleObj.namespace = namespaces.slice(0).sort().join("."); + + } else { + namespaces = []; + handleObj.namespace = ""; + } + + handleObj.type = type; + handleObj.guid = handler.guid; + + // Get the current list of functions bound to this event + var handlers = events[ type ], + special = jQuery.event.special[ type ] || {}; + + // Init the event handler queue + if ( !handlers ) { + handlers = events[ type ] = []; + + // Check for a special event handler + // Only use addEventListener/attachEvent if the special + // events handler returns false + if ( !special.setup || special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + // Bind the global event handler to the element + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle, false ); + + } else if ( elem.attachEvent ) { + elem.attachEvent( "on" + type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add the function to the element's handler list + handlers.push( handleObj ); + + // Keep track of which events have been used, for global triggering + jQuery.event.global[ type ] = true; + } + + // Nullify elem to prevent memory leaks in IE + elem = null; + }, + + global: {}, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, pos ) { + // don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + var ret, type, fn, i = 0, all, namespaces, namespace, special, eventType, handleObj, origType, + elemData = jQuery.data( elem ), + events = elemData && elemData.events; + + if ( !elemData || !events ) { + return; + } + + // types is actually an event object 
here + if ( types && types.type ) { + handler = types.handler; + types = types.type; + } + + // Unbind all events for the element + if ( !types || typeof types === "string" && types.charAt(0) === "." ) { + types = types || ""; + + for ( type in events ) { + jQuery.event.remove( elem, type + types ); + } + + return; + } + + // Handle multiple events separated by a space + // jQuery(...).unbind("mouseover mouseout", fn); + types = types.split(" "); + + while ( (type = types[ i++ ]) ) { + origType = type; + handleObj = null; + all = type.indexOf(".") < 0; + namespaces = []; + + if ( !all ) { + // Namespaced event handlers + namespaces = type.split("."); + type = namespaces.shift(); + + namespace = new RegExp("(^|\\.)" + + jQuery.map( namespaces.slice(0).sort(), fcleanup ).join("\\.(?:.*\\.)?") + "(\\.|$)") + } + + eventType = events[ type ]; + + if ( !eventType ) { + continue; + } + + if ( !handler ) { + for ( var j = 0; j < eventType.length; j++ ) { + handleObj = eventType[ j ]; + + if ( all || namespace.test( handleObj.namespace ) ) { + jQuery.event.remove( elem, origType, handleObj.handler, j ); + eventType.splice( j--, 1 ); + } + } + + continue; + } + + special = jQuery.event.special[ type ] || {}; + + for ( var j = pos || 0; j < eventType.length; j++ ) { + handleObj = eventType[ j ]; + + if ( handler.guid === handleObj.guid ) { + // remove the given handler for the given type + if ( all || namespace.test( handleObj.namespace ) ) { + if ( pos == null ) { + eventType.splice( j--, 1 ); + } + + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + + if ( pos != null ) { + break; + } + } + } + + // remove generic event handler if no more handlers exist + if ( eventType.length === 0 || pos != null && eventType.length === 1 ) { + if ( !special.teardown || special.teardown.call( elem, namespaces ) === false ) { + removeEvent( elem, type, elemData.handle ); + } + + ret = null; + delete events[ type ]; + } + } + + // Remove the expando if it's no 
longer used + if ( jQuery.isEmptyObject( events ) ) { + var handle = elemData.handle; + if ( handle ) { + handle.elem = null; + } + + delete elemData.events; + delete elemData.handle; + + if ( jQuery.isEmptyObject( elemData ) ) { + jQuery.removeData( elem ); + } + } + }, + + // bubbling is internal + trigger: function( event, data, elem /*, bubbling */ ) { + // Event object or event type + var type = event.type || event, + bubbling = arguments[3]; + + if ( !bubbling ) { + event = typeof event === "object" ? + // jQuery.Event object + event[expando] ? event : + // Object literal + jQuery.extend( jQuery.Event(type), event ) : + // Just the event type (string) + jQuery.Event(type); + + if ( type.indexOf("!") >= 0 ) { + event.type = type = type.slice(0, -1); + event.exclusive = true; + } + + // Handle a global trigger + if ( !elem ) { + // Don't bubble custom events when global (to avoid too much overhead) + event.stopPropagation(); + + // Only trigger if we've ever bound an event for it + if ( jQuery.event.global[ type ] ) { + jQuery.each( jQuery.cache, function() { + if ( this.events && this.events[type] ) { + jQuery.event.trigger( event, data, this.handle.elem ); + } + }); + } + } + + // Handle triggering a single element + + // don't do events on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 ) { + return undefined; + } + + // Clean up in case it is reused + event.result = undefined; + event.target = elem; + + // Clone the incoming data, if any + data = jQuery.makeArray( data ); + data.unshift( event ); + } + + event.currentTarget = elem; + + // Trigger the event, it is assumed that "handle" is a function + var handle = jQuery.data( elem, "handle" ); + if ( handle ) { + handle.apply( elem, data ); + } + + var parent = elem.parentNode || elem.ownerDocument; + + // Trigger an inline bound script + try { + if ( !(elem && elem.nodeName && jQuery.noData[elem.nodeName.toLowerCase()]) ) { + if ( elem[ "on" + type ] && elem[ "on" + type 
].apply( elem, data ) === false ) { + event.result = false; + } + } + + // prevent IE from throwing an error for some elements with some event types, see #3533 + } catch (e) {} + + if ( !event.isPropagationStopped() && parent ) { + jQuery.event.trigger( event, data, parent, true ); + + } else if ( !event.isDefaultPrevented() ) { + var target = event.target, old, + isClick = jQuery.nodeName(target, "a") && type === "click", + special = jQuery.event.special[ type ] || {}; + + if ( (!special._default || special._default.call( elem, event ) === false) && + !isClick && !(target && target.nodeName && jQuery.noData[target.nodeName.toLowerCase()]) ) { + + try { + if ( target[ type ] ) { + // Make sure that we don't accidentally re-trigger the onFOO events + old = target[ "on" + type ]; + + if ( old ) { + target[ "on" + type ] = null; + } + + jQuery.event.triggered = true; + target[ type ](); + } + + // prevent IE from throwing an error for some elements with some event types, see #3533 + } catch (e) {} + + if ( old ) { + target[ "on" + type ] = old; + } + + jQuery.event.triggered = false; + } + } + }, + + handle: function( event ) { + var all, handlers, namespaces, namespace, events; + + event = arguments[0] = jQuery.event.fix( event || window.event ); + event.currentTarget = this; + + // Namespaced event handlers + all = event.type.indexOf(".") < 0 && !event.exclusive; + + if ( !all ) { + namespaces = event.type.split("."); + event.type = namespaces.shift(); + namespace = new RegExp("(^|\\.)" + namespaces.slice(0).sort().join("\\.(?:.*\\.)?") + "(\\.|$)"); + } + + var events = jQuery.data(this, "events"), handlers = events[ event.type ]; + + if ( events && handlers ) { + // Clone the handlers to prevent manipulation + handlers = handlers.slice(0); + + for ( var j = 0, l = handlers.length; j < l; j++ ) { + var handleObj = handlers[ j ]; + + // Filter the functions by class + if ( all || namespace.test( handleObj.namespace ) ) { + // Pass in a reference to the handler 
function itself + // So that we can later remove it + event.handler = handleObj.handler; + event.data = handleObj.data; + event.handleObj = handleObj; + + var ret = handleObj.handler.apply( this, arguments ); + + if ( ret !== undefined ) { + event.result = ret; + if ( ret === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + + if ( event.isImmediatePropagationStopped() ) { + break; + } + } + } + } + + return event.result; + }, + + props: "altKey attrChange attrName bubbles button cancelable charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode layerX layerY metaKey newValue offsetX offsetY originalTarget pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target toElement view wheelDelta which".split(" "), + + fix: function( event ) { + if ( event[ expando ] ) { + return event; + } + + // store a copy of the original event object + // and "clone" to set read-only properties + var originalEvent = event; + event = jQuery.Event( originalEvent ); + + for ( var i = this.props.length, prop; i; ) { + prop = this.props[ --i ]; + event[ prop ] = originalEvent[ prop ]; + } + + // Fix target property, if necessary + if ( !event.target ) { + event.target = event.srcElement || document; // Fixes #1925 where srcElement might not be defined either + } + + // check if target is a textnode (safari) + if ( event.target.nodeType === 3 ) { + event.target = event.target.parentNode; + } + + // Add relatedTarget, if necessary + if ( !event.relatedTarget && event.fromElement ) { + event.relatedTarget = event.fromElement === event.target ? 
event.toElement : event.fromElement; + } + + // Calculate pageX/Y if missing and clientX/Y available + if ( event.pageX == null && event.clientX != null ) { + var doc = document.documentElement, body = document.body; + event.pageX = event.clientX + (doc && doc.scrollLeft || body && body.scrollLeft || 0) - (doc && doc.clientLeft || body && body.clientLeft || 0); + event.pageY = event.clientY + (doc && doc.scrollTop || body && body.scrollTop || 0) - (doc && doc.clientTop || body && body.clientTop || 0); + } + + // Add which for key events + if ( !event.which && ((event.charCode || event.charCode === 0) ? event.charCode : event.keyCode) ) { + event.which = event.charCode || event.keyCode; + } + + // Add metaKey to non-Mac browsers (use ctrl for PC's and Meta for Macs) + if ( !event.metaKey && event.ctrlKey ) { + event.metaKey = event.ctrlKey; + } + + // Add which for click: 1 === left; 2 === middle; 3 === right + // Note: button is not normalized, so don't use it + if ( !event.which && event.button !== undefined ) { + event.which = (event.button & 1 ? 1 : ( event.button & 2 ? 3 : ( event.button & 4 ? 
2 : 0 ) )); + } + + return event; + }, + + // Deprecated, use jQuery.guid instead + guid: 1E8, + + // Deprecated, use jQuery.proxy instead + proxy: jQuery.proxy, + + special: { + ready: { + // Make sure the ready event is setup + setup: jQuery.bindReady, + teardown: jQuery.noop + }, + + live: { + add: function( handleObj ) { + jQuery.event.add( this, handleObj.origType, jQuery.extend({}, handleObj, {handler: liveHandler}) ); + }, + + remove: function( handleObj ) { + var remove = true, + type = handleObj.origType.replace(rnamespaces, ""); + + jQuery.each( jQuery.data(this, "events").live || [], function() { + if ( type === this.origType.replace(rnamespaces, "") ) { + remove = false; + return false; + } + }); + + if ( remove ) { + jQuery.event.remove( this, handleObj.origType, liveHandler ); + } + } + + }, + + beforeunload: { + setup: function( data, namespaces, eventHandle ) { + // We only want to do this special case on windows + if ( this.setInterval ) { + this.onbeforeunload = eventHandle; + } + + return false; + }, + teardown: function( namespaces, eventHandle ) { + if ( this.onbeforeunload === eventHandle ) { + this.onbeforeunload = null; + } + } + } + } +}; + +var removeEvent = document.removeEventListener ? 
+ function( elem, type, handle ) { + elem.removeEventListener( type, handle, false ); + } : + function( elem, type, handle ) { + elem.detachEvent( "on" + type, handle ); + }; + +jQuery.Event = function( src ) { + // Allow instantiation without the 'new' keyword + if ( !this.preventDefault ) { + return new jQuery.Event( src ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + // Event type + } else { + this.type = src; + } + + // timeStamp is buggy for some events on Firefox(#3843) + // So we won't rely on the native value + this.timeStamp = now(); + + // Mark it as fixed + this[ expando ] = true; +}; + +function returnFalse() { + return false; +} +function returnTrue() { + return true; +} + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// http://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + preventDefault: function() { + this.isDefaultPrevented = returnTrue; + + var e = this.originalEvent; + if ( !e ) { + return; + } + + // if preventDefault exists run it on the original event + if ( e.preventDefault ) { + e.preventDefault(); + } + // otherwise set the returnValue property of the original event to false (IE) + e.returnValue = false; + }, + stopPropagation: function() { + this.isPropagationStopped = returnTrue; + + var e = this.originalEvent; + if ( !e ) { + return; + } + // if stopPropagation exists run it on the original event + if ( e.stopPropagation ) { + e.stopPropagation(); + } + // otherwise set the cancelBubble property of the original event to true (IE) + e.cancelBubble = true; + }, + stopImmediatePropagation: function() { + this.isImmediatePropagationStopped = returnTrue; + this.stopPropagation(); + }, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse +}; + +// Checks if an event happened on an element within another element +// Used 
in jQuery.event.special.mouseenter and mouseleave handlers +var withinElement = function( event ) { + // Check if mouse(over|out) are still within the same parent element + var parent = event.relatedTarget; + + // Firefox sometimes assigns relatedTarget a XUL element + // which we cannot access the parentNode property of + try { + // Traverse up the tree + while ( parent && parent !== this ) { + parent = parent.parentNode; + } + + if ( parent !== this ) { + // set the correct event type + event.type = event.data; + + // handle event if we actually just moused on to a non sub-element + jQuery.event.handle.apply( this, arguments ); + } + + // assuming we've left the element since we most likely mousedover a xul element + } catch(e) { } +}, + +// In case of event delegation, we only need to rename the event.type, +// liveHandler will take care of the rest. +delegate = function( event ) { + event.type = event.data; + jQuery.event.handle.apply( this, arguments ); +}; + +// Create mouseenter and mouseleave events +jQuery.each({ + mouseenter: "mouseover", + mouseleave: "mouseout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + setup: function( data ) { + jQuery.event.add( this, fix, data && data.selector ? delegate : withinElement, orig ); + }, + teardown: function( data ) { + jQuery.event.remove( this, fix, data && data.selector ? 
delegate : withinElement ); + } + }; +}); + +// submit delegation +if ( !jQuery.support.submitBubbles ) { + + jQuery.event.special.submit = { + setup: function( data, namespaces ) { + if ( this.nodeName.toLowerCase() !== "form" ) { + jQuery.event.add(this, "click.specialSubmit", function( e ) { + var elem = e.target, type = elem.type; + + if ( (type === "submit" || type === "image") && jQuery( elem ).closest("form").length ) { + return trigger( "submit", this, arguments ); + } + }); + + jQuery.event.add(this, "keypress.specialSubmit", function( e ) { + var elem = e.target, type = elem.type; + + if ( (type === "text" || type === "password") && jQuery( elem ).closest("form").length && e.keyCode === 13 ) { + return trigger( "submit", this, arguments ); + } + }); + + } else { + return false; + } + }, + + teardown: function( namespaces ) { + jQuery.event.remove( this, ".specialSubmit" ); + } + }; + +} + +// change delegation, happens here so we have bind. +if ( !jQuery.support.changeBubbles ) { + + var formElems = /textarea|input|select/i, + + changeFilters, + + getVal = function( elem ) { + var type = elem.type, val = elem.value; + + if ( type === "radio" || type === "checkbox" ) { + val = elem.checked; + + } else if ( type === "select-multiple" ) { + val = elem.selectedIndex > -1 ? 
+ jQuery.map( elem.options, function( elem ) { + return elem.selected; + }).join("-") : + ""; + + } else if ( elem.nodeName.toLowerCase() === "select" ) { + val = elem.selectedIndex; + } + + return val; + }, + + testChange = function testChange( e ) { + var elem = e.target, data, val; + + if ( !formElems.test( elem.nodeName ) || elem.readOnly ) { + return; + } + + data = jQuery.data( elem, "_change_data" ); + val = getVal(elem); + + // the current data will be also retrieved by beforeactivate + if ( e.type !== "focusout" || elem.type !== "radio" ) { + jQuery.data( elem, "_change_data", val ); + } + + if ( data === undefined || val === data ) { + return; + } + + if ( data != null || val ) { + e.type = "change"; + return jQuery.event.trigger( e, arguments[1], elem ); + } + }; + + jQuery.event.special.change = { + filters: { + focusout: testChange, + + click: function( e ) { + var elem = e.target, type = elem.type; + + if ( type === "radio" || type === "checkbox" || elem.nodeName.toLowerCase() === "select" ) { + return testChange.call( this, e ); + } + }, + + // Change has to be called before submit + // Keydown will be called before keypress, which is used in submit-event delegation + keydown: function( e ) { + var elem = e.target, type = elem.type; + + if ( (e.keyCode === 13 && elem.nodeName.toLowerCase() !== "textarea") || + (e.keyCode === 32 && (type === "checkbox" || type === "radio")) || + type === "select-multiple" ) { + return testChange.call( this, e ); + } + }, + + // Beforeactivate happens also before the previous element is blurred + // with this event you can't trigger a change event, but you can store + // information/focus[in] is not needed anymore + beforeactivate: function( e ) { + var elem = e.target; + jQuery.data( elem, "_change_data", getVal(elem) ); + } + }, + + setup: function( data, namespaces ) { + if ( this.type === "file" ) { + return false; + } + + for ( var type in changeFilters ) { + jQuery.event.add( this, type + ".specialChange", 
changeFilters[type] ); + } + + return formElems.test( this.nodeName ); + }, + + teardown: function( namespaces ) { + jQuery.event.remove( this, ".specialChange" ); + + return formElems.test( this.nodeName ); + } + }; + + changeFilters = jQuery.event.special.change.filters; +} + +function trigger( type, elem, args ) { + args[0].type = type; + return jQuery.event.handle.apply( elem, args ); +} + +// Create "bubbling" focus and blur events +if ( document.addEventListener ) { + jQuery.each({ focus: "focusin", blur: "focusout" }, function( orig, fix ) { + jQuery.event.special[ fix ] = { + setup: function() { + this.addEventListener( orig, handler, true ); + }, + teardown: function() { + this.removeEventListener( orig, handler, true ); + } + }; + + function handler( e ) { + e = jQuery.event.fix( e ); + e.type = fix; + return jQuery.event.handle.call( this, e ); + } + }); +} + +jQuery.each(["bind", "one"], function( i, name ) { + jQuery.fn[ name ] = function( type, data, fn ) { + // Handle object literals + if ( typeof type === "object" ) { + for ( var key in type ) { + this[ name ](key, data, type[key], fn); + } + return this; + } + + if ( jQuery.isFunction( data ) ) { + fn = data; + data = undefined; + } + + var handler = name === "one" ? 
jQuery.proxy( fn, function( event ) { + jQuery( this ).unbind( event, handler ); + return fn.apply( this, arguments ); + }) : fn; + + if ( type === "unload" && name !== "one" ) { + this.one( type, data, fn ); + + } else { + for ( var i = 0, l = this.length; i < l; i++ ) { + jQuery.event.add( this[i], type, handler, data ); + } + } + + return this; + }; +}); + +jQuery.fn.extend({ + unbind: function( type, fn ) { + // Handle object literals + if ( typeof type === "object" && !type.preventDefault ) { + for ( var key in type ) { + this.unbind(key, type[key]); + } + + } else { + for ( var i = 0, l = this.length; i < l; i++ ) { + jQuery.event.remove( this[i], type, fn ); + } + } + + return this; + }, + + delegate: function( selector, types, data, fn ) { + return this.live( types, data, fn, selector ); + }, + + undelegate: function( selector, types, fn ) { + if ( arguments.length === 0 ) { + return this.unbind( "live" ); + + } else { + return this.die( types, null, fn, selector ); + } + }, + + trigger: function( type, data ) { + return this.each(function() { + jQuery.event.trigger( type, data, this ); + }); + }, + + triggerHandler: function( type, data ) { + if ( this[0] ) { + var event = jQuery.Event( type ); + event.preventDefault(); + event.stopPropagation(); + jQuery.event.trigger( event, data, this[0] ); + return event.result; + } + }, + + toggle: function( fn ) { + // Save reference to arguments for access in closure + var args = arguments, i = 1; + + // link all the functions, so any of them can unbind this click handler + while ( i < args.length ) { + jQuery.proxy( fn, args[ i++ ] ); + } + + return this.click( jQuery.proxy( fn, function( event ) { + // Figure out which function to execute + var lastToggle = ( jQuery.data( this, "lastToggle" + fn.guid ) || 0 ) % i; + jQuery.data( this, "lastToggle" + fn.guid, lastToggle + 1 ); + + // Make sure that clicks stop + event.preventDefault(); + + // and execute the function + return args[ lastToggle ].apply( this, 
arguments ) || false; + })); + }, + + hover: function( fnOver, fnOut ) { + return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); + } +}); + +var liveMap = { + focus: "focusin", + blur: "focusout", + mouseenter: "mouseover", + mouseleave: "mouseout" +}; + +jQuery.each(["live", "die"], function( i, name ) { + jQuery.fn[ name ] = function( types, data, fn, origSelector /* Internal Use Only */ ) { + var type, i = 0, match, namespaces, preType, + selector = origSelector || this.selector, + context = origSelector ? this : jQuery( this.context ); + + if ( jQuery.isFunction( data ) ) { + fn = data; + data = undefined; + } + + types = (types || "").split(" "); + + while ( (type = types[ i++ ]) != null ) { + match = rnamespaces.exec( type ); + namespaces = ""; + + if ( match ) { + namespaces = match[0]; + type = type.replace( rnamespaces, "" ); + } + + if ( type === "hover" ) { + types.push( "mouseenter" + namespaces, "mouseleave" + namespaces ); + continue; + } + + preType = type; + + if ( type === "focus" || type === "blur" ) { + types.push( liveMap[ type ] + namespaces ); + type = type + namespaces; + + } else { + type = (liveMap[ type ] || type) + namespaces; + } + + if ( name === "live" ) { + // bind live handler + context.each(function(){ + jQuery.event.add( this, liveConvert( type, selector ), + { data: data, selector: selector, handler: fn, origType: type, origHandler: fn, preType: preType } ); + }); + + } else { + // unbind live handler + context.unbind( liveConvert( type, selector ), fn ); + } + } + + return this; + } +}); + +function liveHandler( event ) { + var stop, elems = [], selectors = [], args = arguments, + related, match, handleObj, elem, j, i, l, data, + events = jQuery.data( this, "events" ); + + // Make sure we avoid non-left-click bubbling in Firefox (#3861) + if ( event.liveFired === this || !events || !events.live || event.button && event.type === "click" ) { + return; + } + + event.liveFired = this; + + var live = events.live.slice(0); + 
+ for ( j = 0; j < live.length; j++ ) { + handleObj = live[j]; + + if ( handleObj.origType.replace( rnamespaces, "" ) === event.type ) { + selectors.push( handleObj.selector ); + + } else { + live.splice( j--, 1 ); + } + } + + match = jQuery( event.target ).closest( selectors, event.currentTarget ); + + for ( i = 0, l = match.length; i < l; i++ ) { + for ( j = 0; j < live.length; j++ ) { + handleObj = live[j]; + + if ( match[i].selector === handleObj.selector ) { + elem = match[i].elem; + related = null; + + // Those two events require additional checking + if ( handleObj.preType === "mouseenter" || handleObj.preType === "mouseleave" ) { + related = jQuery( event.relatedTarget ).closest( handleObj.selector )[0]; + } + + if ( !related || related !== elem ) { + elems.push({ elem: elem, handleObj: handleObj }); + } + } + } + } + + for ( i = 0, l = elems.length; i < l; i++ ) { + match = elems[i]; + event.currentTarget = match.elem; + event.data = match.handleObj.data; + event.handleObj = match.handleObj; + + if ( match.handleObj.origHandler.apply( match.elem, args ) === false ) { + stop = false; + break; + } + } + + return stop; +} + +function liveConvert( type, selector ) { + return "live." + (type && type !== "*" ? type + "." : "") + selector.replace(/\./g, "`").replace(/ /g, "&"); +} + +jQuery.each( ("blur focus focusin focusout load resize scroll unload click dblclick " + + "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + + "change select submit keydown keypress keyup error").split(" "), function( i, name ) { + + // Handle event binding + jQuery.fn[ name ] = function( fn ) { + return fn ? 
this.bind( name, fn ) : this.trigger( name ); + }; + + if ( jQuery.attrFn ) { + jQuery.attrFn[ name ] = true; + } +}); + +// Prevent memory leaks in IE +// Window isn't included so as not to unbind existing unload events +// More info: +// - http://isaacschlueter.com/2006/10/msie-memory-leaks/ +if ( window.attachEvent && !window.addEventListener ) { + window.attachEvent("onunload", function() { + for ( var id in jQuery.cache ) { + if ( jQuery.cache[ id ].handle ) { + // Try/Catch is to handle iframes being unloaded, see #4280 + try { + jQuery.event.remove( jQuery.cache[ id ].handle.elem ); + } catch(e) {} + } + } + }); +} +/*! + * Sizzle CSS Selector Engine - v1.0 + * Copyright 2009, The Dojo Foundation + * Released under the MIT, BSD, and GPL Licenses. + * More information: http://sizzlejs.com/ + */ +(function(){ + +var chunker = /((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^[\]]*\]|['"][^'"]*['"]|[^[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g, + done = 0, + toString = Object.prototype.toString, + hasDuplicate = false, + baseHasDuplicate = true; + +// Here we check if the JavaScript engine is using some sort of +// optimization where it does not always call our comparision +// function. If that is the case, discard the hasDuplicate value. +// Thus far that includes Google Chrome. 
+[0, 0].sort(function(){ + baseHasDuplicate = false; + return 0; +}); + +var Sizzle = function(selector, context, results, seed) { + results = results || []; + var origContext = context = context || document; + + if ( context.nodeType !== 1 && context.nodeType !== 9 ) { + return []; + } + + if ( !selector || typeof selector !== "string" ) { + return results; + } + + var parts = [], m, set, checkSet, extra, prune = true, contextXML = isXML(context), + soFar = selector; + + // Reset the position of the chunker regexp (start from head) + while ( (chunker.exec(""), m = chunker.exec(soFar)) !== null ) { + soFar = m[3]; + + parts.push( m[1] ); + + if ( m[2] ) { + extra = m[3]; + break; + } + } + + if ( parts.length > 1 && origPOS.exec( selector ) ) { + if ( parts.length === 2 && Expr.relative[ parts[0] ] ) { + set = posProcess( parts[0] + parts[1], context ); + } else { + set = Expr.relative[ parts[0] ] ? + [ context ] : + Sizzle( parts.shift(), context ); + + while ( parts.length ) { + selector = parts.shift(); + + if ( Expr.relative[ selector ] ) { + selector += parts.shift(); + } + + set = posProcess( selector, set ); + } + } + } else { + // Take a shortcut and set the context if the root selector is an ID + // (but not if it'll be faster if the inner selector is an ID) + if ( !seed && parts.length > 1 && context.nodeType === 9 && !contextXML && + Expr.match.ID.test(parts[0]) && !Expr.match.ID.test(parts[parts.length - 1]) ) { + var ret = Sizzle.find( parts.shift(), context, contextXML ); + context = ret.expr ? Sizzle.filter( ret.expr, ret.set )[0] : ret.set[0]; + } + + if ( context ) { + var ret = seed ? + { expr: parts.pop(), set: makeArray(seed) } : + Sizzle.find( parts.pop(), parts.length === 1 && (parts[0] === "~" || parts[0] === "+") && context.parentNode ? context.parentNode : context, contextXML ); + set = ret.expr ? 
Sizzle.filter( ret.expr, ret.set ) : ret.set; + + if ( parts.length > 0 ) { + checkSet = makeArray(set); + } else { + prune = false; + } + + while ( parts.length ) { + var cur = parts.pop(), pop = cur; + + if ( !Expr.relative[ cur ] ) { + cur = ""; + } else { + pop = parts.pop(); + } + + if ( pop == null ) { + pop = context; + } + + Expr.relative[ cur ]( checkSet, pop, contextXML ); + } + } else { + checkSet = parts = []; + } + } + + if ( !checkSet ) { + checkSet = set; + } + + if ( !checkSet ) { + Sizzle.error( cur || selector ); + } + + if ( toString.call(checkSet) === "[object Array]" ) { + if ( !prune ) { + results.push.apply( results, checkSet ); + } else if ( context && context.nodeType === 1 ) { + for ( var i = 0; checkSet[i] != null; i++ ) { + if ( checkSet[i] && (checkSet[i] === true || checkSet[i].nodeType === 1 && contains(context, checkSet[i])) ) { + results.push( set[i] ); + } + } + } else { + for ( var i = 0; checkSet[i] != null; i++ ) { + if ( checkSet[i] && checkSet[i].nodeType === 1 ) { + results.push( set[i] ); + } + } + } + } else { + makeArray( checkSet, results ); + } + + if ( extra ) { + Sizzle( extra, origContext, results, seed ); + Sizzle.uniqueSort( results ); + } + + return results; +}; + +Sizzle.uniqueSort = function(results){ + if ( sortOrder ) { + hasDuplicate = baseHasDuplicate; + results.sort(sortOrder); + + if ( hasDuplicate ) { + for ( var i = 1; i < results.length; i++ ) { + if ( results[i] === results[i-1] ) { + results.splice(i--, 1); + } + } + } + } + + return results; +}; + +Sizzle.matches = function(expr, set){ + return Sizzle(expr, null, null, set); +}; + +Sizzle.find = function(expr, context, isXML){ + var set, match; + + if ( !expr ) { + return []; + } + + for ( var i = 0, l = Expr.order.length; i < l; i++ ) { + var type = Expr.order[i], match; + + if ( (match = Expr.leftMatch[ type ].exec( expr )) ) { + var left = match[1]; + match.splice(1,1); + + if ( left.substr( left.length - 1 ) !== "\\" ) { + match[1] = (match[1] || 
"").replace(/\\/g, ""); + set = Expr.find[ type ]( match, context, isXML ); + if ( set != null ) { + expr = expr.replace( Expr.match[ type ], "" ); + break; + } + } + } + } + + if ( !set ) { + set = context.getElementsByTagName("*"); + } + + return {set: set, expr: expr}; +}; + +Sizzle.filter = function(expr, set, inplace, not){ + var old = expr, result = [], curLoop = set, match, anyFound, + isXMLFilter = set && set[0] && isXML(set[0]); + + while ( expr && set.length ) { + for ( var type in Expr.filter ) { + if ( (match = Expr.leftMatch[ type ].exec( expr )) != null && match[2] ) { + var filter = Expr.filter[ type ], found, item, left = match[1]; + anyFound = false; + + match.splice(1,1); + + if ( left.substr( left.length - 1 ) === "\\" ) { + continue; + } + + if ( curLoop === result ) { + result = []; + } + + if ( Expr.preFilter[ type ] ) { + match = Expr.preFilter[ type ]( match, curLoop, inplace, result, not, isXMLFilter ); + + if ( !match ) { + anyFound = found = true; + } else if ( match === true ) { + continue; + } + } + + if ( match ) { + for ( var i = 0; (item = curLoop[i]) != null; i++ ) { + if ( item ) { + found = filter( item, match, i, curLoop ); + var pass = not ^ !!found; + + if ( inplace && found != null ) { + if ( pass ) { + anyFound = true; + } else { + curLoop[i] = false; + } + } else if ( pass ) { + result.push( item ); + anyFound = true; + } + } + } + } + + if ( found !== undefined ) { + if ( !inplace ) { + curLoop = result; + } + + expr = expr.replace( Expr.match[ type ], "" ); + + if ( !anyFound ) { + return []; + } + + break; + } + } + } + + // Improper expression + if ( expr === old ) { + if ( anyFound == null ) { + Sizzle.error( expr ); + } else { + break; + } + } + + old = expr; + } + + return curLoop; +}; + +Sizzle.error = function( msg ) { + throw "Syntax error, unrecognized expression: " + msg; +}; + +var Expr = Sizzle.selectors = { + order: [ "ID", "NAME", "TAG" ], + match: { + ID: /#((?:[\w\u00c0-\uFFFF-]|\\.)+)/, + CLASS: 
/\.((?:[\w\u00c0-\uFFFF-]|\\.)+)/, + NAME: /\[name=['"]*((?:[\w\u00c0-\uFFFF-]|\\.)+)['"]*\]/, + ATTR: /\[\s*((?:[\w\u00c0-\uFFFF-]|\\.)+)\s*(?:(\S?=)\s*(['"]*)(.*?)\3|)\s*\]/, + TAG: /^((?:[\w\u00c0-\uFFFF\*-]|\\.)+)/, + CHILD: /:(only|nth|last|first)-child(?:\((even|odd|[\dn+-]*)\))?/, + POS: /:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^-]|$)/, + PSEUDO: /:((?:[\w\u00c0-\uFFFF-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/ + }, + leftMatch: {}, + attrMap: { + "class": "className", + "for": "htmlFor" + }, + attrHandle: { + href: function(elem){ + return elem.getAttribute("href"); + } + }, + relative: { + "+": function(checkSet, part){ + var isPartStr = typeof part === "string", + isTag = isPartStr && !/\W/.test(part), + isPartStrNotTag = isPartStr && !isTag; + + if ( isTag ) { + part = part.toLowerCase(); + } + + for ( var i = 0, l = checkSet.length, elem; i < l; i++ ) { + if ( (elem = checkSet[i]) ) { + while ( (elem = elem.previousSibling) && elem.nodeType !== 1 ) {} + + checkSet[i] = isPartStrNotTag || elem && elem.nodeName.toLowerCase() === part ? + elem || false : + elem === part; + } + } + + if ( isPartStrNotTag ) { + Sizzle.filter( part, checkSet, true ); + } + }, + ">": function(checkSet, part){ + var isPartStr = typeof part === "string"; + + if ( isPartStr && !/\W/.test(part) ) { + part = part.toLowerCase(); + + for ( var i = 0, l = checkSet.length; i < l; i++ ) { + var elem = checkSet[i]; + if ( elem ) { + var parent = elem.parentNode; + checkSet[i] = parent.nodeName.toLowerCase() === part ? parent : false; + } + } + } else { + for ( var i = 0, l = checkSet.length; i < l; i++ ) { + var elem = checkSet[i]; + if ( elem ) { + checkSet[i] = isPartStr ? 
+ elem.parentNode : + elem.parentNode === part; + } + } + + if ( isPartStr ) { + Sizzle.filter( part, checkSet, true ); + } + } + }, + "": function(checkSet, part, isXML){ + var doneName = done++, checkFn = dirCheck; + + if ( typeof part === "string" && !/\W/.test(part) ) { + var nodeCheck = part = part.toLowerCase(); + checkFn = dirNodeCheck; + } + + checkFn("parentNode", part, doneName, checkSet, nodeCheck, isXML); + }, + "~": function(checkSet, part, isXML){ + var doneName = done++, checkFn = dirCheck; + + if ( typeof part === "string" && !/\W/.test(part) ) { + var nodeCheck = part = part.toLowerCase(); + checkFn = dirNodeCheck; + } + + checkFn("previousSibling", part, doneName, checkSet, nodeCheck, isXML); + } + }, + find: { + ID: function(match, context, isXML){ + if ( typeof context.getElementById !== "undefined" && !isXML ) { + var m = context.getElementById(match[1]); + return m ? [m] : []; + } + }, + NAME: function(match, context){ + if ( typeof context.getElementsByName !== "undefined" ) { + var ret = [], results = context.getElementsByName(match[1]); + + for ( var i = 0, l = results.length; i < l; i++ ) { + if ( results[i].getAttribute("name") === match[1] ) { + ret.push( results[i] ); + } + } + + return ret.length === 0 ? 
null : ret; + } + }, + TAG: function(match, context){ + return context.getElementsByTagName(match[1]); + } + }, + preFilter: { + CLASS: function(match, curLoop, inplace, result, not, isXML){ + match = " " + match[1].replace(/\\/g, "") + " "; + + if ( isXML ) { + return match; + } + + for ( var i = 0, elem; (elem = curLoop[i]) != null; i++ ) { + if ( elem ) { + if ( not ^ (elem.className && (" " + elem.className + " ").replace(/[\t\n]/g, " ").indexOf(match) >= 0) ) { + if ( !inplace ) { + result.push( elem ); + } + } else if ( inplace ) { + curLoop[i] = false; + } + } + } + + return false; + }, + ID: function(match){ + return match[1].replace(/\\/g, ""); + }, + TAG: function(match, curLoop){ + return match[1].toLowerCase(); + }, + CHILD: function(match){ + if ( match[1] === "nth" ) { + // parse equations like 'even', 'odd', '5', '2n', '3n+2', '4n-1', '-n+6' + var test = /(-?)(\d*)n((?:\+|-)?\d*)/.exec( + match[2] === "even" && "2n" || match[2] === "odd" && "2n+1" || + !/\D/.test( match[2] ) && "0n+" + match[2] || match[2]); + + // calculate the numbers (first)n+(last) including if they are negative + match[2] = (test[1] + (test[2] || 1)) - 0; + match[3] = test[3] - 0; + } + + // TODO: Move to normal caching system + match[0] = done++; + + return match; + }, + ATTR: function(match, curLoop, inplace, result, not, isXML){ + var name = match[1].replace(/\\/g, ""); + + if ( !isXML && Expr.attrMap[name] ) { + match[1] = Expr.attrMap[name]; + } + + if ( match[2] === "~=" ) { + match[4] = " " + match[4] + " "; + } + + return match; + }, + PSEUDO: function(match, curLoop, inplace, result, not){ + if ( match[1] === "not" ) { + // If we're dealing with a complex expression, or a simple one + if ( ( chunker.exec(match[3]) || "" ).length > 1 || /^\w/.test(match[3]) ) { + match[3] = Sizzle(match[3], null, null, curLoop); + } else { + var ret = Sizzle.filter(match[3], curLoop, inplace, true ^ not); + if ( !inplace ) { + result.push.apply( result, ret ); + } + return false; + } + } 
else if ( Expr.match.POS.test( match[0] ) || Expr.match.CHILD.test( match[0] ) ) { + return true; + } + + return match; + }, + POS: function(match){ + match.unshift( true ); + return match; + } + }, + filters: { + enabled: function(elem){ + return elem.disabled === false && elem.type !== "hidden"; + }, + disabled: function(elem){ + return elem.disabled === true; + }, + checked: function(elem){ + return elem.checked === true; + }, + selected: function(elem){ + // Accessing this property makes selected-by-default + // options in Safari work properly + elem.parentNode.selectedIndex; + return elem.selected === true; + }, + parent: function(elem){ + return !!elem.firstChild; + }, + empty: function(elem){ + return !elem.firstChild; + }, + has: function(elem, i, match){ + return !!Sizzle( match[3], elem ).length; + }, + header: function(elem){ + return /h\d/i.test( elem.nodeName ); + }, + text: function(elem){ + return "text" === elem.type; + }, + radio: function(elem){ + return "radio" === elem.type; + }, + checkbox: function(elem){ + return "checkbox" === elem.type; + }, + file: function(elem){ + return "file" === elem.type; + }, + password: function(elem){ + return "password" === elem.type; + }, + submit: function(elem){ + return "submit" === elem.type; + }, + image: function(elem){ + return "image" === elem.type; + }, + reset: function(elem){ + return "reset" === elem.type; + }, + button: function(elem){ + return "button" === elem.type || elem.nodeName.toLowerCase() === "button"; + }, + input: function(elem){ + return /input|select|textarea|button/i.test(elem.nodeName); + } + }, + setFilters: { + first: function(elem, i){ + return i === 0; + }, + last: function(elem, i, match, array){ + return i === array.length - 1; + }, + even: function(elem, i){ + return i % 2 === 0; + }, + odd: function(elem, i){ + return i % 2 === 1; + }, + lt: function(elem, i, match){ + return i < match[3] - 0; + }, + gt: function(elem, i, match){ + return i > match[3] - 0; + }, + nth: 
function(elem, i, match){ + return match[3] - 0 === i; + }, + eq: function(elem, i, match){ + return match[3] - 0 === i; + } + }, + filter: { + PSEUDO: function(elem, match, i, array){ + var name = match[1], filter = Expr.filters[ name ]; + + if ( filter ) { + return filter( elem, i, match, array ); + } else if ( name === "contains" ) { + return (elem.textContent || elem.innerText || getText([ elem ]) || "").indexOf(match[3]) >= 0; + } else if ( name === "not" ) { + var not = match[3]; + + for ( var i = 0, l = not.length; i < l; i++ ) { + if ( not[i] === elem ) { + return false; + } + } + + return true; + } else { + Sizzle.error( "Syntax error, unrecognized expression: " + name ); + } + }, + CHILD: function(elem, match){ + var type = match[1], node = elem; + switch (type) { + case 'only': + case 'first': + while ( (node = node.previousSibling) ) { + if ( node.nodeType === 1 ) { + return false; + } + } + if ( type === "first" ) { + return true; + } + node = elem; + case 'last': + while ( (node = node.nextSibling) ) { + if ( node.nodeType === 1 ) { + return false; + } + } + return true; + case 'nth': + var first = match[2], last = match[3]; + + if ( first === 1 && last === 0 ) { + return true; + } + + var doneName = match[0], + parent = elem.parentNode; + + if ( parent && (parent.sizcache !== doneName || !elem.nodeIndex) ) { + var count = 0; + for ( node = parent.firstChild; node; node = node.nextSibling ) { + if ( node.nodeType === 1 ) { + node.nodeIndex = ++count; + } + } + parent.sizcache = doneName; + } + + var diff = elem.nodeIndex - last; + if ( first === 0 ) { + return diff === 0; + } else { + return ( diff % first === 0 && diff / first >= 0 ); + } + } + }, + ID: function(elem, match){ + return elem.nodeType === 1 && elem.getAttribute("id") === match; + }, + TAG: function(elem, match){ + return (match === "*" && elem.nodeType === 1) || elem.nodeName.toLowerCase() === match; + }, + CLASS: function(elem, match){ + return (" " + (elem.className || 
elem.getAttribute("class")) + " ") + .indexOf( match ) > -1; + }, + ATTR: function(elem, match){ + var name = match[1], + result = Expr.attrHandle[ name ] ? + Expr.attrHandle[ name ]( elem ) : + elem[ name ] != null ? + elem[ name ] : + elem.getAttribute( name ), + value = result + "", + type = match[2], + check = match[4]; + + return result == null ? + type === "!=" : + type === "=" ? + value === check : + type === "*=" ? + value.indexOf(check) >= 0 : + type === "~=" ? + (" " + value + " ").indexOf(check) >= 0 : + !check ? + value && result !== false : + type === "!=" ? + value !== check : + type === "^=" ? + value.indexOf(check) === 0 : + type === "$=" ? + value.substr(value.length - check.length) === check : + type === "|=" ? + value === check || value.substr(0, check.length + 1) === check + "-" : + false; + }, + POS: function(elem, match, i, array){ + var name = match[2], filter = Expr.setFilters[ name ]; + + if ( filter ) { + return filter( elem, i, match, array ); + } + } + } +}; + +var origPOS = Expr.match.POS; + +for ( var type in Expr.match ) { + Expr.match[ type ] = new RegExp( Expr.match[ type ].source + /(?![^\[]*\])(?![^\(]*\))/.source ); + Expr.leftMatch[ type ] = new RegExp( /(^(?:.|\r|\n)*?)/.source + Expr.match[ type ].source.replace(/\\(\d+)/g, function(all, num){ + return "\\" + (num - 0 + 1); + })); +} + +var makeArray = function(array, results) { + array = Array.prototype.slice.call( array, 0 ); + + if ( results ) { + results.push.apply( results, array ); + return results; + } + + return array; +}; + +// Perform a simple check to determine if the browser is capable of +// converting a NodeList to an array using builtin methods. 
+// Also verifies that the returned array holds DOM nodes +// (which is not the case in the Blackberry browser) +try { + Array.prototype.slice.call( document.documentElement.childNodes, 0 )[0].nodeType; + +// Provide a fallback method if it does not work +} catch(e){ + makeArray = function(array, results) { + var ret = results || []; + + if ( toString.call(array) === "[object Array]" ) { + Array.prototype.push.apply( ret, array ); + } else { + if ( typeof array.length === "number" ) { + for ( var i = 0, l = array.length; i < l; i++ ) { + ret.push( array[i] ); + } + } else { + for ( var i = 0; array[i]; i++ ) { + ret.push( array[i] ); + } + } + } + + return ret; + }; +} + +var sortOrder; + +if ( document.documentElement.compareDocumentPosition ) { + sortOrder = function( a, b ) { + if ( !a.compareDocumentPosition || !b.compareDocumentPosition ) { + if ( a == b ) { + hasDuplicate = true; + } + return a.compareDocumentPosition ? -1 : 1; + } + + var ret = a.compareDocumentPosition(b) & 4 ? -1 : a === b ? 0 : 1; + if ( ret === 0 ) { + hasDuplicate = true; + } + return ret; + }; +} else if ( "sourceIndex" in document.documentElement ) { + sortOrder = function( a, b ) { + if ( !a.sourceIndex || !b.sourceIndex ) { + if ( a == b ) { + hasDuplicate = true; + } + return a.sourceIndex ? -1 : 1; + } + + var ret = a.sourceIndex - b.sourceIndex; + if ( ret === 0 ) { + hasDuplicate = true; + } + return ret; + }; +} else if ( document.createRange ) { + sortOrder = function( a, b ) { + if ( !a.ownerDocument || !b.ownerDocument ) { + if ( a == b ) { + hasDuplicate = true; + } + return a.ownerDocument ? 
-1 : 1; + } + + var aRange = a.ownerDocument.createRange(), bRange = b.ownerDocument.createRange(); + aRange.setStart(a, 0); + aRange.setEnd(a, 0); + bRange.setStart(b, 0); + bRange.setEnd(b, 0); + var ret = aRange.compareBoundaryPoints(Range.START_TO_END, bRange); + if ( ret === 0 ) { + hasDuplicate = true; + } + return ret; + }; +} + +// Utility function for retreiving the text value of an array of DOM nodes +function getText( elems ) { + var ret = "", elem; + + for ( var i = 0; elems[i]; i++ ) { + elem = elems[i]; + + // Get the text from text nodes and CDATA nodes + if ( elem.nodeType === 3 || elem.nodeType === 4 ) { + ret += elem.nodeValue; + + // Traverse everything else, except comment nodes + } else if ( elem.nodeType !== 8 ) { + ret += getText( elem.childNodes ); + } + } + + return ret; +} + +// Check to see if the browser returns elements by name when +// querying by getElementById (and provide a workaround) +(function(){ + // We're going to inject a fake input element with a specified name + var form = document.createElement("div"), + id = "script" + (new Date).getTime(); + form.innerHTML = ""; + + // Inject it into the root element, check its status, and remove it quickly + var root = document.documentElement; + root.insertBefore( form, root.firstChild ); + + // The workaround has to do additional checks after a getElementById + // Which slows things down for other browsers (hence the branching) + if ( document.getElementById( id ) ) { + Expr.find.ID = function(match, context, isXML){ + if ( typeof context.getElementById !== "undefined" && !isXML ) { + var m = context.getElementById(match[1]); + return m ? m.id === match[1] || typeof m.getAttributeNode !== "undefined" && m.getAttributeNode("id").nodeValue === match[1] ? 
[m] : undefined : []; + } + }; + + Expr.filter.ID = function(elem, match){ + var node = typeof elem.getAttributeNode !== "undefined" && elem.getAttributeNode("id"); + return elem.nodeType === 1 && node && node.nodeValue === match; + }; + } + + root.removeChild( form ); + root = form = null; // release memory in IE +})(); + +(function(){ + // Check to see if the browser returns only elements + // when doing getElementsByTagName("*") + + // Create a fake element + var div = document.createElement("div"); + div.appendChild( document.createComment("") ); + + // Make sure no comments are found + if ( div.getElementsByTagName("*").length > 0 ) { + Expr.find.TAG = function(match, context){ + var results = context.getElementsByTagName(match[1]); + + // Filter out possible comments + if ( match[1] === "*" ) { + var tmp = []; + + for ( var i = 0; results[i]; i++ ) { + if ( results[i].nodeType === 1 ) { + tmp.push( results[i] ); + } + } + + results = tmp; + } + + return results; + }; + } + + // Check to see if an attribute returns normalized href attributes + div.innerHTML = ""; + if ( div.firstChild && typeof div.firstChild.getAttribute !== "undefined" && + div.firstChild.getAttribute("href") !== "#" ) { + Expr.attrHandle.href = function(elem){ + return elem.getAttribute("href", 2); + }; + } + + div = null; // release memory in IE +})(); + +if ( document.querySelectorAll ) { + (function(){ + var oldSizzle = Sizzle, div = document.createElement("div"); + div.innerHTML = "

      "; + + // Safari can't handle uppercase or unicode characters when + // in quirks mode. + if ( div.querySelectorAll && div.querySelectorAll(".TEST").length === 0 ) { + return; + } + + Sizzle = function(query, context, extra, seed){ + context = context || document; + + // Only use querySelectorAll on non-XML documents + // (ID selectors don't work in non-HTML documents) + if ( !seed && context.nodeType === 9 && !isXML(context) ) { + try { + return makeArray( context.querySelectorAll(query), extra ); + } catch(e){} + } + + return oldSizzle(query, context, extra, seed); + }; + + for ( var prop in oldSizzle ) { + Sizzle[ prop ] = oldSizzle[ prop ]; + } + + div = null; // release memory in IE + })(); +} + +(function(){ + var div = document.createElement("div"); + + div.innerHTML = "
      "; + + // Opera can't find a second classname (in 9.6) + // Also, make sure that getElementsByClassName actually exists + if ( !div.getElementsByClassName || div.getElementsByClassName("e").length === 0 ) { + return; + } + + // Safari caches class attributes, doesn't catch changes (in 3.2) + div.lastChild.className = "e"; + + if ( div.getElementsByClassName("e").length === 1 ) { + return; + } + + Expr.order.splice(1, 0, "CLASS"); + Expr.find.CLASS = function(match, context, isXML) { + if ( typeof context.getElementsByClassName !== "undefined" && !isXML ) { + return context.getElementsByClassName(match[1]); + } + }; + + div = null; // release memory in IE +})(); + +function dirNodeCheck( dir, cur, doneName, checkSet, nodeCheck, isXML ) { + for ( var i = 0, l = checkSet.length; i < l; i++ ) { + var elem = checkSet[i]; + if ( elem ) { + elem = elem[dir]; + var match = false; + + while ( elem ) { + if ( elem.sizcache === doneName ) { + match = checkSet[elem.sizset]; + break; + } + + if ( elem.nodeType === 1 && !isXML ){ + elem.sizcache = doneName; + elem.sizset = i; + } + + if ( elem.nodeName.toLowerCase() === cur ) { + match = elem; + break; + } + + elem = elem[dir]; + } + + checkSet[i] = match; + } + } +} + +function dirCheck( dir, cur, doneName, checkSet, nodeCheck, isXML ) { + for ( var i = 0, l = checkSet.length; i < l; i++ ) { + var elem = checkSet[i]; + if ( elem ) { + elem = elem[dir]; + var match = false; + + while ( elem ) { + if ( elem.sizcache === doneName ) { + match = checkSet[elem.sizset]; + break; + } + + if ( elem.nodeType === 1 ) { + if ( !isXML ) { + elem.sizcache = doneName; + elem.sizset = i; + } + if ( typeof cur !== "string" ) { + if ( elem === cur ) { + match = true; + break; + } + + } else if ( Sizzle.filter( cur, [elem] ).length > 0 ) { + match = elem; + break; + } + } + + elem = elem[dir]; + } + + checkSet[i] = match; + } + } +} + +var contains = document.compareDocumentPosition ? 
function(a, b){ + return !!(a.compareDocumentPosition(b) & 16); +} : function(a, b){ + return a !== b && (a.contains ? a.contains(b) : true); +}; + +var isXML = function(elem){ + // documentElement is verified for cases where it doesn't yet exist + // (such as loading iframes in IE - #4833) + var documentElement = (elem ? elem.ownerDocument || elem : 0).documentElement; + return documentElement ? documentElement.nodeName !== "HTML" : false; +}; + +var posProcess = function(selector, context){ + var tmpSet = [], later = "", match, + root = context.nodeType ? [context] : context; + + // Position selectors must be done after the filter + // And so must :not(positional) so we move all PSEUDOs to the end + while ( (match = Expr.match.PSEUDO.exec( selector )) ) { + later += match[0]; + selector = selector.replace( Expr.match.PSEUDO, "" ); + } + + selector = Expr.relative[selector] ? selector + "*" : selector; + + for ( var i = 0, l = root.length; i < l; i++ ) { + Sizzle( selector, root[i], tmpSet ); + } + + return Sizzle.filter( later, tmpSet ); +}; + +// EXPOSE +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; +jQuery.expr[":"] = jQuery.expr.filters; +jQuery.unique = Sizzle.uniqueSort; +jQuery.text = getText; +jQuery.isXMLDoc = isXML; +jQuery.contains = contains; + +return; + +window.Sizzle = Sizzle; + +})(); +var runtil = /Until$/, + rparentsprev = /^(?:parents|prevUntil|prevAll)/, + // Note: This RegExp should be improved, or likely pulled from Sizzle + rmultiselector = /,/, + slice = Array.prototype.slice; + +// Implement the identical functionality for filter and not +var winnow = function( elements, qualifier, keep ) { + if ( jQuery.isFunction( qualifier ) ) { + return jQuery.grep(elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) === keep; + }); + + } else if ( qualifier.nodeType ) { + return jQuery.grep(elements, function( elem, i ) { + return (elem === qualifier) === keep; + }); + + } else if ( typeof qualifier === "string" ) { + 
var filtered = jQuery.grep(elements, function( elem ) { + return elem.nodeType === 1; + }); + + if ( isSimple.test( qualifier ) ) { + return jQuery.filter(qualifier, filtered, !keep); + } else { + qualifier = jQuery.filter( qualifier, filtered ); + } + } + + return jQuery.grep(elements, function( elem, i ) { + return (jQuery.inArray( elem, qualifier ) >= 0) === keep; + }); +}; + +jQuery.fn.extend({ + find: function( selector ) { + var ret = this.pushStack( "", "find", selector ), length = 0; + + for ( var i = 0, l = this.length; i < l; i++ ) { + length = ret.length; + jQuery.find( selector, this[i], ret ); + + if ( i > 0 ) { + // Make sure that the results are unique + for ( var n = length; n < ret.length; n++ ) { + for ( var r = 0; r < length; r++ ) { + if ( ret[r] === ret[n] ) { + ret.splice(n--, 1); + break; + } + } + } + } + } + + return ret; + }, + + has: function( target ) { + var targets = jQuery( target ); + return this.filter(function() { + for ( var i = 0, l = targets.length; i < l; i++ ) { + if ( jQuery.contains( this, targets[i] ) ) { + return true; + } + } + }); + }, + + not: function( selector ) { + return this.pushStack( winnow(this, selector, false), "not", selector); + }, + + filter: function( selector ) { + return this.pushStack( winnow(this, selector, true), "filter", selector ); + }, + + is: function( selector ) { + return !!selector && jQuery.filter( selector, this ).length > 0; + }, + + closest: function( selectors, context ) { + if ( jQuery.isArray( selectors ) ) { + var ret = [], cur = this[0], match, matches = {}, selector; + + if ( cur && selectors.length ) { + for ( var i = 0, l = selectors.length; i < l; i++ ) { + selector = selectors[i]; + + if ( !matches[selector] ) { + matches[selector] = jQuery.expr.match.POS.test( selector ) ? 
+ jQuery( selector, context || this.context ) : + selector; + } + } + + while ( cur && cur.ownerDocument && cur !== context ) { + for ( selector in matches ) { + match = matches[selector]; + + if ( match.jquery ? match.index(cur) > -1 : jQuery(cur).is(match) ) { + ret.push({ selector: selector, elem: cur }); + delete matches[selector]; + } + } + cur = cur.parentNode; + } + } + + return ret; + } + + var pos = jQuery.expr.match.POS.test( selectors ) ? + jQuery( selectors, context || this.context ) : null; + + return this.map(function( i, cur ) { + while ( cur && cur.ownerDocument && cur !== context ) { + if ( pos ? pos.index(cur) > -1 : jQuery(cur).is(selectors) ) { + return cur; + } + cur = cur.parentNode; + } + return null; + }); + }, + + // Determine the position of an element within + // the matched set of elements + index: function( elem ) { + if ( !elem || typeof elem === "string" ) { + return jQuery.inArray( this[0], + // If it receives a string, the selector is used + // If it receives nothing, the siblings are used + elem ? jQuery( elem ) : this.parent().children() ); + } + // Locate the position of the desired element + return jQuery.inArray( + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[0] : elem, this ); + }, + + add: function( selector, context ) { + var set = typeof selector === "string" ? + jQuery( selector, context || this.context ) : + jQuery.makeArray( selector ), + all = jQuery.merge( this.get(), set ); + + return this.pushStack( isDisconnected( set[0] ) || isDisconnected( all[0] ) ? + all : + jQuery.unique( all ) ); + }, + + andSelf: function() { + return this.add( this.prevObject ); + } +}); + +// A painfully simple check to see if an element is disconnected +// from a document (should be improved, where feasible). 
+function isDisconnected( node ) { + return !node || !node.parentNode || node.parentNode.nodeType === 11; +} + +jQuery.each({ + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? parent : null; + }, + parents: function( elem ) { + return jQuery.dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, i, until ) { + return jQuery.dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return jQuery.nth( elem, 2, "nextSibling" ); + }, + prev: function( elem ) { + return jQuery.nth( elem, 2, "previousSibling" ); + }, + nextAll: function( elem ) { + return jQuery.dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return jQuery.dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, i, until ) { + return jQuery.dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, i, until ) { + return jQuery.dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return jQuery.sibling( elem.parentNode.firstChild, elem ); + }, + children: function( elem ) { + return jQuery.sibling( elem.firstChild ); + }, + contents: function( elem ) { + return jQuery.nodeName( elem, "iframe" ) ? + elem.contentDocument || elem.contentWindow.document : + jQuery.makeArray( elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var ret = jQuery.map( this, fn, until ); + + if ( !runtil.test( name ) ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + ret = jQuery.filter( selector, ret ); + } + + ret = this.length > 1 ? 
jQuery.unique( ret ) : ret; + + if ( (this.length > 1 || rmultiselector.test( selector )) && rparentsprev.test( name ) ) { + ret = ret.reverse(); + } + + return this.pushStack( ret, name, slice.call(arguments).join(",") ); + }; +}); + +jQuery.extend({ + filter: function( expr, elems, not ) { + if ( not ) { + expr = ":not(" + expr + ")"; + } + + return jQuery.find.matches(expr, elems); + }, + + dir: function( elem, dir, until ) { + var matched = [], cur = elem[dir]; + while ( cur && cur.nodeType !== 9 && (until === undefined || cur.nodeType !== 1 || !jQuery( cur ).is( until )) ) { + if ( cur.nodeType === 1 ) { + matched.push( cur ); + } + cur = cur[dir]; + } + return matched; + }, + + nth: function( cur, result, dir, elem ) { + result = result || 1; + var num = 0; + + for ( ; cur; cur = cur[dir] ) { + if ( cur.nodeType === 1 && ++num === result ) { + break; + } + } + + return cur; + }, + + sibling: function( n, elem ) { + var r = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + r.push( n ); + } + } + + return r; + } +}); +var rinlinejQuery = / jQuery\d+="(?:\d+|null)"/g, + rleadingWhitespace = /^\s+/, + rxhtmlTag = /(<([\w:]+)[^>]*?)\/>/g, + rselfClosing = /^(?:area|br|col|embed|hr|img|input|link|meta|param)$/i, + rtagName = /<([\w:]+)/, + rtbody = /"; + }, + wrapMap = { + option: [ 1, "" ], + legend: [ 1, "
      ", "
      " ], + thead: [ 1, "", "
      " ], + tr: [ 2, "", "
      " ], + td: [ 3, "", "
      " ], + col: [ 2, "", "
      " ], + area: [ 1, "", "" ], + _default: [ 0, "", "" ] + }; + +wrapMap.optgroup = wrapMap.option; +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + +// IE can't serialize and