author Nicolas Chauvat <>
Thu, 05 Aug 2010 19:31:27 +0200
changeset 218 45510a9bb345
parent 217 fe739ada925c
child 220 72715685d5e3
permissions -rw-r--r--
[views] use has_creator when available. add cwuri facet.

# -*- coding: utf-8 -*-

import sys
import re
from datetime import datetime

from lxml.html import fromstring, tostring
import feedparser
import rdflib

from cubes.datafeed.sobjects import DataFeedParser

# RDF vocabulary namespaces used when parsing SIOC blog post graphs.
# The URIs were stripped from this copy of the file; these are the
# standard, well-known namespace URIs for RDF syntax, SIOC and DC terms.
RDF = rdflib.Namespace('http://www.w3.org/1999/02/22-rdf-syntax-ns#')
SIOC = rdflib.Namespace('http://rdfs.org/sioc/ns#')
DCTERMS = rdflib.Namespace('http://purl.org/dc/terms/')

def get_subject(g, pred, obj):
    """Return the single subject of graph `g` matching (?, pred, obj).

    An AssertionError is raised when the match is not exactly unique.
    """
    matches = [subj for subj in g.subjects(pred, obj)]
    assert len(matches) == 1
    return matches[0]

def get_object(g, subj, pred):
    """Return the single object of graph `g` matching (subj, pred, ?).

    An AssertionError is raised when the match is not exactly unique.
    """
    matches = [obj for obj in g.objects(subj, pred)]
    assert len(matches) == 1
    return matches[0]

def parse_blogpost_sioc(url):
    """Yield one item dict per sioc:BlogPost found in the RDF data at `url`.

    Each item carries 'uri', 'title' and 'content' keys, matching what
    BlogPostParser.process expects.
    """
    g = rdflib.ConjunctiveGraph()
    # BUG FIX: the graph was never populated, so the loop below always
    # iterated over an empty graph and the generator yielded nothing.
    g.load(url)
    for post, type_, blogpost_ in g.triples((None, RDF.type, SIOC.BlogPost)):
        item = {'uri': unicode(post)}
        item['title'] = unicode(get_object(g, post, DCTERMS.title))
        item['content'] = unicode(get_object(g, post, SIOC.content))
        yield item

# Map feed-declared MIME types onto the content formats stored on entities;
# anything not listed falls back to u'text/plain' (see parse_blogpost_rss).
# NOTE(review): the literal was truncated in this copy; entries after the
# first are reconstructed -- confirm against upstream history.
format_map = {'application/xhtml+xml': u'text/html',
              'text/html': u'text/html',
              'text/plain': u'text/plain',
              }

# URL prefixes of known tracker ("web bug") images injected into feed
# content; is_img_spy drops any <img> whose src starts with one of these.
# NOTE(review): the literal was truncated in this copy; the entries below
# are reconstructed from typical feed trackers -- confirm against upstream.
IMG_SPIES = ['http://feeds.feedburner.com',
             'http://imp.constantcontact.com',
             'http://stats.wordpress.com',
             ]

def is_img_spy(node):
    """Return True when `node` is an <img> whose src matches a known tracker.

    `node` is an lxml element; see IMG_SPIES for the tracker prefixes.
    """
    if node.tag != 'img':
        return False
    src = node.get('src')
    # BUG FIX: an <img> with no src attribute made node.get return None
    # and the startswith call below raised AttributeError.
    if src is None:
        return False
    for url in IMG_SPIES:
        if src.startswith(url):
            return True
    return False

def is_tweetmeme_spy(node):
    """Return True when `node` is an anchor pointing at a tweetmeme button.

    NOTE(review): the URL prefix was stripped from this copy (leaving
    startswith('') which matched EVERY href); reconstructed as the
    tweetmeme API host of that era -- confirm against upstream history.
    """
    href = node.get('href')
    if href and href.startswith('http://api.tweetmeme.com'):
        return True
    return False

def remove_content_spies(content):
    root = fromstring(content)
    if is_img_spy(root):
        return u''
    for img in root.findall('.//img'):
        if is_img_spy(img):
        elif img.get('height') == '1' and img.get('width') == '1':
            print tostring(img), 'is probably a spy'
    for anchor in root.findall('.//a'):
        if is_tweetmeme_spy(anchor):
    return unicode(tostring(root))

def parse_blogpost_rss(url):
    """Yield one item dict per entry of the RSS/Atom feed at `url`.

    Each item carries uri/title/content/content_format/author/cwuri keys
    and, when the feed provides a date, creation_date.

    NOTE(review): several right-hand sides and else branches were stripped
    from this copy; they are reconstructed below -- confirm against
    upstream history.
    """
    data = feedparser.parse(url)
    feed = data.feed
    for entry in data.entries:
        item = {}
        if 'feedburner_origlink' in entry:
            # feedburner rewrites entry links; prefer the original url
            item['uri'] = entry.feedburner_origlink
        else:
            item['uri'] = entry.link
        item['title'] = entry.title
        if hasattr(entry, 'content'):
            content = entry.content[0].value
            mimetype = entry.content[0].type
        elif hasattr(entry, 'summary_detail'):
            content = entry.summary_detail.value
            mimetype = entry.summary_detail.type
        else:
            content = u''#XXX entry.description?
            mimetype = u'text/plain'
        if mimetype == u'text/html':
            content = remove_content_spies(content)
        item['content'] = content
        item['content_format'] = format_map.get(mimetype, u'text/plain')
        if hasattr(entry, 'date_parsed'):
            item['creation_date'] = datetime(*entry.date_parsed[:6])
        # best-effort author detection: entry author uri, then feed author
        # uri, then feed author name, then feed image link, else the feed
        # url itself
        if hasattr(entry, 'author_detail') and hasattr(entry.author_detail, 'href'):
            item['author'] = entry.author_detail.href
        elif hasattr(feed, 'author_detail') and hasattr(feed.author_detail, 'href'):
            item['author'] = feed.author_detail.href
        elif hasattr(feed, 'author'):
            item['author'] = feed.author
        elif hasattr(feed, 'image') and hasattr(feed.image, 'link'):
            item['author'] = feed.image.link
        else:
            item['author'] = url
        item['cwuri'] = feed.link
        yield item

def parse_microblogpost_rss(url):
    """Yield one item dict per entry of a microblog (twitter-style) feed.

    NOTE(review): several right-hand sides were stripped from this copy;
    entry.id / entry.title / feed.channel.link below are reconstructed
    from the surviving code and comments -- confirm against upstream.
    """
    feed = feedparser.parse(url)
    for entry in feed.entries:
        item = {}
        item['uri'] = entry.id
        # entry.description looks like "author: text"; keep the 140-char text
        item['content'] = entry.description.split(':',1)[1][:140]
        item['creation_date'] = datetime(*entry.date_parsed[:6])
        item['modification_date'] = datetime(*entry.date_parsed[:6])
        item['author'] = entry.title.split(':',1)[0] # true for twitter
        item['cwuri'] = feed.channel.link
        # the user page url ends with the screen name (true for twitter)
        screen_name = item['cwuri'].split('/')[-1]
        item['avatar'] = get_twitter_avatar(screen_name)
        yield item


# Module-level cache: avatar lookups hit the network, so do it at most once
# per screen name. BUG FIX: AVATAR_CACHE was referenced below but never
# defined anywhere in the module (NameError at first call).
AVATAR_CACHE = {}

def get_twitter_avatar(screen_name):
    """Return the profile image url for `screen_name`, caching results.

    NOTE(review): the API url was stripped from this copy; reconstructed as
    the twitter REST v1 endpoint of that era -- confirm against upstream.
    """
    if screen_name not in AVATAR_CACHE:
        from urllib2 import urlopen
        import simplejson
        data = urlopen('http://api.twitter.com/1/users/show.json?screen_name=%s'
                       % screen_name).read()
        user = simplejson.loads(data)
        AVATAR_CACHE[screen_name] = user['profile_image_url']
    return AVATAR_CACHE[screen_name]

class BlogPostParser(DataFeedParser):
    """Abstract datafeed parser turning feed items into blog entities.

    Concrete subclasses set `__regid__`, optionally `entity_type`, and a
    static `parse(url)` generator yielding item dicts with at least a
    'uri' key (see the parse_* functions above).
    """
    __abstract__ = True
    entity_type = 'BlogEntry'

    def process(self, url):
        """Parse `url` and create or update one entity per item."""
        for item in self.parse(url):
            author = item.pop('author', None)
            avatar = item.pop('avatar', None)
            euri = self.sget_entity('ExternalUri', uri=item.pop('uri'))
            if euri.same_as:
                # already imported: update the entity the uri points at
                post = self.update_blogpost(euri.same_as[0], item)
            else:
                # BUG FIX: the else was missing, so a fresh post was
                # unconditionally created even for known uris.
                post = self.create_blogpost(item, euri)
            if author:
                account = self.sget_entity('UserAccount', name=author)
                self.sget_relation(post.eid, 'has_creator', account.eid)
                if avatar:
                    auri = self.sget_entity('ExternalUri', uri=avatar)
                    self.sget_relation(account.eid, 'has_avatar', auri.eid)

    def create_blogpost(self, item, uri):
        """Create and return a new entity built from `item`.

        NOTE(review): `uri` is currently unused; upstream presumably linked
        the new post to it (same_as) so re-imports deduplicate -- confirm.
        """
        entity = self._cw.create_entity(self.entity_type, **item)
        return entity

    def update_blogpost(self, entity, item):
        """Update hook for an already-imported post; currently a no-op."""
        return entity

class BlogPostSiocParser(BlogPostParser):
    """Blog entry parser for SIOC RDF feeds."""
    __regid__ = 'blogpost-sioc'
    parse = staticmethod(parse_blogpost_sioc)

class BlogPostRSSParser(BlogPostParser):
    """Blog entry parser for RSS/Atom feeds."""
    __regid__ = 'blogpost-rss'
    parse = staticmethod(parse_blogpost_rss)

class MicroBlogPostRSSParser(BlogPostParser):
    """Microblog (twitter-style) entry parser for RSS feeds."""
    __regid__ = 'microblogpost-rss'
    entity_type = 'MicroBlogEntry'
    parse = staticmethod(parse_microblogpost_rss)

if __name__ == '__main__':
    import sys
    from pprint import pprint

    name = sys.argv[1]
    url = sys.argv[2]

    parser = globals()[name]