

{"id":68,"date":"2016-03-31T14:34:31","date_gmt":"2016-03-31T12:34:31","guid":{"rendered":"http:\/\/project.inria.fr\/everest\/?page_id=68"},"modified":"2018-07-26T13:20:08","modified_gmt":"2018-07-26T11:20:08","slug":"software","status":"publish","type":"page","link":"https:\/\/project.inria.fr\/everest\/software\/","title":{"rendered":"Software"},"content":{"rendered":"<div id=\"content\" class=\"clearfix hfeed\">\n<div id=\"content-main\" class=\"clearfix grid_16\">\n<div id=\"post-20\" class=\"clearfix post post-20 page type-page status-publish hentry nodate\">\n<div class=\"entry clearfix\">\n<div class=\"entry-content clearfix\">\n<p><strong>Charades-Ego<\/strong><\/p>\n<p class=\"p1\" align=\"justify\"><span class=\"s1\">This is the public implementation of the method and the accompanying dataset\u00a0<\/span><span class=\"s1\">described in the <a href=\"https:\/\/hal.inria.fr\/hal-01755547v1\">CVPR 2018 paper<\/a><\/span><span class=\"s1\">. Charades-Ego\u00a0<\/span><span class=\"s1\">is dataset composed of 7860 videos of daily indoors activities recorded from\u00a0<\/span><span class=\"s1\">both third and first person views. The dataset contains 68,536 temporal\u00a0<\/span><span class=\"s1\">annotations for 157 action classes. This dataset enables learning the link\u00a0<\/span><span class=\"s1\">between the two, actor and observer perspectives. Thereby, we address one of\u00a0<\/span><span class=\"s1\">the biggest bottlenecks facing egocentric vision research, providing a link\u00a0<\/span><span class=\"s1\">from first-person to the abundant third-person data on the web. We use this\u00a0<\/span><span class=\"s1\">data to learn a joint representation of first and third-person videos, with\u00a0<\/span><span class=\"s1\">only weak supervision, and show its effectiveness for transferring knowledge\u00a0<\/span><span class=\"s1\">from the third-person to the first-person domain. The implementation and the dataset are available on <a href=\"https:\/\/github.com\/gsig\/actor-observer\">GitHub<\/a>.<\/span><\/p>\n<\/div>\n<\/div>\n<\/div>\n<\/div>\n<\/div>\n<div id=\"footer\" class=\"clearfix\"><\/div>\n","protected":false},"excerpt":{"rendered":"<p>Charades-Ego This is the public implementation of the method and the accompanying dataset\u00a0described in the CVPR 2018 paper. Charades-Ego\u00a0is dataset composed of 7860 videos of daily indoors activities recorded from\u00a0both third and first person views. The dataset contains 68,536 temporal\u00a0annotations for 157 action classes. 
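As a minimal sketch of working with the temporal annotations, the Python snippet below parses them into per-segment records, assuming they follow the Charades-style CSV layout (one row per video, with an `actions` column of `<class> <start> <end>` triples separated by semicolons). The file name and column names here are assumptions for illustration; consult the annotation files distributed with the GitHub repository for the authoritative format.

```python
import csv
from collections import namedtuple

# One temporal annotation: an action class active in [start, end]
# seconds of a given video.
Segment = namedtuple("Segment", ["video_id", "action_class", "start", "end"])

def load_annotations(csv_path):
    """Yield one Segment per temporal annotation in a Charades-style CSV.

    Assumed columns (hypothetical, verify against the dataset files):
      id      -- video identifier
      actions -- "c092 11.90 21.20;c147 0.00 12.60" style triples
    """
    with open(csv_path, newline="") as f:
        for row in csv.DictReader(f):
            actions = row.get("actions", "")
            if not actions:
                continue  # some videos may carry no verified actions
            for triple in actions.split(";"):
                cls, start, end = triple.split()
                yield Segment(row["id"], cls, float(start), float(end))

if __name__ == "__main__":
    # File name is an assumption; substitute the actual annotation file.
    segments = list(load_annotations("CharadesEgo_v1_train.csv"))
    print(f"{len(segments)} temporal annotations across "
          f"{len({s.video_id for s in segments})} videos")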