The MIME dataset is a large-scale demonstration dataset, comprising 8,260 human demonstrations and 8,260 corresponding robot demonstrations over 20 different manipulation tasks.
Each item in the dataset consists of an RGB-D video of a human demonstration and RGB-D videos of the corresponding robot demonstration captured from three different camera views, along with joint-angle data from the robot.
The dataset is intended for use in visual imitation learning, trajectory prediction, and multi-task robot learning.
@article{DBLP:journals/corr/abs-1810-07121,
  author        = {Sharma, Pratyusha and Mohan, Lekha and Pinto, Lerrel and Gupta, Abhinav},
  title         = {Multiple Interactions Made Easy {(MIME)}: Large Scale Demonstrations Data for Imitation},
  journal       = {CoRR},
  volume        = {abs/1810.07121},
  year          = {2018},
  eprint        = {1810.07121},
  archiveprefix = {arXiv},
  url           = {https://arxiv.org/abs/1810.07121},
}