GRAB (GRasping Actions with Bodies) is a dataset of whole-body grasps, containing full 3D shape and pose sequences of 10 subjects interacting with 51 everyday objects of varying shape and size.
@inproceedings{GRAB:2020,
  title     = {{GRAB}: A Dataset of Whole-Body Human Grasping of Objects},
  author    = {Taheri, Omid and Ghorbani, Nima and Black, Michael J. and Tzionas, Dimitrios},
  booktitle = {European Conference on Computer Vision ({ECCV})},
  year      = {2020},
  url       = {https://grab.is.tue.mpg.de},
}