This repository contains PyTorch evaluation code, training code, and pretrained models for the following papers (a short sketch of loading a pretrained model follows the citation list):
DeiT (Data-Efficient Image Transformers), ICML 2021 [bib]
@InProceedings{pmlr-v139-touvron21a,
  title = {Training data-efficient image transformers & distillation through attention},
  author = {Touvron, Hugo and Cord, Matthieu and Douze, Matthijs and Massa, Francisco and Sablayrolles, Alexandre and Jegou, Herve},
  booktitle = {International Conference on Machine Learning},
  pages = {10347--10357},
  year = {2021},
  volume = {139},
  month = {July}
}
CaiT (Going deeper with Image Transformers), ICCV 2021 [bib]
@InProceedings{Touvron_2021_ICCV,
  author = {Touvron, Hugo and Cord, Matthieu and Sablayrolles, Alexandre and Synnaeve, Gabriel and J\'egou, Herv\'e},
  title = {Going Deeper With Image Transformers},
  booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
  month = {October},
  year = {2021},
  pages = {32-42}
}
ResMLP (ResMLP: Feedforward networks for image classification with data-efficient training), TPAMI 2022 [bib]
@article{touvron2021resmlp,
  title={ResMLP: Feedforward networks for image classification with data-efficient training},
  author={Hugo Touvron and Piotr Bojanowski and Mathilde Caron and Matthieu Cord and Alaaeldin El-Nouby and Edouard Grave and Gautier Izacard and Armand Joulin and Gabriel Synnaeve and Jakob Verbeek and Herv\'e J\'egou},
  journal={arXiv preprint arXiv:2105.03404},
  year={2021},
}
PatchConvnet (Augmenting Convolutional networks with attention-based aggregation) [bib]
@article{touvron2021patchconvnet,
  title={Augmenting Convolutional networks with attention-based aggregation},
  author={Hugo Touvron and Matthieu Cord and Alaaeldin El-Nouby and Piotr Bojanowski and Armand Joulin and Gabriel Synnaeve and Jakob Verbeek and Herve Jegou},
  journal={arXiv preprint arXiv:2112.13692},
  year={2021},
}
3Things (Three things everyone should know about Vision Transformers), ECCV 2022 [bib]
@article{Touvron2022ThreeTE,
  title={Three things everyone should know about Vision Transformers},
  author={Hugo Touvron and Matthieu Cord and Alaaeldin El-Nouby and Jakob Verbeek and Herve Jegou},
  journal={arXiv preprint arXiv:2203.09795},
  year={2022},
}
DeiT III (DeiT III: Revenge of the ViT), ECCV 2022 [bib]
@article{Touvron2022DeiTIR,
  title={DeiT III: Revenge of the ViT},
  author={Hugo Touvron and Matthieu Cord and Herve Jegou},
  journal={arXiv preprint arXiv:2204.07118},
  year={2022},
}
Cosub (Co-training 2L Submodels for Visual Recognition), CVPR 2023 [bib]
@article{Touvron2022Cotraining2S,
  title={Co-training 2L Submodels for Visual Recognition},
  author={Hugo Touvron and Matthieu Cord and Maxime Oquab and Piotr Bojanowski and Jakob Verbeek and Herv\'e J\'egou},
  journal={arXiv preprint arXiv:2212.04884},
  year={2022},
}
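The pretrained models listed above can typically be pulled straight from this repository through torch.hub. The snippet below is a minimal sketch, assuming the repository exposes its models via a `hubconf.py`; the model name `deit_base_patch16_224` is used for illustration and the set of registered names may differ.

```python
import torch

# Download and instantiate a pretrained DeiT model from this repository
# (assumes a hubconf.py entry point; the model name is an assumption).
model = torch.hub.load('facebookresearch/deit:main', 'deit_base_patch16_224', pretrained=True)
model.eval()

# Dummy forward pass: DeiT models expect 224x224 RGB inputs,
# normalized with the usual ImageNet statistics.
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # e.g. torch.Size([1, 1000]) for ImageNet-1k classes
```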
This repository is released under the Apache 2.0 license as found in the LICENSE file.
We actively welcome your pull requests! Please see CONTRIBUTING.md and CODE_OF_CONDUCT.md for more info.