Citation

BibTeX format

@article{Docherty:2026:10.1002/aisy.202501094,
  author  = {Docherty, R. and Vamvakeros, A. and Cooper, S. J.},
  title   = {Upsampling {DINOv2} Features for Unsupervised Vision Tasks and Weakly Supervised Materials Segmentation},
  journal = {Advanced Intelligent Systems},
  year    = {2026},
  doi     = {10.1002/aisy.202501094},
  url     = {https://doi.org/10.1002/aisy.202501094},
}

RIS format (EndNote, RefMan)

TY  - JOUR
AB  - The features of self-supervised vision transformers (ViTs) contain strong semantic and positional information relevant to downstream tasks like object localization and segmentation. Recent works combine these features with traditional methods like clustering, graph partitioning or region correlations to achieve impressive baselines without finetuning or training additional networks. Upsampled features are leveraged from ViT networks (e.g., DINOv2) in two workflows: in a clustering-based approach for object localization and segmentation and paired with standard classifiers in weakly supervised materials segmentation. Both show strong performance on benchmarks, especially in weakly supervised segmentation where the ViT features capture complex relationships inaccessible to classical approaches. It is expected that the flexibility and generalizability of these features will both speed up and strengthen materials characterization, from segmentation to property-prediction.
AU  - Docherty, R.
AU  - Vamvakeros, A.
AU  - Cooper, S. J.
DO  - 10.1002/aisy.202501094
PY  - 2026///
TI  - Upsampling DINOv2 Features for Unsupervised Vision Tasks and Weakly Supervised Materials Segmentation
T2  - Advanced Intelligent Systems
UR  - https://doi.org/10.1002/aisy.202501094
ER -