@comment{BibTeX entry (exported from Zotero; cleaned during review).}

@misc{wangMUCMachineUnlearning,
  title = {{MUC}: Machine Unlearning for Contrastive Learning with Black-box Evaluation},
  author = {Wang, Yihan and Lu, Yiwei and Zhang, Guojun and Boenisch, Franziska and Dziedzic, Adam and Yu, Yaoliang and Gao, Xiao-Shan},
  year = {2024},
  eprint = {2406.03603},
  eprinttype = {arXiv},
  eprintclass = {cs.LG},
  abstract = {Machine unlearning offers effective solutions for revoking the influence of specific training data on pre-trained model parameters. While existing approaches address unlearning for classification and generative models, they overlook an important category of machine learning models: contrastive learning (CL) methods. This paper addresses this gap by introducing the Machine Unlearning for Contrastive Learning (MUC) framework and adapting existing methods. We identify limitations in current approaches, noting that several methods perform inadequately as unlearners and that existing evaluation tools insufficiently validate unlearning effects in contrastive learning. To address these issues, we propose Alignment Calibration (AC), a novel method that explicitly considers contrastive learning properties and optimizes towards new auditing metrics for easy verification of unlearning. Through empirical comparisons with baseline methods on SimCLR, MoCo, and CLIP, we demonstrate that AC: (1) achieves state-of-the-art performance, approximating exact unlearning (retraining); (2) enables data owners to clearly visualize unlearning effects through black-box evaluation. The code is available at https://github.com/EhanW/Alignment-Calibration.},
  langid = {english},
  internal-note = {Review: was @article with no journal/year (auto-export); retyped as arXiv preprint. Verify eprint id 2406.03603 and year against arXiv before publishing.},
  file = {/Users/yihan/Zotero/storage/NISFAXX5/Wang et al. - MUC Machine Unlearning for Contrastive Learning with Black-box Evaluation.pdf}
}