Positive-unlabeled learning (PU learning) is an important case of binary classification in which the training data contain only positive and unlabeled samples. The current state-of-the-art approach to PU learning is the cost-sensitive approach, which casts PU learning as a cost-sensitive classification problem and relies on an unbiased risk estimator to correct the bias introduced by the unlabeled samples. However, this approach requires knowledge of the class prior and is susceptible to potential label noise. In this paper, we propose a novel PU learning approach dubbed PULNS, which is equipped with an effective negative sample selector optimized by reinforcement learning. PULNS employs the selector as an agent responsible for selecting negative samples from the unlabeled data. The selected, likely negative samples are used to improve the classifier, while the classifier's performance in turn serves as the reward for improving the selector via the REINFORCE algorithm. By alternating updates of the selector and the classifier, the performance of both is improved. Extensive experimental studies on 7 real-world application benchmarks demonstrate that PULNS consistently outperforms the current state-of-the-art methods in PU learning, and our experimental results also confirm the effectiveness of the negative sample selector underlying PULNS.
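The abstract describes PULNS as an alternating loop: a selector picks likely negatives from the unlabeled pool, the classifier is retrained on the positives plus those selected negatives, and the classifier's held-out performance is fed back as a REINFORCE reward for the selector. For context, the unbiased-risk (cost-sensitive) baseline it contrasts with estimates the classification risk as R_pu(g) = pi_p * R_p^+(g) - pi_p * R_p^-(g) + R_u^-(g), which is where knowledge of the class prior pi_p is required; PULNS sidesteps that requirement by selecting explicit negatives. Below is a minimal sketch of the alternating loop under stated assumptions: PyTorch, a small two-layer MLP for both networks, validation F1 as the reward, and no reward baseline. These choices, names, and hyperparameters are illustrative, not the authors' exact configuration.

# Minimal sketch of the selector/classifier alternation described in the abstract.
# Architectures, hyperparameters, and the validation-F1 reward are assumptions.
import torch
import torch.nn as nn
from sklearn.metrics import f1_score

def mlp(in_dim):
    # Small two-layer network used for both the selector and the classifier.
    return nn.Sequential(nn.Linear(in_dim, 64), nn.ReLU(), nn.Linear(64, 1))

def train_pulns_sketch(x_pos, x_unl, x_val, y_val, rounds=20, clf_steps=50):
    """x_pos, x_unl, x_val: float tensors of shape (n, d); y_val: 0/1 labels."""
    d = x_pos.shape[1]
    selector = mlp(d)      # policy network: P(select x as a negative)
    classifier = mlp(d)    # binary classifier trained on P + selected N
    opt_sel = torch.optim.Adam(selector.parameters(), lr=1e-3)
    opt_clf = torch.optim.Adam(classifier.parameters(), lr=1e-3)
    bce = nn.BCEWithLogitsLoss()

    for _ in range(rounds):
        # 1) Selector proposes "likely negative" samples from the unlabeled set.
        with torch.no_grad():
            probs = torch.sigmoid(selector(x_unl)).squeeze(1)
        actions = torch.bernoulli(probs)          # 1 = treat as negative
        x_neg = x_unl[actions.bool()]
        if x_neg.shape[0] == 0:
            continue

        # 2) Update the classifier on positives plus the selected negatives.
        for _ in range(clf_steps):
            loss_p = bce(classifier(x_pos).squeeze(1), torch.ones(x_pos.shape[0]))
            loss_n = bce(classifier(x_neg).squeeze(1), torch.zeros(x_neg.shape[0]))
            opt_clf.zero_grad()
            (loss_p + loss_n).backward()
            opt_clf.step()

        # 3) Held-out classifier performance serves as the scalar reward.
        with torch.no_grad():
            pred = (torch.sigmoid(classifier(x_val)).squeeze(1) > 0.5).long()
        reward = f1_score(y_val.numpy(), pred.numpy())

        # 4) REINFORCE update: raise the log-probability of the sampled
        #    selection actions in proportion to the reward (no baseline here).
        logits = selector(x_unl).squeeze(1)
        log_prob = torch.distributions.Bernoulli(logits=logits).log_prob(actions)
        sel_loss = -(reward * log_prob).mean()
        opt_sel.zero_grad()
        sel_loss.backward()
        opt_sel.step()

    return classifier, selector

In practice, a moving-average reward baseline and mini-batching over the unlabeled pool would be natural refinements; the sketch omits them for brevity.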
%0 Conference Paper
%1 luo20212020pulns
%A Luo, Chuan
%A Zhao, Pu
%A Chen, Chen
%A Qiao, Bo
%A Du, Chao
%A Zhang, Hongyu
%A Wu, Wei
%A Cai, Shaowei
%A He, Bing
%A Rajmohan, Saravanakumar
%A Lin, Qingwei
%D 2021
%K l3s learning leibnizailab
%N 10
%P 8784-8792
%T PULNS: Positive-Unlabeled Learning with Effective Negative Sample Selector
%U https://ojs.aaai.org/index.php/AAAI/article/view/17064
%V 35
%X Positive-unlabeled learning (PU learning) is an important case of binary classification in which the training data contain only positive and unlabeled samples. The current state-of-the-art approach to PU learning is the cost-sensitive approach, which casts PU learning as a cost-sensitive classification problem and relies on an unbiased risk estimator to correct the bias introduced by the unlabeled samples. However, this approach requires knowledge of the class prior and is susceptible to potential label noise. In this paper, we propose a novel PU learning approach dubbed PULNS, which is equipped with an effective negative sample selector optimized by reinforcement learning. PULNS employs the selector as an agent responsible for selecting negative samples from the unlabeled data. The selected, likely negative samples are used to improve the classifier, while the classifier's performance in turn serves as the reward for improving the selector via the REINFORCE algorithm. By alternating updates of the selector and the classifier, the performance of both is improved. Extensive experimental studies on 7 real-world application benchmarks demonstrate that PULNS consistently outperforms the current state-of-the-art methods in PU learning, and our experimental results also confirm the effectiveness of the negative sample selector underlying PULNS.
@inproceedings{luo20212020pulns,
abstract = {Positive-unlabeled learning (PU learning) is an important case of binary classification in which the training data contain only positive and unlabeled samples. The current state-of-the-art approach to PU learning is the cost-sensitive approach, which casts PU learning as a cost-sensitive classification problem and relies on an unbiased risk estimator to correct the bias introduced by the unlabeled samples. However, this approach requires knowledge of the class prior and is susceptible to potential label noise. In this paper, we propose a novel PU learning approach dubbed PULNS, which is equipped with an effective negative sample selector optimized by reinforcement learning. PULNS employs the selector as an agent responsible for selecting negative samples from the unlabeled data. The selected, likely negative samples are used to improve the classifier, while the classifier's performance in turn serves as the reward for improving the selector via the REINFORCE algorithm. By alternating updates of the selector and the classifier, the performance of both is improved. Extensive experimental studies on 7 real-world application benchmarks demonstrate that PULNS consistently outperforms the current state-of-the-art methods in PU learning, and our experimental results also confirm the effectiveness of the negative sample selector underlying PULNS.},
added-at = {2021-07-19T14:59:42.000+0200},
author = {Luo, Chuan and Zhao, Pu and Chen, Chen and Qiao, Bo and Du, Chao and Zhang, Hongyu and Wu, Wei and Cai, Shaowei and He, Bing and Rajmohan, Saravanakumar and Lin, Qingwei},
biburl = {https://www.bibsonomy.org/bibtex/28cb30e481f18c5f6e39563ee7b3a2229/sophieschr},
description = {PULNS: Positive-Unlabeled Learning with Effective Negative Sample Selector | Proceedings of the AAAI Conference on Artificial Intelligence},
eventtitle = {Proceedings of the AAAI Conference on Artificial Intelligence},
id = {17064},
interhash = {1a0787905b5cd227ea3b91a480032483},
intrahash = {8cb30e481f18c5f6e39563ee7b3a2229},
issn = {2374-3468},
keywords = {l3s learning leibnizailab},
number = 10,
pages = {8784--8792},
source = {Proceedings of the AAAI Conference on Artificial Intelligence},
timestamp = {2022-10-13T16:17:39.000+0200},
title = {PULNS: Positive-Unlabeled Learning with Effective Negative Sample Selector},
type = {Publication},
uri = {https://ojs.aaai.org/index.php/AAAI},
url = {https://ojs.aaai.org/index.php/AAAI/article/view/17064},
venue = {Proceedings of the AAAI Conference on Artificial Intelligence},
volume = 35,
year = 2021
}